1 // btree version threads2i sched_yield version
4 // author: karl malbrain, malbrain@cal.berkeley.edu
7 This work, including the source code, documentation
8 and related data, is placed into the public domain.
10 The original author is Karl Malbrain.
12 THIS SOFTWARE IS PROVIDED AS-IS WITHOUT WARRANTY
13 OF ANY KIND, NOT EVEN THE IMPLIED WARRANTY OF
14 MERCHANTABILITY. THE AUTHOR OF THIS SOFTWARE,
15 ASSUMES _NO_ RESPONSIBILITY FOR ANY CONSEQUENCE
16 RESULTING FROM THE USE, MODIFICATION, OR
17 REDISTRIBUTION OF THIS SOFTWARE.
20 // Please see the project home page for documentation
21 // code.google.com/p/high-concurrency-btree
//	NOTE(review): every line in this chunk carries its original source line
//	number as a leading token, and many intermediate lines are elided --
//	an extraction artifact; the numeric prefixes are not part of the C code.

//	large-file support and lean Windows headers
23 #define _FILE_OFFSET_BITS 64
24 #define _LARGEFILE64_SOURCE
40 #define WIN32_LEAN_AND_MEAN
//	shorthand integer types used throughout the file
53 typedef unsigned long long uid;
56 typedef unsigned long long off64_t;
57 typedef unsigned short ushort;
58 typedef unsigned int uint;
61 #define BT_latchtable 128 // number of latch manager slots
//	open-mode magic values: ASCII "ro" / "rw" packed into an int
63 #define BT_ro 0x6f72 // ro
64 #define BT_rw 0x7772 // rw
//	page-size bounds, expressed as powers of two
66 #define BT_maxbits 24 // maximum page size in bits
67 #define BT_minbits 9 // minimum page size in bits
68 #define BT_minpage (1 << BT_minbits) // minimum page size
69 #define BT_maxpage (1 << BT_maxbits) // maximum page size
72 There are five lock types for each node in three independent sets:
73 1. (set 1) AccessIntent: Sharable. Going to Read the node. Incompatible with NodeDelete.
74 2. (set 1) NodeDelete: Exclusive. About to release the node. Incompatible with AccessIntent.
75 3. (set 2) ReadLock: Sharable. Read the node. Incompatible with WriteLock.
76 4. (set 2) WriteLock: Exclusive. Modify the node. Incompatible with ReadLock and other WriteLocks.
77 5. (set 3) ParentModification: Exclusive. Change the node's parent keys. Incompatible with another ParentModification.
//	NOTE(review): the typedef/struct opener and closer lines for these
//	latch structures were elided by the extraction; only selected field
//	lines (with original line-number prefixes) remain.
88 // mode & definition for latch implementation
97 // exclusive is set for write access
98 // share is count of read accessors
99 // grant write lock when share == 0
//	BtSpinLatch word: one 16-bit value split into mutex/exclusive/pending
//	bits plus a 13-bit reader (share) count, per the comments above
102 volatile ushort mutex:1;
103 volatile ushort exclusive:1;
104 volatile ushort pending:1;
105 volatile ushort share:13;
108 // hash table entries
111 BtSpinLatch latch[1];
112 volatile ushort slot; // Latch table entry at head of chain
115 // latch manager table structure
//	BtLatchSet: three independent lock sets per page (read/write,
//	access-intent/delete, parent-modification) plus hash-chain bookkeeping
118 BtSpinLatch readwr[1]; // read/write page lock
119 BtSpinLatch access[1]; // Access Intent/Page delete
120 BtSpinLatch parent[1]; // adoption of foster children
121 BtSpinLatch busy[1]; // slot is being moved between chains
122 volatile ushort next; // next entry in hash table chain
123 volatile ushort prev; // prev entry in hash table chain
124 volatile ushort pin; // number of outstanding locks
125 volatile ushort hash; // hash slot entry is under
126 volatile uid page_no; // latch set page number
//	NOTE(review): struct opener/closer lines elided by the extraction;
//	the field lines below belong to BtSlot, BtKey and struct Page.
129 // Define the length of the page and key pointers
133 // Page key slot definition.
135 // If BT_maxbits is 15 or less, you can save 4 bytes
136 // for each key stored by making the first two uints
137 // into ushorts. You can also save 4 bytes by removing
138 // the tod field from the key.
140 // Keys are marked dead, but remain on the page until
141 // cleanup is called. The fence key (highest key) for
142 // the page is always present, even after cleanup.
//	BtSlot fields: in-page offset of the key text, dead bit, timestamp,
//	and the associated row-id
145 uint off:BT_maxbits; // page offset for key start
146 uint dead:1; // set for deleted key
147 uint tod; // time-stamp for key
148 unsigned char id[BtId]; // id associated with key
151 // The key structure occupies space at the upper end of
152 // each page. It's a length byte followed by the value
//	BtKey text bytes (length byte presumed to precede -- elided here)
157 unsigned char key[1];
160 // The first part of an index page.
161 // It is immediately followed
162 // by the BtSlot array of keys.
164 typedef struct Page {
165 BtLatchSet latch[1]; // Set of three latches
166 uint cnt; // count of keys in page
167 uint act; // count of active keys
168 uint min; // next key offset
169 unsigned char bits; // page size in bits
170 unsigned char lvl:6; // level of page
171 unsigned char kill:1; // page is being deleted
172 unsigned char dirty:1; // page has deleted keys
173 unsigned char right[BtId]; // page number to right
174 BtSlot table[0]; // array of key slots
//	NOTE(review): struct opener/closer lines elided by the extraction;
//	the field lines below belong to BtPool, BtLatchMgr, BtMgr and BtDb.
177 // The memory mapping pool table buffer manager entry
//	BtPool: one mmap'd segment of pages plus pin count and hash links
180 unsigned long long int lru; // number of times accessed
181 uid basepage; // mapped base page number
182 char *map; // mapped memory pointer
183 ushort slot; // slot index in this array
184 ushort pin; // mapped page pin counter
185 void *hashprev; // previous pool entry for the same hash idx
186 void *hashnext; // next pool entry for the same hash idx
192 // structure for latch manager on ALLOC_page
195 struct Page alloc[2]; // next & free page_nos in right ptr
196 BtSpinLatch lock[1]; // allocation area lite latch
197 ushort latchdeployed; // highest number of latch entries deployed
198 ushort nlatchpage; // number of latch pages at BT_latch
199 ushort latchtotal; // number of page latch entries
200 ushort latchhash; // number of latch hash table slots
201 ushort latchvictim; // next latch entry to examine
202 BtHashEntry table[0]; // the hash table
205 // The object structure for Btree access
//	BtMgr: shared buffer manager -- one per open file, shared by threads
208 uint page_size; // page size
209 uint page_bits; // page size in bits
210 uint seg_bits; // seg size in pages in bits
211 uint mode; // read-write mode
213 char *pooladvise; // bit maps for pool page advisements
218 ushort poolcnt; // highest page pool node in use
219 ushort poolmax; // highest page pool node allocated
220 ushort poolmask; // total number of pages in mmap segment - 1
221 ushort hashsize; // size of Hash Table for pool entries
222 volatile uint evicted; // last evicted hash table slot
223 ushort *hash; // pool index for hash entries
224 BtSpinLatch *latch; // latches for hash table slots
225 BtLatchMgr *latchmgr; // mapped latch page from allocation page
226 BtLatchSet *latchsets; // mapped latch set from latch pages
227 BtPool *pool; // memory pool page segments
229 HANDLE halloc; // allocation and latch table handle
//	BtDb: per-thread access handle over a shared BtMgr
234 BtMgr *mgr; // buffer manager for thread
235 BtPage cursor; // cached frame for start/next (never mapped)
236 BtPage frame; // spare frame for the page split (never mapped)
237 BtPage zero; // page frame for zeroes at end of file
238 BtPage page; // current page
239 uid page_no; // current page number
240 uid cursor_page; // current cursor page number
241 BtLatchSet *set; // current page latch set
242 BtPool *pool; // current page pool
243 unsigned char *mem; // frame, cursor, page memory buffer
244 int found; // last delete or insert was found
245 int err; // last error
//	Public API prototypes for the b-tree access method and its
//	buffer manager, plus the fixed page-number layout of a new file.
259 extern void bt_close (BtDb *bt);
260 extern BtDb *bt_open (BtMgr *mgr);
261 extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod);
262 extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl);
263 extern uid bt_findkey (BtDb *bt, unsigned char *key, uint len);
264 extern uint bt_startkey (BtDb *bt, unsigned char *key, uint len);
265 extern uint bt_nextkey (BtDb *bt, uint slot);
268 extern BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolsize, uint segsize, uint hashsize);
269 void bt_mgrclose (BtMgr *mgr);
271 // Helper functions to return slot values
273 extern BtKey bt_key (BtDb *bt, uint slot);
274 extern uid bt_uid (BtDb *bt, uint slot);
275 extern uint bt_tod (BtDb *bt, uint slot);
277 // BTree page number constants
278 #define ALLOC_page 0 // allocation & lock manager hash table
279 #define ROOT_page 1 // root of the btree
280 #define LEAF_page 2 // first page of leaves
281 #define LATCH_page 3 // pages for lock manager
283 // Number of levels to create in a new BTree
287 // The page is allocated from low and hi ends.
288 // The key offsets and row-id's are allocated
289 // from the bottom, while the text of the key
290 // is allocated from the top. When the two
291 // areas meet, the page is split into two.
293 // A key consists of a length byte, two bytes of
294 // index number (0 - 65534), and up to 253 bytes
295 // of key value. Duplicate keys are discarded.
296 // Associated with each key is a 48 bit row-id.
298 // The b-tree root is always located at page 1.
299 // The first leaf page of level zero is always
300 // located on page 2.
302 // The b-tree pages are linked with next
303 // pointers to facilitate enumerators,
304 // and provide for concurrency.
306 // When the root page fills, it is split in two and
307 // the tree height is raised by a new root at page
308 // one with two keys.
310 // Deleted keys are marked with a dead bit until
311 // page cleanup. The fence key for a node is always
312 // present, even after deletion and cleanup.
314 // Groups of pages called segments from the btree are optionally
315 // cached with a memory mapped pool. A hash table is used to keep
316 // track of the cached segments. This behaviour is controlled
317 // by the cache block size parameter to bt_open.
319 // To achieve maximum concurrency one page is locked at a time
320 // as the tree is traversed to find leaf key in question. The right
321 // page numbers are used in cases where the page is being split,
324 // Page 0 is dedicated to lock for new page extensions,
325 // and chains empty pages together for reuse.
327 // The ParentModification lock on a node is obtained to prevent resplitting
328 // or deleting a node before its fence is posted into its upper level.
330 // Empty pages are chained together through the ALLOC page and reused.
//	Access macros to address slot and key values from the page.
//	Slots are numbered from 1, hence the slot-1 bias in slotptr;
//	keyptr follows the slot's byte offset to the key text on the page.

#define slotptr(page, slot) (page->table + slot-1)
#define keyptr(page, slot) ((BtKey)((unsigned char*)(page) + slotptr(page, slot)->off))
337 void bt_putid(unsigned char *dest, uid id)
342 dest[i] = (unsigned char)id, id >>= 8;
345 uid bt_getid(unsigned char *src)
350 for( i = 0; i < BtId; i++ )
351 id <<= 8, id |= *src++;
358 // wait until write lock mode is clear
359 // and add 1 to the share count
361 void bt_spinreadlock(BtSpinLatch *latch)
366 // obtain latch mutex
368 if( __sync_fetch_and_or((ushort *)latch, Mutex) & Mutex )
371 if( prev = _InterlockedOr16((ushort *)latch, Mutex) & Mutex )
374 // see if exclusive request is granted or pending
376 if( prev = !(latch->exclusive | latch->pending) )
378 __sync_fetch_and_add((ushort *)latch, Share);
380 _InterlockedExchangeAdd16 ((ushort *)latch, Share);
384 __sync_fetch_and_and ((ushort *)latch, ~Mutex);
386 _InterlockedAnd16((ushort *)latch, ~Mutex);
392 } while( sched_yield(), 1 );
394 } while( SwitchToThread(), 1 );
398 // wait for other read and write latches to relinquish
400 void bt_spinwritelock(BtSpinLatch *latch)
404 if( __sync_fetch_and_or((ushort *)latch, Mutex | Pending) & Mutex )
407 if( _InterlockedOr16((ushort *)latch, Mutex | Pending) & Mutex )
410 if( !(latch->share | latch->exclusive) ) {
412 __sync_fetch_and_or((ushort *)latch, Write);
413 __sync_fetch_and_and ((ushort *)latch, ~(Mutex | Pending));
415 _InterlockedOr16((ushort *)latch, Write);
416 _InterlockedAnd16((ushort *)latch, ~(Mutex | Pending));
422 __sync_fetch_and_and ((ushort *)latch, ~Mutex);
424 _InterlockedAnd16((ushort *)latch, ~Mutex);
428 } while( sched_yield(), 1 );
430 } while( SwitchToThread(), 1 );
434 // try to obtain write lock
436 // return 1 if obtained,
439 int bt_spinwritetry(BtSpinLatch *latch)
444 if( prev = __sync_fetch_and_or((ushort *)latch, Mutex), prev & Mutex )
447 if( prev = _InterlockedOr16((ushort *)latch, Mutex), prev & Mutex )
450 // take write access if all bits are clear
454 __sync_fetch_and_or ((ushort *)latch, Write);
456 _InterlockedOr16((ushort *)latch, Write);
460 __sync_fetch_and_and ((ushort *)latch, ~Mutex);
462 _InterlockedAnd16((ushort *)latch, ~Mutex);
469 void bt_spinreleasewrite(BtSpinLatch *latch)
472 __sync_fetch_and_and ((ushort *)latch, ~Write);
474 _InterlockedAnd16((ushort *)latch, ~Write);
478 // decrement reader count
480 void bt_spinreleaseread(BtSpinLatch *latch)
483 __sync_fetch_and_add((ushort *)latch, -Share);
485 _InterlockedExchangeAdd16 ((ushort *)latch, -Share);
489 // link latch table entry into latch hash table
491 void bt_latchlink (BtDb *bt, ushort hashidx, ushort victim, uid page_no)
493 BtLatchSet *set = bt->mgr->latchsets + victim;
495 if( set->next = bt->mgr->latchmgr->table[hashidx].slot )
496 bt->mgr->latchsets[set->next].prev = victim;
498 bt->mgr->latchmgr->table[hashidx].slot = victim;
499 set->page_no = page_no;
506 void bt_unpinlatch (BtLatchSet *set)
509 __sync_fetch_and_add(&set->pin, -1);
511 _InterlockedDecrement16 (&set->pin);
//	NOTE(review): heavily elided extraction -- control-flow lines (braces,
//	continue/return statements, #ifdef unix/#else framing around the
//	paired __sync_* / _Interlocked* lines) are missing; code lines are
//	kept byte-identical below. Too intricate to reconstruct safely here.
515 // find existing latchset or inspire new one
516 // return with latchset pinned
518 BtLatchSet *bt_pinlatch (BtDb *bt, uid page_no)
520 ushort hashidx = page_no % bt->mgr->latchmgr->latchhash;
521 ushort slot, avail = 0, victim, idx;
//	fast path: search the hash chain under a read lock
524 // obtain read lock on hash table entry
526 bt_spinreadlock(bt->mgr->latchmgr->table[hashidx].latch);
528 if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do
530 set = bt->mgr->latchsets + slot;
531 if( page_no == set->page_no )
533 } while( slot = set->next );
537 __sync_fetch_and_add(&set->pin, 1);
539 _InterlockedIncrement16 (&set->pin);
543 bt_spinreleaseread (bt->mgr->latchmgr->table[hashidx].latch);
//	slow path: re-search under the write lock, remembering an
//	unpinned entry (avail) that can be taken over
548 // try again, this time with write lock
550 bt_spinwritelock(bt->mgr->latchmgr->table[hashidx].latch);
552 if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do
554 set = bt->mgr->latchsets + slot;
555 if( page_no == set->page_no )
557 if( !set->pin && !avail )
559 } while( slot = set->next );
561 // found our entry, or take over an unpinned one
563 if( slot || (slot = avail) ) {
564 set = bt->mgr->latchsets + slot;
566 __sync_fetch_and_add(&set->pin, 1);
568 _InterlockedIncrement16 (&set->pin);
570 set->page_no = page_no;
571 bt_spinreleasewrite(bt->mgr->latchmgr->table[hashidx].latch);
//	otherwise deploy a brand-new latch entry if any remain
575 // see if there are any unused entries
577 victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, 1) + 1;
579 victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchdeployed);
582 if( victim < bt->mgr->latchmgr->latchtotal ) {
583 set = bt->mgr->latchsets + victim;
585 __sync_fetch_and_add(&set->pin, 1);
587 _InterlockedIncrement16 (&set->pin);
589 bt_latchlink (bt, hashidx, victim, page_no);
590 bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch);
595 victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, -1);
597 victim = _InterlockedDecrement16 (&bt->mgr->latchmgr->latchdeployed);
//	table full: round-robin scan (latchvictim) for an unpinned entry
//	to steal, guarding each candidate with its busy latch
599 // find and reuse previous lock entry
603 victim = __sync_fetch_and_add(&bt->mgr->latchmgr->latchvictim, 1);
605 victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchvictim) - 1;
607 // we don't use slot zero
609 if( victim %= bt->mgr->latchmgr->latchtotal )
610 set = bt->mgr->latchsets + victim;
614 // take control of our slot
615 // from other threads
617 if( set->pin || !bt_spinwritetry (set->busy) )
622 // try to get write lock on hash chain
623 // skip entry if not obtained
624 // or has outstanding locks
626 if( !bt_spinwritetry (bt->mgr->latchmgr->table[idx].latch) ) {
627 bt_spinreleasewrite (set->busy);
632 bt_spinreleasewrite (set->busy);
633 bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch);
637 // unlink our available victim from its hash chain
640 bt->mgr->latchsets[set->prev].next = set->next;
642 bt->mgr->latchmgr->table[idx].slot = set->next;
645 bt->mgr->latchsets[set->next].prev = set->prev;
647 bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch);
649 __sync_fetch_and_add(&set->pin, 1);
651 _InterlockedIncrement16 (&set->pin);
653 bt_latchlink (bt, hashidx, victim, page_no);
654 bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch);
655 bt_spinreleasewrite (set->busy);
//	NOTE(review): elided extraction -- the unix/Windows #ifdef framing,
//	loop braces and trailing free(mgr) path are missing; code lines kept
//	byte-identical. Tears down every mapping and frees manager memory.
660 void bt_mgrclose (BtMgr *mgr)
665 // release mapped pages
666 // note that slot zero is never used
668 for( slot = 1; slot < mgr->poolmax; slot++ ) {
669 pool = mgr->pool + slot;
//	unix branch unmaps; Windows branch flushes + unmaps the view
672 munmap (pool->map, (mgr->poolmask+1) << mgr->page_bits);
675 FlushViewOfFile(pool->map, 0);
676 UnmapViewOfFile(pool->map);
677 CloseHandle(pool->hmap);
//	release the latch manager mappings themselves
683 munmap (mgr->latchsets, mgr->latchmgr->nlatchpage * mgr->page_size);
684 munmap (mgr->latchmgr, mgr->page_size);
686 FlushViewOfFile(mgr->latchmgr, 0);
687 UnmapViewOfFile(mgr->latchmgr);
688 CloseHandle(mgr->halloc);
695 free (mgr->pooladvise);
698 FlushFileBuffers(mgr->idx);
699 CloseHandle(mgr->idx);
700 GlobalFree (mgr->pool);
701 GlobalFree (mgr->hash);
702 GlobalFree (mgr->latch);
707 // close and release memory
709 void bt_close (BtDb *bt)
716 VirtualFree (bt->mem, 0, MEM_RELEASE);
//	NOTE(review): heavily elided extraction -- declarations, braces,
//	#ifdef unix/#else framing (each unix call below is paired with the
//	Windows call that follows it), and several statements are missing;
//	code lines kept byte-identical. Reconstruction is not safe here.
721 // open/create new btree buffer manager
723 // call with file_name, BT_openmode, bits in page size (e.g. 16),
724 // size of mapped page pool (e.g. 8192)
726 BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolmax, uint segsize, uint hashsize)
728 uint lvl, attr, cacheblk, last, slot, idx;
729 uint nlatchpage, latchhash;
730 BtLatchMgr *latchmgr;
738 SYSTEM_INFO sysinfo[1];
//	validate page-size bits and require a non-zero pool
741 // determine sanity of page size and buffer pool
743 if( bits > BT_maxbits )
745 else if( bits < BT_minbits )
749 return NULL; // must have buffer pool
//	open (or create) the index file -- unix branch, then Windows branch
752 mgr = calloc (1, sizeof(BtMgr));
754 mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666);
757 return free(mgr), NULL;
759 cacheblk = 4096; // minimum mmap segment size for unix
762 mgr = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, sizeof(BtMgr));
763 attr = FILE_ATTRIBUTE_NORMAL;
764 mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL);
766 if( mgr->idx == INVALID_HANDLE_VALUE )
767 return GlobalFree(mgr), NULL;
769 // normalize cacheblk to multiple of sysinfo->dwAllocationGranularity
770 GetSystemInfo(sysinfo);
771 cacheblk = sysinfo->dwAllocationGranularity;
775 latchmgr = malloc (BT_maxpage);
//	if the file already exists, adopt its recorded page size
778 // read minimum page size to get root info
780 if( size = lseek (mgr->idx, 0L, 2) ) {
781 if( pread(mgr->idx, latchmgr, BT_minpage, 0) == BT_minpage )
782 bits = latchmgr->alloc->bits;
784 return free(mgr), free(latchmgr), NULL;
785 } else if( mode == BT_ro )
786 return free(latchmgr), bt_mgrclose (mgr), NULL;
788 latchmgr = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE);
789 size = GetFileSize(mgr->idx, amt);
792 if( !ReadFile(mgr->idx, (char *)latchmgr, BT_minpage, amt, NULL) )
793 return bt_mgrclose (mgr), NULL;
794 bits = latchmgr->alloc->bits;
795 } else if( mode == BT_ro )
796 return bt_mgrclose (mgr), NULL;
//	derive segment geometry: poolmask and seg_bits from cacheblk/segsize
799 mgr->page_size = 1 << bits;
800 mgr->page_bits = bits;
802 mgr->poolmax = poolmax;
805 if( cacheblk < mgr->page_size )
806 cacheblk = mgr->page_size;
808 // mask for partial memmaps
810 mgr->poolmask = (cacheblk >> bits) - 1;
812 // see if requested size of pages per memmap is greater
814 if( (1 << segsize) > mgr->poolmask )
815 mgr->poolmask = (1 << segsize) - 1;
819 while( (1 << mgr->seg_bits) <= mgr->poolmask )
822 mgr->hashsize = hashsize;
//	allocate pool/hash/latch tables (unix calloc vs Windows GlobalAlloc)
825 mgr->pool = calloc (poolmax, sizeof(BtPool));
826 mgr->hash = calloc (hashsize, sizeof(ushort));
827 mgr->latch = calloc (hashsize, sizeof(BtSpinLatch));
828 mgr->pooladvise = calloc (poolmax, (mgr->poolmask + 8) / 8);
830 mgr->pool = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, poolmax * sizeof(BtPool));
831 mgr->hash = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(ushort));
832 mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtSpinLatch));
//	for a new file: write the ALLOC page with latch-manager geometry
838 // initialize an empty b-tree with latch page, root page, page of leaves
839 // and page(s) of latches
841 memset (latchmgr, 0, 1 << bits);
842 nlatchpage = BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1;
843 bt_putid(latchmgr->alloc->right, MIN_lvl+1+nlatchpage);
844 latchmgr->alloc->bits = mgr->page_bits;
846 latchmgr->nlatchpage = nlatchpage;
847 latchmgr->latchtotal = nlatchpage * (mgr->page_size / sizeof(BtLatchSet));
849 // initialize latch manager
851 latchhash = (mgr->page_size - sizeof(BtLatchMgr)) / sizeof(BtHashEntry);
853 // size of hash table = total number of latchsets
855 if( latchhash > latchmgr->latchtotal )
856 latchhash = latchmgr->latchtotal;
858 latchmgr->latchhash = latchhash;
861 if( write (mgr->idx, latchmgr, mgr->page_size) < mgr->page_size )
862 return bt_mgrclose (mgr), NULL;
864 if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) )
865 return bt_mgrclose (mgr), NULL;
867 if( *amt < mgr->page_size )
868 return bt_mgrclose (mgr), NULL;
//	write one page per initial level, each holding only a stopper key
871 memset (latchmgr, 0, 1 << bits);
872 latchmgr->alloc->bits = mgr->page_bits;
874 for( lvl=MIN_lvl; lvl--; ) {
875 slotptr(latchmgr->alloc, 1)->off = mgr->page_size - 3;
876 bt_putid(slotptr(latchmgr->alloc, 1)->id, lvl ? MIN_lvl - lvl + 1 : 0); // next(lower) page number
877 key = keyptr(latchmgr->alloc, 1);
878 key->len = 2; // create stopper key
881 latchmgr->alloc->min = mgr->page_size - 3;
882 latchmgr->alloc->lvl = lvl;
883 latchmgr->alloc->cnt = 1;
884 latchmgr->alloc->act = 1;
886 if( write (mgr->idx, latchmgr, mgr->page_size) < mgr->page_size )
887 return bt_mgrclose (mgr), NULL;
889 if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) )
890 return bt_mgrclose (mgr), NULL;
892 if( *amt < mgr->page_size )
893 return bt_mgrclose (mgr), NULL;
//	zero-fill the latch pages and pad out to a whole pool segment
897 // clear out latch manager locks
898 // and rest of pages to round out segment
900 memset(latchmgr, 0, mgr->page_size);
903 while( last <= ((MIN_lvl + 1 + nlatchpage) | mgr->poolmask) ) {
905 pwrite(mgr->idx, latchmgr, mgr->page_size, last << mgr->page_bits);
907 SetFilePointer (mgr->idx, last << mgr->page_bits, NULL, FILE_BEGIN);
908 if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) )
909 return bt_mgrclose (mgr), NULL;
910 if( *amt < mgr->page_size )
911 return bt_mgrclose (mgr), NULL;
//	map the ALLOC page and the latch pages into shared memory
918 flag = PROT_READ | PROT_WRITE;
919 mgr->latchmgr = mmap (0, mgr->page_size, flag, MAP_SHARED, mgr->idx, ALLOC_page * mgr->page_size);
920 if( mgr->latchmgr == MAP_FAILED )
921 return bt_mgrclose (mgr), NULL;
922 mgr->latchsets = (BtLatchSet *)mmap (0, mgr->latchmgr->nlatchpage * mgr->page_size, flag, MAP_SHARED, mgr->idx, LATCH_page * mgr->page_size);
923 if( mgr->latchsets == MAP_FAILED )
924 return bt_mgrclose (mgr), NULL;
926 flag = PAGE_READWRITE;
927 mgr->halloc = CreateFileMapping(mgr->idx, NULL, flag, 0, (BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1 + LATCH_page) * mgr->page_size, NULL);
929 return bt_mgrclose (mgr), NULL;
931 flag = FILE_MAP_WRITE;
932 mgr->latchmgr = MapViewOfFile(mgr->halloc, flag, 0, 0, (BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1 + LATCH_page) * mgr->page_size);
934 return GetLastError(), bt_mgrclose (mgr), NULL;
936 mgr->latchsets = (void *)((char *)mgr->latchmgr + LATCH_page * mgr->page_size);
942 VirtualFree (latchmgr, 0, MEM_RELEASE);
947 // open BTree access method
948 // based on buffer manager
950 BtDb *bt_open (BtMgr *mgr)
952 BtDb *bt = malloc (sizeof(*bt));
954 memset (bt, 0, sizeof(*bt));
957 bt->mem = malloc (3 *mgr->page_size);
959 bt->mem = VirtualAlloc(NULL, 3 * mgr->page_size, MEM_COMMIT, PAGE_READWRITE);
961 bt->frame = (BtPage)bt->mem;
962 bt->zero = (BtPage)(bt->mem + 1 * mgr->page_size);
963 bt->cursor = (BtPage)(bt->mem + 2 * mgr->page_size);
965 memset (bt->zero, 0, mgr->page_size);
969 // compare two keys, returning > 0, = 0, or < 0
970 // as the comparison value
972 int keycmp (BtKey key1, unsigned char *key2, uint len2)
974 uint len1 = key1->len;
977 if( ans = memcmp (key1->key, key2, len1 > len2 ? len2 : len1) )
990 // find segment in pool
991 // must be called with hashslot idx locked
992 // return NULL if not there
993 // otherwise return node
995 BtPool *bt_findpool(BtDb *bt, uid page_no, uint idx)
1000 // compute start of hash chain in pool
1002 if( slot = bt->mgr->hash[idx] )
1003 pool = bt->mgr->pool + slot;
1007 page_no &= ~bt->mgr->poolmask;
1009 while( pool->basepage != page_no )
1010 if( pool = pool->hashnext )
1018 // add segment to hash table
1020 void bt_linkhash(BtDb *bt, BtPool *pool, uid page_no, int idx)
1025 pool->hashprev = pool->hashnext = NULL;
1026 pool->basepage = page_no & ~bt->mgr->poolmask;
1029 if( slot = bt->mgr->hash[idx] ) {
1030 node = bt->mgr->pool + slot;
1031 pool->hashnext = node;
1032 node->hashprev = pool;
1035 bt->mgr->hash[idx] = pool->slot;
1038 // find best segment to evict from buffer pool
1040 BtPool *bt_findlru (BtDb *bt, uint hashslot)
1042 unsigned long long int target = ~0LL;
1043 BtPool *pool = NULL, *node;
1048 node = bt->mgr->pool + hashslot;
1050 // scan pool entries under hash table slot
1055 if( node->lru > target )
1059 } while( node = node->hashnext );
1064 // map new buffer pool segment to virtual memory
1066 BTERR bt_mapsegment(BtDb *bt, BtPool *pool, uid page_no)
1068 off64_t off = (page_no & ~bt->mgr->poolmask) << bt->mgr->page_bits;
1069 off64_t limit = off + ((bt->mgr->poolmask+1) << bt->mgr->page_bits);
1073 flag = PROT_READ | ( bt->mgr->mode == BT_ro ? 0 : PROT_WRITE );
1074 pool->map = mmap (0, (bt->mgr->poolmask+1) << bt->mgr->page_bits, flag, MAP_SHARED, bt->mgr->idx, off);
1075 if( pool->map == MAP_FAILED )
1076 return bt->err = BTERR_map;
1078 // clear out madvise issued bits
1079 memset (bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8) / 8), 0, (bt->mgr->poolmask + 8)/8);
1081 flag = ( bt->mgr->mode == BT_ro ? PAGE_READONLY : PAGE_READWRITE );
1082 pool->hmap = CreateFileMapping(bt->mgr->idx, NULL, flag, (DWORD)(limit >> 32), (DWORD)limit, NULL);
1084 return bt->err = BTERR_map;
1086 flag = ( bt->mgr->mode == BT_ro ? FILE_MAP_READ : FILE_MAP_WRITE );
1087 pool->map = MapViewOfFile(pool->hmap, flag, (DWORD)(off >> 32), (DWORD)off, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
1089 return bt->err = BTERR_map;
1094 // calculate page within pool
1096 BtPage bt_page (BtDb *bt, BtPool *pool, uid page_no)
1098 uint subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping
1101 page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits));
1104 uint idx = subpage / 8;
1105 uint bit = subpage % 8;
1107 if( ~((bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8)/8))[idx] >> bit) & 1 ) {
1108 madvise (page, bt->mgr->page_size, MADV_WILLNEED);
1109 (bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8)/8))[idx] |= 1 << bit;
1118 void bt_unpinpool (BtPool *pool)
1121 __sync_fetch_and_add(&pool->pin, -1);
1123 _InterlockedDecrement16 (&pool->pin);
//	NOTE(review): heavily elided extraction -- braces, continue/return
//	statements and #ifdef unix/#else framing around the paired
//	__sync_* / _Interlocked* lines are missing; code lines kept
//	byte-identical. Too intricate to reconstruct safely here.
1127 // find or place requested page in segment-pool
1128 // return pool table entry, incrementing pin
1130 BtPool *bt_pinpool(BtDb *bt, uid page_no)
1132 BtPool *pool, *node, *next;
1133 uint slot, idx, victim;
1135 // lock hash table chain
1137 idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
1138 bt_spinreadlock (&bt->mgr->latch[idx]);
//	fast path: already mapped -- pin and return under the read lock
1140 // look up in hash table
1142 if( pool = bt_findpool(bt, page_no, idx) ) {
1144 __sync_fetch_and_add(&pool->pin, 1);
1146 _InterlockedIncrement16 (&pool->pin);
1148 bt_spinreleaseread (&bt->mgr->latch[idx]);
1153 // upgrade to write lock
1155 bt_spinreleaseread (&bt->mgr->latch[idx]);
1156 bt_spinwritelock (&bt->mgr->latch[idx]);
//	re-check: another thread may have mapped it during the upgrade
1158 // try to find page in pool with write lock
1160 if( pool = bt_findpool(bt, page_no, idx) ) {
1162 __sync_fetch_and_add(&pool->pin, 1);
1164 _InterlockedIncrement16 (&pool->pin);
1166 bt_spinreleasewrite (&bt->mgr->latch[idx]);
//	claim a fresh pool slot if any remain below poolmax
1171 // allocate a new pool node
1172 // and add to hash table
1175 slot = __sync_fetch_and_add(&bt->mgr->poolcnt, 1);
1177 slot = _InterlockedIncrement16 (&bt->mgr->poolcnt) - 1;
1180 if( ++slot < bt->mgr->poolmax ) {
1181 pool = bt->mgr->pool + slot;
1184 if( bt_mapsegment(bt, pool, page_no) )
1187 bt_linkhash(bt, pool, page_no, idx);
1189 __sync_fetch_and_add(&pool->pin, 1);
1191 _InterlockedIncrement16 (&pool->pin);
1193 bt_spinreleasewrite (&bt->mgr->latch[idx]);
//	table full: back out the count and evict an LRU segment instead
1197 // pool table is full
1198 // find best pool entry to evict
1201 __sync_fetch_and_add(&bt->mgr->poolcnt, -1);
1203 _InterlockedDecrement16 (&bt->mgr->poolcnt);
1208 victim = __sync_fetch_and_add(&bt->mgr->evicted, 1);
1210 victim = _InterlockedIncrement (&bt->mgr->evicted) - 1;
1212 victim %= bt->mgr->hashsize;
1214 // try to get write lock
1215 // skip entry if not obtained
1217 if( !bt_spinwritetry (&bt->mgr->latch[victim]) )
1220 // if pool entry is empty
1221 // or any pages are pinned
1224 if( !(pool = bt_findlru(bt, bt->mgr->hash[victim])) ) {
1225 bt_spinreleasewrite (&bt->mgr->latch[victim]);
1229 // unlink victim pool node from hash table
1231 if( node = pool->hashprev )
1232 node->hashnext = pool->hashnext;
1233 else if( node = pool->hashnext )
1234 bt->mgr->hash[victim] = node->slot;
1236 bt->mgr->hash[victim] = 0;
1238 if( node = pool->hashnext )
1239 node->hashprev = pool->hashprev;
1241 bt_spinreleasewrite (&bt->mgr->latch[victim]);
1243 // remove old file mapping
1245 munmap (pool->map, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
1247 FlushViewOfFile(pool->map, 0);
1248 UnmapViewOfFile(pool->map);
1249 CloseHandle(pool->hmap);
//	reuse the evicted slot for the requested segment
1253 // create new pool mapping
1254 // and link into hash table
1256 if( bt_mapsegment(bt, pool, page_no) )
1259 bt_linkhash(bt, pool, page_no, idx);
1261 __sync_fetch_and_add(&pool->pin, 1);
1263 _InterlockedIncrement16 (&pool->pin);
1265 bt_spinreleasewrite (&bt->mgr->latch[idx]);
1270 // place write, read, or parent lock on requested page_no.
1272 void bt_lockpage(BtLock mode, BtLatchSet *set)
1276 bt_spinreadlock (set->readwr);
1279 bt_spinwritelock (set->readwr);
1282 bt_spinreadlock (set->access);
1285 bt_spinwritelock (set->access);
1288 bt_spinwritelock (set->parent);
1293 // remove write, read, or parent lock on requested page
1295 void bt_unlockpage(BtLock mode, BtLatchSet *set)
1299 bt_spinreleaseread (set->readwr);
1302 bt_spinreleasewrite (set->readwr);
1305 bt_spinreleaseread (set->access);
1308 bt_spinreleasewrite (set->access);
1311 bt_spinreleasewrite (set->parent);
//	NOTE(review): elided extraction -- declarations (new_page, reuse,
//	pool, pmap), braces, error paths and the final return are missing;
//	code lines kept byte-identical. Too intricate to reconstruct safely.
1316 // allocate a new page and write page into it
1318 uid bt_newpage(BtDb *bt, BtPage page)
1326 // lock allocation page
1328 bt_spinwritelock(bt->mgr->latchmgr->lock);
//	prefer recycling a page from the free chain (alloc[1].right)
1330 // use empty chain first
1331 // else allocate empty page
1333 if( new_page = bt_getid(bt->mgr->latchmgr->alloc[1].right) ) {
1334 if( pool = bt_pinpool (bt, new_page) )
1335 pmap = bt_page (bt, pool, new_page);
1338 bt_putid(bt->mgr->latchmgr->alloc[1].right, bt_getid(pmap->right));
1339 bt_unpinpool (pool);
//	otherwise extend the file: bump the next-page counter
1342 new_page = bt_getid(bt->mgr->latchmgr->alloc->right);
1343 bt_putid(bt->mgr->latchmgr->alloc->right, new_page+1);
1347 if ( pwrite(bt->mgr->idx, page, bt->mgr->page_size, new_page << bt->mgr->page_bits) < bt->mgr->page_size )
1348 return bt->err = BTERR_wrt, 0;
1350 // if writing first page of pool block, zero last page in the block
1352 if ( !reuse && bt->mgr->poolmask > 0 && (new_page & bt->mgr->poolmask) == 0 )
1354 // use zero buffer to write zeros
1355 memset(bt->zero, 0, bt->mgr->page_size);
1356 if ( pwrite(bt->mgr->idx,bt->zero, bt->mgr->page_size, (new_page | bt->mgr->poolmask) << bt->mgr->page_bits) < bt->mgr->page_size )
1357 return bt->err = BTERR_wrt, 0;
1360 // bring new page into pool and copy page.
1361 // this will extend the file into the new pages.
1363 if( pool = bt_pinpool (bt, new_page) )
1364 pmap = bt_page (bt, pool, new_page);
1368 memcpy(pmap, page, bt->mgr->page_size);
1369 bt_unpinpool (pool);
1371 // unlock allocation latch and return new page no
1373 bt_spinreleasewrite(bt->mgr->latchmgr->lock);
1377 // find slot in page for given key at a given level
1379 int bt_findslot (BtDb *bt, unsigned char *key, uint len)
1381 uint diff, higher = bt->page->cnt, low = 1, slot;
1384 // make stopper key an infinite fence value
1386 if( bt_getid (bt->page->right) )
1391 // low is the next candidate, higher is already
1392 // tested as .ge. the given key, loop ends when they meet
1394 while( diff = higher - low ) {
1395 slot = low + ( diff >> 1 );
1396 if( keycmp (keyptr(bt->page, slot), key, len) < 0 )
1399 higher = slot, good++;
1402 // return zero if key is on right link page
1404 return good ? higher : 0;
//	NOTE(review): heavily elided extraction -- the drill-down loop
//	braces, prevpool declaration, break/continue statements and several
//	branches are missing; code lines kept byte-identical. Too intricate
//	to reconstruct safely here.
1407 // find and load page at given level for given key
1408 // leave page rd or wr locked as requested
1410 int bt_loadpage (BtDb *bt, unsigned char *key, uint len, uint lvl, uint lock)
1412 uid page_no = ROOT_page, prevpage = 0;
1413 BtLatchSet *set, *prevset;
1414 uint drill = 0xff, slot;
1415 uint mode, prevmode;
1418 // start at root of btree and drill down
//	write-lock only the target level; read-lock all levels above it
1423 // determine lock mode of drill level
1424 mode = (lock == BtLockWrite) && (drill == lvl) ? BtLockWrite : BtLockRead;
1426 bt->set = bt_pinlatch (bt, page_no);
1427 bt->page_no = page_no;
1429 // pin page contents
1431 if( bt->pool = bt_pinpool (bt, page_no) )
1432 bt->page = bt_page (bt, bt->pool, page_no);
1436 // obtain access lock using lock chaining with Access mode
1438 if( page_no > ROOT_page )
1439 bt_lockpage(BtLockAccess, bt->set);
1441 // release & unpin parent page
1444 bt_unlockpage(prevmode, prevset);
1445 bt_unpinlatch (prevset);
1446 bt_unpinpool (prevpool);
1450 // obtain read lock using lock chaining
1452 bt_lockpage(mode, bt->set);
1454 if( page_no > ROOT_page )
1455 bt_unlockpage(BtLockAccess, bt->set);
1457 // re-read and re-lock root after determining actual level of root
1459 if( bt->page->lvl != drill) {
1460 if ( bt->page_no != ROOT_page )
1461 return bt->err = BTERR_struct, 0;
1463 drill = bt->page->lvl;
1465 if( lock == BtLockWrite && drill == lvl ) {
1466 bt_unlockpage(mode, bt->set);
1467 bt_unpinlatch (bt->set);
1468 bt_unpinpool (bt->pool);
1473 // find key on page at this level
1474 // and descend to requested level
1476 if( !bt->page->kill && (slot = bt_findslot (bt, key, len)) ) {
//	skip over dead slots; fall through to the right link at the end
1480 while( slotptr(bt->page, slot)->dead )
1481 if( slot++ < bt->page->cnt )
1484 page_no = bt_getid(bt->page->right);
1488 page_no = bt_getid(slotptr(bt->page, slot)->id);
1492 // or slide right into next page
1493 // (slide left from deleted page)
1496 page_no = bt_getid(bt->page->right);
1498 // continue down / right using overlapping locks
1499 // to protect pages being killed or split.
1502 prevpage = bt->page_no;
1503 prevpool = bt->pool;
1508 // return error on end of right chain
1510 bt->err = BTERR_struct;
1511 return 0; // return error
1514 // find and delete key on page by marking delete flag bit
1515 // when page becomes empty, delete it
1517 BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl)
1519 unsigned char lowerkey[256], higherkey[256];
1520 BtLatchSet *rset, *set;
1521 BtPool *pool, *rpool;
1527 if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) )
1528 ptr = keyptr(bt->page, slot);
1532 // if key is found delete it, otherwise ignore request
1534 if( bt->found = !keycmp (ptr, key, len) )
1535 if( bt->found = slotptr(bt->page, slot)->dead == 0 ) {
1536 slotptr(bt->page,slot)->dead = 1;
1537 if( slot < bt->page->cnt )
1538 bt->page->dirty = 1;
1542 // return if page is not empty, or it has no right sibling
1544 right = bt_getid(bt->page->right);
1545 page_no = bt->page_no;
1549 if( !right || bt->page->act ) {
1550 bt_unlockpage(BtLockWrite, set);
1551 bt_unpinlatch (set);
1552 bt_unpinpool (pool);
1556 // obtain Parent lock over write lock
1558 bt_lockpage(BtLockParent, set);
1560 // keep copy of key to delete
1562 ptr = keyptr(bt->page, bt->page->cnt);
1563 memcpy(lowerkey, ptr, ptr->len + 1);
1565 // lock and map right page
1567 if( rpool = bt_pinpool (bt, right) )
1568 rpage = bt_page (bt, rpool, right);
1572 rset = bt_pinlatch (bt, right);
1573 bt_lockpage(BtLockWrite, rset);
1575 // pull contents of next page into current empty page
1577 memcpy (bt->page, rpage, bt->mgr->page_size);
1579 // keep copy of key to update
1581 ptr = keyptr(rpage, rpage->cnt);
1582 memcpy(higherkey, ptr, ptr->len + 1);
1584 // Mark right page as deleted and point it to left page
1585 // until we can post updates at higher level.
1587 bt_putid(rpage->right, page_no);
1591 bt_unlockpage(BtLockWrite, rset);
1592 bt_unlockpage(BtLockWrite, set);
1594 // delete old lower key to consolidated node
1596 if( bt_deletekey (bt, lowerkey + 1, *lowerkey, lvl + 1) )
1599 // redirect higher key directly to consolidated node
1601 tod = (uint)time(NULL);
1603 if( bt_insertkey (bt, higherkey+1, *higherkey, lvl + 1, page_no, tod) )
1606 // add killed right block to free chain
1609 bt_spinwritelock(bt->mgr->latchmgr->lock);
1611 // store free chain in allocation page second right
1612 bt_putid(rpage->right, bt_getid(bt->mgr->latchmgr->alloc[1].right));
1613 bt_putid(bt->mgr->latchmgr->alloc[1].right, right);
1615 // unlock latch mgr and unpin right page
1617 bt_spinreleasewrite(bt->mgr->latchmgr->lock);
1618 bt_unpinlatch (rset);
1619 bt_unpinpool (rpool);
1621 // remove ParentModify lock
1623 bt_unlockpage(BtLockParent, set);
1624 bt_unpinlatch (set);
1625 bt_unpinpool (pool);
1629 // find key in leaf level and return row-id
1631 uid bt_findkey (BtDb *bt, unsigned char *key, uint len)
1637 if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) )
1638 ptr = keyptr(bt->page, slot);
1642 // if key exists, return row-id
1643 // otherwise return 0
1645 if( ptr->len == len && !memcmp (ptr->key, key, len) )
1646 id = bt_getid(slotptr(bt->page,slot)->id);
1650 bt_unlockpage (BtLockRead, bt->set);
1651 bt_unpinlatch (bt->set);
1652 bt_unpinpool (bt->pool);
1656 // check page for space available,
1657 // clean if necessary and return
1658 // =0 - page needs splitting
1659 // >0 - go ahead at returned slot
1661 uint bt_cleanpage(BtDb *bt, uint amt, uint slot)
1663 uint nxt = bt->mgr->page_size;
1664 BtPage page = bt->page;
1665 uint cnt = 0, idx = 0;
1666 uint max = page->cnt;
1670 if( page->min >= (max+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1673 // skip cleanup if nothing to reclaim
1678 memcpy (bt->frame, page, bt->mgr->page_size);
1680 // skip page info and set rest of page to zero
1682 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1686 // always leave fence key in list
1688 while( cnt++ < max ) {
1691 else if( cnt < max && slotptr(bt->frame,cnt)->dead )
1695 key = keyptr(bt->frame, cnt);
1696 nxt -= key->len + 1;
1697 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1700 memcpy(slotptr(page, ++idx)->id, slotptr(bt->frame, cnt)->id, BtId);
1701 if( !(slotptr(page, idx)->dead = slotptr(bt->frame, cnt)->dead) )
1703 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1704 slotptr(page, idx)->off = nxt;
1709 if( page->min >= (idx+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1715 // split the root and raise the height of the btree
1717 BTERR bt_splitroot(BtDb *bt, unsigned char *newkey, unsigned char *oldkey, uid page_no2)
1719 uint nxt = bt->mgr->page_size;
1720 BtPage root = bt->page;
1723 // Obtain an empty page to use, and copy the current
1724 // root contents into it which is the lower half of
1727 if( !(new_page = bt_newpage(bt, root)) )
1730 // preserve the page info at the bottom
1731 // and set rest to zero
1733 memset(root+1, 0, bt->mgr->page_size - sizeof(*root));
1735 // insert first key on newroot page
1738 memcpy ((unsigned char *)root + nxt, newkey, *newkey + 1);
1739 bt_putid(slotptr(root, 1)->id, new_page);
1740 slotptr(root, 1)->off = nxt;
1742 // insert second key on newroot page
1743 // and increase the root height
1746 memcpy ((unsigned char *)root + nxt, oldkey, *oldkey + 1);
1747 bt_putid(slotptr(root, 2)->id, page_no2);
1748 slotptr(root, 2)->off = nxt;
1750 bt_putid(root->right, 0);
1751 root->min = nxt; // reset lowest used offset and key count
1756 // release and unpin root (bt->page)
1758 bt_unlockpage(BtLockWrite, bt->set);
1759 bt_unpinlatch (bt->set);
1760 bt_unpinpool (bt->pool);
1764 // split already locked full node
1767 BTERR bt_splitpage (BtDb *bt)
1769 uint cnt = 0, idx = 0, max, nxt = bt->mgr->page_size;
1770 unsigned char oldkey[256], lowerkey[256];
1771 uid page_no = bt->page_no, right;
1772 BtLatchSet *nset, *set = bt->set;
1773 BtPool *pool = bt->pool;
1774 BtPage page = bt->page;
1775 uint lvl = page->lvl;
1780 // split higher half of keys to bt->frame
1781 // the last key (fence key) might be dead
1783 tod = (uint)time(NULL);
1785 memset (bt->frame, 0, bt->mgr->page_size);
1786 max = (int)page->cnt;
1790 while( cnt++ < max ) {
1791 key = keyptr(page, cnt);
1792 nxt -= key->len + 1;
1793 memcpy ((unsigned char *)bt->frame + nxt, key, key->len + 1);
1794 memcpy(slotptr(bt->frame,++idx)->id, slotptr(page,cnt)->id, BtId);
1795 if( !(slotptr(bt->frame, idx)->dead = slotptr(page, cnt)->dead) )
1797 slotptr(bt->frame, idx)->tod = slotptr(page, cnt)->tod;
1798 slotptr(bt->frame, idx)->off = nxt;
1801 // remember existing fence key for new page to the right
1803 memcpy (oldkey, key, key->len + 1);
1805 bt->frame->bits = bt->mgr->page_bits;
1806 bt->frame->min = nxt;
1807 bt->frame->cnt = idx;
1808 bt->frame->lvl = lvl;
1812 if( page_no > ROOT_page ) {
1813 right = bt_getid (page->right);
1814 bt_putid(bt->frame->right, right);
1817 // get new free page and write frame to it.
1819 if( !(new_page = bt_newpage(bt, bt->frame)) )
1822 // update lower keys to continue in old page
1824 memcpy (bt->frame, page, bt->mgr->page_size);
1825 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1826 nxt = bt->mgr->page_size;
1831 // assemble page of smaller keys
1832 // (they're all active keys)
1834 while( cnt++ < max / 2 ) {
1835 key = keyptr(bt->frame, cnt);
1836 nxt -= key->len + 1;
1837 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1838 memcpy(slotptr(page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId);
1839 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1840 slotptr(page, idx)->off = nxt;
1844 // remember fence key for old page
1846 memcpy(lowerkey, key, key->len + 1);
1847 bt_putid(page->right, new_page);
1851 // if current page is the root page, split it
1853 if( page_no == ROOT_page )
1854 return bt_splitroot (bt, lowerkey, oldkey, new_page);
1856 // obtain Parent/Write locks
1857 // for left and right node pages
1859 nset = bt_pinlatch (bt, new_page);
1861 bt_lockpage (BtLockParent, nset);
1862 bt_lockpage (BtLockParent, set);
1864 // release wr lock on left page
1865 // (keep the SMO in sequence)
1867 bt_unlockpage (BtLockWrite, set);
1869 // insert new fence for reformulated left block
1871 if( bt_insertkey (bt, lowerkey+1, *lowerkey, lvl + 1, page_no, tod) )
1874 // fix old fence for newly allocated right block page
1876 if( bt_insertkey (bt, oldkey+1, *oldkey, lvl + 1, new_page, tod) )
1879 // release Parent locks
1881 bt_unlockpage (BtLockParent, nset);
1882 bt_unlockpage (BtLockParent, set);
1883 bt_unpinlatch (nset);
1884 bt_unpinlatch (set);
1885 bt_unpinpool (pool);
1889 // Insert new key into the btree at requested level.
1890 // Level zero pages are leaf pages. Page is unlocked at exit.
1892 BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod)
1899 if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) )
1900 ptr = keyptr(bt->page, slot);
1904 bt->err = BTERR_ovflw;
1908 // if key already exists, update id and return
1912 if( bt->found = !keycmp (ptr, key, len) ) {
1913 slotptr(page, slot)->dead = 0;
1914 slotptr(page, slot)->tod = tod;
1915 bt_putid(slotptr(page,slot)->id, id);
1916 bt_unlockpage(BtLockWrite, bt->set);
1917 bt_unpinlatch(bt->set);
1918 bt_unpinpool (bt->pool);
1922 // check if page has enough space
1924 if( slot = bt_cleanpage (bt, len, slot) )
1927 if( bt_splitpage (bt) )
1931 // calculate next available slot and copy key into page
1933 page->min -= len + 1; // reset lowest used offset
1934 ((unsigned char *)page)[page->min] = len;
1935 memcpy ((unsigned char *)page + page->min +1, key, len );
1937 for( idx = slot; idx < page->cnt; idx++ )
1938 if( slotptr(page, idx)->dead )
1941 // now insert key into array before slot
1942 // preserving the fence slot
1944 if( idx == page->cnt )
1950 *slotptr(page, idx) = *slotptr(page, idx -1), idx--;
1952 bt_putid(slotptr(page,slot)->id, id);
1953 slotptr(page, slot)->off = page->min;
1954 slotptr(page, slot)->tod = tod;
1955 slotptr(page, slot)->dead = 0;
1957 bt_unlockpage (BtLockWrite, bt->set);
1958 bt_unpinlatch (bt->set);
1959 bt_unpinpool (bt->pool);
1963 // cache page of keys into cursor and return starting slot for given key
1965 uint bt_startkey (BtDb *bt, unsigned char *key, uint len)
1969 // cache page for retrieval
1970 if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) )
1971 memcpy (bt->cursor, bt->page, bt->mgr->page_size);
1972 bt->cursor_page = bt->page_no;
1973 bt_unlockpage(BtLockRead, bt->set);
1974 bt_unpinlatch (bt->set);
1975 bt_unpinpool (bt->pool);
1979 // return next slot for cursor page
1980 // or slide cursor right into next page
1982 uint bt_nextkey (BtDb *bt, uint slot)
1989 right = bt_getid(bt->cursor->right);
1990 while( slot++ < bt->cursor->cnt )
1991 if( slotptr(bt->cursor,slot)->dead )
1993 else if( right || (slot < bt->cursor->cnt))
2001 bt->cursor_page = right;
2003 if( pool = bt_pinpool (bt, right) )
2004 page = bt_page (bt, pool, right);
2008 bt->set = bt_pinlatch (bt, right);
2009 bt_lockpage(BtLockRead, bt->set);
2011 memcpy (bt->cursor, page, bt->mgr->page_size);
2013 bt_unlockpage(BtLockRead, bt->set);
2014 bt_unpinlatch (bt->set);
2015 bt_unpinpool (pool);
2022 BtKey bt_key(BtDb *bt, uint slot)
2024 return keyptr(bt->cursor, slot);
2027 uid bt_uid(BtDb *bt, uint slot)
2029 return bt_getid(slotptr(bt->cursor,slot)->id);
2032 uint bt_tod(BtDb *bt, uint slot)
2034 return slotptr(bt->cursor,slot)->tod;
2039 void bt_latchaudit (BtDb *bt)
2041 ushort idx, hashidx;
2048 for( idx = 1; idx < bt->mgr->latchmgr->latchdeployed; idx++ ) {
2049 set = bt->mgr->latchsets + idx;
2050 if( *(ushort *)set->readwr || *(ushort *)set->access || *(ushort *)set->parent ) {
2051 fprintf(stderr, "latchset %d locked for page %6x\n", idx, set->page_no);
2052 *(ushort *)set->readwr = 0;
2053 *(ushort *)set->access = 0;
2054 *(ushort *)set->parent = 0;
2057 fprintf(stderr, "latchset %d pinned\n", idx);
2062 for( hashidx = 0; hashidx < bt->mgr->latchmgr->latchhash; hashidx++ ) {
2063 if( *(uint *)bt->mgr->latchmgr->table[hashidx].latch )
2064 fprintf(stderr, "latchmgr locked\n");
2065 if( idx = bt->mgr->latchmgr->table[hashidx].slot ) do {
2066 set = bt->mgr->latchsets + idx;
2067 if( *(uint *)set->readwr || *(ushort *)set->access || *(ushort *)set->parent )
2068 fprintf(stderr, "latchset %d locked\n", idx);
2069 if( set->hash != hashidx )
2070 fprintf(stderr, "latchset %d wrong hashidx\n", idx);
2072 fprintf(stderr, "latchset %d pinned\n", idx);
2073 } while( idx = set->next );
2075 page_no = bt_getid(bt->mgr->latchmgr->alloc[1].right);
2078 fprintf(stderr, "free: %.6x\n", (uint)page_no);
2079 pool = bt_pinpool (bt, page_no);
2080 page = bt_page (bt, pool, page_no);
2081 page_no = bt_getid(page->right);
2082 bt_unpinpool (pool);
2094 // standalone program to index file of keys
2095 // then list them onto std-out
2098 void *index_file (void *arg)
2100 uint __stdcall index_file (void *arg)
2103 int line = 0, found = 0, cnt = 0;
2104 uid next, page_no = LEAF_page; // start on first page of leaves
2105 unsigned char key[256];
2106 ThreadArg *args = arg;
2107 int ch, len = 0, slot;
2115 bt = bt_open (args->mgr);
2118 switch(args->type | 0x20)
2121 fprintf(stderr, "started latch mgr audit\n");
2123 fprintf(stderr, "finished latch mgr audit\n");
2127 fprintf(stderr, "started indexing for %s\n", args->infile);
2128 if( in = fopen (args->infile, "rb") )
2129 while( ch = getc(in), ch != EOF )
2134 if( args->num == 1 )
2135 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
2137 else if( args->num )
2138 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
2140 if( bt_insertkey (bt, key, len, 0, line, *tod) )
2141 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
2144 else if( len < 255 )
2146 fprintf(stderr, "finished %s for %d keys\n", args->infile, line);
2150 fprintf(stderr, "started deleting keys for %s\n", args->infile);
2151 if( in = fopen (args->infile, "rb") )
2152 while( ch = getc(in), ch != EOF )
2156 if( args->num == 1 )
2157 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
2159 else if( args->num )
2160 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
2162 if( bt_deletekey (bt, key, len, 0) )
2163 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
2166 else if( len < 255 )
2168 fprintf(stderr, "finished %s for keys, %d \n", args->infile, line);
2172 fprintf(stderr, "started finding keys for %s\n", args->infile);
2173 if( in = fopen (args->infile, "rb") )
2174 while( ch = getc(in), ch != EOF )
2178 if( args->num == 1 )
2179 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
2181 else if( args->num )
2182 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
2184 if( bt_findkey (bt, key, len) )
2187 fprintf(stderr, "Error %d Syserr %d Line: %d\n", bt->err, errno, line), exit(0);
2190 else if( len < 255 )
2192 fprintf(stderr, "finished %s for %d keys, found %d\n", args->infile, line, found);
2198 fprintf(stderr, "started reading\n");
2200 if( slot = bt_startkey (bt, key, len) )
2203 fprintf(stderr, "Error %d in StartKey. Syserror: %d\n", bt->err, errno), exit(0);
2205 while( slot = bt_nextkey (bt, slot) ) {
2206 ptr = bt_key(bt, slot);
2207 fwrite (ptr->key, ptr->len, 1, stdout);
2208 fputc ('\n', stdout);
2214 fprintf(stderr, "started reading\n");
2217 if( bt->pool = bt_pinpool (bt, page_no) )
2218 page = bt_page (bt, bt->pool, page_no);
2221 bt->set = bt_pinlatch (bt, page_no);
2222 bt_lockpage (BtLockRead, bt->set);
2224 next = bt_getid (page->right);
2225 bt_unlockpage (BtLockRead, bt->set);
2226 bt_unpinlatch (bt->set);
2227 bt_unpinpool (bt->pool);
2228 } while( page_no = next );
2230 cnt--; // remove stopper key
2231 fprintf(stderr, " Total keys read %d\n", cnt);
2243 typedef struct timeval timer;
2245 int main (int argc, char **argv)
2247 int idx, cnt, len, slot, err;
2248 int segsize, bits = 16;
2253 time_t start[1], stop[1];
2266 fprintf (stderr, "Usage: %s idx_file Read/Write/Scan/Delete/Find [page_bits mapped_segments seg_bits line_numbers src_file1 src_file2 ... ]\n", argv[0]);
2267 fprintf (stderr, " where page_bits is the page size in bits\n");
2268 fprintf (stderr, " mapped_segments is the number of mmap segments in buffer pool\n");
2269 fprintf (stderr, " seg_bits is the size of individual segments in buffer pool in pages in bits\n");
2270 fprintf (stderr, " line_numbers = 1 to append line numbers to keys\n");
2271 fprintf (stderr, " src_file1 thru src_filen are files of keys separated by newline\n");
2276 gettimeofday(&start, NULL);
2282 bits = atoi(argv[3]);
2285 poolsize = atoi(argv[4]);
2288 fprintf (stderr, "Warning: no mapped_pool\n");
2290 if( poolsize > 65535 )
2291 fprintf (stderr, "Warning: mapped_pool > 65535 segments\n");
2294 segsize = atoi(argv[5]);
2296 segsize = 4; // 16 pages per mmap segment
2299 num = atoi(argv[6]);
2303 threads = malloc (cnt * sizeof(pthread_t));
2305 threads = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, cnt * sizeof(HANDLE));
2307 args = malloc (cnt * sizeof(ThreadArg));
2309 mgr = bt_mgr ((argv[1]), BT_rw, bits, poolsize, segsize, poolsize / 8);
2312 fprintf(stderr, "Index Open Error %s\n", argv[1]);
2318 for( idx = 0; idx < cnt; idx++ ) {
2319 args[idx].infile = argv[idx + 7];
2320 args[idx].type = argv[2][0];
2321 args[idx].mgr = mgr;
2322 args[idx].num = num;
2323 args[idx].idx = idx;
2325 if( err = pthread_create (threads + idx, NULL, index_file, args + idx) )
2326 fprintf(stderr, "Error creating thread %d\n", err);
2328 threads[idx] = (HANDLE)_beginthreadex(NULL, 65536, index_file, args + idx, 0, NULL);
2332 // wait for termination
2335 for( idx = 0; idx < cnt; idx++ )
2336 pthread_join (threads[idx], NULL);
2337 gettimeofday(&stop, NULL);
2338 real_time = 1000.0 * ( stop.tv_sec - start.tv_sec ) + 0.001 * (stop.tv_usec - start.tv_usec );
2340 WaitForMultipleObjects (cnt, threads, TRUE, INFINITE);
2342 for( idx = 0; idx < cnt; idx++ )
2343 CloseHandle(threads[idx]);
2346 real_time = 1000 * (*stop - *start);
2348 fprintf(stderr, " Time to complete: %.2f seconds\n", real_time/1000);