1 // btree version threads2j linux futex concurrency version
2 // with reworked bt_deletekey
5 // author: karl malbrain, malbrain@cal.berkeley.edu
8 This work, including the source code, documentation
9 and related data, is placed into the public domain.
11 The original author is Karl Malbrain.
13 THIS SOFTWARE IS PROVIDED AS-IS WITHOUT WARRANTY
14 OF ANY KIND, NOT EVEN THE IMPLIED WARRANTY OF
15 MERCHANTABILITY. THE AUTHOR OF THIS SOFTWARE,
16 ASSUMES _NO_ RESPONSIBILITY FOR ANY CONSEQUENCE
17 RESULTING FROM THE USE, MODIFICATION, OR
18 REDISTRIBUTION OF THIS SOFTWARE.
21 // Please see the project home page for documentation
22 // code.google.com/p/high-concurrency-btree
24 #define _FILE_OFFSET_BITS 64
25 #define _LARGEFILE64_SOURCE
29 #include <linux/futex.h>
44 #define WIN32_LEAN_AND_MEAN
58 typedef unsigned long long uid;
61 typedef unsigned long long off64_t;
62 typedef unsigned short ushort;
63 typedef unsigned int uint;
66 #define BT_ro 0x6f72 // ro
67 #define BT_rw 0x7772 // rw
69 #define BT_latchtable 128 // number of latch manager slots
71 #define BT_maxbits 24 // maximum page size in bits
72 #define BT_minbits 9 // minimum page size in bits
73 #define BT_minpage (1 << BT_minbits) // minimum page size
74 #define BT_maxpage (1 << BT_maxbits) // maximum page size
77 There are five lock types for each node in three independent sets:
78 1. (set 1) AccessIntent: Sharable. Going to Read the node. Incompatible with NodeDelete.
79 2. (set 1) NodeDelete: Exclusive. About to release the node. Incompatible with AccessIntent.
80 3. (set 2) ReadLock: Sharable. Read the node. Incompatible with WriteLock.
81 4. (set 2) WriteLock: Exclusive. Modify the node. Incompatible with ReadLock and other WriteLocks.
82 5. (set 3) ParentModification: Exclusive. Change the node's parent keys. Incompatible with another ParentModification.
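// Illustrative sketch (not part of the original source): the usual order
// in which these latches are taken while descending one level, using the
// BtLock modes and bt_lockpage/bt_unlockpage defined later in this file.
// bt_loadpage implements the real protocol; this is only a summary, and
// parentmode/childmode are placeholder names for the caller's lock modes.
//
//	bt_lockpage (BtLockAccess, child);		// set 1: announce intent to read the child
//	bt_unlockpage (parentmode, parent);		// release the parent's read/write latch
//	bt_lockpage (childmode, child);			// set 2: read or write the child
//	bt_unlockpage (BtLockAccess, child);	// access intent no longer needed
//
// ParentModification (set 3) is taken separately while fence keys are
// posted to the parent level, as in bt_splitpage and bt_deletekey below.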
94 // mode & definition for latch implementation
97 QueRd = 1, // reader queue
98 QueWr = 2 // writer queue
101 // share is count of read accessors
102 // grant write lock when share == 0
104 volatile typedef struct {
105 unsigned char mutex[1]; // 1 = busy
106 unsigned char write:1; // 1 = exclusive
107 unsigned char readwait:1; // readers are waiting
108 unsigned char writewait:1; // writers are waiting
109 unsigned char filler:5;
110 ushort share; // count of readers holding locks
111 ushort rcnt; // count of waiting readers
112 ushort wcnt; // count of waiting writers
115 // Define the length of the page and key pointers
119 // Page key slot definition.
121 // If BT_maxbits is 15 or less, you can save 4 bytes
122 // for each key stored by making the first two uints
123 // into ushorts. You can also save 4 bytes by removing
124 // the tod field from the key.
126 // Keys are marked dead, but remain on the page until
127 // cleanup is called. The fence key (highest key) for
128 // the page is always present, even after cleanup.
131 uint off:BT_maxbits; // page offset for key start
132 uint dead:1; // set for deleted key
133 uint tod; // time-stamp for key
134 unsigned char id[BtId]; // id associated with key
137 // The key structure occupies space at the upper end of
138 // each page. It's a length byte followed by the key bytes.
143 unsigned char key[1];
146 // The first part of an index page.
147 // It is immediately followed
148 // by the BtSlot array of keys.
150 typedef struct BtPage_ {
151 uint cnt; // count of keys in page
152 uint act; // count of active keys
153 uint min; // next key offset
154 unsigned char bits:7; // page size in bits
155 unsigned char free:1; // page is on free list
156 unsigned char lvl:6; // level of page
157 unsigned char kill:1; // page is being deleted
158 unsigned char dirty:1; // page has deleted keys
159 unsigned char right[BtId]; // page number to right
162 // hash table entries
166 volatile ushort slot; // Latch table entry at head of chain
169 // latch manager table structure
172 BtLatch readwr[1]; // read/write page lock
173 BtLatch access[1]; // Access Intent/Page delete
174 BtLatch parent[1]; // adoption of foster children
175 BtLatch busy[1]; // slot is being moved between chains
176 volatile ushort next; // next entry in hash table chain
177 volatile ushort prev; // prev entry in hash table chain
178 volatile ushort pin; // number of outstanding locks
179 volatile ushort hash; // hash slot entry is under
180 volatile uid page_no; // latch set page number
183 // The memory mapping pool table buffer manager entry
186 uid basepage; // mapped base page number
187 char *map; // mapped memory pointer
188 ushort slot; // slot index in this array
189 ushort pin; // mapped page pin counter
190 void *hashprev; // previous pool entry for the same hash idx
191 void *hashnext; // next pool entry for the same hash idx
193 HANDLE hmap; // Windows memory mapping handle
197 #define CLOCK_bit 0x8000 // bit in pool->pin
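// Illustrative sketch (not part of the original source): the pool pin
// field packs a reference count in its low bits with CLOCK_bit on top,
// as used by bt_pinpool's eviction sweep below:
//
//	pool->pin & CLOCK_bit		// recently referenced, spared one sweep pass
//	pool->pin & ~CLOCK_bit		// outstanding references from bt_pinpool
//
// bt_pinpool ORs in CLOCK_bit and adds one on every hit, bt_unpinpool
// subtracts one, and the sweep clears CLOCK_bit on any pinned entry,
// evicting a segment only once its pin field reaches zero.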
199 // The loadpage interface object
202 uid page_no; // current page number
203 BtPage page; // current page pointer
204 BtPool *pool; // current page pool
205 BtLatchSet *latch; // current page latch set
208 // structure for latch manager on ALLOC_page
211 struct BtPage_ alloc[2]; // next & free page_nos in right ptr
212 BtLatch lock[1]; // allocation area lite latch
213 ushort latchdeployed; // highest number of latch entries deployed
214 ushort nlatchpage; // number of latch pages at BT_latch
215 ushort latchtotal; // number of page latch entries
216 ushort latchhash; // number of latch hash table slots
217 ushort latchvictim; // next latch entry to examine
218 BtHashEntry table[0]; // the hash table
221 // The object structure for Btree access
224 uint page_size; // page size
225 uint page_bits; // page size in bits
226 uint seg_bits; // seg size in pages in bits
227 uint mode; // read-write mode
233 ushort poolcnt; // highest page pool node in use
234 ushort poolmax; // highest page pool node allocated
235 ushort poolmask; // total number of pages in mmap segment - 1
236 ushort evicted; // last evicted hash table slot
237 ushort hashsize; // size of Hash Table for pool entries
238 ushort *hash; // pool index for hash entries
239 BtLatch *latch; // latches for pool hash slots
240 BtLatchMgr *latchmgr; // mapped latch page from allocation page
241 BtLatchSet *latchsets; // mapped latch set from latch pages
242 BtPool *pool; // memory pool page segments
244 HANDLE halloc; // allocation and latch table handle
249 BtMgr *mgr; // buffer manager for thread
250 BtPage cursor; // cached frame for start/next (never mapped)
251 BtPage frame; // spare frame for the page split (never mapped)
252 BtPage zero; // page of zeroes to extend the file (never mapped)
253 uid cursor_page; // current cursor page number
254 unsigned char *mem; // frame, cursor, page memory buffer
255 int found; // last delete or insert was found
256 int err; // last error
270 extern void bt_close (BtDb *bt);
271 extern BtDb *bt_open (BtMgr *mgr);
272 extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod);
273 extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl);
274 extern uid bt_findkey (BtDb *bt, unsigned char *key, uint len);
275 extern uint bt_startkey (BtDb *bt, unsigned char *key, uint len);
276 extern uint bt_nextkey (BtDb *bt, uint slot);
279 extern BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolsize, uint segsize, uint hashsize);
280 void bt_mgrclose (BtMgr *mgr);
282 // Helper functions to return slot values
283 extern BtKey bt_key (BtDb *bt, uint slot);
284 extern uid bt_uid (BtDb *bt, uint slot);
285 extern uint bt_tod (BtDb *bt, uint slot);
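// Illustrative usage sketch (not part of the original source); the file
// name, key and row-id values below are hypothetical:
//
//	BtMgr *mgr = bt_mgr ("test.idx", BT_rw, 15, 8192, 4, 1024);
//	BtDb *bt = bt_open (mgr);
//
//	if( bt_insertkey (bt, (unsigned char *)"alpha", 5, 0, 1, time(NULL)) )
//		fprintf (stderr, "insert error %d\n", bt->err);
//
//	uid id = bt_findkey (bt, (unsigned char *)"alpha", 5);	// 1, or 0 if absent
//
//	bt_close (bt);
//	bt_mgrclose (mgr);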
287 // BTree page number constants
288 #define ALLOC_page 0 // allocation & lock manager hash table
289 #define ROOT_page 1 // root of the btree
290 #define LEAF_page 2 // first page of leaves
291 #define LATCH_page 3 // pages for lock manager
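// Illustrative note (not part of the original source): a freshly created
// index therefore starts as page 0 (ALLOC_page), page 1 (ROOT_page),
// page 2 (LEAF_page), then nlatchpage pages of latch sets beginning at
// LATCH_page; bt_mgr points the allocator's right id just past them
// (MIN_lvl+1+nlatchpage), where ordinary page allocation begins.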
293 // Number of levels to create in a new BTree
297 // The page is allocated from low and hi ends.
298 // The key offsets and row-id's are allocated
299 // from the bottom, while the text of the key
300 // is allocated from the top. When the two
301 // areas meet, the page is split into two.
303 // A key consists of a length byte, two bytes of
304 // index number (0 - 65534), and up to 253 bytes
305 // of key value. Duplicate keys are discarded.
306 // Associated with each key is a 48 bit row-id,
307 // or any other value desired.
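// Illustrative example (not part of the original source): storing the
// hypothetical key "cat" with row-id 5 places these bytes at the slot's
// key offset near the top of the page
//
//	03 'c' 'a' 't'				// length byte, then the key text
//
// while the slot's 6 byte id field holds the row-id most significant
// byte first (see bt_putid / bt_getid below):
//
//	00 00 00 00 00 05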
309 // The b-tree root is always located at page 1.
310 // The first leaf page of level zero is always
311 // located on page 2.
313 // The b-tree pages are linked with next
314 // pointers to facilitate enumerators,
315 // and provide for concurrency.
317 // When the root page fills, it is split in two and
318 // the tree height is raised by a new root at page
319 // one with two keys.
321 // Deleted keys are marked with a dead bit until
322 // page cleanup. The fence key for a node is always present.
325 // Groups of pages from the btree, called segments, are optionally
326 // cached with a memory mapped pool. A hash table is used to keep
327 // track of the cached segments. This behaviour is controlled
328 // by the cache block size parameter to bt_open.
330 // To achieve maximum concurrency one page is locked at a time
331 // as the tree is traversed to find the leaf key in question. The right
332 // page numbers are used in cases where the page is being split or deleted.
335 // Page 0 is dedicated to the lock for new page extensions,
336 // and chains empty pages together for reuse.
338 // The ParentModification lock on a node is obtained to serialize posting
339 // or changing the fence key for a node.
341 // Empty pages are chained together through the ALLOC page and reused.
343 // Access macros to address slot and key values from the page
344 // Page slots use 1 based indexing.
346 #define slotptr(page, slot) (((BtSlot *)(page+1)) + (slot-1))
347 #define keyptr(page, slot) ((BtKey)((unsigned char*)(page) + slotptr(page, slot)->off))
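// Illustrative sketch (not part of the original source): walking the
// live keys on a page with these macros; slots are 1 based and the
// highest slot holds the fence key:
//
//	uint slot;
//
//	for( slot = 1; slot <= page->cnt; slot++ )
//	  if( !slotptr(page, slot)->dead ) {
//		BtKey ptr = keyptr(page, slot);
//		fwrite (ptr->key, ptr->len, 1, stdout);
//		fputc ('\n', stdout);
//	  }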
349 void bt_putid(unsigned char *dest, uid id)
354 dest[i] = (unsigned char)id, id >>= 8;
357 uid bt_getid(unsigned char *src)
362 for( i = 0; i < BtId; i++ )
363 id <<= 8, id |= *src++;
370 int sys_futex(void *addr1, int op, int val1, struct timespec *timeout, void *addr2, int val3)
372 return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
375 // wait until write lock mode is clear
376 // and add 1 to the share count
378 void bt_spinreadlock(BtLatch *latch, int private)
384 private = FUTEX_PRIVATE_FLAG;
387 // obtain latch mutex
388 while( __sync_lock_test_and_set(latch->mutex, 1) )
392 latch->rcnt--, decr = 0;
394 // wait for writers to clear
395 // increment read waiters and wait
397 if( latch->write || latch->writewait ) {
400 prev = *(uint *)latch & ~1;
401 __sync_lock_release (latch->mutex);
402 sys_futex( (uint *)latch, FUTEX_WAIT_BITSET | private, prev, NULL, NULL, QueRd );
407 // increment reader lock count
408 // and release latch mutex
412 __sync_lock_release (latch->mutex);
417 // wait for other read and write latches to relinquish
419 void bt_spinwritelock(BtLatch *latch, int private)
425 private = FUTEX_PRIVATE_FLAG;
428 // obtain latch mutex
429 while( __sync_lock_test_and_set(latch->mutex, 1) )
433 latch->wcnt--, decr = 0;
435 // wait for write and reader count to clear
437 if( latch->write || latch->share ) {
438 latch->writewait = 1;
440 prev = *(uint *)latch & ~1;
441 __sync_lock_release (latch->mutex);
442 sys_futex( (uint *)latch, FUTEX_WAIT_BITSET | private, prev, NULL, NULL, QueWr );
448 // release latch mutex
451 latch->writewait = 0;
454 __sync_lock_release (latch->mutex);
459 // try to obtain write lock
461 // return 1 if obtained,
464 int bt_spinwritetry(BtLatch *latch)
469 // abandon request if not taken
471 if( __sync_lock_test_and_set(latch->mutex, 1) )
474 // see if write mode is available
476 if( !latch->write && !latch->share )
477 ans = latch->write = 1;
481 // release latch mutex
483 __sync_lock_release (latch->mutex);
489 void bt_spinreleasewrite(BtLatch *latch, int private)
492 private = FUTEX_PRIVATE_FLAG;
494 // obtain latch mutex
496 while( __sync_lock_test_and_set(latch->mutex, 1) )
504 if( sys_futex( (uint *)latch, FUTEX_WAKE_BITSET | private, 1, NULL, NULL, QueWr ) )
508 sys_futex( (uint *)latch, FUTEX_WAKE_BITSET | private, INT_MAX, NULL, NULL, QueRd );
510 // release latch mutex
513 __sync_lock_release (latch->mutex);
516 // decrement reader count
518 void bt_spinreleaseread(BtLatch *latch, int private)
521 private = FUTEX_PRIVATE_FLAG;
523 // obtain latch mutex
525 while( __sync_lock_test_and_set(latch->mutex, 1) )
530 // wake one waiting writer
532 if( !latch->share && latch->wcnt )
533 sys_futex( (uint *)latch, FUTEX_WAKE_BITSET | private, 1, NULL, NULL, QueWr );
535 // release latch mutex
537 __sync_lock_release (latch->mutex);
540 // link latch table entry into latch hash table
542 void bt_latchlink (BtDb *bt, ushort hashidx, ushort victim, uid page_no)
544 BtLatchSet *set = bt->mgr->latchsets + victim;
546 if( set->next = bt->mgr->latchmgr->table[hashidx].slot )
547 bt->mgr->latchsets[set->next].prev = victim;
549 bt->mgr->latchmgr->table[hashidx].slot = victim;
550 set->page_no = page_no;
557 void bt_unpinlatch (BtLatchSet *set)
560 __sync_fetch_and_add(&set->pin, -1);
562 _InterlockedDecrement16 (&set->pin);
566 // find existing latchset or inspire new one
567 // return with latchset pinned
569 BtLatchSet *bt_pinlatch (BtDb *bt, uid page_no)
571 ushort hashidx = page_no % bt->mgr->latchmgr->latchhash;
572 ushort slot, avail = 0, victim, idx;
575 // obtain read lock on hash table entry
577 bt_spinreadlock(bt->mgr->latchmgr->table[hashidx].latch, 0);
579 if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do
581 set = bt->mgr->latchsets + slot;
582 if( page_no == set->page_no )
584 } while( slot = set->next );
588 __sync_fetch_and_add(&set->pin, 1);
590 _InterlockedIncrement16 (&set->pin);
594 bt_spinreleaseread (bt->mgr->latchmgr->table[hashidx].latch, 0);
599 // try again, this time with write lock
601 bt_spinwritelock(bt->mgr->latchmgr->table[hashidx].latch, 0);
603 if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do
605 set = bt->mgr->latchsets + slot;
606 if( page_no == set->page_no )
608 if( !set->pin && !avail )
610 } while( slot = set->next );
612 // found our entry, or take over an unpinned one
614 if( slot || (slot = avail) ) {
615 set = bt->mgr->latchsets + slot;
617 __sync_fetch_and_add(&set->pin, 1);
619 _InterlockedIncrement16 (&set->pin);
621 set->page_no = page_no;
622 bt_spinreleasewrite(bt->mgr->latchmgr->table[hashidx].latch, 0);
626 // see if there are any unused entries
628 victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, 1) + 1;
630 victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchdeployed);
633 if( victim < bt->mgr->latchmgr->latchtotal ) {
634 set = bt->mgr->latchsets + victim;
636 __sync_fetch_and_add(&set->pin, 1);
638 _InterlockedIncrement16 (&set->pin);
640 bt_latchlink (bt, hashidx, victim, page_no);
641 bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch, 0);
646 victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, -1);
648 victim = _InterlockedDecrement16 (&bt->mgr->latchmgr->latchdeployed);
650 // find and reuse previous lock entry
654 victim = __sync_fetch_and_add(&bt->mgr->latchmgr->latchvictim, 1);
656 victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchvictim) - 1;
658 // we don't use slot zero
660 if( victim %= bt->mgr->latchmgr->latchtotal )
661 set = bt->mgr->latchsets + victim;
665 // take control of our slot
666 // from other threads
668 if( set->pin || !bt_spinwritetry (set->busy) )
673 // try to get write lock on hash chain
674 // skip entry if not obtained
675 // or has outstanding locks
677 if( !bt_spinwritetry (bt->mgr->latchmgr->table[idx].latch) ) {
678 bt_spinreleasewrite (set->busy, 0);
683 bt_spinreleasewrite (set->busy, 0);
684 bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch, 0);
688 // unlink our available victim from its hash chain
691 bt->mgr->latchsets[set->prev].next = set->next;
693 bt->mgr->latchmgr->table[idx].slot = set->next;
696 bt->mgr->latchsets[set->next].prev = set->prev;
698 bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch, 0);
700 __sync_fetch_and_add(&set->pin, 1);
702 _InterlockedIncrement16 (&set->pin);
704 bt_latchlink (bt, hashidx, victim, page_no);
705 bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch, 0);
706 bt_spinreleasewrite (set->busy, 0);
711 void bt_mgrclose (BtMgr *mgr)
716 // release mapped pages
717 // note that slot zero is never used
719 for( slot = 1; slot < mgr->poolmax; slot++ ) {
720 pool = mgr->pool + slot;
723 munmap (pool->map, (mgr->poolmask+1) << mgr->page_bits);
726 FlushViewOfFile(pool->map, 0);
727 UnmapViewOfFile(pool->map);
728 CloseHandle(pool->hmap);
734 munmap (mgr->latchsets, mgr->latchmgr->nlatchpage * mgr->page_size);
735 munmap (mgr->latchmgr, mgr->page_size);
737 FlushViewOfFile(mgr->latchmgr, 0);
738 UnmapViewOfFile(mgr->latchmgr);
739 CloseHandle(mgr->halloc);
745 free ((void *)mgr->latch);
748 FlushFileBuffers(mgr->idx);
749 CloseHandle(mgr->idx);
750 GlobalFree (mgr->pool);
751 GlobalFree (mgr->hash);
752 GlobalFree ((void *)mgr->latch);
757 // close and release memory
759 void bt_close (BtDb *bt)
766 VirtualFree (bt->mem, 0, MEM_RELEASE);
771 // open/create new btree buffer manager
773 // call with file_name, BT_openmode, bits in page size (e.g. 16),
774 // size of mapped page pool (e.g. 8192)
776 BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolmax, uint segsize, uint hashsize)
778 uint lvl, attr, cacheblk, last, slot, idx;
779 uint nlatchpage, latchhash;
780 BtLatchMgr *latchmgr;
787 SYSTEM_INFO sysinfo[1];
790 // determine sanity of page size and buffer pool
792 if( bits > BT_maxbits )
794 else if( bits < BT_minbits )
798 return NULL; // must have buffer pool
801 mgr = calloc (1, sizeof(BtMgr));
802 mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666);
805 return free(mgr), NULL;
807 cacheblk = 4096; // minimum mmap segment size for unix
810 mgr = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, sizeof(BtMgr));
811 attr = FILE_ATTRIBUTE_NORMAL;
812 mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL);
814 if( mgr->idx == INVALID_HANDLE_VALUE )
815 return GlobalFree(mgr), NULL;
817 // normalize cacheblk to multiple of sysinfo->dwAllocationGranularity
818 GetSystemInfo(sysinfo);
819 cacheblk = sysinfo->dwAllocationGranularity;
823 latchmgr = malloc (BT_maxpage);
826 // read minimum page size to get root info
828 if( size = lseek (mgr->idx, 0L, 2) ) {
829 if( pread(mgr->idx, latchmgr, BT_minpage, 0) == BT_minpage )
830 bits = latchmgr->alloc->bits;
832 return free(mgr), free(latchmgr), NULL;
833 } else if( mode == BT_ro )
834 return free(latchmgr), free (mgr), NULL;
836 latchmgr = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE);
837 size = GetFileSize(mgr->idx, amt);
840 if( !ReadFile(mgr->idx, (char *)latchmgr, BT_minpage, amt, NULL) )
841 return bt_mgrclose (mgr), NULL;
842 bits = latchmgr->alloc->bits;
843 } else if( mode == BT_ro )
844 return bt_mgrclose (mgr), NULL;
847 mgr->page_size = 1 << bits;
848 mgr->page_bits = bits;
850 mgr->poolmax = poolmax;
853 if( cacheblk < mgr->page_size )
854 cacheblk = mgr->page_size;
856 // mask for partial memmaps
858 mgr->poolmask = (cacheblk >> bits) - 1;
860 // see if requested size of pages per memmap is greater
862 if( (1 << segsize) > mgr->poolmask )
863 mgr->poolmask = (1 << segsize) - 1;
867 while( (1 << mgr->seg_bits) <= mgr->poolmask )
870 mgr->hashsize = hashsize;
873 mgr->pool = calloc (poolmax, sizeof(BtPool));
874 mgr->hash = calloc (hashsize, sizeof(ushort));
875 mgr->latch = calloc (hashsize, sizeof(BtLatch));
877 mgr->pool = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, poolmax * sizeof(BtPool));
878 mgr->hash = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(ushort));
879 mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtLatch));
885 // initialize an empty b-tree with latch page, root page, page of leaves
886 // and page(s) of latches
888 memset (latchmgr, 0, 1 << bits);
889 nlatchpage = BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1;
890 bt_putid(latchmgr->alloc->right, MIN_lvl+1+nlatchpage);
891 latchmgr->alloc->bits = mgr->page_bits;
893 latchmgr->nlatchpage = nlatchpage;
894 latchmgr->latchtotal = nlatchpage * (mgr->page_size / sizeof(BtLatchSet));
896 // initialize latch manager
898 latchhash = (mgr->page_size - sizeof(BtLatchMgr)) / sizeof(BtHashEntry);
900 // size of hash table = total number of latchsets
902 if( latchhash > latchmgr->latchtotal )
903 latchhash = latchmgr->latchtotal;
905 latchmgr->latchhash = latchhash;
908 if( write (mgr->idx, latchmgr, mgr->page_size) < mgr->page_size )
909 return bt_mgrclose (mgr), NULL;
911 if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) )
912 return bt_mgrclose (mgr), NULL;
914 if( *amt < mgr->page_size )
915 return bt_mgrclose (mgr), NULL;
918 memset (latchmgr, 0, 1 << bits);
919 latchmgr->alloc->bits = mgr->page_bits;
921 for( lvl=MIN_lvl; lvl--; ) {
922 slotptr(latchmgr->alloc, 1)->off = mgr->page_size - 3;
923 bt_putid(slotptr(latchmgr->alloc, 1)->id, lvl ? MIN_lvl - lvl + 1 : 0); // next(lower) page number
924 key = keyptr(latchmgr->alloc, 1);
925 key->len = 2; // create stopper key
928 latchmgr->alloc->min = mgr->page_size - 3;
929 latchmgr->alloc->lvl = lvl;
930 latchmgr->alloc->cnt = 1;
931 latchmgr->alloc->act = 1;
933 if( write (mgr->idx, latchmgr, mgr->page_size) < mgr->page_size )
934 return bt_mgrclose (mgr), NULL;
936 if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) )
937 return bt_mgrclose (mgr), NULL;
939 if( *amt < mgr->page_size )
940 return bt_mgrclose (mgr), NULL;
944 // clear out latch manager locks
945 // and rest of pages to round out segment
947 memset(latchmgr, 0, mgr->page_size);
950 while( last <= ((MIN_lvl + 1 + nlatchpage) | mgr->poolmask) ) {
952 pwrite(mgr->idx, latchmgr, mgr->page_size, last << mgr->page_bits);
954 SetFilePointer (mgr->idx, last << mgr->page_bits, NULL, FILE_BEGIN);
955 if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) )
956 return bt_mgrclose (mgr), NULL;
957 if( *amt < mgr->page_size )
958 return bt_mgrclose (mgr), NULL;
965 flag = PROT_READ | PROT_WRITE;
966 mgr->latchmgr = mmap (0, mgr->page_size, flag, MAP_SHARED, mgr->idx, ALLOC_page * mgr->page_size);
967 if( mgr->latchmgr == MAP_FAILED )
968 return bt_mgrclose (mgr), NULL;
969 mgr->latchsets = (BtLatchSet *)mmap (0, mgr->latchmgr->nlatchpage * mgr->page_size, flag, MAP_SHARED, mgr->idx, LATCH_page * mgr->page_size);
970 if( mgr->latchsets == MAP_FAILED )
971 return bt_mgrclose (mgr), NULL;
973 flag = PAGE_READWRITE;
974 mgr->halloc = CreateFileMapping(mgr->idx, NULL, flag, 0, (BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1 + LATCH_page) * mgr->page_size, NULL);
976 return bt_mgrclose (mgr), NULL;
978 flag = FILE_MAP_WRITE;
979 mgr->latchmgr = MapViewOfFile(mgr->halloc, flag, 0, 0, (BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1 + LATCH_page) * mgr->page_size);
981 return GetLastError(), bt_mgrclose (mgr), NULL;
983 mgr->latchsets = (void *)((char *)mgr->latchmgr + LATCH_page * mgr->page_size);
989 VirtualFree (latchmgr, 0, MEM_RELEASE);
994 // open BTree access method
995 // based on buffer manager
997 BtDb *bt_open (BtMgr *mgr)
999 BtDb *bt = malloc (sizeof(*bt));
1001 memset (bt, 0, sizeof(*bt));
1004 bt->mem = malloc (3 *mgr->page_size);
1006 bt->mem = VirtualAlloc(NULL, 3 * mgr->page_size, MEM_COMMIT, PAGE_READWRITE);
1008 bt->frame = (BtPage)bt->mem;
1009 bt->zero = (BtPage)(bt->mem + 1 * mgr->page_size);
1010 bt->cursor = (BtPage)(bt->mem + 2 * mgr->page_size);
1012 memset (bt->zero, 0, mgr->page_size);
1016 // compare two keys, returning > 0, = 0, or < 0
1017 // as the comparison value
1019 int keycmp (BtKey key1, unsigned char *key2, uint len2)
1021 uint len1 = key1->len;
1024 if( ans = memcmp (key1->key, key2, len1 > len2 ? len2 : len1) )
1037 // find segment in pool
1038 // must be called with hashslot idx locked
1039 // return NULL if not there
1040 // otherwise return node
1042 BtPool *bt_findpool(BtDb *bt, uid page_no, uint idx)
1047 // compute start of hash chain in pool
1049 if( slot = bt->mgr->hash[idx] )
1050 pool = bt->mgr->pool + slot;
1054 page_no &= ~bt->mgr->poolmask;
1056 while( pool->basepage != page_no )
1057 if( pool = pool->hashnext )
1065 // add segment to hash table
1067 void bt_linkhash(BtDb *bt, BtPool *pool, uid page_no, int idx)
1072 pool->hashprev = pool->hashnext = NULL;
1073 pool->basepage = page_no & ~bt->mgr->poolmask;
1074 pool->pin = CLOCK_bit + 1;
1076 if( slot = bt->mgr->hash[idx] ) {
1077 node = bt->mgr->pool + slot;
1078 pool->hashnext = node;
1079 node->hashprev = pool;
1082 bt->mgr->hash[idx] = pool->slot;
1085 // map new buffer pool segment to virtual memory
1087 BTERR bt_mapsegment(BtDb *bt, BtPool *pool, uid page_no)
1089 off64_t off = (page_no & ~bt->mgr->poolmask) << bt->mgr->page_bits;
1090 off64_t limit = off + ((bt->mgr->poolmask+1) << bt->mgr->page_bits);
1094 flag = PROT_READ | ( bt->mgr->mode == BT_ro ? 0 : PROT_WRITE );
1095 pool->map = mmap (0, (bt->mgr->poolmask+1) << bt->mgr->page_bits, flag, MAP_SHARED, bt->mgr->idx, off);
1096 if( pool->map == MAP_FAILED )
1097 return bt->err = BTERR_map;
1100 flag = ( bt->mgr->mode == BT_ro ? PAGE_READONLY : PAGE_READWRITE );
1101 pool->hmap = CreateFileMapping(bt->mgr->idx, NULL, flag, (DWORD)(limit >> 32), (DWORD)limit, NULL);
1103 return bt->err = BTERR_map;
1105 flag = ( bt->mgr->mode == BT_ro ? FILE_MAP_READ : FILE_MAP_WRITE );
1106 pool->map = MapViewOfFile(pool->hmap, flag, (DWORD)(off >> 32), (DWORD)off, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
1108 return bt->err = BTERR_map;
1113 // calculate page within pool
1115 BtPage bt_page (BtDb *bt, BtPool *pool, uid page_no)
1117 uint subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping
1120 page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits));
1126 void bt_unpinpool (BtPool *pool)
1129 __sync_fetch_and_add(&pool->pin, -1);
1131 _InterlockedDecrement16 (&pool->pin);
1135 // find or place requested page in segment-pool
1136 // return pool table entry, incrementing pin
1138 BtPool *bt_pinpool(BtDb *bt, uid page_no)
1140 uint slot, hashidx, idx, victim;
1141 BtPool *pool, *node, *next;
1143 // lock hash table chain
1145 hashidx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
1146 bt_spinreadlock (&bt->mgr->latch[hashidx], 1);
1148 // look up in hash table
1150 if( pool = bt_findpool(bt, page_no, hashidx) ) {
1152 __sync_fetch_and_or(&pool->pin, CLOCK_bit);
1153 __sync_fetch_and_add(&pool->pin, 1);
1155 _InterlockedOr16 (&pool->pin, CLOCK_bit);
1156 _InterlockedIncrement16 (&pool->pin);
1158 bt_spinreleaseread (&bt->mgr->latch[hashidx], 1);
1162 // allocate a new pool node
1163 // and add to hash table
1166 slot = __sync_fetch_and_add(&bt->mgr->poolcnt, 1);
1168 slot = _InterlockedIncrement16 (&bt->mgr->poolcnt) - 1;
1171 if( ++slot < bt->mgr->poolmax ) {
1172 pool = bt->mgr->pool + slot;
1175 if( bt_mapsegment(bt, pool, page_no) )
1178 bt_linkhash(bt, pool, page_no, hashidx);
1179 bt_spinreleasewrite (&bt->mgr->latch[hashidx], 1);
1183 // pool table is full
1184 // find best pool entry to evict
1187 __sync_fetch_and_add(&bt->mgr->poolcnt, -1);
1189 _InterlockedDecrement16 (&bt->mgr->poolcnt);
1194 victim = __sync_fetch_and_add(&bt->mgr->evicted, 1);
1196 victim = _InterlockedIncrement16 (&bt->mgr->evicted) - 1;
1198 victim %= bt->mgr->poolmax;
1199 pool = bt->mgr->pool + victim;
1200 idx = (uint)(pool->basepage >> bt->mgr->seg_bits) % bt->mgr->hashsize;
1205 // try to get write lock
1206 // skip entry if not obtained
1208 if( !bt_spinwritetry (&bt->mgr->latch[idx]) )
1211 // skip this entry if the page is pinned
1213 // or clock bit is set
1217 __sync_fetch_and_and(&pool->pin, ~CLOCK_bit);
1219 _InterlockedAnd16 (&pool->pin, ~CLOCK_bit);
1221 bt_spinreleasewrite (&bt->mgr->latch[idx], 1);
1225 // unlink victim pool node from hash table
1227 if( node = pool->hashprev )
1228 node->hashnext = pool->hashnext;
1229 else if( node = pool->hashnext )
1230 bt->mgr->hash[idx] = node->slot;
1232 bt->mgr->hash[idx] = 0;
1234 if( node = pool->hashnext )
1235 node->hashprev = pool->hashprev;
1237 bt_spinreleasewrite (&bt->mgr->latch[idx], 1);
1239 // remove old file mapping
1241 munmap (pool->map, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
1243 FlushViewOfFile(pool->map, 0);
1244 UnmapViewOfFile(pool->map);
1245 CloseHandle(pool->hmap);
1249 // create new pool mapping
1250 // and link into hash table
1252 if( bt_mapsegment(bt, pool, page_no) )
1255 bt_linkhash(bt, pool, page_no, hashidx);
1256 bt_spinreleasewrite (&bt->mgr->latch[hashidx], 1);
1261 // place write, read, or parent lock on requested page_no.
1263 void bt_lockpage(BtLock mode, BtLatchSet *set)
1267 bt_spinreadlock (set->readwr, 0);
1270 bt_spinwritelock (set->readwr, 0);
1273 bt_spinreadlock (set->access, 0);
1276 bt_spinwritelock (set->access, 0);
1279 bt_spinwritelock (set->parent, 0);
1281 case BtLockParentWrt:
1282 bt_spinwritelock (set->parent, 0);
1283 bt_spinwritelock (set->readwr, 0);
1288 // remove write, read, or parent lock on requested page
1290 void bt_unlockpage(BtLock mode, BtLatchSet *set)
1294 bt_spinreleaseread (set->readwr, 0);
1297 bt_spinreleasewrite (set->readwr, 0);
1300 bt_spinreleaseread (set->access, 0);
1303 bt_spinreleasewrite (set->access, 0);
1306 bt_spinreleasewrite (set->parent, 0);
1308 case BtLockParentWrt:
1309 bt_spinreleasewrite (set->parent, 0);
1310 bt_spinreleasewrite (set->readwr, 0);
1315 // allocate a new page and write page into it
1317 uid bt_newpage(BtDb *bt, BtPage page)
1323 // lock allocation page
1325 bt_spinwritelock(bt->mgr->latchmgr->lock, 0);
1327 // use empty chain first
1328 // else allocate empty page
1330 if( new_page = bt_getid(bt->mgr->latchmgr->alloc[1].right) ) {
1331 if( set->pool = bt_pinpool (bt, new_page) )
1332 set->page = bt_page (bt, set->pool, new_page);
1336 bt_putid(bt->mgr->latchmgr->alloc[1].right, bt_getid(set->page->right));
1337 bt_unpinpool (set->pool);
1340 new_page = bt_getid(bt->mgr->latchmgr->alloc->right);
1341 bt_putid(bt->mgr->latchmgr->alloc->right, new_page+1);
1345 if( pwrite(bt->mgr->idx, page, bt->mgr->page_size, new_page << bt->mgr->page_bits) < bt->mgr->page_size )
1346 return bt->err = BTERR_wrt, 0;
1348 // if writing first page of pool block, zero last page in the block
1350 if( !reuse && bt->mgr->poolmask > 0 && (new_page & bt->mgr->poolmask) == 0 )
1352 // use zero buffer to write zeros
1353 if( pwrite(bt->mgr->idx,bt->zero, bt->mgr->page_size, (new_page | bt->mgr->poolmask) << bt->mgr->page_bits) < bt->mgr->page_size )
1354 return bt->err = BTERR_wrt, 0;
1357 // bring new page into pool and copy page.
1358 // this will extend the file into the new pages.
1360 if( set->pool = bt_pinpool (bt, new_page) )
1361 set->page = bt_page (bt, set->pool, new_page);
1365 memcpy(set->page, page, bt->mgr->page_size);
1366 bt_unpinpool (set->pool);
1368 // unlock allocation latch and return new page no
1370 bt_spinreleasewrite(bt->mgr->latchmgr->lock, 0);
1374 // find slot in page for given key at a given level
1376 int bt_findslot (BtPageSet *set, unsigned char *key, uint len)
1378 uint diff, higher = set->page->cnt, low = 1, slot;
1381 // make stopper key an infinite fence value
1383 if( bt_getid (set->page->right) )
1388 // low is the lowest candidate.
1389 // loop ends when they meet
1391 // higher is already
1392 // tested as .ge. the passed key.
1394 while( diff = higher - low ) {
1395 slot = low + ( diff >> 1 );
1396 if( keycmp (keyptr(set->page, slot), key, len) < 0 )
1399 higher = slot, good++;
1402 // return zero if key is on right link page
1404 return good ? higher : 0;
1407 // find and load page at given level for given key
1408 // leave page rd or wr locked as requested
1410 int bt_loadpage (BtDb *bt, BtPageSet *set, unsigned char *key, uint len, uint lvl, BtLock lock)
1412 uid page_no = ROOT_page, prevpage = 0;
1413 uint drill = 0xff, slot;
1414 BtLatchSet *prevlatch;
1415 uint mode, prevmode;
1418 // start at root of btree and drill down
1421 // determine lock mode of drill level
1422 mode = (drill == lvl) ? lock : BtLockRead;
1424 set->latch = bt_pinlatch (bt, page_no);
1425 set->page_no = page_no;
1427 // pin page contents
1429 if( set->pool = bt_pinpool (bt, page_no) )
1430 set->page = bt_page (bt, set->pool, page_no);
1434 // obtain access lock using lock chaining with Access mode
1436 if( page_no > ROOT_page )
1437 bt_lockpage(BtLockAccess, set->latch);
1439 // release & unpin parent page
1442 bt_unlockpage(prevmode, prevlatch);
1443 bt_unpinlatch (prevlatch);
1444 bt_unpinpool (prevpool);
1448 // obtain read lock using lock chaining
1450 bt_lockpage(mode, set->latch);
1452 if( set->page->free )
1453 return bt->err = BTERR_struct, 0;
1455 if( page_no > ROOT_page )
1456 bt_unlockpage(BtLockAccess, set->latch);
1458 // re-read and re-lock root after determining actual level of root
1460 if( set->page->lvl != drill) {
1461 if( set->page_no != ROOT_page )
1462 return bt->err = BTERR_struct, 0;
1464 drill = set->page->lvl;
1466 if( lock != BtLockRead && drill == lvl ) {
1467 bt_unlockpage(mode, set->latch);
1468 bt_unpinlatch (set->latch);
1469 bt_unpinpool (set->pool);
1474 prevpage = set->page_no;
1475 prevlatch = set->latch;
1476 prevpool = set->pool;
1479 // find key on page at this level
1480 // and descend to requested level
1482 if( !set->page->kill )
1483 if( slot = bt_findslot (set, key, len) ) {
1487 while( slotptr(set->page, slot)->dead )
1488 if( slot++ < set->page->cnt )
1493 page_no = bt_getid(slotptr(set->page, slot)->id);
1498 // or slide right into next page
1501 page_no = bt_getid(set->page->right);
1505 // return error on end of right chain
1507 bt->err = BTERR_struct;
1508 return 0; // return error
1511 // return page to free list
1512 // page must be delete & write locked
1514 void bt_freepage (BtDb *bt, BtPageSet *set)
1516 // lock allocation page
1518 bt_spinwritelock (bt->mgr->latchmgr->lock, 0);
1520 // store chain in second right
1521 bt_putid(set->page->right, bt_getid(bt->mgr->latchmgr->alloc[1].right));
1522 bt_putid(bt->mgr->latchmgr->alloc[1].right, set->page_no);
1523 set->page->free = 1;
1525 // unlock released page
1527 bt_unlockpage (BtLockDelete, set->latch);
1528 bt_unlockpage (BtLockWrite, set->latch);
1529 bt_unpinlatch (set->latch);
1530 bt_unpinpool (set->pool);
1532 // unlock allocation page
1534 bt_spinreleasewrite (bt->mgr->latchmgr->lock, 0);
1537 // a fence key was deleted from a page
1538 // push new fence value upwards
1540 BTERR bt_fixfence (BtDb *bt, BtPageSet *set, uint lvl)
1542 unsigned char leftkey[256], rightkey[256];
1547 // remove the old fence value
1549 ptr = keyptr(set->page, set->page->cnt);
1550 memcpy (rightkey, ptr, ptr->len + 1);
1552 memset (slotptr(set->page, set->page->cnt--), 0, sizeof(BtSlot));
1553 set->page->dirty = 1;
1555 ptr = keyptr(set->page, set->page->cnt);
1556 memcpy (leftkey, ptr, ptr->len + 1);
1557 page_no = set->page_no;
1559 bt_unlockpage (BtLockWrite, set->latch);
1561 // insert new (now smaller) fence key
1563 if( bt_insertkey (bt, leftkey+1, *leftkey, lvl+1, page_no, time(NULL)) )
1566 // now delete old fence key
1568 if( bt_deletekey (bt, rightkey+1, *rightkey, lvl+1) )
1571 bt_unlockpage (BtLockParent, set->latch);
1572 bt_unpinlatch(set->latch);
1573 bt_unpinpool (set->pool);
1577 // root has a single child
1578 // collapse a level from the tree
1580 BTERR bt_collapseroot (BtDb *bt, BtPageSet *root)
1585 // find the child entry and promote as new root contents
1588 for( idx = 0; idx++ < root->page->cnt; )
1589 if( !slotptr(root->page, idx)->dead )
1592 child->page_no = bt_getid (slotptr(root->page, idx)->id);
1594 child->latch = bt_pinlatch (bt, child->page_no);
1595 bt_lockpage (BtLockDelete, child->latch);
1596 bt_lockpage (BtLockWrite, child->latch);
1598 if( child->pool = bt_pinpool (bt, child->page_no) )
1599 child->page = bt_page (bt, child->pool, child->page_no);
1603 memcpy (root->page, child->page, bt->mgr->page_size);
1604 bt_freepage (bt, child);
1606 } while( root->page->lvl > 1 && root->page->act == 1 );
1608 bt_unlockpage (BtLockParentWrt, root->latch);
1609 bt_unpinlatch (root->latch);
1610 bt_unpinpool (root->pool);
1614 // find and delete key on page by marking delete flag bit
1615 // if page becomes empty, delete it from the btree
1617 BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl)
1619 unsigned char lowerfence[256], higherfence[256];
1620 uint slot, idx, dirty = 0, fence, found;
1621 BtPageSet set[1], right[1];
1624 if( slot = bt_loadpage (bt, set, key, len, lvl, BtLockParentWrt) )
1625 ptr = keyptr(set->page, slot);
1629 // are we deleting a fence slot?
1631 fence = slot == set->page->cnt;
1633 // if key is found delete it, otherwise ignore request
1635 if( found = !keycmp (ptr, key, len) )
1636 if( found = slotptr(set->page, slot)->dead == 0 ) {
1637 dirty = slotptr(set->page, slot)->dead = 1;
1638 set->page->dirty = 1;
1641 // collapse empty slots
1643 while( idx = set->page->cnt - 1 )
1644 if( slotptr(set->page, idx)->dead ) {
1645 *slotptr(set->page, idx) = *slotptr(set->page, idx + 1);
1646 memset (slotptr(set->page, set->page->cnt--), 0, sizeof(BtSlot));
1651 // did we delete a fence key in an upper level?
1653 if( dirty && lvl && set->page->act && fence )
1654 if( bt_fixfence (bt, set, lvl) )
1657 return bt->found = found, 0;
1659 // is this a collapsed root?
1661 if( lvl > 1 && set->page_no == ROOT_page && set->page->act == 1 )
1662 if( bt_collapseroot (bt, set) )
1665 return bt->found = found, 0;
1667 // return if page is not empty
1669 if( set->page->act ) {
1670 bt_unlockpage(BtLockParentWrt, set->latch);
1671 bt_unpinlatch (set->latch);
1672 bt_unpinpool (set->pool);
1673 return bt->found = found, 0;
1676 // cache copy of fence key
1677 // to post in parent
1679 ptr = keyptr(set->page, set->page->cnt);
1680 memcpy (lowerfence, ptr, ptr->len + 1);
1682 // obtain lock on right page
1684 right->page_no = bt_getid(set->page->right);
1685 right->latch = bt_pinlatch (bt, right->page_no);
1686 bt_lockpage (BtLockParentWrt, right->latch);
1688 // pin page contents
1690 if( right->pool = bt_pinpool (bt, right->page_no) )
1691 right->page = bt_page (bt, right->pool, right->page_no);
1695 if( right->page->kill )
1696 return bt->err = BTERR_struct;
1698 // pull contents of right peer into our empty page
1700 memcpy (set->page, right->page, bt->mgr->page_size);
1702 // cache copy of key to update
1704 ptr = keyptr(right->page, right->page->cnt);
1705 memcpy (higherfence, ptr, ptr->len + 1);
1707 // mark right page deleted and point it to left page
1708 // until we can post parent updates
1710 bt_putid (right->page->right, set->page_no);
1711 right->page->kill = 1;
1713 bt_unlockpage (BtLockWrite, right->latch);
1714 bt_unlockpage (BtLockWrite, set->latch);
1716 // redirect higher key directly to our new node contents
1718 if( bt_insertkey (bt, higherfence+1, *higherfence, lvl+1, set->page_no, time(NULL)) )
1721 // delete old lower key to our node
1723 if( bt_deletekey (bt, lowerfence+1, *lowerfence, lvl+1) )
1726 // obtain delete and write locks to right node
1728 bt_unlockpage (BtLockParent, right->latch);
1729 bt_lockpage (BtLockDelete, right->latch);
1730 bt_lockpage (BtLockWrite, right->latch);
1731 bt_freepage (bt, right);
1733 bt_unlockpage (BtLockParent, set->latch);
1734 bt_unpinlatch (set->latch);
1735 bt_unpinpool (set->pool);
1740 // find key in leaf level and return row-id
1742 uid bt_findkey (BtDb *bt, unsigned char *key, uint len)
1749 if( slot = bt_loadpage (bt, set, key, len, 0, BtLockRead) )
1750 ptr = keyptr(set->page, slot);
1754 // if key exists, return row-id
1755 // otherwise return 0
1757 if( slot <= set->page->cnt )
1758 if( !keycmp (ptr, key, len) )
1759 id = bt_getid(slotptr(set->page,slot)->id);
1761 bt_unlockpage (BtLockRead, set->latch);
1762 bt_unpinlatch (set->latch);
1763 bt_unpinpool (set->pool);
1767 // check page for space available,
1768 // clean if necessary and return
1769 // 0 - page needs splitting
1770 // >0 new slot value
1772 uint bt_cleanpage(BtDb *bt, BtPage page, uint amt, uint slot)
1774 uint nxt = bt->mgr->page_size;
1775 uint cnt = 0, idx = 0;
1776 uint max = page->cnt;
1780 if( page->min >= (max+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1783 // skip cleanup if nothing to reclaim
1788 memcpy (bt->frame, page, bt->mgr->page_size);
1790 // skip page info and set rest of page to zero
1792 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1796 // try cleaning up page first
1797 // by removing deleted keys
1799 while( cnt++ < max ) {
1802 if( cnt < max && slotptr(bt->frame,cnt)->dead )
1805 // copy the key across
1807 key = keyptr(bt->frame, cnt);
1808 nxt -= key->len + 1;
1809 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1813 memcpy(slotptr(page, ++idx)->id, slotptr(bt->frame, cnt)->id, BtId);
1814 if( !(slotptr(page, idx)->dead = slotptr(bt->frame, cnt)->dead) )
1816 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1817 slotptr(page, idx)->off = nxt;
1823 // see if page has enough space now, or does it need splitting?
1825 if( page->min >= (idx+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1831 // split the root and raise the height of the btree
1833 BTERR bt_splitroot(BtDb *bt, BtPageSet *root, unsigned char *leftkey, uid page_no2)
1835 uint nxt = bt->mgr->page_size;
1838 // Obtain an empty page to use, and copy the current
1839 // root contents into it, i.e. the lower keys
1841 if( !(left = bt_newpage(bt, root->page)) )
1844 // preserve the page info at the bottom
1845 // of higher keys and set rest to zero
1847 memset(root->page+1, 0, bt->mgr->page_size - sizeof(*root->page));
1849 // insert lower keys page fence key on newroot page as first key
1851 nxt -= *leftkey + 1;
1852 memcpy ((unsigned char *)root->page + nxt, leftkey, *leftkey + 1);
1853 bt_putid(slotptr(root->page, 1)->id, left);
1854 slotptr(root->page, 1)->off = nxt;
1856 // insert stopper key on newroot page
1857 // and increase the root height
1860 ((unsigned char *)root->page)[nxt] = 2;
1861 ((unsigned char *)root->page)[nxt+1] = 0xff;
1862 ((unsigned char *)root->page)[nxt+2] = 0xff;
1863 bt_putid(slotptr(root->page, 2)->id, page_no2);
1864 slotptr(root->page, 2)->off = nxt;
1866 bt_putid(root->page->right, 0);
1867 root->page->min = nxt; // reset lowest used offset and key count
1868 root->page->cnt = 2;
1869 root->page->act = 2;
1872 // release and unpin root
1874 bt_unlockpage(BtLockWrite, root->latch);
1875 bt_unpinlatch (root->latch);
1876 bt_unpinpool (root->pool);
1880 // split already locked full node
1883 BTERR bt_splitpage (BtDb *bt, BtPageSet *set)
1885 uint cnt = 0, idx = 0, max, nxt = bt->mgr->page_size;
1886 unsigned char fencekey[256], rightkey[256];
1887 uint lvl = set->page->lvl;
1892 // split higher half of keys to bt->frame
1894 memset (bt->frame, 0, bt->mgr->page_size);
1895 max = set->page->cnt;
1899 while( cnt++ < max ) {
1900 key = keyptr(set->page, cnt);
1901 nxt -= key->len + 1;
1902 memcpy ((unsigned char *)bt->frame + nxt, key, key->len + 1);
1904 memcpy(slotptr(bt->frame,++idx)->id, slotptr(set->page,cnt)->id, BtId);
1905 if( !(slotptr(bt->frame, idx)->dead = slotptr(set->page, cnt)->dead) )
1907 slotptr(bt->frame, idx)->tod = slotptr(set->page, cnt)->tod;
1908 slotptr(bt->frame, idx)->off = nxt;
1911 // remember existing fence key for new page to the right
1913 memcpy (rightkey, key, key->len + 1);
1915 bt->frame->bits = bt->mgr->page_bits;
1916 bt->frame->min = nxt;
1917 bt->frame->cnt = idx;
1918 bt->frame->lvl = lvl;
1922 if( set->page_no > ROOT_page )
1923 memcpy (bt->frame->right, set->page->right, BtId);
1925 // get new free page and write higher keys to it.
1927 if( !(right->page_no = bt_newpage(bt, bt->frame)) )
1930 // update lower keys to continue in old page
1932 memcpy (bt->frame, set->page, bt->mgr->page_size);
1933 memset (set->page+1, 0, bt->mgr->page_size - sizeof(*set->page));
1934 nxt = bt->mgr->page_size;
1935 set->page->dirty = 0;
1940 // assemble page of smaller keys
1942 while( cnt++ < max / 2 ) {
1943 key = keyptr(bt->frame, cnt);
1944 nxt -= key->len + 1;
1945 memcpy ((unsigned char *)set->page + nxt, key, key->len + 1);
1946 memcpy(slotptr(set->page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId);
1947 slotptr(set->page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1948 slotptr(set->page, idx)->off = nxt;
1952 // remember fence key for smaller page
1954 memcpy(fencekey, key, key->len + 1);
1956 bt_putid(set->page->right, right->page_no);
1957 set->page->min = nxt;
1958 set->page->cnt = idx;
1960 // if current page is the root page, split it
1962 if( set->page_no == ROOT_page )
1963 return bt_splitroot (bt, set, fencekey, right->page_no);
1965 // insert new fences in their parent pages
1967 right->latch = bt_pinlatch (bt, right->page_no);
1968 bt_lockpage (BtLockParent, right->latch);
1970 bt_lockpage (BtLockParent, set->latch);
1971 bt_unlockpage (BtLockWrite, set->latch);
1973 // insert new fence for reformulated left block of smaller keys
1975 if( bt_insertkey (bt, fencekey+1, *fencekey, lvl+1, set->page_no, time(NULL)) )
1978 // switch fence for right block of larger keys to new right page
1980 if( bt_insertkey (bt, rightkey+1, *rightkey, lvl+1, right->page_no, time(NULL)) )
1983 bt_unlockpage (BtLockParent, set->latch);
1984 bt_unpinlatch (set->latch);
1985 bt_unpinpool (set->pool);
1987 bt_unlockpage (BtLockParent, right->latch);
1988 bt_unpinlatch (right->latch);
1991 // Insert new key into the btree at given level.
1993 BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod)
2000 if( slot = bt_loadpage (bt, set, key, len, lvl, BtLockWrite) )
2001 ptr = keyptr(set->page, slot);
2005 bt->err = BTERR_ovflw;
2009 // if key already exists, update id and return
2011 if( !keycmp (ptr, key, len) ) {
2012 if( slotptr(set->page, slot)->dead )
2014 slotptr(set->page, slot)->dead = 0;
2015 slotptr(set->page, slot)->tod = tod;
2016 bt_putid(slotptr(set->page,slot)->id, id);
2017 bt_unlockpage(BtLockWrite, set->latch);
2018 bt_unpinlatch (set->latch);
2019 bt_unpinpool (set->pool);
2023 // check if page has enough space
2025 if( slot = bt_cleanpage (bt, set->page, len, slot) )
2028 if( bt_splitpage (bt, set) )
2032 // calculate next available slot and copy key into page
2034 set->page->min -= len + 1; // reset lowest used offset
2035 ((unsigned char *)set->page)[set->page->min] = len;
2036 memcpy ((unsigned char *)set->page + set->page->min +1, key, len );
2038 for( idx = slot; idx < set->page->cnt; idx++ )
2039 if( slotptr(set->page, idx)->dead )
2042 // now insert key into array before slot
2044 if( idx == set->page->cnt )
2045 idx++, set->page->cnt++;
2050 *slotptr(set->page, idx) = *slotptr(set->page, idx -1), idx--;
2052 bt_putid(slotptr(set->page,slot)->id, id);
2053 slotptr(set->page, slot)->off = set->page->min;
2054 slotptr(set->page, slot)->tod = tod;
2055 slotptr(set->page, slot)->dead = 0;
2057 bt_unlockpage (BtLockWrite, set->latch);
2058 bt_unpinlatch (set->latch);
2059 bt_unpinpool (set->pool);
2063 // cache page of keys into cursor and return starting slot for given key
2065 uint bt_startkey (BtDb *bt, unsigned char *key, uint len)
2070 // cache page for retrieval
2072 if( slot = bt_loadpage (bt, set, key, len, 0, BtLockRead) )
2073 memcpy (bt->cursor, set->page, bt->mgr->page_size);
2077 bt->cursor_page = set->page_no;
2079 bt_unlockpage(BtLockRead, set->latch);
2080 bt_unpinlatch (set->latch);
2081 bt_unpinpool (set->pool);
2085 // return next slot for cursor page
2086 // or slide cursor right into next page
2088 uint bt_nextkey (BtDb *bt, uint slot)
2094 right = bt_getid(bt->cursor->right);
2096 while( slot++ < bt->cursor->cnt )
2097 if( slotptr(bt->cursor,slot)->dead )
2099 else if( right || (slot < bt->cursor->cnt) ) // skip infinite stopper
2107 bt->cursor_page = right;
2109 if( set->pool = bt_pinpool (bt, right) )
2110 set->page = bt_page (bt, set->pool, right);
2114 set->latch = bt_pinlatch (bt, right);
2115 bt_lockpage(BtLockRead, set->latch);
2117 memcpy (bt->cursor, set->page, bt->mgr->page_size);
2119 bt_unlockpage(BtLockRead, set->latch);
2120 bt_unpinlatch (set->latch);
2121 bt_unpinpool (set->pool);
2129 BtKey bt_key(BtDb *bt, uint slot)
2131 return keyptr(bt->cursor, slot);
2134 uid bt_uid(BtDb *bt, uint slot)
2136 return bt_getid(slotptr(bt->cursor,slot)->id);
2139 uint bt_tod(BtDb *bt, uint slot)
2141 return slotptr(bt->cursor,slot)->tod;
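// Illustrative sketch (not part of the original source): a range scan
// with the cursor interface, starting from a hypothetical key "cat".
// bt_nextkey returns the slot after the one passed in, so the starting
// slot is decremented first so that the first call returns it:
//
//	uint slot;
//	BtKey ptr;
//
//	if( slot = bt_startkey (bt, (unsigned char *)"cat", 3) )
//	  slot--;
//
//	while( slot = bt_nextkey (bt, slot) ) {
//	  ptr = bt_key (bt, slot);
//	  fwrite (ptr->key, ptr->len, 1, stdout);
//	  fputc ('\n', stdout);
//	}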
2147 double getCpuTime(int type)
2150 FILETIME xittime[1];
2151 FILETIME systime[1];
2152 FILETIME usrtime[1];
2153 SYSTEMTIME timeconv[1];
2156 memset (timeconv, 0, sizeof(SYSTEMTIME));
2160 GetSystemTimeAsFileTime (xittime);
2161 FileTimeToSystemTime (xittime, timeconv);
2162 ans = (double)timeconv->wDayOfWeek * 3600 * 24;
2165 GetProcessTimes (GetCurrentProcess(), crtime, xittime, systime, usrtime);
2166 FileTimeToSystemTime (usrtime, timeconv);
2169 GetProcessTimes (GetCurrentProcess(), crtime, xittime, systime, usrtime);
2170 FileTimeToSystemTime (systime, timeconv);
2174 ans += (double)timeconv->wHour * 3600;
2175 ans += (double)timeconv->wMinute * 60;
2176 ans += (double)timeconv->wSecond;
2177 ans += (double)timeconv->wMilliseconds / 1000;
2182 #include <sys/resource.h>
2184 double getCpuTime(int type)
2186 struct rusage used[1];
2187 struct timeval tv[1];
2191 gettimeofday(tv, NULL);
2192 return (double)tv->tv_sec + (double)tv->tv_usec / 1000000;
2195 getrusage(RUSAGE_SELF, used);
2196 return (double)used->ru_utime.tv_sec + (double)used->ru_utime.tv_usec / 1000000;
2199 getrusage(RUSAGE_SELF, used);
2200 return (double)used->ru_stime.tv_sec + (double)used->ru_stime.tv_usec / 1000000;
2207 void bt_latchaudit (BtDb *bt)
2209 ushort idx, hashidx;
2215 if( *(uint *)(bt->mgr->latchmgr->lock) )
2216 fprintf(stderr, "Alloc page locked\n");
2217 *(uint *)(bt->mgr->latchmgr->lock) = 0;
2219 for( idx = 1; idx <= bt->mgr->latchmgr->latchdeployed; idx++ ) {
2220 latch = bt->mgr->latchsets + idx;
2221 if( *(uint *)latch->readwr )
2222 fprintf(stderr, "latchset %d rwlocked for page %.8llx\n", idx, latch->page_no);
2223 *(uint *)latch->readwr = 0;
2225 if( *(uint *)latch->access )
2226 fprintf(stderr, "latchset %d accesslocked for page %.8llx\n", idx, latch->page_no);
2227 *(uint *)latch->access = 0;
2229 if( *(uint *)latch->parent )
2230 fprintf(stderr, "latchset %d parentlocked for page %.8llx\n", idx, latch->page_no);
2231 *(uint *)latch->parent = 0;
2234 fprintf(stderr, "latchset %d pinned for page %.8llx\n", idx, latch->page_no);
2239 for( hashidx = 0; hashidx < bt->mgr->latchmgr->latchhash; hashidx++ ) {
2240 if( *(uint *)(bt->mgr->latchmgr->table[hashidx].latch) )
2241 fprintf(stderr, "hash entry %d locked\n", hashidx);
2243 *(uint *)(bt->mgr->latchmgr->table[hashidx].latch) = 0;
2245 if( idx = bt->mgr->latchmgr->table[hashidx].slot ) do {
2246 latch = bt->mgr->latchsets + idx;
2247 if( *(uint *)latch->busy )
2248 fprintf(stderr, "latchset %d busylocked for page %.8llx\n", idx, latch->page_no);
2249 *(uint *)latch->busy = 0;
2250 if( latch->hash != hashidx )
2251 fprintf(stderr, "latchset %d wrong hashidx\n", idx);
2253 fprintf(stderr, "latchset %d pinned for page %.8llx\n", idx, latch->page_no);
2254 } while( idx = latch->next );
2257 next = bt->mgr->latchmgr->nlatchpage + LATCH_page;
2258 page_no = LEAF_page;
2260 while( page_no < bt_getid(bt->mgr->latchmgr->alloc->right) ) {
2261 pread (bt->mgr->idx, bt->frame, bt->mgr->page_size, page_no << bt->mgr->page_bits);
2262 if( !bt->frame->free )
2263 for( idx = 0; idx++ < bt->frame->cnt - 1; ) {
2264 ptr = keyptr(bt->frame, idx+1);
2265 if( keycmp (keyptr(bt->frame, idx), ptr->key, ptr->len) >= 0 )
2266 fprintf(stderr, "page %.8llx idx %.2x out of order\n", page_no, idx);
2269 if( page_no > LEAF_page )
2283 // standalone program to index file of keys
2284 // then list them onto std-out
2287 void *index_file (void *arg)
2289 uint __stdcall index_file (void *arg)
2292 int line = 0, found = 0, cnt = 0;
2293 uid next, page_no = LEAF_page; // start on first page of leaves
2294 unsigned char key[256];
2295 ThreadArg *args = arg;
2296 int ch, len = 0, slot;
2303 bt = bt_open (args->mgr);
2306 switch(args->type | 0x20)
2309 fprintf(stderr, "started latch mgr audit\n");
2311 fprintf(stderr, "finished latch mgr audit\n");
2315 fprintf(stderr, "started indexing for %s\n", args->infile);
2316 if( in = fopen (args->infile, "rb") )
2317 while( ch = getc(in), ch != EOF )
2322 if( args->num == 1 )
2323 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
2325 else if( args->num )
2326 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
2328 if( bt_insertkey (bt, key, len, 0, line, *tod) )
2329 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
2332 else if( len < 255 )
2334 fprintf(stderr, "finished %s for %d keys\n", args->infile, line);
2338 fprintf(stderr, "started deleting keys for %s\n", args->infile);
2339 if( in = fopen (args->infile, "rb") )
2340 while( ch = getc(in), ch != EOF )
2344 if( args->num == 1 )
2345 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
2347 else if( args->num )
2348 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
2350 if( bt_deletekey (bt, key, len, 0) )
2351 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
2354 else if( len < 255 )
2356 fprintf(stderr, "finished %s for %d keys\n", args->infile, line);
2360 fprintf(stderr, "started finding keys for %s\n", args->infile);
2361 if( in = fopen (args->infile, "rb") )
2362 while( ch = getc(in), ch != EOF )
2366 if( args->num == 1 )
2367 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
2369 else if( args->num )
2370 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
2372 if( bt_findkey (bt, key, len) )
2375 fprintf(stderr, "Error %d Syserr %d Line: %d\n", bt->err, errno, line), exit(0);
2378 else if( len < 255 )
2380 fprintf(stderr, "finished %s for %d keys, found %d\n", args->infile, line, found);
2384 fprintf(stderr, "started scanning\n");
2386 if( set->pool = bt_pinpool (bt, page_no) )
2387 set->page = bt_page (bt, set->pool, page_no);
2390 set->latch = bt_pinlatch (bt, page_no);
2391 bt_lockpage (BtLockRead, set->latch);
2392 next = bt_getid (set->page->right);
2393 cnt += set->page->act;
2395 for( slot = 0; slot++ < set->page->cnt; )
2396 if( next || slot < set->page->cnt )
2397 if( !slotptr(set->page, slot)->dead ) {
2398 ptr = keyptr(set->page, slot);
2399 fwrite (ptr->key, ptr->len, 1, stdout);
2400 fputc ('\n', stdout);
2403 bt_unlockpage (BtLockRead, set->latch);
2404 bt_unpinlatch (set->latch);
2405 bt_unpinpool (set->pool);
2406 } while( page_no = next );
2408 cnt--; // remove stopper key
2409 fprintf(stderr, " Total keys read %d\n", cnt);
2413 fprintf(stderr, "started counting\n");
2414 next = bt->mgr->latchmgr->nlatchpage + LATCH_page;
2415 page_no = LEAF_page;
2417 while( page_no < bt_getid(bt->mgr->latchmgr->alloc->right) ) {
2418 uid off = page_no << bt->mgr->page_bits;
2420 pread (bt->mgr->idx, bt->frame, bt->mgr->page_size, off);
2424 SetFilePointer (bt->mgr->idx, (long)off, (long*)(&off)+1, FILE_BEGIN);
2426 if( !ReadFile(bt->mgr->idx, bt->frame, bt->mgr->page_size, amt, NULL))
2427 return bt->err = BTERR_map;
2429 if( *amt < bt->mgr->page_size )
2430 return bt->err = BTERR_map;
2432 if( !bt->frame->free && !bt->frame->lvl )
2433 cnt += bt->frame->act;
2434 if( page_no > LEAF_page )
2439 cnt--; // remove stopper key
2440 fprintf(stderr, " Total keys read %d\n", cnt);
2452 typedef struct timeval timer;
2454 int main (int argc, char **argv)
2456 int idx, cnt, len, slot, err;
2457 int segsize, bits = 16;
2474 fprintf (stderr, "Usage: %s idx_file Read/Write/Scan/Delete/Find [page_bits mapped_segments seg_bits line_numbers src_file1 src_file2 ... ]\n", argv[0]);
2475 fprintf (stderr, " where page_bits is the page size in bits\n");
2476 fprintf (stderr, " mapped_segments is the number of mmap segments in buffer pool\n");
2477 fprintf (stderr, " seg_bits is the size of individual segments in buffer pool in pages in bits\n");
2478 fprintf (stderr, " line_numbers = 1 to append line numbers to keys\n");
2479 fprintf (stderr, " src_file1 thru src_filen are files of keys separated by newline\n");
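// Illustrative example (not part of the original source); the program and
// key file names are hypothetical.  Index two key files with one thread
// per file, 16 bit pages and an 8192 segment pool, then stream the keys
// back out in order:
//
//	./threads2j test.idx Write 16 8192 4 0 keys1.txt keys2.txt
//	./threads2j test.idx Scan  16 8192 4 0 keys1.txt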
2483 start = getCpuTime(0);
2486 bits = atoi(argv[3]);
2489 poolsize = atoi(argv[4]);
2492 fprintf (stderr, "Warning: no mapped_pool\n");
2494 if( poolsize > 65535 )
2495 fprintf (stderr, "Warning: mapped_pool > 65535 segments\n");
2498 segsize = atoi(argv[5]);
2500 segsize = 4; // 16 pages per mmap segment
2503 num = atoi(argv[6]);
2507 threads = malloc (cnt * sizeof(pthread_t));
2509 threads = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, cnt * sizeof(HANDLE));
2511 args = malloc (cnt * sizeof(ThreadArg));
2513 mgr = bt_mgr ((argv[1]), BT_rw, bits, poolsize, segsize, poolsize / 8);
2516 fprintf(stderr, "Index Open Error %s\n", argv[1]);
2522 for( idx = 0; idx < cnt; idx++ ) {
2523 args[idx].infile = argv[idx + 7];
2524 args[idx].type = argv[2][0];
2525 args[idx].mgr = mgr;
2526 args[idx].num = num;
2527 args[idx].idx = idx;
2529 if( err = pthread_create (threads + idx, NULL, index_file, args + idx) )
2530 fprintf(stderr, "Error creating thread %d\n", err);
2532 threads[idx] = (HANDLE)_beginthreadex(NULL, 65536, index_file, args + idx, 0, NULL);
2536 // wait for termination
2539 for( idx = 0; idx < cnt; idx++ )
2540 pthread_join (threads[idx], NULL);
2542 WaitForMultipleObjects (cnt, threads, TRUE, INFINITE);
2544 for( idx = 0; idx < cnt; idx++ )
2545 CloseHandle(threads[idx]);
2548 elapsed = getCpuTime(0) - start;
2549 fprintf(stderr, " real %dm%.3fs\n", (int)(elapsed/60), elapsed - (int)(elapsed/60)*60);
2550 elapsed = getCpuTime(1);
2551 fprintf(stderr, " user %dm%.3fs\n", (int)(elapsed/60), elapsed - (int)(elapsed/60)*60);
2552 elapsed = getCpuTime(2);
2553 fprintf(stderr, " sys %dm%.3fs\n", (int)(elapsed/60), elapsed - (int)(elapsed/60)*60);