1 // btree version threads2h pthread rw lock/SRW version
2 // with fixed bt_deletekey code
5 // author: karl malbrain, malbrain@cal.berkeley.edu
8 This work, including the source code, documentation
9 and related data, is placed into the public domain.
11 The original author is Karl Malbrain.
13 THIS SOFTWARE IS PROVIDED AS-IS WITHOUT WARRANTY
14 OF ANY KIND, NOT EVEN THE IMPLIED WARRANTY OF
15 MERCHANTABILITY. THE AUTHOR OF THIS SOFTWARE,
16 ASSUMES _NO_ RESPONSIBILITY FOR ANY CONSEQUENCE
17 RESULTING FROM THE USE, MODIFICATION, OR
18 REDISTRIBUTION OF THIS SOFTWARE.
21 // Please see the project home page for documentation
22 // code.google.com/p/high-concurrency-btree
24 #define _FILE_OFFSET_BITS 64
25 #define _LARGEFILE64_SOURCE
41 #define WIN32_LEAN_AND_MEAN
55 typedef unsigned long long uid;
58 typedef unsigned long long off64_t;
59 typedef unsigned short ushort;
60 typedef unsigned int uint;
63 #define BT_latchtable 128 // number of latch manager slots
65 #define BT_ro 0x6f72 // ro
66 #define BT_rw 0x7772 // rw
68 #define BT_maxbits 24 // maximum page size in bits
69 #define BT_minbits 9 // minimum page size in bits
70 #define BT_minpage (1 << BT_minbits) // minimum page size
71 #define BT_maxpage (1 << BT_maxbits) // maximum page size
74 There are five lock types for each node in three independent sets (a lock-coupling sketch follows the list):
75 1. (set 1) AccessIntent: Sharable. Going to Read the node. Incompatible with NodeDelete.
76 2. (set 1) NodeDelete: Exclusive. About to release the node. Incompatible with AccessIntent.
77 3. (set 2) ReadLock: Sharable. Read the node. Incompatible with WriteLock.
78 4. (set 2) WriteLock: Exclusive. Modify the node. Incompatible with ReadLock and other WriteLocks.
79 5. (set 3) ParentModification: Exclusive. Change the node's parent keys. Incompatible with another ParentModification.
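// A minimal lock-coupling sketch (not part of the original source) showing the
// order bt_loadpage uses while descending: take AccessIntent on the child page,
// release the parent, then take the read or write latch and drop AccessIntent.
// `child` and `parent` are hypothetical BtPageSet pointers.
//
//	bt_lockpage (BtLockAccess, child->latch);	// set 1: blocks NodeDelete
//	bt_unlockpage (BtLockRead, parent->latch);	// parent no longer needed
//	bt_lockpage (BtLockRead, child->latch);		// set 2: read the child
//	bt_unlockpage (BtLockAccess, child->latch);	// intent lock can be dropped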
90 // mode & definition for latch implementation
92 // exclusive is set for write access
93 // share is count of read accessors
94 // grant write lock when share == 0
96 volatile typedef struct {
97 unsigned char mutex[1];
98 unsigned char exclusive:1;
99 unsigned char pending:1;
103 // hash table entries
106 BtSpinLatch latch[1];
107 volatile ushort slot; // Latch table entry at head of chain
110 // latch manager table structure
114 pthread_rwlock_t lock[1];
121 BtLatch readwr[1]; // read/write page lock
122 BtLatch access[1]; // Access Intent/Page delete
123 BtLatch parent[1]; // Posting of fence key in parent
124 BtSpinLatch busy[1]; // slot is being moved between chains
125 volatile ushort next; // next entry in hash table chain
126 volatile ushort prev; // prev entry in hash table chain
127 volatile ushort pin; // number of outstanding locks
128 volatile ushort hash; // hash slot entry is under
129 volatile uid page_no; // latch set page number
132 // Define the length of the page and key pointers
136 // Page key slot definition.
138 // If BT_maxbits is 15 or less, you can save 4 bytes
139 // for each key stored by making the first two uints
140 // into ushorts. You can also save 4 bytes by removing
141 // the tod field from the key.
143 // Keys are marked dead, but remain on the page until
144 // cleanup is called. The fence key (highest key) for
145 // the page is always present, even after cleanup.
148 uint off:BT_maxbits; // page offset for key start
149 uint dead:1; // set for deleted key
150 uint tod; // time-stamp for key
151 unsigned char id[BtId]; // id associated with key
154 // The key structure occupies space at the upper end of
155 // each page. It's a length byte followed by the value
160 unsigned char key[1];
163 // The first part of an index page.
164 // It is immediately followed
165 // by the BtSlot array of keys.
167 typedef struct BtPage_ {
168 uint cnt; // count of keys in page
169 uint act; // count of active keys
170 uint min; // next key offset
171 unsigned char bits:7; // page size in bits
172 unsigned char free:1; // page is on free list
173 unsigned char lvl:6; // level of page
174 unsigned char kill:1; // page is being killed
175 unsigned char dirty:1; // page has deleted keys
176 unsigned char right[BtId]; // page number to right
179 // The memory mapping pool table buffer manager entry
182 uid basepage; // mapped base page number
183 char *map; // mapped memory pointer
184 ushort slot; // slot index in this array
185 ushort pin; // mapped page pin counter
186 void *hashprev; // previous pool entry for the same hash idx
187 void *hashnext; // next pool entry for the same hash idx
189 HANDLE hmap; // Windows memory mapping handle
193 #define CLOCK_bit 0x8000 // bit in pool->pin
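// Descriptive note (not in the original): bt_pinpool sets CLOCK_bit in pool->pin
// each time a mapped segment is referenced.  The eviction sweep in bt_pinpool
// clears the bit and skips the entry on its first pass, so a segment is only
// recycled after a full clock revolution with no new references and pin == 0.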
195 // The loadpage interface object
198 uid page_no; // current page number
199 BtPage page; // current page pointer
200 BtPool *pool; // current page pool
201 BtLatchSet *latch; // current page latch set
204 // structure for latch manager on ALLOC_page
207 struct BtPage_ alloc[2]; // next & free page_nos in right ptr
208 BtSpinLatch lock[1]; // allocation area lite latch
209 ushort latchdeployed; // highest number of latch entries deployed
210 ushort nlatchpage; // number of latch pages at BT_latch
211 ushort latchtotal; // number of page latch entries
212 ushort latchhash; // number of latch hash table slots
213 ushort latchvictim; // next latch entry to examine
214 BtHashEntry table[0]; // the hash table
217 // The object structure for Btree access
220 uint page_size; // page size
221 uint page_bits; // page size in bits
222 uint seg_bits; // seg size in pages in bits
223 uint mode; // read-write mode
229 ushort poolcnt; // highest page pool node in use
230 ushort poolmax; // highest page pool node allocated
231 ushort poolmask; // total number of pages in mmap segment - 1
232 ushort hashsize; // size of Hash Table for pool entries
233 volatile uint evicted; // last evicted pool table slot
234 ushort *hash; // pool index for hash entries
235 BtSpinLatch *latch; // latches for hash table slots
236 BtLatchMgr *latchmgr; // mapped latch page from allocation page
237 BtLatchSet *latchsets; // mapped latch set from latch pages
238 BtPool *pool; // memory pool page segments
240 HANDLE halloc; // allocation and latch table handle
245 BtMgr *mgr; // buffer manager for thread
246 BtPage cursor; // cached frame for start/next (never mapped)
247 BtPage frame; // spare frame for the page split (never mapped)
248 BtPage zero; // page frame for zeroes at end of file
249 uid cursor_page; // current cursor page number
250 unsigned char *mem; // frame, cursor, page memory buffer
251 int found; // last delete or insert was found
252 int err; // last error
266 extern void bt_close (BtDb *bt);
267 extern BtDb *bt_open (BtMgr *mgr);
268 extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod);
269 extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl);
270 extern uid bt_findkey (BtDb *bt, unsigned char *key, uint len);
271 extern uint bt_startkey (BtDb *bt, unsigned char *key, uint len);
272 extern uint bt_nextkey (BtDb *bt, uint slot);
275 extern BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolsize, uint segsize, uint hashsize);
276 void bt_mgrclose (BtMgr *mgr);
278 // Helper functions to return slot values
280 extern BtKey bt_key (BtDb *bt, uint slot);
281 extern uid bt_uid (BtDb *bt, uint slot);
282 extern uint bt_tod (BtDb *bt, uint slot);
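// A minimal single-threaded usage sketch (not part of the original source)
// showing how the calls above fit together; the file name, page-size bits,
// pool and hash sizes are arbitrary example values.
//
//	BtMgr *mgr = bt_mgr ("test.idx", BT_rw, 16, 8192, 4, 1024);
//	BtDb *bt = bt_open (mgr);
//	unsigned char *key = (unsigned char *)"hello";
//
//	if( bt_insertkey (bt, key, 5, 0, 42, time(NULL)) )
//		fprintf (stderr, "insert error %d\n", bt->err);
//
//	uid id = bt_findkey (bt, key, 5);	// row-id, or zero if not found
//
//	bt_close (bt);
//	bt_mgrclose (mgr);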
284 // BTree page number constants
285 #define ALLOC_page 0 // allocation & lock manager hash table
286 #define ROOT_page 1 // root of the btree
287 #define LEAF_page 2 // first page of leaves
288 #define LATCH_page 3 // pages for lock manager
290 // Number of levels to create in a new BTree
294 // The page is allocated from low and high ends.
295 // The key offsets and row-ids are allocated
296 // from the bottom, while the text of the key
297 // is allocated from the top. When the two
298 // areas meet, the page is split into two.
300 // A key consists of a length byte, two bytes of
301 // index number (0 - 65534), and up to 253 bytes
302 // of key value. Duplicate keys are discarded.
303 // Associated with each key is a 48 bit row-id.
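// Illustrative layout (not in the original): a 5 byte key "hello" stored with
// row-id 42 occupies a BtSlot near the bottom of the page and its key text near
// the top, with the slot's off field pointing at the length byte:
//
//	page + slot->off : 05 'h' 'e' 'l' 'l' 'o'	// length byte + key bytes
//	slot->id[0..5]   : 00 00 00 00 00 2a		// 48 bit row-id (42)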
305 // The b-tree root is always located at page 1.
306 // The first leaf page of level zero is always
307 // located on page 2.
309 // The b-tree pages are linked with next
310 // pointers to facilitate enumerators,
311 // and provide for concurrency.
313 // When the root page fills, it is split in two and
314 // the tree height is raised by a new root at page
315 // one with two keys.
317 // Deleted keys are marked with a dead bit until
318 // page cleanup. The fence key for a node is
319 // always present, even after cleanup.
321 // Groups of btree pages, called segments, are optionally
322 // cached with a memory mapped pool. A hash table is used to keep
323 // track of the cached segments. This behaviour is controlled
324 // by the cache block size parameter to bt_open.
326 // To achieve maximum concurrency one page is locked at a time
327 // as the tree is traversed to find the leaf key in question. The right
328 // page numbers are used in cases where the page is being split,
331 // Page 0 is dedicated to locking for new page extensions,
332 // and chains empty pages together for reuse.
334 // The ParentModification lock on a node is obtained to serialize posting
335 // or changing the fence key for a node.
337 // Empty pages are chained together through the ALLOC page and reused.
339 // Access macros to address slot and key values from the page.
340 // Page slots use 1 based indexing.
342 #define slotptr(page, slot) (((BtSlot *)(page+1)) + (slot-1))
343 #define keyptr(page, slot) ((BtKey)((unsigned char*)(page) + slotptr(page, slot)->off))
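// A small illustrative loop (not part of the original) that walks a page with
// the macros above; `page` is a hypothetical BtPage the caller has already
// pinned and read locked.  Slots are 1 based and dead slots are skipped.
//
//	uint slot;
//	BtKey ptr;
//
//	for( slot = 1; slot <= page->cnt; slot++ )
//	  if( !slotptr(page, slot)->dead ) {
//		ptr = keyptr(page, slot);
//		fwrite (ptr->key, ptr->len, 1, stdout);
//		fputc ('\n', stdout);
//	  }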
345 void bt_putid(unsigned char *dest, uid id)
350 dest[i] = (unsigned char)id, id >>= 8;
353 uid bt_getid(unsigned char *src)
358 for( i = 0; i < BtId; i++ )
359 id <<= 8, id |= *src++;
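// Usage sketch (not in the original): bt_putid and bt_getid round-trip a 48 bit
// page number or row-id through the 6 byte big-endian id fields used in BtSlot
// and in the page right pointers.
//
//	unsigned char buf[BtId];
//
//	bt_putid (buf, 123456789ULL);	// store high byte first
//	uid val = bt_getid (buf);	// val == 123456789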
366 // wait until write lock mode is clear
367 // and add 1 to the share count
369 void bt_spinreadlock(BtSpinLatch *latch)
374 // obtain latch mutex
376 if( __sync_lock_test_and_set(latch->mutex, 1) )
379 if( _InterlockedExchange8(latch->mutex, 1) )
382 // see if exclusive request is granted or pending
384 if( prev = !(latch->exclusive | latch->pending) )
388 __sync_lock_release (latch->mutex);
390 _InterlockedExchange8(latch->mutex, 0);
397 } while( sched_yield(), 1 );
399 } while( SwitchToThread(), 1 );
403 // wait for other read and write latches to relinquish
405 void bt_spinwritelock(BtSpinLatch *latch)
411 if( __sync_lock_test_and_set(latch->mutex, 1) )
414 if( _InterlockedExchange8(latch->mutex, 1) )
417 if( prev = !(latch->share | latch->exclusive) )
418 latch->exclusive = 1, latch->pending = 0;
422 __sync_lock_release (latch->mutex);
424 _InterlockedExchange8(latch->mutex, 0);
429 } while( sched_yield(), 1 );
431 } while( SwitchToThread(), 1 );
435 // try to obtain write lock
437 // return 1 if obtained,
440 int bt_spinwritetry(BtSpinLatch *latch)
445 if( __sync_lock_test_and_set(latch->mutex, 1) )
448 if( _InterlockedExchange8(latch->mutex, 1) )
451 // take write access if all bits are clear
453 if( prev = !(latch->exclusive | latch->share) )
454 latch->exclusive = 1;
457 __sync_lock_release (latch->mutex);
459 _InterlockedExchange8(latch->mutex, 0);
466 void bt_spinreleasewrite(BtSpinLatch *latch)
468 // obtain latch mutex
470 while( __sync_lock_test_and_set(latch->mutex, 1) )
473 while( _InterlockedExchange8(latch->mutex, 1) )
476 latch->exclusive = 0;
478 __sync_lock_release (latch->mutex);
480 _InterlockedExchange8(latch->mutex, 0);
484 // decrement reader count
486 void bt_spinreleaseread(BtSpinLatch *latch)
489 while( __sync_lock_test_and_set(latch->mutex, 1) )
492 while( _InterlockedExchange8(latch->mutex, 1) )
497 __sync_lock_release (latch->mutex);
499 _InterlockedExchange8(latch->mutex, 0);
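// Usage sketch (not in the original): the spin latch calls above guard short
// critical sections such as latch hash-chain lookups, and every acquire must
// be paired with the matching release.
//
//	bt_spinreadlock (bt->mgr->latchmgr->table[hashidx].latch);
//	// ... walk the hash chain under the read latch ...
//	bt_spinreleaseread (bt->mgr->latchmgr->table[hashidx].latch);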
503 void bt_readlock(BtLatch *latch)
506 pthread_rwlock_rdlock (latch->lock);
508 AcquireSRWLockShared (latch->srw);
512 // wait for other read and write latches to relinquish
514 void bt_writelock(BtLatch *latch)
517 pthread_rwlock_wrlock (latch->lock);
519 AcquireSRWLockExclusive (latch->srw);
523 // try to obtain write lock
525 // return 1 if obtained,
526 // 0 if already write or read locked
528 int bt_writetry(BtLatch *latch)
533 result = !pthread_rwlock_trywrlock (latch->lock);
535 result = TryAcquireSRWLockExclusive (latch->srw);
542 void bt_releasewrite(BtLatch *latch)
545 pthread_rwlock_unlock (latch->lock);
547 ReleaseSRWLockExclusive (latch->srw);
551 // decrement reader count
553 void bt_releaseread(BtLatch *latch)
556 pthread_rwlock_unlock (latch->lock);
558 ReleaseSRWLockShared (latch->srw);
562 void bt_initlockset (BtLatchSet *set)
565 pthread_rwlockattr_t rwattr[1];
567 pthread_rwlockattr_init (rwattr);
568 pthread_rwlockattr_setpshared (rwattr, PTHREAD_PROCESS_SHARED);
570 pthread_rwlock_init (set->readwr->lock, rwattr);
571 pthread_rwlock_init (set->access->lock, rwattr);
572 pthread_rwlock_init (set->parent->lock, rwattr);
573 pthread_rwlockattr_destroy (rwattr);
575 InitializeSRWLock (set->readwr->srw);
576 InitializeSRWLock (set->access->srw);
577 InitializeSRWLock (set->parent->srw);
581 // link latch table entry into latch hash table
583 void bt_latchlink (BtDb *bt, ushort hashidx, ushort victim, uid page_no)
585 BtLatchSet *set = bt->mgr->latchsets + victim;
587 if( set->next = bt->mgr->latchmgr->table[hashidx].slot )
588 bt->mgr->latchsets[set->next].prev = victim;
590 bt->mgr->latchmgr->table[hashidx].slot = victim;
591 set->page_no = page_no;
598 void bt_unpinlatch (BtLatchSet *set)
601 __sync_fetch_and_add(&set->pin, -1);
603 _InterlockedDecrement16 (&set->pin);
607 // find existing latchset or inspire new one
608 // return with latchset pinned
610 BtLatchSet *bt_pinlatch (BtDb *bt, uid page_no)
612 ushort hashidx = page_no % bt->mgr->latchmgr->latchhash;
613 ushort slot, avail = 0, victim, idx;
616 // try to find existing latch table entry for this page
618 // obtain read lock on hash table entry
620 bt_spinreadlock(bt->mgr->latchmgr->table[hashidx].latch);
622 if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do
624 set = bt->mgr->latchsets + slot;
625 if( page_no == set->page_no )
627 } while( slot = set->next );
631 __sync_fetch_and_add(&set->pin, 1);
633 _InterlockedIncrement16 (&set->pin);
637 bt_spinreleaseread (bt->mgr->latchmgr->table[hashidx].latch);
642 // try again, this time with write lock
644 bt_spinwritelock(bt->mgr->latchmgr->table[hashidx].latch);
646 if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do
648 set = bt->mgr->latchsets + slot;
649 if( page_no == set->page_no )
651 if( !set->pin && !avail )
653 } while( slot = set->next );
655 // found our entry, or take over an unpinned one
657 if( slot || (slot = avail) ) {
658 set = bt->mgr->latchsets + slot;
660 __sync_fetch_and_add(&set->pin, 1);
662 _InterlockedIncrement16 (&set->pin);
664 set->page_no = page_no;
665 bt_spinreleasewrite(bt->mgr->latchmgr->table[hashidx].latch);
669 // see if there are any unused entries
671 victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, 1) + 1;
673 victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchdeployed);
676 if( victim < bt->mgr->latchmgr->latchtotal ) {
677 set = bt->mgr->latchsets + victim;
679 __sync_fetch_and_add(&set->pin, 1);
681 _InterlockedIncrement16 (&set->pin);
683 bt_initlockset (set);
684 bt_latchlink (bt, hashidx, victim, page_no);
685 bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch);
690 victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, -1);
692 victim = _InterlockedDecrement16 (&bt->mgr->latchmgr->latchdeployed);
694 // find and reuse previous lock entry
698 victim = __sync_fetch_and_add(&bt->mgr->latchmgr->latchvictim, 1);
700 victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchvictim) - 1;
702 // we don't use slot zero
704 if( victim %= bt->mgr->latchmgr->latchtotal )
705 set = bt->mgr->latchsets + victim;
709 // take control of our slot
710 // from other threads
712 if( set->pin || !bt_spinwritetry (set->busy) )
717 // try to get write lock on hash chain
718 // skip entry if not obtained
719 // or has outstanding locks
721 if( !bt_spinwritetry (bt->mgr->latchmgr->table[idx].latch) ) {
722 bt_spinreleasewrite (set->busy);
727 bt_spinreleasewrite (set->busy);
728 bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch);
732 // unlink our available victim from its hash chain
735 bt->mgr->latchsets[set->prev].next = set->next;
737 bt->mgr->latchmgr->table[idx].slot = set->next;
740 bt->mgr->latchsets[set->next].prev = set->prev;
742 bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch);
744 __sync_fetch_and_add(&set->pin, 1);
746 _InterlockedIncrement16 (&set->pin);
748 bt_latchlink (bt, hashidx, victim, page_no);
749 bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch);
750 bt_spinreleasewrite (set->busy);
755 void bt_mgrclose (BtMgr *mgr)
760 // release mapped pages
761 // note that slot zero is never used
763 for( slot = 1; slot < mgr->poolmax; slot++ ) {
764 pool = mgr->pool + slot;
767 munmap (pool->map, (mgr->poolmask+1) << mgr->page_bits);
770 FlushViewOfFile(pool->map, 0);
771 UnmapViewOfFile(pool->map);
772 CloseHandle(pool->hmap);
778 munmap (mgr->latchsets, mgr->latchmgr->nlatchpage * mgr->page_size);
779 munmap (mgr->latchmgr, mgr->page_size);
781 FlushViewOfFile(mgr->latchmgr, 0);
782 UnmapViewOfFile(mgr->latchmgr);
783 CloseHandle(mgr->halloc);
789 free ((void *)mgr->latch);
792 FlushFileBuffers(mgr->idx);
793 CloseHandle(mgr->idx);
794 GlobalFree (mgr->pool);
795 GlobalFree (mgr->hash);
796 GlobalFree ((void *)mgr->latch);
801 // close and release memory
803 void bt_close (BtDb *bt)
810 VirtualFree (bt->mem, 0, MEM_RELEASE);
815 // open/create new btree buffer manager
817 // call with file_name, BT_openmode, bits in page size (e.g. 16),
818 // size of mapped page pool (e.g. 8192)
820 BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolmax, uint segsize, uint hashsize)
822 uint lvl, attr, cacheblk, last, slot, idx;
823 uint nlatchpage, latchhash;
824 BtLatchMgr *latchmgr;
832 SYSTEM_INFO sysinfo[1];
835 // determine sanity of page size and buffer pool
837 if( bits > BT_maxbits )
839 else if( bits < BT_minbits )
843 return NULL; // must have buffer pool
846 mgr = calloc (1, sizeof(BtMgr));
848 mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666);
851 return free(mgr), NULL;
853 cacheblk = 4096; // minimum mmap segment size for unix
856 mgr = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, sizeof(BtMgr));
857 attr = FILE_ATTRIBUTE_NORMAL;
858 mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL);
860 if( mgr->idx == INVALID_HANDLE_VALUE )
861 return GlobalFree(mgr), NULL;
863 // normalize cacheblk to multiple of sysinfo->dwAllocationGranularity
864 GetSystemInfo(sysinfo);
865 cacheblk = sysinfo->dwAllocationGranularity;
869 latchmgr = malloc (BT_maxpage);
872 // read minimum page size to get root info
874 if( size = lseek (mgr->idx, 0L, 2) ) {
875 if( pread(mgr->idx, latchmgr, BT_minpage, 0) == BT_minpage )
876 bits = latchmgr->alloc->bits;
878 return free(mgr), free(latchmgr), NULL;
879 } else if( mode == BT_ro )
880 return free(latchmgr), bt_mgrclose (mgr), NULL;
882 latchmgr = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE);
883 size = GetFileSize(mgr->idx, amt);
886 if( !ReadFile(mgr->idx, (char *)latchmgr, BT_minpage, amt, NULL) )
887 return bt_mgrclose (mgr), NULL;
888 bits = latchmgr->alloc->bits;
889 } else if( mode == BT_ro )
890 return bt_mgrclose (mgr), NULL;
893 mgr->page_size = 1 << bits;
894 mgr->page_bits = bits;
896 mgr->poolmax = poolmax;
899 if( cacheblk < mgr->page_size )
900 cacheblk = mgr->page_size;
902 // mask for partial memmaps
904 mgr->poolmask = (cacheblk >> bits) - 1;
906 // see if requested size of pages per memmap is greater
908 if( (1 << segsize) > mgr->poolmask )
909 mgr->poolmask = (1 << segsize) - 1;
913 while( (1 << mgr->seg_bits) <= mgr->poolmask )
916 mgr->hashsize = hashsize;
919 mgr->pool = calloc (poolmax, sizeof(BtPool));
920 mgr->hash = calloc (hashsize, sizeof(ushort));
921 mgr->latch = calloc (hashsize, sizeof(BtSpinLatch));
923 mgr->pool = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, poolmax * sizeof(BtPool));
924 mgr->hash = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(ushort));
925 mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtSpinLatch));
931 // initialize an empty b-tree with latch page, root page, page of leaves
932 // and page(s) of latches
934 memset (latchmgr, 0, 1 << bits);
935 nlatchpage = BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1;
936 bt_putid(latchmgr->alloc->right, MIN_lvl+1+nlatchpage);
937 latchmgr->alloc->bits = mgr->page_bits;
939 latchmgr->nlatchpage = nlatchpage;
940 latchmgr->latchtotal = nlatchpage * (mgr->page_size / sizeof(BtLatchSet));
942 // initialize latch manager
944 latchhash = (mgr->page_size - sizeof(BtLatchMgr)) / sizeof(BtHashEntry);
946 // size of hash table = total number of latchsets
948 if( latchhash > latchmgr->latchtotal )
949 latchhash = latchmgr->latchtotal;
951 latchmgr->latchhash = latchhash;
954 if( write (mgr->idx, latchmgr, mgr->page_size) < mgr->page_size )
955 return bt_mgrclose (mgr), NULL;
957 if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) )
958 return bt_mgrclose (mgr), NULL;
960 if( *amt < mgr->page_size )
961 return bt_mgrclose (mgr), NULL;
964 memset (latchmgr, 0, 1 << bits);
965 latchmgr->alloc->bits = mgr->page_bits;
967 for( lvl=MIN_lvl; lvl--; ) {
968 slotptr(latchmgr->alloc, 1)->off = mgr->page_size - 3;
969 bt_putid(slotptr(latchmgr->alloc, 1)->id, lvl ? MIN_lvl - lvl + 1 : 0); // next(lower) page number
970 key = keyptr(latchmgr->alloc, 1);
971 key->len = 2; // create stopper key
974 latchmgr->alloc->min = mgr->page_size - 3;
975 latchmgr->alloc->lvl = lvl;
976 latchmgr->alloc->cnt = 1;
977 latchmgr->alloc->act = 1;
979 if( write (mgr->idx, latchmgr, mgr->page_size) < mgr->page_size )
980 return bt_mgrclose (mgr), NULL;
982 if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) )
983 return bt_mgrclose (mgr), NULL;
985 if( *amt < mgr->page_size )
986 return bt_mgrclose (mgr), NULL;
990 // clear out latch manager locks
991 // and rest of pages to round out segment
993 memset(latchmgr, 0, mgr->page_size);
996 while( last <= ((MIN_lvl + 1 + nlatchpage) | mgr->poolmask) ) {
998 pwrite(mgr->idx, latchmgr, mgr->page_size, last << mgr->page_bits);
1000 SetFilePointer (mgr->idx, last << mgr->page_bits, NULL, FILE_BEGIN);
1001 if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) )
1002 return bt_mgrclose (mgr), NULL;
1003 if( *amt < mgr->page_size )
1004 return bt_mgrclose (mgr), NULL;
1011 flag = PROT_READ | PROT_WRITE;
1012 mgr->latchmgr = mmap (0, mgr->page_size, flag, MAP_SHARED, mgr->idx, ALLOC_page * mgr->page_size);
1013 if( mgr->latchmgr == MAP_FAILED )
1014 return bt_mgrclose (mgr), NULL;
1015 mgr->latchsets = (BtLatchSet *)mmap (0, mgr->latchmgr->nlatchpage * mgr->page_size, flag, MAP_SHARED, mgr->idx, LATCH_page * mgr->page_size);
1016 if( mgr->latchsets == MAP_FAILED )
1017 return bt_mgrclose (mgr), NULL;
1019 flag = PAGE_READWRITE;
1020 mgr->halloc = CreateFileMapping(mgr->idx, NULL, flag, 0, (BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1 + LATCH_page) * mgr->page_size, NULL);
1022 return bt_mgrclose (mgr), NULL;
1024 flag = FILE_MAP_WRITE;
1025 mgr->latchmgr = MapViewOfFile(mgr->halloc, flag, 0, 0, (BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1 + LATCH_page) * mgr->page_size);
1026 if( !mgr->latchmgr )
1027 return GetLastError(), bt_mgrclose (mgr), NULL;
1029 mgr->latchsets = (void *)((char *)mgr->latchmgr + LATCH_page * mgr->page_size);
1035 VirtualFree (latchmgr, 0, MEM_RELEASE);
1040 // open BTree access method
1041 // based on buffer manager
1043 BtDb *bt_open (BtMgr *mgr)
1045 BtDb *bt = malloc (sizeof(*bt));
1047 memset (bt, 0, sizeof(*bt));
1050 bt->mem = malloc (3 *mgr->page_size);
1052 bt->mem = VirtualAlloc(NULL, 3 * mgr->page_size, MEM_COMMIT, PAGE_READWRITE);
1054 bt->frame = (BtPage)bt->mem;
1055 bt->zero = (BtPage)(bt->mem + 1 * mgr->page_size);
1056 bt->cursor = (BtPage)(bt->mem + 2 * mgr->page_size);
1058 memset (bt->zero, 0, mgr->page_size);
1062 // compare two keys, returning > 0, = 0, or < 0
1063 // as the comparison value
1065 int keycmp (BtKey key1, unsigned char *key2, uint len2)
1067 uint len1 = key1->len;
1070 if( ans = memcmp (key1->key, key2, len1 > len2 ? len2 : len1) )
1083 // find segment in pool
1084 // must be called with hashslot idx locked
1085 // return NULL if not there
1086 // otherwise return node
1088 BtPool *bt_findpool(BtDb *bt, uid page_no, uint idx)
1093 // compute start of hash chain in pool
1095 if( slot = bt->mgr->hash[idx] )
1096 pool = bt->mgr->pool + slot;
1100 page_no &= ~bt->mgr->poolmask;
1102 while( pool->basepage != page_no )
1103 if( pool = pool->hashnext )
1111 // add segment to hash table
1113 void bt_linkhash(BtDb *bt, BtPool *pool, uid page_no, int idx)
1118 pool->hashprev = pool->hashnext = NULL;
1119 pool->basepage = page_no & ~bt->mgr->poolmask;
1120 pool->pin = CLOCK_bit + 1;
1122 if( slot = bt->mgr->hash[idx] ) {
1123 node = bt->mgr->pool + slot;
1124 pool->hashnext = node;
1125 node->hashprev = pool;
1128 bt->mgr->hash[idx] = pool->slot;
1131 // map new buffer pool segment to virtual memory
1133 BTERR bt_mapsegment(BtDb *bt, BtPool *pool, uid page_no)
1135 off64_t off = (page_no & ~bt->mgr->poolmask) << bt->mgr->page_bits;
1136 off64_t limit = off + ((bt->mgr->poolmask+1) << bt->mgr->page_bits);
1140 flag = PROT_READ | ( bt->mgr->mode == BT_ro ? 0 : PROT_WRITE );
1141 pool->map = mmap (0, (bt->mgr->poolmask+1) << bt->mgr->page_bits, flag, MAP_SHARED, bt->mgr->idx, off);
1142 if( pool->map == MAP_FAILED )
1143 return bt->err = BTERR_map;
1145 flag = ( bt->mgr->mode == BT_ro ? PAGE_READONLY : PAGE_READWRITE );
1146 pool->hmap = CreateFileMapping(bt->mgr->idx, NULL, flag, (DWORD)(limit >> 32), (DWORD)limit, NULL);
1148 return bt->err = BTERR_map;
1150 flag = ( bt->mgr->mode == BT_ro ? FILE_MAP_READ : FILE_MAP_WRITE );
1151 pool->map = MapViewOfFile(pool->hmap, flag, (DWORD)(off >> 32), (DWORD)off, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
1153 return bt->err = BTERR_map;
1158 // calculate page within pool
1160 BtPage bt_page (BtDb *bt, BtPool *pool, uid page_no)
1162 uint subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping
1165 page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits));
1171 void bt_unpinpool (BtPool *pool)
1174 __sync_fetch_and_add(&pool->pin, -1);
1176 _InterlockedDecrement16 (&pool->pin);
1180 // find or place requested page in segment-pool
1181 // return pool table entry, incrementing pin
1183 BtPool *bt_pinpool(BtDb *bt, uid page_no)
1185 uint slot, hashidx, idx, victim;
1186 BtPool *pool, *node, *next;
1188 // lock hash table chain
1190 hashidx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
1191 bt_spinwritelock (&bt->mgr->latch[hashidx]);
1193 // look up in hash table
1195 if( pool = bt_findpool(bt, page_no, hashidx) ) {
1197 __sync_fetch_and_or(&pool->pin, CLOCK_bit);
1198 __sync_fetch_and_add(&pool->pin, 1);
1200 _InterlockedOr16 (&pool->pin, CLOCK_bit);
1201 _InterlockedIncrement16 (&pool->pin);
1203 bt_spinreleasewrite (&bt->mgr->latch[hashidx]);
1207 // allocate a new pool node
1208 // and add to hash table
1211 slot = __sync_fetch_and_add(&bt->mgr->poolcnt, 1);
1213 slot = _InterlockedIncrement16 (&bt->mgr->poolcnt) - 1;
1216 if( ++slot < bt->mgr->poolmax ) {
1217 pool = bt->mgr->pool + slot;
1220 if( bt_mapsegment(bt, pool, page_no) )
1223 bt_linkhash(bt, pool, page_no, hashidx);
1224 bt_spinreleasewrite (&bt->mgr->latch[hashidx]);
1228 // pool table is full
1229 // find best pool entry to evict
1232 __sync_fetch_and_add(&bt->mgr->poolcnt, -1);
1234 _InterlockedDecrement16 (&bt->mgr->poolcnt);
1239 victim = __sync_fetch_and_add(&bt->mgr->evicted, 1);
1241 victim = _InterlockedIncrement (&bt->mgr->evicted) - 1;
1243 victim %= bt->mgr->poolmax;
1244 pool = bt->mgr->pool + victim;
1245 idx = (uint)(pool->basepage >> bt->mgr->seg_bits) % bt->mgr->hashsize;
1250 // try to get write lock
1251 // skip entry if not obtained
1253 if( !bt_spinwritetry (&bt->mgr->latch[idx]) )
1256 // skip this entry if
1258 // or clock bit is set
1262 __sync_fetch_and_and(&pool->pin, ~CLOCK_bit);
1264 _InterlockedAnd16 (&pool->pin, ~CLOCK_bit);
1266 bt_spinreleasewrite (&bt->mgr->latch[idx]);
1270 // unlink victim pool node from hash table
1272 if( node = pool->hashprev )
1273 node->hashnext = pool->hashnext;
1274 else if( node = pool->hashnext )
1275 bt->mgr->hash[idx] = node->slot;
1277 bt->mgr->hash[idx] = 0;
1279 if( node = pool->hashnext )
1280 node->hashprev = pool->hashprev;
1282 bt_spinreleasewrite (&bt->mgr->latch[idx]);
1284 // remove old file mapping
1286 munmap (pool->map, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
1288 FlushViewOfFile(pool->map, 0);
1289 UnmapViewOfFile(pool->map);
1290 CloseHandle(pool->hmap);
1294 // create new pool mapping
1295 // and link into hash table
1297 if( bt_mapsegment(bt, pool, page_no) )
1300 bt_linkhash(bt, pool, page_no, hashidx);
1301 bt_spinreleasewrite (&bt->mgr->latch[hashidx]);
1306 // place write, read, or parent lock on requested page_no.
1308 void bt_lockpage(BtLock mode, BtLatchSet *set)
1312 bt_readlock (set->readwr);
1315 bt_writelock (set->readwr);
1318 bt_readlock (set->access);
1321 bt_writelock (set->access);
1324 bt_writelock (set->parent);
1329 // remove write, read, or parent lock on requested page
1331 void bt_unlockpage(BtLock mode, BtLatchSet *set)
1335 bt_releaseread (set->readwr);
1338 bt_releasewrite (set->readwr);
1341 bt_releaseread (set->access);
1344 bt_releasewrite (set->access);
1347 bt_releasewrite (set->parent);
1352 // allocate a new page and write page into it
1354 uid bt_newpage(BtDb *bt, BtPage page)
1360 // lock allocation page
1362 bt_spinwritelock(bt->mgr->latchmgr->lock);
1364 // use empty chain first
1365 // else allocate empty page
1367 if( new_page = bt_getid(bt->mgr->latchmgr->alloc[1].right) ) {
1368 if( set->pool = bt_pinpool (bt, new_page) )
1369 set->page = bt_page (bt, set->pool, new_page);
1373 bt_putid(bt->mgr->latchmgr->alloc[1].right, bt_getid(set->page->right));
1374 bt_unpinpool (set->pool);
1377 new_page = bt_getid(bt->mgr->latchmgr->alloc->right);
1378 bt_putid(bt->mgr->latchmgr->alloc->right, new_page+1);
1382 if( pwrite(bt->mgr->idx, page, bt->mgr->page_size, new_page << bt->mgr->page_bits) < bt->mgr->page_size )
1383 return bt->err = BTERR_wrt, 0;
1385 // if writing first page of pool block, zero last page in the block
1387 if( !reuse && bt->mgr->poolmask > 0 && (new_page & bt->mgr->poolmask) == 0 )
1389 // use zero buffer to write zeros
1390 if( pwrite(bt->mgr->idx,bt->zero, bt->mgr->page_size, (new_page | bt->mgr->poolmask) << bt->mgr->page_bits) < bt->mgr->page_size )
1391 return bt->err = BTERR_wrt, 0;
1394 // bring new page into pool and copy page.
1395 // this will extend the file into the new pages.
1397 if( set->pool = bt_pinpool (bt, new_page) )
1398 set->page = bt_page (bt, set->pool, new_page);
1402 memcpy(set->page, page, bt->mgr->page_size);
1403 bt_unpinpool (set->pool);
1405 // unlock allocation latch and return new page no
1407 bt_spinreleasewrite(bt->mgr->latchmgr->lock);
1411 // find slot in page for given key at a given level
1413 int bt_findslot (BtPageSet *set, unsigned char *key, uint len)
1415 uint diff, higher = set->page->cnt, low = 1, slot;
1418 // make stopper key an infinite fence value
1420 if( bt_getid (set->page->right) )
1425 // low is the lowest candidate.
1426 // loop ends when they meet
1428 // higher is already
1429 // tested as .ge. the passed key.
1431 while( diff = higher - low ) {
1432 slot = low + ( diff >> 1 );
1433 if( keycmp (keyptr(set->page, slot), key, len) < 0 )
1436 higher = slot, good++;
1439 // return zero if key is on right link page
1441 return good ? higher : 0;
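// Worked example (not in the original): with page keys
//	slot 1 "ab", slot 2 "cd", slot 3 "ef" (fence)
// a search for "c" returns slot 2, the lowest slot whose key compares greater
// than or equal to the search key.  If every key on the page compares lower
// and the right pointer is non-zero, good stays 0 and the function returns 0,
// telling the caller to slide right to the next page.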
1444 // find and load page at given level for given key
1445 // leave page rd or wr locked as requested
1447 int bt_loadpage (BtDb *bt, BtPageSet *set, unsigned char *key, uint len, uint lvl, BtLock lock)
1449 uid page_no = ROOT_page, prevpage = 0;
1450 uint drill = 0xff, slot;
1451 BtLatchSet *prevlatch;
1452 uint mode, prevmode;
1455 // start at root of btree and drill down
1458 // determine lock mode of drill level
1459 mode = (drill == lvl) ? lock : BtLockRead;
1461 set->latch = bt_pinlatch (bt, page_no);
1462 set->page_no = page_no;
1464 // pin page contents
1466 if( set->pool = bt_pinpool (bt, page_no) )
1467 set->page = bt_page (bt, set->pool, page_no);
1471 // obtain access lock using lock chaining with Access mode
1473 if( page_no > ROOT_page )
1474 bt_lockpage(BtLockAccess, set->latch);
1476 // release & unpin parent page
1479 bt_unlockpage(prevmode, prevlatch);
1480 bt_unpinlatch (prevlatch);
1481 bt_unpinpool (prevpool);
1485 // obtain read lock using lock chaining
1487 bt_lockpage(mode, set->latch);
1489 if( set->page->free )
1490 return bt->err = BTERR_struct, 0;
1492 if( page_no > ROOT_page )
1493 bt_unlockpage(BtLockAccess, set->latch);
1495 // re-read and re-lock root after determining actual level of root
1497 if( set->page->lvl != drill) {
1498 if( set->page_no != ROOT_page )
1499 return bt->err = BTERR_struct, 0;
1501 drill = set->page->lvl;
1503 if( lock != BtLockRead && drill == lvl ) {
1504 bt_unlockpage(mode, set->latch);
1505 bt_unpinlatch (set->latch);
1506 bt_unpinpool (set->pool);
1511 prevpage = set->page_no;
1512 prevlatch = set->latch;
1513 prevpool = set->pool;
1516 // find key on page at this level
1517 // and descend to requested level
1519 if( !set->page->kill )
1520 if( slot = bt_findslot (set, key, len) ) {
1524 while( slotptr(set->page, slot)->dead )
1525 if( slot++ < set->page->cnt )
1530 page_no = bt_getid(slotptr(set->page, slot)->id);
1535 // or slide right into next page
1538 page_no = bt_getid(set->page->right);
1542 // return error on end of right chain
1544 bt->err = BTERR_struct;
1545 return 0; // return error
1548 // return page to free list
1549 // page must be delete & write locked
1551 void bt_freepage (BtDb *bt, BtPageSet *set)
1553 // lock allocation page
1555 bt_spinwritelock (bt->mgr->latchmgr->lock);
1557 // store chain in second right
1558 bt_putid(set->page->right, bt_getid(bt->mgr->latchmgr->alloc[1].right));
1559 bt_putid(bt->mgr->latchmgr->alloc[1].right, set->page_no);
1560 set->page->free = 1;
1562 // unlock released page
1564 bt_unlockpage (BtLockDelete, set->latch);
1565 bt_unlockpage (BtLockWrite, set->latch);
1566 bt_unpinlatch (set->latch);
1567 bt_unpinpool (set->pool);
1569 // unlock allocation page
1571 bt_spinreleasewrite (bt->mgr->latchmgr->lock);
1574 // a fence key was deleted from a page
1575 // push new fence value upwards
1577 BTERR bt_fixfence (BtDb *bt, BtPageSet *set, uint lvl)
1579 unsigned char leftkey[256], rightkey[256];
1583 // remove the old fence value
1585 ptr = keyptr(set->page, set->page->cnt);
1586 memcpy (rightkey, ptr, ptr->len + 1);
1588 memset (slotptr(set->page, set->page->cnt--), 0, sizeof(BtSlot));
1589 set->page->dirty = 1;
1591 ptr = keyptr(set->page, set->page->cnt);
1592 memcpy (leftkey, ptr, ptr->len + 1);
1593 page_no = set->page_no;
1595 bt_lockpage (BtLockParent, set->latch);
1596 bt_unlockpage (BtLockWrite, set->latch);
1598 // insert new (now smaller) fence key
1600 if( bt_insertkey (bt, leftkey+1, *leftkey, lvl+1, page_no, time(NULL)) )
1603 // now delete old fence key
1605 if( bt_deletekey (bt, rightkey+1, *rightkey, lvl+1) )
1608 bt_unlockpage (BtLockParent, set->latch);
1609 bt_unpinlatch(set->latch);
1610 bt_unpinpool (set->pool);
1614 // root has a single child
1615 // collapse a level from the tree
1617 BTERR bt_collapseroot (BtDb *bt, BtPageSet *root)
1622 // find the child entry and promote as new root contents
1625 for( idx = 0; idx++ < root->page->cnt; )
1626 if( !slotptr(root->page, idx)->dead )
1629 child->page_no = bt_getid (slotptr(root->page, idx)->id);
1631 child->latch = bt_pinlatch (bt, child->page_no);
1632 bt_lockpage (BtLockDelete, child->latch);
1633 bt_lockpage (BtLockWrite, child->latch);
1635 if( child->pool = bt_pinpool (bt, child->page_no) )
1636 child->page = bt_page (bt, child->pool, child->page_no);
1640 memcpy (root->page, child->page, bt->mgr->page_size);
1641 bt_freepage (bt, child);
1643 } while( root->page->lvl > 1 && root->page->act == 1 );
1645 bt_unlockpage (BtLockWrite, root->latch);
1646 bt_unpinlatch (root->latch);
1647 bt_unpinpool (root->pool);
1651 // find and delete key on page by marking delete flag bit
1652 // if page becomes empty, delete it from the btree
1654 BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl)
1656 unsigned char lowerfence[256], higherfence[256];
1657 uint slot, idx, dirty = 0, fence, found;
1658 BtPageSet set[1], right[1];
1661 if( slot = bt_loadpage (bt, set, key, len, lvl, BtLockWrite) )
1662 ptr = keyptr(set->page, slot);
1666 // are we deleting a fence slot?
1668 fence = slot == set->page->cnt;
1670 // if key is found delete it, otherwise ignore request
1672 if( found = !keycmp (ptr, key, len) )
1673 if( found = slotptr(set->page, slot)->dead == 0 ) {
1674 dirty = slotptr(set->page, slot)->dead = 1;
1675 set->page->dirty = 1;
1678 // collapse empty slots
1680 while( idx = set->page->cnt - 1 )
1681 if( slotptr(set->page, idx)->dead ) {
1682 *slotptr(set->page, idx) = *slotptr(set->page, idx + 1);
1683 memset (slotptr(set->page, set->page->cnt--), 0, sizeof(BtSlot));
1688 // did we delete a fence key in an upper level?
1690 if( dirty && lvl && set->page->act && fence )
1691 if( bt_fixfence (bt, set, lvl) )
1694 return bt->found = found, 0;
1696 // is this a collapsed root?
1698 if( lvl > 1 && set->page_no == ROOT_page && set->page->act == 1 )
1699 if( bt_collapseroot (bt, set) )
1702 return bt->found = found, 0;
1704 // return if page is not empty
1706 if( set->page->act ) {
1707 bt_unlockpage(BtLockWrite, set->latch);
1708 bt_unpinlatch (set->latch);
1709 bt_unpinpool (set->pool);
1710 return bt->found = found, 0;
1713 // cache copy of fence key
1714 // to post in parent
1716 ptr = keyptr(set->page, set->page->cnt);
1717 memcpy (lowerfence, ptr, ptr->len + 1);
1719 // obtain lock on right page
1721 right->page_no = bt_getid(set->page->right);
1722 right->latch = bt_pinlatch (bt, right->page_no);
1723 bt_lockpage (BtLockWrite, right->latch);
1725 // pin page contents
1727 if( right->pool = bt_pinpool (bt, right->page_no) )
1728 right->page = bt_page (bt, right->pool, right->page_no);
1732 if( right->page->kill )
1733 return bt->err = BTERR_struct;
1735 // pull contents of right peer into our empty page
1737 memcpy (set->page, right->page, bt->mgr->page_size);
1739 // cache copy of key to update
1741 ptr = keyptr(right->page, right->page->cnt);
1742 memcpy (higherfence, ptr, ptr->len + 1);
1744 // mark right page deleted and point it to left page
1745 // until we can post parent updates
1747 bt_putid (right->page->right, set->page_no);
1748 right->page->kill = 1;
1750 bt_lockpage (BtLockParent, right->latch);
1751 bt_unlockpage (BtLockWrite, right->latch);
1753 bt_lockpage (BtLockParent, set->latch);
1754 bt_unlockpage (BtLockWrite, set->latch);
1756 // redirect higher key directly to our new node contents
1758 if( bt_insertkey (bt, higherfence+1, *higherfence, lvl+1, set->page_no, time(NULL)) )
1761 // delete the old lower fence key for our node
1763 if( bt_deletekey (bt, lowerfence+1, *lowerfence, lvl+1) )
1766 // obtain delete and write locks to right node
1768 bt_unlockpage (BtLockParent, right->latch);
1769 bt_lockpage (BtLockDelete, right->latch);
1770 bt_lockpage (BtLockWrite, right->latch);
1771 bt_freepage (bt, right);
1773 bt_unlockpage (BtLockParent, set->latch);
1774 bt_unpinlatch (set->latch);
1775 bt_unpinpool (set->pool);
1780 // find key in leaf level and return row-id
1782 uid bt_findkey (BtDb *bt, unsigned char *key, uint len)
1789 if( slot = bt_loadpage (bt, set, key, len, 0, BtLockRead) )
1790 ptr = keyptr(set->page, slot);
1794 // if key exists, return row-id
1795 // otherwise return 0
1797 if( slot <= set->page->cnt )
1798 if( !keycmp (ptr, key, len) )
1799 id = bt_getid(slotptr(set->page,slot)->id);
1801 bt_unlockpage (BtLockRead, set->latch);
1802 bt_unpinlatch (set->latch);
1803 bt_unpinpool (set->pool);
1807 // check page for space available,
1808 // clean if necessary and return
1809 // 0 - page needs splitting
1810 // >0 new slot value
1812 uint bt_cleanpage(BtDb *bt, BtPage page, uint amt, uint slot)
1814 uint nxt = bt->mgr->page_size;
1815 uint cnt = 0, idx = 0;
1816 uint max = page->cnt;
1820 if( page->min >= (max+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1823 // skip cleanup if nothing to reclaim
1828 memcpy (bt->frame, page, bt->mgr->page_size);
1830 // skip page info and set rest of page to zero
1832 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1836 // try cleaning up page first
1837 // by removing deleted keys
1839 while( cnt++ < max ) {
1842 if( cnt < max && slotptr(bt->frame,cnt)->dead )
1845 // copy the key across
1847 key = keyptr(bt->frame, cnt);
1848 nxt -= key->len + 1;
1849 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1853 memcpy(slotptr(page, ++idx)->id, slotptr(bt->frame, cnt)->id, BtId);
1854 if( !(slotptr(page, idx)->dead = slotptr(bt->frame, cnt)->dead) )
1856 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1857 slotptr(page, idx)->off = nxt;
1863 // see if page has enough space now, or does it need splitting?
1865 if( page->min >= (idx+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1871 // split the root and raise the height of the btree
1873 BTERR bt_splitroot(BtDb *bt, BtPageSet *root, unsigned char *leftkey, uid page_no2)
1875 uint nxt = bt->mgr->page_size;
1878 // Obtain an empty page to use, and copy the current
1879 // root contents into it, i.e. the lower keys
1881 if( !(left = bt_newpage(bt, root->page)) )
1884 // preserve the page info at the bottom
1885 // of higher keys and set rest to zero
1887 memset(root->page+1, 0, bt->mgr->page_size - sizeof(*root->page));
1889 // insert lower keys page fence key on newroot page as first key
1891 nxt -= *leftkey + 1;
1892 memcpy ((unsigned char *)root->page + nxt, leftkey, *leftkey + 1);
1893 bt_putid(slotptr(root->page, 1)->id, left);
1894 slotptr(root->page, 1)->off = nxt;
1896 // insert stopper key on newroot page
1897 // and increase the root height
1900 ((unsigned char *)root->page)[nxt] = 2;
1901 ((unsigned char *)root->page)[nxt+1] = 0xff;
1902 ((unsigned char *)root->page)[nxt+2] = 0xff;
1903 bt_putid(slotptr(root->page, 2)->id, page_no2);
1904 slotptr(root->page, 2)->off = nxt;
1906 bt_putid(root->page->right, 0);
1907 root->page->min = nxt; // reset lowest used offset and key count
1908 root->page->cnt = 2;
1909 root->page->act = 2;
1912 // release and unpin root
1914 bt_unlockpage(BtLockWrite, root->latch);
1915 bt_unpinlatch (root->latch);
1916 bt_unpinpool (root->pool);
1920 // split already locked full node
1923 BTERR bt_splitpage (BtDb *bt, BtPageSet *set)
1925 uint cnt = 0, idx = 0, max, nxt = bt->mgr->page_size;
1926 unsigned char fencekey[256], rightkey[256];
1927 uint lvl = set->page->lvl;
1932 // split higher half of keys to bt->frame
1934 memset (bt->frame, 0, bt->mgr->page_size);
1935 max = set->page->cnt;
1939 while( cnt++ < max ) {
1940 key = keyptr(set->page, cnt);
1941 nxt -= key->len + 1;
1942 memcpy ((unsigned char *)bt->frame + nxt, key, key->len + 1);
1944 memcpy(slotptr(bt->frame,++idx)->id, slotptr(set->page,cnt)->id, BtId);
1945 if( !(slotptr(bt->frame, idx)->dead = slotptr(set->page, cnt)->dead) )
1947 slotptr(bt->frame, idx)->tod = slotptr(set->page, cnt)->tod;
1948 slotptr(bt->frame, idx)->off = nxt;
1951 // remember existing fence key for new page to the right
1953 memcpy (rightkey, key, key->len + 1);
1955 bt->frame->bits = bt->mgr->page_bits;
1956 bt->frame->min = nxt;
1957 bt->frame->cnt = idx;
1958 bt->frame->lvl = lvl;
1962 if( set->page_no > ROOT_page )
1963 memcpy (bt->frame->right, set->page->right, BtId);
1965 // get new free page and write higher keys to it.
1967 if( !(right->page_no = bt_newpage(bt, bt->frame)) )
1970 // update lower keys to continue in old page
1972 memcpy (bt->frame, set->page, bt->mgr->page_size);
1973 memset (set->page+1, 0, bt->mgr->page_size - sizeof(*set->page));
1974 nxt = bt->mgr->page_size;
1975 set->page->dirty = 0;
1980 // assemble page of smaller keys
1982 while( cnt++ < max / 2 ) {
1983 key = keyptr(bt->frame, cnt);
1984 nxt -= key->len + 1;
1985 memcpy ((unsigned char *)set->page + nxt, key, key->len + 1);
1986 memcpy(slotptr(set->page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId);
1987 slotptr(set->page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1988 slotptr(set->page, idx)->off = nxt;
1992 // remember fence key for smaller page
1994 memcpy(fencekey, key, key->len + 1);
1996 bt_putid(set->page->right, right->page_no);
1997 set->page->min = nxt;
1998 set->page->cnt = idx;
2000 // if current page is the root page, split it
2002 if( set->page_no == ROOT_page )
2003 return bt_splitroot (bt, set, fencekey, right->page_no);
2005 // insert new fences in their parent pages
2007 right->latch = bt_pinlatch (bt, right->page_no);
2008 bt_lockpage (BtLockParent, right->latch);
2010 bt_lockpage (BtLockParent, set->latch);
2011 bt_unlockpage (BtLockWrite, set->latch);
2013 // insert new fence for reformulated left block of smaller keys
2015 if( bt_insertkey (bt, fencekey+1, *fencekey, lvl+1, set->page_no, time(NULL)) )
2018 // switch fence for right block of larger keys to new right page
2020 if( bt_insertkey (bt, rightkey+1, *rightkey, lvl+1, right->page_no, time(NULL)) )
2023 bt_unlockpage (BtLockParent, set->latch);
2024 bt_unpinlatch (set->latch);
2025 bt_unpinpool (set->pool);
2027 bt_unlockpage (BtLockParent, right->latch);
2028 bt_unpinlatch (right->latch);
2032 // Insert new key into the btree at given level.
2034 BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod)
2041 if( slot = bt_loadpage (bt, set, key, len, lvl, BtLockWrite) )
2042 ptr = keyptr(set->page, slot);
2046 bt->err = BTERR_ovflw;
2050 // if key already exists, update id and return
2052 if( !keycmp (ptr, key, len) ) {
2053 if( slotptr(set->page, slot)->dead )
2055 slotptr(set->page, slot)->dead = 0;
2056 slotptr(set->page, slot)->tod = tod;
2057 bt_putid(slotptr(set->page,slot)->id, id);
2058 bt_unlockpage(BtLockWrite, set->latch);
2059 bt_unpinlatch (set->latch);
2060 bt_unpinpool (set->pool);
2064 // check if page has enough space
2066 if( slot = bt_cleanpage (bt, set->page, len, slot) )
2069 if( bt_splitpage (bt, set) )
2073 // calculate next available slot and copy key into page
2075 set->page->min -= len + 1; // reset lowest used offset
2076 ((unsigned char *)set->page)[set->page->min] = len;
2077 memcpy ((unsigned char *)set->page + set->page->min +1, key, len );
2079 for( idx = slot; idx < set->page->cnt; idx++ )
2080 if( slotptr(set->page, idx)->dead )
2083 // now insert key into array before slot
2085 if( idx == set->page->cnt )
2086 idx++, set->page->cnt++;
2091 *slotptr(set->page, idx) = *slotptr(set->page, idx -1), idx--;
2093 bt_putid(slotptr(set->page,slot)->id, id);
2094 slotptr(set->page, slot)->off = set->page->min;
2095 slotptr(set->page, slot)->tod = tod;
2096 slotptr(set->page, slot)->dead = 0;
2098 bt_unlockpage (BtLockWrite, set->latch);
2099 bt_unpinlatch (set->latch);
2100 bt_unpinpool (set->pool);
2104 // cache page of keys into cursor and return starting slot for given key
2106 uint bt_startkey (BtDb *bt, unsigned char *key, uint len)
2111 // cache page for retrieval
2113 if( slot = bt_loadpage (bt, set, key, len, 0, BtLockRead) )
2114 memcpy (bt->cursor, set->page, bt->mgr->page_size);
2118 bt->cursor_page = set->page_no;
2120 bt_unlockpage(BtLockRead, set->latch);
2121 bt_unpinlatch (set->latch);
2122 bt_unpinpool (set->pool);
2126 // return next slot for cursor page
2127 // or slide cursor right into next page
2129 uint bt_nextkey (BtDb *bt, uint slot)
2135 right = bt_getid(bt->cursor->right);
2137 while( slot++ < bt->cursor->cnt )
2138 if( slotptr(bt->cursor,slot)->dead )
2140 else if( right || (slot < bt->cursor->cnt) ) // skip infinite stopper
2148 bt->cursor_page = right;
2150 if( set->pool = bt_pinpool (bt, right) )
2151 set->page = bt_page (bt, set->pool, right);
2155 set->latch = bt_pinlatch (bt, right);
2156 bt_lockpage(BtLockRead, set->latch);
2158 memcpy (bt->cursor, set->page, bt->mgr->page_size);
2160 bt_unlockpage(BtLockRead, set->latch);
2161 bt_unpinlatch (set->latch);
2162 bt_unpinpool (set->pool);
2170 BtKey bt_key(BtDb *bt, uint slot)
2172 return keyptr(bt->cursor, slot);
2175 uid bt_uid(BtDb *bt, uint slot)
2177 return bt_getid(slotptr(bt->cursor,slot)->id);
2180 uint bt_tod(BtDb *bt, uint slot)
2182 return slotptr(bt->cursor,slot)->tod;
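// A minimal range-scan sketch (not part of the original) using the cursor
// calls above: position on a starting key, then slide right until bt_nextkey
// returns zero at the stopper key.
//
//	uint slot = bt_startkey (bt, key, len);
//
//	if( slot )
//		slot--;				// back up so the starting key itself is returned
//
//	while( slot = bt_nextkey (bt, slot) ) {
//		BtKey ptr = bt_key (bt, slot);	// key bytes for this slot
//		uid id = bt_uid (bt, slot);	// associated row-id
//		fwrite (ptr->key, ptr->len, 1, stdout);
//		fputc ('\n', stdout);
//	}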
2189 double getCpuTime(int type)
2192 FILETIME xittime[1];
2193 FILETIME systime[1];
2194 FILETIME usrtime[1];
2195 SYSTEMTIME timeconv[1];
2198 memset (timeconv, 0, sizeof(SYSTEMTIME));
2202 GetSystemTimeAsFileTime (xittime);
2203 FileTimeToSystemTime (xittime, timeconv);
2204 ans = (double)timeconv->wDayOfWeek * 3600 * 24;
2207 GetProcessTimes (GetCurrentProcess(), crtime, xittime, systime, usrtime);
2208 FileTimeToSystemTime (usrtime, timeconv);
2211 GetProcessTimes (GetCurrentProcess(), crtime, xittime, systime, usrtime);
2212 FileTimeToSystemTime (systime, timeconv);
2216 ans += (double)timeconv->wHour * 3600;
2217 ans += (double)timeconv->wMinute * 60;
2218 ans += (double)timeconv->wSecond;
2219 ans += (double)timeconv->wMilliseconds / 1000;
2224 #include <sys/resource.h>
2226 double getCpuTime(int type)
2228 struct rusage used[1];
2229 struct timeval tv[1];
2233 gettimeofday(tv, NULL);
2234 return (double)tv->tv_sec + (double)tv->tv_usec / 1000000;
2237 getrusage(RUSAGE_SELF, used);
2238 return (double)used->ru_utime.tv_sec + (double)used->ru_utime.tv_usec / 1000000;
2241 getrusage(RUSAGE_SELF, used);
2242 return (double)used->ru_stime.tv_sec + (double)used->ru_stime.tv_usec / 1000000;
2249 void bt_latchaudit (BtDb *bt)
2251 ushort idx, hashidx;
2257 for( idx = 1; idx <= bt->mgr->latchmgr->latchdeployed; idx++ ) {
2258 set->latch = bt->mgr->latchsets + idx;
2259 if( set->latch->pin ) {
2260 fprintf(stderr, "latchset %d pinned for page %.6x\n", idx, set->latch->page_no);
2261 set->latch->pin = 0;
2265 for( hashidx = 0; hashidx < bt->mgr->latchmgr->latchhash; hashidx++ ) {
2266 if( idx = bt->mgr->latchmgr->table[hashidx].slot ) do {
2267 set->latch = bt->mgr->latchsets + idx;
2268 if( set->latch->hash != hashidx )
2269 fprintf(stderr, "latchset %d wrong hashidx\n", idx);
2270 if( set->latch->pin )
2271 fprintf(stderr, "latchset %d pinned for page %.8x\n", idx, set->latch->page_no);
2272 } while( idx = set->latch->next );
2275 next = bt->mgr->latchmgr->nlatchpage + LATCH_page;
2276 page_no = LEAF_page;
2278 while( page_no < bt_getid(bt->mgr->latchmgr->alloc->right) ) {
2279 pread (bt->mgr->idx, bt->frame, bt->mgr->page_size, page_no << bt->mgr->page_bits);
2280 if( !bt->frame->free )
2281 for( idx = 0; idx++ < bt->frame->cnt - 1; ) {
2282 ptr = keyptr(bt->frame, idx+1);
2283 if( keycmp (keyptr(bt->frame, idx), ptr->key, ptr->len) >= 0 )
2284 fprintf(stderr, "page %.8x idx %.2x out of order\n", page_no, idx);
2287 if( page_no > LEAF_page )
2301 // standalone program to index file of keys
2302 // then list them onto std-out
2305 void *index_file (void *arg)
2307 uint __stdcall index_file (void *arg)
2310 int line = 0, found = 0, cnt = 0;
2311 uid next, page_no = LEAF_page; // start on first page of leaves
2312 unsigned char key[256];
2313 ThreadArg *args = arg;
2314 int ch, len = 0, slot;
2321 bt = bt_open (args->mgr);
2324 switch(args->type | 0x20)
2327 fprintf(stderr, "started latch mgr audit\n");
2329 fprintf(stderr, "finished latch mgr audit\n");
2333 fprintf(stderr, "started indexing for %s\n", args->infile);
2334 if( in = fopen (args->infile, "rb") )
2335 while( ch = getc(in), ch != EOF )
2340 if( args->num == 1 )
2341 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
2343 else if( args->num )
2344 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
2346 if( bt_insertkey (bt, key, len, 0, line, *tod) )
2347 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
2350 else if( len < 255 )
2352 fprintf(stderr, "finished %s for %d keys\n", args->infile, line);
2356 fprintf(stderr, "started deleting keys for %s\n", args->infile);
2357 if( in = fopen (args->infile, "rb") )
2358 while( ch = getc(in), ch != EOF )
2362 if( args->num == 1 )
2363 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
2365 else if( args->num )
2366 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
2368 if( bt_deletekey (bt, key, len, 0) )
2369 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
2372 else if( len < 255 )
2374 fprintf(stderr, "finished %s for keys, %d \n", args->infile, line);
2378 fprintf(stderr, "started finding keys for %s\n", args->infile);
2379 if( in = fopen (args->infile, "rb") )
2380 while( ch = getc(in), ch != EOF )
2384 if( args->num == 1 )
2385 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
2387 else if( args->num )
2388 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
2390 if( bt_findkey (bt, key, len) )
2393 fprintf(stderr, "Error %d Syserr %d Line: %d\n", bt->err, errno, line), exit(0);
2396 else if( len < 255 )
2398 fprintf(stderr, "finished %s for %d keys, found %d\n", args->infile, line, found);
2402 fprintf(stderr, "started scanning\n");
2404 if( set->pool = bt_pinpool (bt, page_no) )
2405 set->page = bt_page (bt, set->pool, page_no);
2408 set->latch = bt_pinlatch (bt, page_no);
2409 bt_lockpage (BtLockRead, set->latch);
2410 next = bt_getid (set->page->right);
2411 cnt += set->page->act;
2413 for( slot = 0; slot++ < set->page->cnt; )
2414 if( next || slot < set->page->cnt )
2415 if( !slotptr(set->page, slot)->dead ) {
2416 ptr = keyptr(set->page, slot);
2417 fwrite (ptr->key, ptr->len, 1, stdout);
2418 fputc ('\n', stdout);
2421 bt_unlockpage (BtLockRead, set->latch);
2422 bt_unpinlatch (set->latch);
2423 bt_unpinpool (set->pool);
2424 } while( page_no = next );
2426 cnt--; // remove stopper key
2427 fprintf(stderr, " Total keys read %d\n", cnt);
2431 fprintf(stderr, "started counting\n");
2432 next = bt->mgr->latchmgr->nlatchpage + LATCH_page;
2433 page_no = LEAF_page;
2435 while( page_no < bt_getid(bt->mgr->latchmgr->alloc->right) ) {
2436 uid off = page_no << bt->mgr->page_bits;
2438 pread (bt->mgr->idx, bt->frame, bt->mgr->page_size, off);
2442 SetFilePointer (bt->mgr->idx, (long)off, (long*)(&off)+1, FILE_BEGIN);
2444 if( !ReadFile(bt->mgr->idx, bt->frame, bt->mgr->page_size, amt, NULL))
2445 return bt->err = BTERR_map;
2447 if( *amt < bt->mgr->page_size )
2448 return bt->err = BTERR_map;
2450 if( !bt->frame->free && !bt->frame->lvl )
2451 cnt += bt->frame->act;
2452 if( page_no > LEAF_page )
2457 cnt--; // remove stopper key
2458 fprintf(stderr, " Total keys read %d\n", cnt);
2470 typedef struct timeval timer;
2472 int main (int argc, char **argv)
2474 int idx, cnt, len, slot, err;
2475 int segsize, bits = 16;
2492 fprintf (stderr, "Usage: %s idx_file Read/Write/Scan/Delete/Find [page_bits mapped_segments seg_bits line_numbers src_file1 src_file2 ... ]\n", argv[0]);
2493 fprintf (stderr, " where page_bits is the page size in bits\n");
2494 fprintf (stderr, " mapped_segments is the number of mmap segments in buffer pool\n");
2495 fprintf (stderr, " seg_bits is the size of individual segments in buffer pool in pages in bits\n");
2496 fprintf (stderr, " line_numbers = 1 to append line numbers to keys\n");
2497 fprintf (stderr, " src_file1 thru src_filen are files of keys separated by newline\n");
2501 start = getCpuTime(0);
2504 bits = atoi(argv[3]);
2507 poolsize = atoi(argv[4]);
2510 fprintf (stderr, "Warning: no mapped_pool\n");
2512 if( poolsize > 65535 )
2513 fprintf (stderr, "Warning: mapped_pool > 65535 segments\n");
2516 segsize = atoi(argv[5]);
2518 segsize = 4; // 16 pages per mmap segment
2521 num = atoi(argv[6]);
2525 threads = malloc (cnt * sizeof(pthread_t));
2527 threads = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, cnt * sizeof(HANDLE));
2529 args = malloc (cnt * sizeof(ThreadArg));
2531 mgr = bt_mgr ((argv[1]), BT_rw, bits, poolsize, segsize, poolsize / 8);
2534 fprintf(stderr, "Index Open Error %s\n", argv[1]);
2540 for( idx = 0; idx < cnt; idx++ ) {
2541 args[idx].infile = argv[idx + 7];
2542 args[idx].type = argv[2][0];
2543 args[idx].mgr = mgr;
2544 args[idx].num = num;
2545 args[idx].idx = idx;
2547 if( err = pthread_create (threads + idx, NULL, index_file, args + idx) )
2548 fprintf(stderr, "Error creating thread %d\n", err);
2550 threads[idx] = (HANDLE)_beginthreadex(NULL, 65536, index_file, args + idx, 0, NULL);
2554 // wait for termination
2557 for( idx = 0; idx < cnt; idx++ )
2558 pthread_join (threads[idx], NULL);
2560 WaitForMultipleObjects (cnt, threads, TRUE, INFINITE);
2562 for( idx = 0; idx < cnt; idx++ )
2563 CloseHandle(threads[idx]);
2566 elapsed = getCpuTime(0) - start;
2567 fprintf(stderr, " real %dm%.3fs\n", (int)(elapsed/60), elapsed - (int)(elapsed/60)*60);
2568 elapsed = getCpuTime(1);
2569 fprintf(stderr, " user %dm%.3fs\n", (int)(elapsed/60), elapsed - (int)(elapsed/60)*60);
2570 elapsed = getCpuTime(2);
2571 fprintf(stderr, " sys %dm%.3fs\n", (int)(elapsed/60), elapsed - (int)(elapsed/60)*60);