1 // foster btree version d
4 // author: karl malbrain, malbrain@cal.berkeley.edu
7 This work, including the source code, documentation
8 and related data, is placed into the public domain.
10 The original author is Karl Malbrain.
12 THIS SOFTWARE IS PROVIDED AS-IS WITHOUT WARRANTY
13 OF ANY KIND, NOT EVEN THE IMPLIED WARRANTY OF
14 MERCHANTABILITY. THE AUTHOR OF THIS SOFTWARE,
15 ASSUMES _NO_ RESPONSIBILITY FOR ANY CONSEQUENCE
16 RESULTING FROM THE USE, MODIFICATION, OR
17 REDISTRIBUTION OF THIS SOFTWARE.
20 // Please see the project home page for documentation
21 // code.google.com/p/high-concurrency-btree
23 #define _FILE_OFFSET_BITS 64
24 #define _LARGEFILE64_SOURCE
40 #define WIN32_LEAN_AND_MEAN
53 typedef unsigned long long uid;
56 typedef unsigned long long off64_t;
57 typedef unsigned short ushort;
58 typedef unsigned int uint;
61 #define BT_ro 0x6f72 // ro
62 #define BT_rw 0x7772 // rw
64 #define BT_maxbits 24 // maximum page size in bits
65 #define BT_minbits 9 // minimum page size in bits
66 #define BT_minpage (1 << BT_minbits) // minimum page size
67 #define BT_maxpage (1 << BT_maxbits) // maximum page size
70 There are five lock types for each node in three independent sets:
71 1. (set 1) AccessIntent: Sharable. Going to Read the node. Incompatible with NodeDelete.
72 2. (set 1) NodeDelete: Exclusive. About to release the node. Incompatible with AccessIntent.
73 3. (set 2) ReadLock: Sharable. Read the node. Incompatible with WriteLock.
74 4. (set 2) WriteLock: Exclusive. Modify the node. Incompatible with ReadLock and other WriteLocks.
75 5. (set 3) ParentLock: Exclusive. Have parent adopt/delete maximum foster child from the node.
86 // Define the length of the page and key pointers
90 // Page key slot definition.
92 // If BT_maxbits is 15 or less, you can save 4 bytes
93 // for each key stored by making the first two uints
94 // into ushorts. You can also save 4 bytes by removing
95 // the tod field from the key.
97 // Keys are marked dead, but remain on the page until
98 // cleanup is called. The fence key (highest key) for
99 // the page is always present, even after cleanup.
102 uint off:BT_maxbits; // page offset for key start
103 uint dead:1; // set for deleted key
104 uint tod; // time-stamp for key
105 unsigned char id[BtId]; // id associated with key
108 // The key structure occupies space at the upper end of
109 // each page. It's a length byte followed by the value
114 unsigned char key[1];
117 // The first part of an index page.
118 // It is immediately followed
119 // by the BtSlot array of keys.
121 typedef struct Page {
122 uint cnt; // count of keys in page
123 uint act; // count of active keys
124 uint min; // next key offset
125 uint foster; // count of foster children
126 unsigned char bits; // page size in bits
127 unsigned char lvl:6; // level of page
128 unsigned char kill:1; // page is being deleted
129 unsigned char dirty:1; // page needs to be cleaned
130 unsigned char right[BtId]; // page number to right
133 // latch table lock structure
135 // exclusive is set for write access
136 // share is count of read accessors
137 // pending is count of waiting writers
138 // grant write lock when share == 0
142 volatile uint exclusive:1;
143 volatile uint pending:15;
144 volatile uint share:16;
145 pthread_mutex_t mut[1];
146 pthread_cond_t cond[1];
153 BtLatch readwr[1]; // read/write page lock
154 BtLatch access[1]; // Access Intent/Page delete
155 BtLatch parent[1]; // adoption of foster children
158 // The memory mapping pool table buffer manager entry
161 unsigned long long int lru; // number of times accessed
162 uid basepage; // mapped base page number
163 char *map; // mapped memory pointer
164 uint pin; // mapped page pin counter
165 uint slot; // slot index in this array
166 void *hashprev; // previous pool entry for the same hash idx
167 void *hashnext; // next pool entry for the same hash idx
171 // array of page latch sets, one for each page in map segment
172 BtLatchSet pagelatch[0];
175 // The object structure for Btree access
178 uint page_size; // page size
179 uint page_bits; // page size in bits
180 uint seg_bits; // seg size in pages in bits
181 uint mode; // read-write mode
184 char *pooladvise; // bit maps for pool page advisements
188 uint poolcnt; // highest page pool node in use
189 uint poolmax; // highest page pool node allocated
190 uint poolmask; // total size of pages in mmap segment - 1
191 uint hashsize; // size of Hash Table for pool entries
192 volatile uint evicted; // last evicted hash table slot
193 ushort *hash; // hash table of pool entries
194 BtLatch *latch; // latches for hash table slots
195 char *nodes; // memory pool page segments
199 BtMgr *mgr; // buffer manager for thread
200 BtPage temp; // temporary frame buffer (memory mapped/file IO)
201 BtPage alloc; // frame buffer for alloc page ( page 0 )
202 BtPage cursor; // cached frame for start/next (never mapped)
203 BtPage frame; // spare frame for the page split (never mapped)
204 BtPage zero; // page frame for zeroes at end of file
205 BtPage page; // current page
206 uid page_no; // current page number
207 uid cursor_page; // current cursor page number
208 unsigned char *mem; // frame, cursor, page memory buffer
209 int err; // last error
223 extern void bt_close (BtDb *bt);
224 extern BtDb *bt_open (BtMgr *mgr);
225 extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uid id, uint tod);
226 extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl);
227 extern uid bt_findkey (BtDb *bt, unsigned char *key, uint len);
228 extern uint bt_startkey (BtDb *bt, unsigned char *key, uint len);
229 extern uint bt_nextkey (BtDb *bt, uint slot);
232 extern BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolsize, uint segsize, uint hashsize);
233 void bt_mgrclose (BtMgr *mgr);
235 // Helper functions to return cursor slot values
237 extern BtKey bt_key (BtDb *bt, uint slot);
238 extern uid bt_uid (BtDb *bt, uint slot);
239 extern uint bt_tod (BtDb *bt, uint slot);
241 // BTree page number constants
246 // Number of levels to create in a new BTree
250 // The page is allocated from low and hi ends.
251 // The key offsets and row-id's are allocated
252 // from the bottom, while the text of the key
253 // is allocated from the top. When the two
254 // areas meet, the page is split into two.
256 // A key consists of a length byte, two bytes of
257 // index number (0 - 65534), and up to 253 bytes
258 // of key value. Duplicate keys are discarded.
259 // Associated with each key is a 48 bit row-id.
261 // The b-tree root is always located at page 1.
262 // The first leaf page of level zero is always
263 // located on page 2.
265 // When the root page fills, it is split in two and
266 // the tree height is raised by a new root at page
267 // one with two keys.
269 // Deleted keys are marked with a dead bit until
270 // page cleanup. The fence key for a node is always
271 // present, even after deletion and cleanup.
273 // Groups of pages called segments from the btree are
274 // cached with memory mapping. A hash table is used to keep
275 // track of the cached segments. This behaviour is controlled
276 // by the cache block size parameter to bt_open.
278 // To achieve maximum concurrency one page is locked at a time
279 // as the tree is traversed to find leaf key in question.
281 // An adoption traversal leaves the parent node locked as the
282 // tree is traversed to the level in question.
284 // Page 0 is dedicated to lock for new page extensions,
285 // and chains empty pages together for reuse.
287 // Empty pages are chained together through the ALLOC page and reused.
289 // Access macros to address slot and key values from the page
291 #define slotptr(page, slot) (((BtSlot *)(page+1)) + (slot-1))
292 #define keyptr(page, slot) ((BtKey)((unsigned char*)(page) + slotptr(page, slot)->off))
294 void bt_putid(unsigned char *dest, uid id)
299 dest[i] = (unsigned char)id, id >>= 8;
302 uid bt_getid(unsigned char *src)
307 for( i = 0; i < BtId; i++ )
308 id <<= 8, id |= *src++;
313 void bt_mgrclose (BtMgr *mgr)
318 // release mapped pages
319 // note that slot zero is never used
321 for( slot = 1; slot < mgr->poolmax; slot++ ) {
322 pool = (BtPool *)(mgr->nodes + slot * (sizeof(BtPool) + (mgr->poolmask + 1) * sizeof(BtLatchSet)));
325 munmap (pool->map, (mgr->poolmask+1) << mgr->page_bits);
328 FlushViewOfFile(pool->map, 0);
329 UnmapViewOfFile(pool->map);
330 CloseHandle(pool->hmap);
340 free (mgr->pooladvise);
343 FlushFileBuffers(mgr->idx);
344 CloseHandle(mgr->idx);
345 GlobalFree (mgr->nodes);
346 GlobalFree (mgr->hash);
347 GlobalFree (mgr->latch);
352 // close and release memory
354 void bt_close (BtDb *bt)
361 VirtualFree (bt->mem, 0, MEM_RELEASE);
366 // open/create new btree buffer manager
368 // call with file_name, BT_openmode, bits in page size (e.g. 16),
369 // size of mapped page pool (e.g. 8192)
371 BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolmax, uint segsize, uint hashsize)
373 uint lvl, attr, cacheblk, last, slot, idx;
382 SYSTEM_INFO sysinfo[1];
385 // determine sanity of page size and buffer pool
387 if( bits > BT_maxbits )
389 else if( bits < BT_minbits )
393 return NULL; // must have buffer pool
396 mgr = calloc (1, sizeof(BtMgr));
398 switch (mode & 0x7fff)
401 mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666);
407 mgr->idx = open ((char*)name, O_RDONLY);
412 return free(mgr), NULL;
414 cacheblk = 4096; // minimum mmap segment size for unix
417 mgr = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, sizeof(BtMgr));
418 attr = FILE_ATTRIBUTE_NORMAL;
419 switch (mode & 0x7fff)
422 mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL);
428 mgr->idx = CreateFile(name, GENERIC_READ, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_EXISTING, attr, NULL);
432 if( mgr->idx == INVALID_HANDLE_VALUE )
433 return GlobalFree(mgr), NULL;
435 // normalize cacheblk to multiple of sysinfo->dwAllocationGranularity
436 GetSystemInfo(sysinfo);
437 cacheblk = sysinfo->dwAllocationGranularity;
441 alloc = malloc (BT_maxpage);
444 // read minimum page size to get root info
446 if( size = lseek (mgr->idx, 0L, 2) ) {
447 if( pread(mgr->idx, alloc, BT_minpage, 0) == BT_minpage )
450 return free(mgr), free(alloc), NULL;
451 } else if( mode == BT_ro )
452 return bt_mgrclose (mgr), NULL;
454 alloc = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE);
455 size = GetFileSize(mgr->idx, amt);
458 if( !ReadFile(mgr->idx, (char *)alloc, BT_minpage, amt, NULL) )
459 return bt_mgrclose (mgr), NULL;
461 } else if( mode == BT_ro )
462 return bt_mgrclose (mgr), NULL;
465 mgr->page_size = 1 << bits;
466 mgr->page_bits = bits;
468 mgr->poolmax = poolmax;
471 if( cacheblk < mgr->page_size )
472 cacheblk = mgr->page_size;
474 // mask for partial memmaps
476 mgr->poolmask = (cacheblk >> bits) - 1;
478 // see if requested size of pages per memmap is greater
480 if( (1 << segsize) > mgr->poolmask )
481 mgr->poolmask = (1 << segsize) - 1;
485 while( (1 << mgr->seg_bits) <= mgr->poolmask )
488 mgr->hashsize = hashsize;
491 mgr->nodes = calloc (poolmax, (sizeof(BtPool) + (mgr->poolmask + 1) * sizeof(BtLatchSet)));
492 mgr->hash = calloc (hashsize, sizeof(ushort));
493 mgr->latch = calloc (hashsize, sizeof(BtLatch));
494 mgr->pooladvise = calloc (poolmax, (mgr->poolmask + 1) / 8);
496 mgr->nodes = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, poolmax * (sizeof(BtPool) + (mgr->poolmask + 1) * sizeof(BtLatchSet)));
497 mgr->hash = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(ushort));
498 mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtLatch));
501 // initialize buffer pool page latches
503 for( slot = 1; slot < poolmax; slot++ ) {
504 BtLatchSet *latchset = (BtLatchSet *)(mgr->nodes + slot * (sizeof(BtPool) + (mgr->poolmask + 1) * sizeof(BtLatchSet)));
505 for( idx = 0; idx < mgr->poolmask + 1; idx++ ) {
507 pthread_mutex_init (latchset[idx].readwr->mut, NULL);
508 pthread_cond_init (latchset[idx].readwr->cond, NULL);
509 pthread_mutex_init (latchset[idx].access->mut, NULL);
510 pthread_cond_init (latchset[idx].access->cond, NULL);
511 pthread_mutex_init (latchset[idx].parent->mut, NULL);
512 pthread_cond_init (latchset[idx].parent->cond, NULL);
514 InitializeSRWLock (latchset[idx].readwr->srw);
515 InitializeSRWLock (latchset[idx].access->srw);
516 InitializeSRWLock (latchset[idx].parent->srw);
521 // initialize buffer pool mgr latches
523 for( slot = 0; slot < hashsize; slot++ ) {
525 pthread_mutex_init (mgr->latch[slot].mut, NULL);
526 pthread_cond_init (mgr->latch[slot].cond, NULL);
528 InitializeSRWLock (mgr->latch[slot].srw);
535 // initializes an empty b-tree with root page and page of leaves
537 memset (alloc, 0, 1 << bits);
538 bt_putid(alloc->right, MIN_lvl+1);
539 alloc->bits = mgr->page_bits;
542 if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size )
543 return bt_mgrclose (mgr), NULL;
545 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
546 return bt_mgrclose (mgr), NULL;
548 if( *amt < mgr->page_size )
549 return bt_mgrclose (mgr), NULL;
552 memset (alloc, 0, 1 << bits);
553 alloc->bits = mgr->page_bits;
555 for( lvl=MIN_lvl; lvl--; ) {
556 slotptr(alloc, 1)->off = mgr->page_size - 3;
557 bt_putid(slotptr(alloc, 1)->id, lvl ? MIN_lvl - lvl + 1 : 0); // next(lower) page number
558 key = keyptr(alloc, 1);
559 key->len = 2; // create stopper key
562 alloc->min = mgr->page_size - 3;
567 if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size )
568 return bt_mgrclose (mgr), NULL;
570 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
571 return bt_mgrclose (mgr), NULL;
573 if( *amt < mgr->page_size )
574 return bt_mgrclose (mgr), NULL;
578 // create empty page area by writing last page of first
579 // segment area (other pages are zeroed by O/S)
581 if( mgr->poolmask ) {
582 memset(alloc, 0, mgr->page_size);
583 last = mgr->poolmask;
585 while( last < MIN_lvl + 1 )
586 last += mgr->poolmask + 1;
589 pwrite(mgr->idx, alloc, mgr->page_size, last << mgr->page_bits);
591 SetFilePointer (mgr->idx, last << mgr->page_bits, NULL, FILE_BEGIN);
592 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
593 return bt_mgrclose (mgr), NULL;
594 if( *amt < mgr->page_size )
595 return bt_mgrclose (mgr), NULL;
603 VirtualFree (alloc, 0, MEM_RELEASE);
608 // open BTree access method
609 // based on buffer manager
611 BtDb *bt_open (BtMgr *mgr)
613 BtDb *bt = malloc (sizeof(*bt));
615 memset (bt, 0, sizeof(*bt));
618 bt->mem = malloc (3 *mgr->page_size);
620 bt->mem = VirtualAlloc(NULL, 3 * mgr->page_size, MEM_COMMIT, PAGE_READWRITE);
622 bt->frame = (BtPage)bt->mem;
623 bt->zero = (BtPage)(bt->mem + 1 * mgr->page_size);
624 bt->cursor = (BtPage)(bt->mem + 2 * mgr->page_size);
628 // compare two keys, returning > 0, = 0, or < 0
629 // as the comparison value
631 int keycmp (BtKey key1, unsigned char *key2, uint len2)
633 uint len1 = key1->len;
636 if( ans = memcmp (key1->key, key2, len1 > len2 ? len2 : len1) )
649 // wait if exclusive request is pending, or granted
650 // and add 1 to the share count
652 void bt_readlock(BtLatch *latch)
655 pthread_mutex_lock (latch->mut);
657 while( latch->pending || latch->exclusive )
658 pthread_cond_wait (latch->cond, latch->mut);
660 // add one to readers counter
663 pthread_mutex_unlock (latch->mut);
665 AcquireSRWLockShared (latch->srw);
669 // wait for other read and write latches to relinquish
671 void bt_writelock(BtLatch *latch)
674 pthread_mutex_lock (latch->mut);
677 while( latch->share || latch->exclusive )
678 pthread_cond_wait (latch->cond, latch->mut);
680 latch->exclusive = 1;
682 pthread_mutex_unlock (latch->mut);
684 AcquireSRWLockExclusive (latch->srw);
688 // try to obtain write lock
690 // return 1 if obtained,
691 // 0 if already write or read locked
693 int bt_writetry(BtLatch *latch)
698 pthread_mutex_lock (latch->mut);
700 if( !latch->share && !latch->exclusive )
701 result = latch->exclusive = 1;
703 pthread_mutex_unlock (latch->mut);
705 result = TryAcquireSRWLockExclusive (latch->srw);
712 void bt_releasewrite(BtLatch *latch)
715 pthread_mutex_lock (latch->mut);
716 latch->exclusive = 0;
717 pthread_cond_broadcast (latch->cond);
718 pthread_mutex_unlock (latch->mut);
720 ReleaseSRWLockExclusive (latch->srw);
724 // decrement reader count
726 void bt_releaseread(BtLatch *latch)
729 pthread_mutex_lock (latch->mut);
731 if( !--latch->share && latch->pending )
732 pthread_cond_broadcast (latch->cond);
734 pthread_mutex_unlock (latch->mut);
736 ReleaseSRWLockShared (latch->srw);
742 // find segment in pool
743 // must be called with hashslot idx locked
744 // return NULL if not there
745 // otherwise return node
747 BtPool *bt_findpool(BtDb *bt, uid page_no, uint idx)
752 // compute start of hash chain in pool
754 if( slot = bt->mgr->hash[idx] )
755 pool = (BtPool *)(bt->mgr->nodes + slot * (sizeof(BtPool) + (bt->mgr->poolmask + 1) * sizeof(BtLatchSet)));
759 page_no &= ~bt->mgr->poolmask;
761 while( pool->basepage != page_no )
762 if( pool = pool->hashnext )
770 // add segment to hash table
772 void bt_linkhash(BtDb *bt, BtPool *pool, uid page_no, int idx)
777 pool->hashprev = pool->hashnext = NULL;
778 pool->basepage = page_no & ~bt->mgr->poolmask;
781 if( slot = bt->mgr->hash[idx] ) {
782 node = (BtPool *)(bt->mgr->nodes + slot * (sizeof(BtPool) + (bt->mgr->poolmask + 1) * sizeof(BtLatchSet)));
783 pool->hashnext = node;
784 node->hashprev = pool;
787 bt->mgr->hash[idx] = pool->slot;
790 // find best segment to evict from buffer pool
792 BtPool *bt_findlru (BtDb *bt, uint hashslot)
794 unsigned long long int target = ~0LL;
795 BtPool *pool = NULL, *node;
800 node = (BtPool *)(bt->mgr->nodes + hashslot * (sizeof(BtPool) + (bt->mgr->poolmask + 1) * sizeof(BtLatchSet)));
802 // scan pool entries under hash table slot
807 if( node->lru > target )
811 } while( node = node->hashnext );
816 // map new buffer pool segment to virtual memory
818 BTERR bt_mapsegment(BtDb *bt, BtPool *pool, uid page_no)
820 off64_t off = (page_no & ~bt->mgr->poolmask) << bt->mgr->page_bits;
821 off64_t limit = off + ((bt->mgr->poolmask+1) << bt->mgr->page_bits);
825 flag = PROT_READ | ( bt->mgr->mode == BT_ro ? 0 : PROT_WRITE );
826 pool->map = mmap (0, (bt->mgr->poolmask+1) << bt->mgr->page_bits, flag, MAP_SHARED, bt->mgr->idx, off);
827 if( pool->map == MAP_FAILED )
828 return bt->err = BTERR_map;
829 // clear out madvise issued bits
830 memset (bt->mgr->pooladvise + pool->slot * (bt->mgr->poolmask + 1) / 8, 0, (bt->mgr->poolmask + 1)/8);
832 flag = ( bt->mgr->mode == BT_ro ? PAGE_READONLY : PAGE_READWRITE );
833 pool->hmap = CreateFileMapping(bt->mgr->idx, NULL, flag, (DWORD)(limit >> 32), (DWORD)limit, NULL);
835 return bt->err = BTERR_map;
837 flag = ( bt->mgr->mode == BT_ro ? FILE_MAP_READ : FILE_MAP_WRITE );
838 pool->map = MapViewOfFile(pool->hmap, flag, (DWORD)(off >> 32), (DWORD)off, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
840 return bt->err = BTERR_map;
845 // find or place requested page in segment-pool
846 // return pool table entry, incrementing pin
848 BtPool *bt_pinpage(BtDb *bt, uid page_no)
850 BtPool *pool, *node, *next;
851 uint slot, idx, victim;
854 // lock hash table chain
856 idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
857 bt_readlock (&bt->mgr->latch[idx]);
859 // look up in hash table
861 if( pool = bt_findpool(bt, page_no, idx) ) {
863 __sync_fetch_and_add(&pool->pin, 1);
865 _InterlockedIncrement (&pool->pin);
867 bt_releaseread (&bt->mgr->latch[idx]);
872 // upgrade to write lock
874 bt_releaseread (&bt->mgr->latch[idx]);
875 bt_writelock (&bt->mgr->latch[idx]);
877 // try to find page in pool with write lock
879 if( pool = bt_findpool(bt, page_no, idx) ) {
881 __sync_fetch_and_add(&pool->pin, 1);
883 _InterlockedIncrement (&pool->pin);
885 bt_releasewrite (&bt->mgr->latch[idx]);
890 // allocate a new pool node
891 // and add to hash table
894 slot = __sync_fetch_and_add(&bt->mgr->poolcnt, 1);
896 slot = _InterlockedIncrement (&bt->mgr->poolcnt) - 1;
899 if( ++slot < bt->mgr->poolmax ) {
900 pool = (BtPool *)(bt->mgr->nodes + slot * (sizeof(BtPool) + (bt->mgr->poolmask + 1) * sizeof(BtLatchSet)));
903 if( bt_mapsegment(bt, pool, page_no) )
906 bt_linkhash(bt, pool, page_no, idx);
908 __sync_fetch_and_add(&pool->pin, 1);
910 _InterlockedIncrement (&pool->pin);
912 bt_releasewrite (&bt->mgr->latch[idx]);
916 // pool table is full
917 // find best pool entry to evict
920 __sync_fetch_and_add(&bt->mgr->poolcnt, -1);
922 _InterlockedDecrement (&bt->mgr->poolcnt);
927 victim = __sync_fetch_and_add(&bt->mgr->evicted, 1);
929 victim = _InterlockedIncrement (&bt->mgr->evicted) - 1;
931 victim %= bt->mgr->hashsize;
933 // try to get write lock
934 // skip entry if not obtained
936 if( !bt_writetry (&bt->mgr->latch[victim]) )
939 // if cache entry is empty
940 // or no slots are unpinned
943 if( !(pool = bt_findlru(bt, bt->mgr->hash[victim])) ) {
944 bt_releasewrite (&bt->mgr->latch[victim]);
948 // unlink victim pool node from hash table
950 if( node = pool->hashprev )
951 node->hashnext = pool->hashnext;
952 else if( node = pool->hashnext )
953 bt->mgr->hash[victim] = node->slot;
955 bt->mgr->hash[victim] = 0;
957 if( node = pool->hashnext )
958 node->hashprev = pool->hashprev;
960 bt_releasewrite (&bt->mgr->latch[victim]);
962 // remove old file mapping
964 munmap (pool->map, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
966 FlushViewOfFile(pool->map, 0);
967 UnmapViewOfFile(pool->map);
968 CloseHandle(pool->hmap);
972 // create new pool mapping
973 // and link into hash table
975 if( bt_mapsegment(bt, pool, page_no) )
978 bt_linkhash(bt, pool, page_no, idx);
980 __sync_fetch_and_add(&pool->pin, 1);
982 _InterlockedIncrement (&pool->pin);
984 bt_releasewrite (&bt->mgr->latch[idx]);
989 // place write, read, or parent lock on requested page_no.
990 // pin to buffer pool and return page pointer
992 BTERR bt_lockpage(BtDb *bt, uid page_no, BtLock mode, BtPage *pageptr)
999 // find/create maping in pool table
1000 // and pin our pool slot
1002 if( pool = bt_pinpage(bt, page_no) )
1003 subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping
1007 set = pool->pagelatch + subpage;
1008 page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits));
1011 uint idx = subpage / 8;
1012 uint bit = subpage % 8;
// NOTE(review): operator-precedence bug in the test below.
// `!(x >> bit) & 1` parses as `(!(x >> bit)) & 1`, i.e. it is true
// only when EVERY advise bit at position >= bit is clear, not when
// this page's single bit is clear. The intended single-bit test is
// presumably `!(((...)[idx] >> bit) & 1)`. Effect is benign (some
// madvise calls are skipped, none issued wrongly) but should be fixed.
1014 if( !((bt->mgr->pooladvise + pool->slot * (bt->mgr->poolmask + 1)/8)[idx] >> bit) & 1 ) {
1015 madvise (page, bt->mgr->page_size, MADV_WILLNEED);
// record that this page of the segment has been advised
1016 (bt->mgr->pooladvise + pool->slot * (bt->mgr->poolmask + 1)/8)[idx] |= 1 << bit;
1023 bt_readlock (set->readwr);
1026 bt_writelock (set->readwr);
1029 bt_readlock (set->access);
1032 bt_writelock (set->access);
1035 bt_writelock (set->parent);
1038 return bt->err = BTERR_lock;
1047 // remove write, read, or parent lock on requested page_no.
1049 BTERR bt_unlockpage(BtDb *bt, uid page_no, BtLock mode)
1055 // since page is pinned
1056 // it should still be in the buffer pool
1057 // and is in no danger of being a victim for reuse
1059 idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
1060 bt_readlock (&bt->mgr->latch[idx]);
1062 if( pool = bt_findpool(bt, page_no, idx) )
1063 subpage = (uint)(page_no & bt->mgr->poolmask);
1065 return bt->err = BTERR_hash;
1067 bt_releaseread (&bt->mgr->latch[idx]);
1068 set = pool->pagelatch + subpage;
1072 bt_releaseread (set->readwr);
1075 bt_releasewrite (set->readwr);
1078 bt_releaseread (set->access);
1081 bt_releasewrite (set->access);
1084 bt_releasewrite (set->parent);
1087 return bt->err = BTERR_lock;
1091 __sync_fetch_and_add(&pool->pin, -1);
1093 _InterlockedDecrement (&pool->pin);
1098 // deallocate a deleted page
1099 // place on free chain out of allocator page
1101 BTERR bt_freepage(BtDb *bt, uid page_no)
1103 // obtain delete lock on deleted page
1105 if( bt_lockpage(bt, page_no, BtLockDelete, NULL) )
1108 // obtain write lock on deleted page
1110 if( bt_lockpage(bt, page_no, BtLockWrite, &bt->temp) )
1113 // lock allocation page
1115 if ( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) )
1118 // store chain in second right
1119 bt_putid(bt->temp->right, bt_getid(bt->alloc[1].right));
1120 bt_putid(bt->alloc[1].right, page_no);
1124 if( bt_unlockpage(bt, ALLOC_page, BtLockWrite) )
1127 // remove write lock on deleted node
1129 if( bt_unlockpage(bt, page_no, BtLockWrite) )
1132 // remove delete lock on deleted node
1134 if( bt_unlockpage(bt, page_no, BtLockDelete) )
1140 // allocate a new page and write page into it
1142 uid bt_newpage(BtDb *bt, BtPage page)
1150 if ( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) )
1153 // use empty chain first
1154 // else allocate empty page
1156 if( new_page = bt_getid(bt->alloc[1].right) ) {
1157 if( bt_lockpage (bt, new_page, BtLockWrite, &bt->temp) )
1159 bt_putid(bt->alloc[1].right, bt_getid(bt->temp->right));
1160 if( bt_unlockpage (bt, new_page, BtLockWrite) )
1164 new_page = bt_getid(bt->alloc->right);
1165 bt_putid(bt->alloc->right, new_page+1);
1169 if ( pwrite(bt->mgr->idx, page, bt->mgr->page_size, new_page << bt->mgr->page_bits) < bt->mgr->page_size )
1170 return bt->err = BTERR_wrt, 0;
1172 // if writing first page of pool block, zero last page in the block
1174 if ( !reuse && bt->mgr->poolmask > 0 && (new_page & bt->mgr->poolmask) == 0 )
1176 // use zero buffer to write zeros
1177 memset(bt->zero, 0, bt->mgr->page_size);
1178 if ( pwrite(bt->mgr->idx,bt->zero, bt->mgr->page_size, (new_page | bt->mgr->poolmask) << bt->mgr->page_bits) < bt->mgr->page_size )
1179 return bt->err = BTERR_wrt, 0;
1182 // bring new page into pool and copy page.
1183 // this will extend the file into the new pages.
1185 if( bt_lockpage(bt, new_page, BtLockWrite, &pmap) )
1188 memcpy(pmap, page, bt->mgr->page_size);
1190 if( bt_unlockpage (bt, new_page, BtLockWrite) )
1195 if ( bt_unlockpage(bt, ALLOC_page, BtLockWrite) )
1201 // find slot in page for given key at a given level
1203 int bt_findslot (BtDb *bt, unsigned char *key, uint len)
1205 uint diff, higher = bt->page->cnt, low = 1, slot;
1207 // low is the lowest candidate, higher is already
1208 // tested as .ge. the given key, loop ends when they meet
1210 while( diff = higher - low ) {
1211 slot = low + ( diff >> 1 );
1212 if( keycmp (keyptr(bt->page, slot), key, len) < 0 )
1221 // find and load page at given level for given key
1222 // leave page rd or wr locked as requested
1224 int bt_loadpage (BtDb *bt, unsigned char *key, uint len, uint lvl, uint lock)
1226 uid page_no = ROOT_page, prevpage = 0;
1227 uint drill = 0xff, slot;
1228 uint mode, prevmode;
1230 // start at root of btree and drill down
1233 // determine lock mode of drill level
1234 mode = (lock == BtLockWrite) && (drill == lvl) ? BtLockWrite : BtLockRead;
1236 bt->page_no = page_no;
1238 // obtain access lock using lock chaining with Access mode
1240 if( page_no > ROOT_page )
1241 if( bt_lockpage(bt, page_no, BtLockAccess, NULL) )
1245 if( bt_unlockpage(bt, prevpage, prevmode) )
1248 // obtain read lock using lock chaining
1249 // and pin page contents
1251 if( bt_lockpage(bt, page_no, mode, &bt->page) )
1254 if( page_no > ROOT_page )
1255 if( bt_unlockpage(bt, page_no, BtLockAccess) )
1258 // re-read and re-lock root after determining actual level of root
1260 if( bt->page_no == ROOT_page )
1261 if( bt->page->lvl != drill) {
1262 drill = bt->page->lvl;
1264 if( lock == BtLockWrite && drill == lvl )
1265 if( bt_unlockpage(bt, page_no, mode) )
1271 // if page is being deleted,
1272 // move back to preceeding page
1274 if( bt->page->kill ) {
1275 page_no = bt_getid (bt->page->right);
1279 // find key on page at this level
1280 // and descend to requested level
1282 slot = bt_findslot (bt, key, len);
1284 // is this slot a foster child?
1286 if( slot <= bt->page->cnt - bt->page->foster )
1292 while( slotptr(bt->page, slot)->dead )
1293 if( slot++ < bt->page->cnt )
1296 return bt->err = BTERR_struct, 0;
1298 // continue down / right using overlapping locks
1299 // to protect pages being killed or split.
1302 prevpage = bt->page_no;
1303 page_no = bt_getid(slotptr(bt->page, slot)->id);
1306 // return error on end of chain
1308 bt->err = BTERR_struct;
1309 return 0; // return error
1312 // find and delete key on page by marking delete flag bit
1313 // when page becomes empty, delete it from the btree
1315 BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl)
1317 unsigned char leftkey[256], rightkey[256];
1322 if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) )
1323 ptr = keyptr(bt->page, slot);
1327 // if key is found delete it, otherwise ignore request
1329 if( !keycmp (ptr, key, len) )
1330 if( slotptr(bt->page, slot)->dead == 0 ) {
1331 slotptr(bt->page,slot)->dead = 1;
1332 if( slot < bt->page->cnt )
1333 bt->page->dirty = 1;
1337 // return if page is not empty, or it has no right sibling
1339 right = bt_getid(bt->page->right);
1340 page_no = bt->page_no;
1342 if( !right || bt->page->act )
1343 return bt_unlockpage(bt, page_no, BtLockWrite);
1345 // obtain Parent lock over write lock
1347 if( bt_lockpage(bt, page_no, BtLockParent, NULL) )
1350 // cache copy of key to delete
1352 ptr = keyptr(bt->page, bt->page->cnt);
1353 memcpy(leftkey, ptr, ptr->len + 1);
1355 // lock and map right page
1357 if ( bt_lockpage(bt, right, BtLockWrite, &bt->temp) )
1360 // pull contents of next page into current empty page
1361 memcpy (bt->page, bt->temp, bt->mgr->page_size);
1363 // cache copy of key to update
1364 ptr = keyptr(bt->temp, bt->temp->cnt);
1365 memcpy(rightkey, ptr, ptr->len + 1);
1367 // Mark right page as deleted and point it to left page
1368 // until we can post updates at higher level.
1370 bt_putid(bt->temp->right, page_no);
1374 if( bt_unlockpage(bt, right, BtLockWrite) )
1376 if( bt_unlockpage(bt, page_no, BtLockWrite) )
1379 // delete old lower key to consolidated node
1381 if( bt_deletekey (bt, leftkey + 1, *leftkey, lvl + 1) )
1384 // redirect higher key directly to consolidated node
1386 if( slot = bt_loadpage (bt, rightkey+1, *rightkey, lvl+1, BtLockWrite) )
1387 ptr = keyptr(bt->page, slot);
1391 // since key already exists, update id
1393 if( keycmp (ptr, rightkey+1, *rightkey) )
1394 return bt->err = BTERR_struct;
1396 slotptr(bt->page, slot)->dead = 0;
1397 bt_putid(slotptr(bt->page,slot)->id, page_no);
1398 bt_unlockpage(bt, bt->page_no, BtLockWrite);
1400 // obtain write lock and
1401 // add right block to free chain
1403 if( bt_freepage (bt, right) )
1406 // remove ParentModify lock
1408 if( bt_unlockpage(bt, page_no, BtLockParent) )
1414 // find key in leaf level and return row-id
1416 uid bt_findkey (BtDb *bt, unsigned char *key, uint len)
1422 if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) )
1423 ptr = keyptr(bt->page, slot);
1427 // if key exists, return row-id
1428 // otherwise return 0
1430 if( ptr->len == len && !memcmp (ptr->key, key, len) )
1431 id = bt_getid(slotptr(bt->page,slot)->id);
1435 if ( bt_unlockpage(bt, bt->page_no, BtLockRead) )
1441 // check page for space available,
1442 //	clean if necessary and return
1443 //	0 - page needs splitting
// amt = number of key bytes the caller wants to add.  Keys grow downward
// from the end of the page (page->min is the lowest used key offset) while
// the slot array grows upward past the header, so the page has room when
// min clears the slot array plus the requested amount.
1446 uint bt_cleanpage(BtDb *bt, uint amt)
1448 uint nxt = bt->mgr->page_size;
1449 BtPage page = bt->page;
1450 uint cnt = 0, idx = 0;
1451 uint max = page->cnt;
// fast path: enough free space already, no compaction needed
1454 if( page->min >= (max+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1457 // skip cleanup if nothing to reclaim
// snapshot the page into the scratch frame, then rebuild the page in place
1462 memcpy (bt->frame, page, bt->mgr->page_size);
1464 // skip page info and set rest of page to zero
1466 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1470 // try cleaning up page first
1472 while( cnt++ < max ) {
1473 // always leave fence key and foster children in list
// drop dead slots, but only in the range below the fence/foster entries
1474 if( cnt < max - page->foster && slotptr(bt->frame,cnt)->dead )
// copy the surviving key (length byte + bytes) back into the page,
// packing keys downward from the top
1478 key = keyptr(bt->frame, cnt);
1479 nxt -= key->len + 1;
1480 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
// rebuild the slot: id, dead flag, timestamp, and new key offset
1483 memcpy(slotptr(page, ++idx)->id, slotptr(bt->frame, cnt)->id, BtId);
1484 if( !(slotptr(page, idx)->dead = slotptr(bt->frame, cnt)->dead) )
1486 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1487 slotptr(page, idx)->off = nxt;
1493 // see if page has enough space now, or does it need splitting?
1495 if( page->min >= (idx+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1502 // return with page unlocked
// Insert key/id into bt->page at the given slot position.  The caller must
// hold the write lock and must already have verified (via bt_cleanpage)
// that the key fits; the lock is released on return.
1504 BTERR bt_addkeytopage (BtDb *bt, uint slot, unsigned char *key, uint len, uid id, uint tod)
1506 BtPage page = bt->page;
1509 // calculate next available slot and copy key into page
// keys are stored as a length byte followed by the key bytes, packed
// downward from page->min
1511 page->min -= len + 1;
1512 ((unsigned char *)page)[page->min] = len;
1513 memcpy ((unsigned char *)page + page->min +1, key, len );
// find the first dead slot at or above the insertion point to reuse;
// if none, idx stops at page->cnt (elided lines presumably extend cnt)
1515 for( idx = slot; idx < page->cnt; idx++ )
1516 if( slotptr(page, idx)->dead )
1519 // now insert key into array before slot
1520 // preserving the fence slot
1522 if( idx == page->cnt )
// shift slots up one position to open a hole at slot
1528 *slotptr(page, idx) = *slotptr(page, idx -1), idx--;
// fill in the new slot: row-id, key offset, timestamp, live flag
1530 bt_putid(slotptr(page,slot)->id, id);
1531 slotptr(page, slot)->off = page->min;
1532 slotptr(page, slot)->tod = tod;
1533 slotptr(page, slot)->dead = 0;
1535 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1538 // split the root and raise the height of the btree
// right = page_no of the new right sibling produced by bt_splitpage.
// The old root's contents move to a fresh left page; the root is rebuilt
// with exactly two slots: the left fence key -> new_page and the stopper
// key -> right.  The root page number itself never changes.
1540 BTERR bt_splitroot(BtDb *bt, uid right)
1542 uint nxt = bt->mgr->page_size;
1543 unsigned char fencekey[256];
1544 BtPage root = bt->page;
1548 // Obtain an empty page to use, and copy the left page
1549 // contents into it from the root.  Strip foster child key.
1550 // (it's the stopper key)
1556 // Save left fence key.
// fence key is the highest key on the root; format is length byte + bytes
1558 key = keyptr(root, root->cnt);
1559 memcpy (fencekey, key, key->len + 1);
1561 // copy the lower keys into a new left page
1563 if( !(new_page = bt_newpage(bt, root)) )
1566 // preserve the page info at the bottom
1567 // and set rest of the root to zero
1569 memset (root+1, 0, bt->mgr->page_size - sizeof(*root));
1571 // insert left fence key on empty newroot page
// *fencekey is the length byte, so the stored size is len + 1
1573 nxt -= *fencekey + 1;
1574 memcpy ((unsigned char *)root + nxt, fencekey, *fencekey + 1);
1575 bt_putid(slotptr(root, 1)->id, new_page);
1576 slotptr(root, 1)->off = nxt;
1578 // insert stopper key on newroot page
1579 // and increase the root height
// NOTE(review): the elided lines here presumably rewrite fencekey as the
// stopper key and adjust nxt before this copy — confirm against full source
1585 memcpy ((unsigned char *)root + nxt, fencekey, *fencekey + 1);
1586 bt_putid(slotptr(root, 2)->id, right);
1587 slotptr(root, 2)->off = nxt;
// root has no right sibling
1589 bt_putid(root->right, 0);
1590 root->min = nxt;		// reset lowest used offset and key count
1595 // release root (bt->page)
1597 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1600 // split already locked full node
// Foster-child split: the upper half of the keys moves to a new right
// page, which is initially adopted as a *foster child* of the left page
// rather than being posted to the parent immediately.  Afterwards the
// function re-acquires locks in ParentModify order and promotes the
// highest foster child into the parent level.
// NOTE(review): listing is elided; error-return lines and several braces
// are not visible, so comments describe only the visible statements.
1603 BTERR bt_splitpage (BtDb *bt)
1605 uint slot, cnt, idx, max, nxt = bt->mgr->page_size;
1606 unsigned char fencekey[256];
1607 uid page_no = bt->page_no;
1608 BtPage page = bt->page;
1609 uint tod = time(NULL);
1610 uint lvl = page->lvl;
1611 uid new_page, right;
1614 //	initialize frame buffer
1616 memset (bt->frame, 0, bt->mgr->page_size);
// max = number of non-foster keys on the page
1617 max = page->cnt - page->foster;
1618 tod = (uint)time(NULL);
1622 //  split higher half of keys to bt->frame
1623 //	leaving foster children in the left node.
// (cnt presumably starts at max/2 via an elided line; keys are copied
// as length byte + bytes, packed downward from the top of the frame)
1625 while( cnt++ < max ) {
1626 key = keyptr(page, cnt);
1627 nxt -= key->len + 1;
1628 memcpy ((unsigned char *)bt->frame + nxt, key, key->len + 1);
1629 memcpy(slotptr(bt->frame,++idx)->id, slotptr(page,cnt)->id, BtId);
1630 slotptr(bt->frame, idx)->tod = slotptr(page, cnt)->tod;
1631 slotptr(bt->frame, idx)->off = nxt;
1635 //	transfer right link node
// only non-root pages carry a right sibling link
1637 if( page_no > ROOT_page ) {
1638 right = bt_getid (page->right);
1639 bt_putid(bt->frame->right, right);
1642 bt->frame->bits = bt->mgr->page_bits;
1643 bt->frame->min = nxt;
1644 bt->frame->cnt = idx;
1645 bt->frame->lvl = lvl;
1647 //	get new free page and write frame to it.
1649 if( !(new_page = bt_newpage(bt, bt->frame)) )
1652 //	remember fence key for new page to add
// highest key of the new right page becomes its fence key
1655 key = keyptr(bt->frame, idx);
1656 memcpy (fencekey, key, key->len + 1);
1658 //	update lower keys and foster children to continue in old page
// re-snapshot the (pre-split) page and rebuild the left half in place
1660 memcpy (bt->frame, page, bt->mgr->page_size);
1661 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1662 nxt = bt->mgr->page_size;
1667 //  assemble page of smaller keys
1668 //	to remain in the old page
1670 while( cnt++ < max / 2 ) {
1671 key = keyptr(bt->frame, cnt);
1672 nxt -= key->len + 1;
1673 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1674 memcpy (slotptr(page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId);
1675 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1676 slotptr(page, idx)->off = nxt;
1680 //	insert new foster child at beginning of the current foster children
// the new right page is tracked locally as a foster child until its key
// can be posted to the parent below
1682 nxt -= *fencekey + 1;
1683 memcpy ((unsigned char *)page + nxt, fencekey, *fencekey + 1);
1684 bt_putid (slotptr(page,++idx)->id, new_page);
1685 slotptr(page, idx)->tod = tod;
1686 slotptr(page, idx)->off = nxt;
1690 //	continue with old foster child keys if any
1692 cnt = bt->frame->cnt - bt->frame->foster;
1694 while( cnt++ < bt->frame->cnt ) {
1695 key = keyptr(bt->frame, cnt);
1696 nxt -= key->len + 1;
1697 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1698 memcpy (slotptr(page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId);
1699 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1700 slotptr(page, idx)->off = nxt;
1707 //	link new right page
1709 bt_putid (page->right, new_page);
1711 // if current page is the root page, split it
1713 if( page_no == ROOT_page )
1714 return bt_splitroot (bt, new_page);
1716 //	release wr lock on page
1718 if( bt_unlockpage (bt, page_no, BtLockWrite) )
1721 //	obtain ParentModification lock for current page
1722 //	to fix fence key and highest foster child on page
1724 if( bt_lockpage (bt, page_no, BtLockParent, NULL) )
1727 //  get our highest foster child key to find in parent node
// briefly re-read our page to snapshot the current highest foster key
1729 if( bt_lockpage (bt, page_no, BtLockRead, &page) )
1732 key = keyptr(page, page->cnt)
1733 memcpy (fencekey, key, key->len+1);
1735 if( bt_unlockpage (bt, page_no, BtLockRead) )
// locate the parent-level page covering that key, write-locked
1741 slot = bt_loadpage (bt, fencekey + 1, *fencekey, lvl + 1, BtLockWrite);
1746 //	check if parent page has enough space for any possible key
// 256 = maximum possible key size (255-byte key + length byte)
1748 if( bt_cleanpage (bt, 256) )
// parent itself is full: split it (recursively) and retry via elided loop
1751 if( bt_splitpage (bt) )
1755 //  see if we are still a foster child from another node
1757 if( bt_getid (slotptr(bt->page, slot)->id) != page_no ) {
1758 bt_unlockpage (bt, bt->page_no, BtLockWrite);
1767 //	wait until readers from parent get their locks
// Delete lock excludes new AccessIntent holders (see lock table in header)
1769 if( bt_lockpage (bt, page_no, BtLockDelete, NULL) )
1772 if( bt_lockpage (bt, page_no, BtLockWrite, &page) )
1775 //	switch parent fence key to foster child
// propagate the dead flag along with the id switch
1777 if( slotptr(page, page->cnt)->dead )
1778 slotptr(bt->page, slot)->dead = 1;
1780 bt_putid (slotptr(bt->page, slot)->id, bt_getid(slotptr(page, page->cnt)->id));
1782 //	remove highest foster child from our page
1783 //	add our new fence key to parent
1789 key = keyptr(page, page->cnt);
1791 if( bt_addkeytopage (bt, slot, key->key, key->len, page_no, tod) )
// unlock in reverse order: Delete, Write, then ParentModify
1794 if( bt_unlockpage (bt, page_no, BtLockDelete) )
1797 if( bt_unlockpage (bt, page_no, BtLockWrite) )
1800 return bt_unlockpage (bt, page_no, BtLockParent);
1803 //  Insert new key into the btree at leaf level.
// Public entry point: insert (key,len) -> id at the leaf level, or update
// the id/tod of an existing key in place.  Returns 0 on success or sets
// and returns bt->err.  (Elided lines presumably wrap the body in a retry
// loop around cleanpage/splitpage — confirm against full source.)
1805 BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uid id, uint tod)
// descend to leaf level with a write lock
1812 if( slot = bt_loadpage (bt, key, len, 0, BtLockWrite) )
1813 ptr = keyptr(bt->page, slot);
// keys longer than 255 bytes cannot be stored (1-byte length prefix)
1817 bt->err = BTERR_ovflw;
1821 // if key already exists, update id and return
1825 if( !keycmp (ptr, key, len) ) {
// resurrect a dead slot and refresh its payload
1826 slotptr(page, slot)->dead = 0;
1827 slotptr(page, slot)->tod = tod;
1828 bt_putid(slotptr(page,slot)->id, id);
1829 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1832 // check if page has enough space
1834 if( bt_cleanpage (bt, len) )
// page is full even after compaction: split and retry
1837 if( bt_splitpage (bt) )
1841 return bt_addkeytopage (bt, slot, key, len, id, tod);
1844 // cache page of keys into cursor and return starting slot for given key
// Copies the leaf containing key into bt->cursor so iteration can proceed
// without holding locks; the read lock is dropped before returning.
1846 uint bt_startkey (BtDb *bt, unsigned char *key, uint len)
1850 // cache page for retrieval
1851 if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) )
1852 memcpy (bt->cursor, bt->page, bt->mgr->page_size);
// remember which page the cursor snapshot came from, for bt_nextkey
1853 bt->cursor_page = bt->page_no;
// release the lock; the slot (or an error) is returned by elided lines
1854 if ( bt_unlockpage(bt, bt->page_no, BtLockRead) )
1860 // return next slot for cursor page
1861 //  or slide cursor right into next page
// Advances within the cached cursor page, skipping dead slots and foster
// children; when the page is exhausted it follows the right sibling link
// and re-caches that page.  (Returns 0 at end — final lines elided.)
1863 uint bt_nextkey (BtDb *bt, uint slot)
1869 right = bt_getid(bt->cursor->right);
// scan only the non-foster portion of the slot array
1870 while( slot++ < bt->cursor->cnt - bt->cursor->foster )
1871 if( slotptr(bt->cursor,slot)->dead )
// a live slot is usable if there is a right sibling or the slot is not
// the fence entry
1873 else if( right || (slot < bt->cursor->cnt - bt->cursor->foster) )
// exhausted this page: slide the cursor to the right sibling
1881 bt->cursor_page = right;
1883 if( bt_lockpage(bt, right, BtLockRead, &page) )
1886 memcpy (bt->cursor, page, bt->mgr->page_size);
1888 if ( bt_unlockpage(bt, right, BtLockRead) )
1897 BtKey bt_key(BtDb *bt, uint slot)
1899 return keyptr(bt->cursor, slot);
1902 uid bt_uid(BtDb *bt, uint slot)
1904 return bt_getid(slotptr(bt->cursor,slot)->id);
1907 uint bt_tod(BtDb *bt, uint slot)
1909 return slotptr(bt->cursor,slot)->tod;
1922 // standalone program to index file of keys
1923 // then list them onto std-out
// Thread worker: each thread opens its own BtDb handle on the shared
// manager and performs one operation (selected by args->type) over its
// input file.  The two declarations below are the pthreads and Windows
// variants of the same entry point (chosen by elided #ifdefs).
1926 void *index_file (void *arg)
1928 uint __stdcall index_file (void *arg)
1931 int line = 0, found = 0, cnt = 0;
1932 uid next, page_no = LEAF_page;	// start on first page of leaves
1933 unsigned char key[256];
1934 ThreadArg *args = arg;
1935 int ch, len = 0, slot;
1942 bt = bt_open (args->mgr);
// | 0x20 lower-cases the ASCII command letter; case labels are elided
// but the messages below indicate write/delete/find/scan/count branches
1945 switch(args->type | 0x20)
1948 fprintf(stderr, "started indexing for %s\n", args->infile);
// insert one key per newline-terminated input line
1949 if( in = fopen (args->infile, "rb") )
1950 while( ch = getc(in), ch != EOF )
// num==1: append a descending 9-digit line number to make keys unique
1955 if( args->num == 1 )
1956 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
// num>1: partition the number space per thread via idx * num
1958 else if( args->num )
1959 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
1961 if( bt_insertkey (bt, key, len, line, *tod) )
1962 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
// accumulate key bytes until newline; keys are capped at 255 bytes
1965 else if( len < 255 )
1967 fprintf(stderr, "finished %s for %d keys\n", args->infile, line);
// delete branch: same key construction, keys removed instead of added
1971 fprintf(stderr, "started deleting keys for %s\n", args->infile);
1972 if( in = fopen (args->infile, "rb") )
1973 while( ch = getc(in), ch != EOF )
1977 if( args->num == 1 )
1978 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
1980 else if( args->num )
1981 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
1983 if( bt_deletekey (bt, key, len, 0) )
1984 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
1987 else if( len < 255 )
1989 fprintf(stderr, "finished %s for keys, %d \n", args->infile, line);
// find branch: probe each key and count hits
1993 fprintf(stderr, "started finding keys for %s\n", args->infile);
1994 if( in = fopen (args->infile, "rb") )
1995 while( ch = getc(in), ch != EOF )
1999 if( args->num == 1 )
2000 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
2002 else if( args->num )
2003 sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9;
2005 if( bt_findkey (bt, key, len) )
2008 fprintf(stderr, "Error %d Syserr %d Line: %d\n", bt->err, errno, line), exit(0);
2011 else if( len < 255 )
2013 fprintf(stderr, "finished %s for %d keys, found %d\n", args->infile, line, found);
// scan branch: cursor iteration from the start, writing keys to stdout
2019 fprintf(stderr, "started reading\n");
2021 if( slot = bt_startkey (bt, key, len) )
2024 fprintf(stderr, "Error %d in StartKey. Syserror: %d\n", bt->err, errno), exit(0);
2026 while( slot = bt_nextkey (bt, slot) ) {
2027 ptr = bt_key(bt, slot);
2028 fwrite (ptr->key, ptr->len, 1, stdout);
2029 fputc ('\n', stdout);
// count branch: walk the leaf chain via right links under read locks
2035 fprintf(stderr, "started reading\n");
2038 bt_lockpage (bt, page_no, BtLockRead, &page);
2040 next = bt_getid (page->right);
2041 bt_unlockpage (bt, page_no, BtLockRead);
2042 } while( page_no = next );
2044 cnt--;	// remove stopper key
2045 fprintf(stderr, " Total keys read %d\n", cnt);
// alias for gettimeofday's struct timeval, used for elapsed-time reporting
2057 typedef struct timeval timer;
2059 int main (int argc, char **argv)
2061 int idx, cnt, len, slot, err;
2062 int segsize, bits = 16;
2067 time_t start[1], stop[1];
2080 fprintf (stderr, "Usage: %s idx_file Read/Write/Scan/Delete/Find [page_bits mapped_segments seg_bits line_numbers src_file1 src_file2 ... ]\n", argv[0]);
2081 fprintf (stderr, " where page_bits is the page size in bits\n");
2082 fprintf (stderr, " mapped_segments is the number of mmap segments in buffer pool\n");
2083 fprintf (stderr, " seg_bits is the size of individual segments in buffer pool in pages in bits\n");
2084 fprintf (stderr, " line_numbers = 1 to append line numbers to keys\n");
2085 fprintf (stderr, " src_file1 thru src_filen are files of keys separated by newline\n");
2090 gettimeofday(&start, NULL);
2096 bits = atoi(argv[3]);
2099 poolsize = atoi(argv[4]);
2102 fprintf (stderr, "Warning: no mapped_pool\n");
2104 if( poolsize > 65535 )
2105 fprintf (stderr, "Warning: mapped_pool > 65535 segments\n");
2108 segsize = atoi(argv[5]);
2110 segsize = 4; // 16 pages per mmap segment
2113 num = atoi(argv[6]);
2117 threads = malloc (cnt * sizeof(pthread_t));
2119 threads = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, cnt * sizeof(HANDLE));
2121 args = malloc (cnt * sizeof(ThreadArg));
2123 mgr = bt_mgr ((argv[1]), BT_rw, bits, poolsize, segsize, poolsize / 8);
2126 fprintf(stderr, "Index Open Error %s\n", argv[1]);
2132 for( idx = 0; idx < cnt; idx++ ) {
2133 args[idx].infile = argv[idx + 7];
2134 args[idx].type = argv[2][0];
2135 args[idx].mgr = mgr;
2136 args[idx].num = num;
2137 args[idx].idx = idx;
2139 if( err = pthread_create (threads + idx, NULL, index_file, args + idx) )
2140 fprintf(stderr, "Error creating thread %d\n", err);
2142 threads[idx] = (HANDLE)_beginthreadex(NULL, 65536, index_file, args + idx, 0, NULL);
2146 // wait for termination
2149 for( idx = 0; idx < cnt; idx++ )
2150 pthread_join (threads[idx], NULL);
2151 gettimeofday(&stop, NULL);
2152 real_time = 1000.0 * ( stop.tv_sec - start.tv_sec ) + 0.001 * (stop.tv_usec - start.tv_usec );
2154 WaitForMultipleObjects (cnt, threads, TRUE, INFINITE);
2156 for( idx = 0; idx < cnt; idx++ )
2157 CloseHandle(threads[idx]);
2160 real_time = 1000 * (*stop - *start);
2162 fprintf(stderr, " Time to complete: %.2f seconds\n", real_time/1000);