1 // btree version threads2g sched_yield version
4 // author: karl malbrain, malbrain@cal.berkeley.edu
7 This work, including the source code, documentation
8 and related data, is placed into the public domain.
10 The original author is Karl Malbrain.
12 THIS SOFTWARE IS PROVIDED AS-IS WITHOUT WARRANTY
13 OF ANY KIND, NOT EVEN THE IMPLIED WARRANTY OF
14 MERCHANTABILITY. THE AUTHOR OF THIS SOFTWARE,
15 ASSUMES _NO_ RESPONSIBILITY FOR ANY CONSEQUENCE
16 RESULTING FROM THE USE, MODIFICATION, OR
17 REDISTRIBUTION OF THIS SOFTWARE.
20 // Please see the project home page for documentation
21 // code.google.com/p/high-concurrency-btree
23 #define _FILE_OFFSET_BITS 64
24 #define _LARGEFILE64_SOURCE
40 #define WIN32_LEAN_AND_MEAN
53 typedef unsigned long long uid;
56 typedef unsigned long long off64_t;
57 typedef unsigned short ushort;
58 typedef unsigned int uint;
61 #define BT_ro 0x6f72 // ro
62 #define BT_rw 0x7772 // rw
64 #define BT_maxbits 24 // maximum page size in bits
65 #define BT_minbits 9 // minimum page size in bits
66 #define BT_minpage (1 << BT_minbits) // minimum page size
67 #define BT_maxpage (1 << BT_maxbits) // maximum page size
70 There are five lock types for each node in three independent sets:
71 1. (set 1) AccessIntent: Sharable. Going to Read the node. Incompatible with NodeDelete.
72 2. (set 1) NodeDelete: Exclusive. About to release the node. Incompatible with AccessIntent.
73 3. (set 2) ReadLock: Sharable. Read the node. Incompatible with WriteLock.
74 4. (set 2) WriteLock: Exclusive. Modify the node. Incompatible with ReadLock and other WriteLocks.
75 5. (set 3) ParentModification: Exclusive. Change the node's parent keys. Incompatible with another ParentModification.
86 // mode & definition for latch implementation
94 // exclusive is set for write access
95 // share is count of read accessors
96 // grant write lock when share == 0
99 volatile uint exclusive:1;
100 volatile uint request:1;
101 volatile uint share:30;
105 BtLatch readwr[1]; // read/write page lock
106 BtLatch access[1]; // Access Intent/Page delete
107 BtLatch parent[1]; // Parent modification
110 // Define the length of the page and key pointers
114 // Page key slot definition.
116 // If BT_maxbits is 15 or less, you can save 4 bytes
117 // for each key stored by making the first two uints
118 // into ushorts. You can also save 4 bytes by removing
119 // the tod field from the key.
121 // Keys are marked dead, but remain on the page until
122 // cleanup is called. The fence key (highest key) for
123 // the page is always present, even after cleanup.
126 uint off:BT_maxbits; // page offset for key start
127 uint dead:1; // set for deleted key
128 uint tod; // time-stamp for key
129 unsigned char id[BtId]; // id associated with key
132 // The key structure occupies space at the upper end of
133 // each page. It's a length byte followed by the value
138 unsigned char key[1];
141 // The first part of an index page.
142 // It is immediately followed
143 // by the BtSlot array of keys.
145 typedef struct Page {
146 BtLatchSet latch[1]; // Set of three latches
147 uint cnt; // count of keys in page
148 uint act; // count of active keys
149 uint min; // next key offset
150 unsigned char bits; // page size in bits
151 unsigned char lvl:6; // level of page
152 unsigned char kill:1; // page is being deleted
153 unsigned char dirty:1; // page has deleted keys
154 unsigned char right[BtId]; // page number to right
157 // The memory mapping pool table buffer manager entry
160 unsigned long long int lru; // number of times accessed
161 uid basepage; // mapped base page number
162 char *map; // mapped memory pointer
163 uint slot; // slot index in this array
164 volatile uint pin; // mapped page pin counter
165 void *hashprev; // previous pool entry for the same hash idx
166 void *hashnext; // next pool entry for the same hash idx
172 // The object structure for Btree access
175 uint page_size; // page size
176 uint page_bits; // page size in bits
177 uint seg_bits; // seg size in pages in bits
178 uint mode; // read-write mode
180 char *pooladvise; // bit maps for pool page advisements
185 uint poolcnt; // highest page pool node in use
186 uint poolmax; // highest page pool node allocated
187 uint poolmask; // total size of pages in mmap segment - 1
188 uint hashsize; // size of Hash Table for pool entries
189 volatile uint evicted; // last evicted hash table slot
190 ushort *hash; // pool index for hash entries
191 BtLatch *latch; // latches for hash table slots
192 BtPool *pool; // memory pool page segments
196 BtMgr *mgr; // buffer manager for thread
197 BtPage temp; // temporary frame buffer (memory mapped/file IO)
198 BtPage alloc; // frame buffer for alloc page ( page 0 )
199 BtPage cursor; // cached frame for start/next (never mapped)
200 BtPage frame; // spare frame for the page split (never mapped)
201 BtPage zero; // page frame for zeroes at end of file
202 BtPage page; // current page
203 uid page_no; // current page number
204 uid cursor_page; // current cursor page number
205 unsigned char *mem; // frame, cursor, page memory buffer
206 int err; // last error
220 extern void bt_close (BtDb *bt);
221 extern BtDb *bt_open (BtMgr *mgr);
222 extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod);
223 extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl);
224 extern uid bt_findkey (BtDb *bt, unsigned char *key, uint len);
225 extern uint bt_startkey (BtDb *bt, unsigned char *key, uint len);
226 extern uint bt_nextkey (BtDb *bt, uint slot);
229 extern BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolsize, uint segsize, uint hashsize);
230 void bt_mgrclose (BtMgr *mgr);
232 // Helper functions to return slot values
234 extern BtKey bt_key (BtDb *bt, uint slot);
235 extern uid bt_uid (BtDb *bt, uint slot);
236 extern uint bt_tod (BtDb *bt, uint slot);
238 // BTree page number constants
243 // Number of levels to create in a new BTree
247 // The page is allocated from low and hi ends.
248 // The key offsets and row-id's are allocated
249 // from the bottom, while the text of the key
250 // is allocated from the top. When the two
251 // areas meet, the page is split into two.
253 // A key consists of a length byte, two bytes of
254 // index number (0 - 65534), and up to 253 bytes
255 // of key value. Duplicate keys are discarded.
256 // Associated with each key is a 48 bit row-id.
258 // The b-tree root is always located at page 1.
259 // The first leaf page of level zero is always
260 // located on page 2.
262 // The b-tree pages are linked with next
263 // pointers to facilitate enumerators,
264 // and provide for concurrency.
266 // When the root page fills, it is split in two and
267 // the tree height is raised by a new root at page
268 // one with two keys.
270 // Deleted keys are marked with a dead bit until
271 // page cleanup. The fence key for a node is always
272 // present, even after deletion and cleanup.
274 // Groups of pages called segments from the btree are optionally
275 // cached with a memory mapped pool. A hash table is used to keep
276 // track of the cached segments. This behaviour is controlled
277 // by the cache block size parameter to bt_open.
279 // To achieve maximum concurrency one page is locked at a time
280 // as the tree is traversed to find leaf key in question. The right
281 // page numbers are used in cases where the page is being split,
284 // Page 0 is dedicated to lock for new page extensions,
285 // and chains empty pages together for reuse.
287 // The ParentModification lock on a node is obtained to prevent resplitting
288 // or deleting a node before its fence is posted into its upper level.
290 // Empty pages are chained together through the ALLOC page and reused.
292 // Access macros to address slot and key values from the page
294 #define slotptr(page, slot) (((BtSlot *)(page+1)) + (slot-1))
295 #define keyptr(page, slot) ((BtKey)((unsigned char*)(page) + slotptr(page, slot)->off))
297 void bt_putid(unsigned char *dest, uid id)
302 dest[i] = (unsigned char)id, id >>= 8;
305 uid bt_getid(unsigned char *src)
310 for( i = 0; i < BtId; i++ )
311 id <<= 8, id |= *src++;
316 void bt_mgrclose (BtMgr *mgr)
321 // release mapped pages
322 // note that slot zero is never used
324 for( slot = 1; slot < mgr->poolmax; slot++ ) {
325 pool = mgr->pool + slot;
328 munmap (pool->map, (mgr->poolmask+1) << mgr->page_bits);
331 FlushViewOfFile(pool->map, 0);
332 UnmapViewOfFile(pool->map);
333 CloseHandle(pool->hmap);
343 free (mgr->pooladvise);
346 FlushFileBuffers(mgr->idx);
347 CloseHandle(mgr->idx);
348 GlobalFree (mgr->pool);
349 GlobalFree (mgr->hash);
350 GlobalFree (mgr->latch);
355 // close and release memory
357 void bt_close (BtDb *bt)
364 VirtualFree (bt->mem, 0, MEM_RELEASE);
369 // open/create new btree buffer manager
371 // call with file_name, BT_openmode, bits in page size (e.g. 16),
372 // size of mapped page pool (e.g. 8192)
374 BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolmax, uint segsize, uint hashsize)
376 uint lvl, attr, cacheblk, last;
385 SYSTEM_INFO sysinfo[1];
388 // determine sanity of page size and buffer pool
390 if( bits > BT_maxbits )
392 else if( bits < BT_minbits )
396 return NULL; // must have buffer pool
399 mgr = calloc (1, sizeof(BtMgr));
401 switch (mode & 0x7fff)
404 mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666);
410 mgr->idx = open ((char*)name, O_RDONLY);
415 return free(mgr), NULL;
417 cacheblk = 4096; // minimum mmap segment size for unix
420 mgr = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, sizeof(BtMgr));
421 attr = FILE_ATTRIBUTE_NORMAL;
422 switch (mode & 0x7fff)
425 mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL);
431 mgr->idx = CreateFile(name, GENERIC_READ, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_EXISTING, attr, NULL);
435 if( mgr->idx == INVALID_HANDLE_VALUE )
436 return GlobalFree(mgr), NULL;
438 // normalize cacheblk to multiple of sysinfo->dwAllocationGranularity
439 GetSystemInfo(sysinfo);
440 cacheblk = sysinfo->dwAllocationGranularity;
444 alloc = malloc (BT_maxpage);
447 // read minimum page size to get root info
449 if( size = lseek (mgr->idx, 0L, 2) ) {
450 if( pread(mgr->idx, alloc, BT_minpage, 0) == BT_minpage )
453 return free(mgr), free(alloc), NULL;
454 } else if( mode == BT_ro )
455 return bt_mgrclose (mgr), NULL;
457 alloc = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE);
458 size = GetFileSize(mgr->idx, amt);
461 if( !ReadFile(mgr->idx, (char *)alloc, BT_minpage, amt, NULL) )
462 return bt_mgrclose (mgr), NULL;
464 } else if( mode == BT_ro )
465 return bt_mgrclose (mgr), NULL;
468 mgr->page_size = 1 << bits;
469 mgr->page_bits = bits;
471 mgr->poolmax = poolmax;
474 if( cacheblk < mgr->page_size )
475 cacheblk = mgr->page_size;
477 // mask for partial memmaps
479 mgr->poolmask = (cacheblk >> bits) - 1;
481 // see if requested size of pages per memmap is greater
483 if( (1 << segsize) > mgr->poolmask )
484 mgr->poolmask = (1 << segsize) - 1;
488 while( (1 << mgr->seg_bits) <= mgr->poolmask )
491 mgr->hashsize = hashsize;
494 mgr->pool = calloc (poolmax, sizeof(BtPool));
495 mgr->hash = calloc (hashsize, sizeof(ushort));
496 mgr->latch = calloc (hashsize, sizeof(BtLatch));
497 mgr->pooladvise = calloc (poolmax, (mgr->poolmask + 8) / 8);
499 mgr->pool = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, poolmax * sizeof(BtPool));
500 mgr->hash = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(ushort));
501 mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtLatch));
507 // initializes an empty b-tree with root page and page of leaves
509 memset (alloc, 0, 1 << bits);
510 bt_putid(alloc->right, MIN_lvl+1);
511 alloc->bits = mgr->page_bits;
514 if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size )
515 return bt_mgrclose (mgr), NULL;
517 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
518 return bt_mgrclose (mgr), NULL;
520 if( *amt < mgr->page_size )
521 return bt_mgrclose (mgr), NULL;
524 memset (alloc, 0, 1 << bits);
525 alloc->bits = mgr->page_bits;
527 for( lvl=MIN_lvl; lvl--; ) {
528 slotptr(alloc, 1)->off = mgr->page_size - 3;
529 bt_putid(slotptr(alloc, 1)->id, lvl ? MIN_lvl - lvl + 1 : 0); // next(lower) page number
530 key = keyptr(alloc, 1);
531 key->len = 2; // create stopper key
534 alloc->min = mgr->page_size - 3;
539 if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size )
540 return bt_mgrclose (mgr), NULL;
542 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
543 return bt_mgrclose (mgr), NULL;
545 if( *amt < mgr->page_size )
546 return bt_mgrclose (mgr), NULL;
550 // create empty page area by writing last page of first
551 // segment area (other pages are zeroed by O/S)
553 if( mgr->poolmask ) {
554 memset(alloc, 0, mgr->page_size);
555 last = mgr->poolmask;
557 while( last < MIN_lvl + 1 )
558 last += mgr->poolmask + 1;
561 pwrite(mgr->idx, alloc, mgr->page_size, last << mgr->page_bits);
563 SetFilePointer (mgr->idx, last << mgr->page_bits, NULL, FILE_BEGIN);
564 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
565 return bt_mgrclose (mgr), NULL;
566 if( *amt < mgr->page_size )
567 return bt_mgrclose (mgr), NULL;
575 VirtualFree (alloc, 0, MEM_RELEASE);
580 // open BTree access method
581 // based on buffer manager
583 BtDb *bt_open (BtMgr *mgr)
585 BtDb *bt = malloc (sizeof(*bt));
587 memset (bt, 0, sizeof(*bt));
590 bt->mem = malloc (3 *mgr->page_size);
592 bt->mem = VirtualAlloc(NULL, 3 * mgr->page_size, MEM_COMMIT, PAGE_READWRITE);
594 bt->frame = (BtPage)bt->mem;
595 bt->zero = (BtPage)(bt->mem + 1 * mgr->page_size);
596 bt->cursor = (BtPage)(bt->mem + 2 * mgr->page_size);
600 // compare two keys, returning > 0, = 0, or < 0
601 // as the comparison value
603 int keycmp (BtKey key1, unsigned char *key2, uint len2)
605 uint len1 = key1->len;
608 if( ans = memcmp (key1->key, key2, len1 > len2 ? len2 : len1) )
621 // wait until write lock mode is clear
622 // and add 1 to the share count
624 void bt_readlock(BtLatch *latch)
627 // see if exclusive request is pending, or granted
629 if( !(volatile int)latch->request && !(volatile int)latch->exclusive ) {
630 // add one to counter, check write bit
632 if( ~__sync_fetch_and_add((volatile int *)latch, Share) & Write )
635 if( ~_InterlockedExchangeAdd((volatile int *)latch, Share) & Write )
638 // didn't get latch, reduce counter by one
641 __sync_fetch_and_add((volatile int *)latch, -Share);
643 _InterlockedExchangeAdd ((volatile int *)latch, -Share);
656 // wait for other read and write latches to relinquish
658 void bt_writelock(BtLatch *latch)
663 // set exclusive access pending
666 __sync_fetch_and_or((int *)latch, Pending);
668 _InterlockedOr((int *)latch, Pending);
671 // see if we can get write access
674 prev = __sync_fetch_and_or((volatile int *)latch, Write);
676 prev = _InterlockedOr((volatile int *)latch, Write);
679 // did we get exclusive access?
680 // if so, clear write pending
682 if( !(prev & ~Pending) ) {
684 __sync_fetch_and_and((volatile int *)latch, ~Pending);
686 _InterlockedAnd((volatile int *)latch, ~Pending);
691 // reset our Write mode if it was clear before
693 if( !(prev & Write) ) {
695 __sync_fetch_and_and((volatile int *)latch, ~Write);
697 _InterlockedAnd((volatile int *)latch, ~Write);
711 // try to obtain write lock
713 // return 1 if obtained,
716 int bt_writetry(BtLatch *latch)
720 // see if we can get write access
723 prev = __sync_fetch_and_or((volatile int *)latch, Write);
725 prev = _InterlockedOr((volatile int *)latch, Write);
728 // did we get exclusive access?
731 if( !(prev & ~Pending) )
734 // reset our Write mode if it was clear before
736 if( !(prev & Write) ) {
738 __sync_fetch_and_and((volatile int *)latch, ~Write);
740 _InterlockedAnd((volatile int *)latch, ~Write);
748 void bt_releasewrite(BtLatch *latch)
751 __sync_fetch_and_and((int *)latch, ~Write);
753 _InterlockedAnd ((int *)latch, ~Write);
757 // decrement reader count
759 void bt_releaseread(BtLatch *latch)
762 __sync_fetch_and_add((int *)latch, -Share);
764 _InterlockedExchangeAdd((int *)latch, -Share);
770 // find segment in pool
771 // must be called with hashslot idx locked
772 // return NULL if not there
773 // otherwise return node
775 BtPool *bt_findpool(BtDb *bt, uid page_no, uint idx)
780 // compute start of hash chain in pool
782 if( slot = bt->mgr->hash[idx] )
783 pool = bt->mgr->pool + slot;
787 page_no &= ~bt->mgr->poolmask;
789 while( pool->basepage != page_no )
790 if( pool = pool->hashnext )
798 // add segment to hash table
800 void bt_linkhash(BtDb *bt, BtPool *pool, uid page_no, int idx)
805 pool->hashprev = pool->hashnext = NULL;
806 pool->basepage = page_no & ~bt->mgr->poolmask;
809 if( slot = bt->mgr->hash[idx] ) {
810 node = bt->mgr->pool + slot;
811 pool->hashnext = node;
812 node->hashprev = pool;
815 bt->mgr->hash[idx] = pool->slot;
818 // find best segment to evict from buffer pool
820 BtPool *bt_findlru (BtDb *bt, uint hashslot)
822 unsigned long long int target = ~0LL;
823 BtPool *pool = NULL, *node;
828 node = bt->mgr->pool + hashslot;
830 // scan pool entries under hash table slot
835 if( node->lru > target )
839 } while( node = node->hashnext );
844 // map new buffer pool segment to virtual memory
846 BTERR bt_mapsegment(BtDb *bt, BtPool *pool, uid page_no)
848 off64_t off = (page_no & ~bt->mgr->poolmask) << bt->mgr->page_bits;
849 off64_t limit = off + ((bt->mgr->poolmask+1) << bt->mgr->page_bits);
853 flag = PROT_READ | ( bt->mgr->mode == BT_ro ? 0 : PROT_WRITE );
854 pool->map = mmap (0, (bt->mgr->poolmask+1) << bt->mgr->page_bits, flag, MAP_SHARED, bt->mgr->idx, off);
855 if( pool->map == MAP_FAILED )
856 return bt->err = BTERR_map;
858 // clear out madvise issued bits
859 memset (bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8) / 8), 0, (bt->mgr->poolmask + 8)/8);
861 flag = ( bt->mgr->mode == BT_ro ? PAGE_READONLY : PAGE_READWRITE );
862 pool->hmap = CreateFileMapping(bt->mgr->idx, NULL, flag, (DWORD)(limit >> 32), (DWORD)limit, NULL);
864 return bt->err = BTERR_map;
866 flag = ( bt->mgr->mode == BT_ro ? FILE_MAP_READ : FILE_MAP_WRITE );
867 pool->map = MapViewOfFile(pool->hmap, flag, (DWORD)(off >> 32), (DWORD)off, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
869 return bt->err = BTERR_map;
874 // find or place requested page in segment-pool
875 // return pool table entry, incrementing pin
877 BtPool *bt_pinpage(BtDb *bt, uid page_no)
879 BtPool *pool, *node, *next;
880 uint slot, idx, victim;
882 // lock hash table chain
884 idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
885 bt_readlock (&bt->mgr->latch[idx]);
887 // look up in hash table
889 if( pool = bt_findpool(bt, page_no, idx) ) {
891 __sync_fetch_and_add(&pool->pin, 1);
893 _InterlockedIncrement (&pool->pin);
895 bt_releaseread (&bt->mgr->latch[idx]);
900 // upgrade to write lock
902 bt_releaseread (&bt->mgr->latch[idx]);
903 bt_writelock (&bt->mgr->latch[idx]);
905 // try to find page in pool with write lock
907 if( pool = bt_findpool(bt, page_no, idx) ) {
909 __sync_fetch_and_add(&pool->pin, 1);
911 _InterlockedIncrement (&pool->pin);
913 bt_releasewrite (&bt->mgr->latch[idx]);
918 // allocate a new pool node
919 // and add to hash table
922 slot = __sync_fetch_and_add(&bt->mgr->poolcnt, 1);
924 slot = _InterlockedIncrement (&bt->mgr->poolcnt) - 1;
927 if( ++slot < bt->mgr->poolmax ) {
928 pool = bt->mgr->pool + slot;
931 if( bt_mapsegment(bt, pool, page_no) )
934 bt_linkhash(bt, pool, page_no, idx);
936 __sync_fetch_and_add(&pool->pin, 1);
938 _InterlockedIncrement (&pool->pin);
940 bt_releasewrite (&bt->mgr->latch[idx]);
944 // pool table is full
945 // find best pool entry to evict
948 __sync_fetch_and_add(&bt->mgr->poolcnt, -1);
950 _InterlockedDecrement (&bt->mgr->poolcnt);
955 victim = __sync_fetch_and_add(&bt->mgr->evicted, 1);
957 victim = _InterlockedIncrement (&bt->mgr->evicted) - 1;
959 victim %= bt->mgr->hashsize;
961 // try to get write lock
962 // skip entry if not obtained
964 if( !bt_writetry (&bt->mgr->latch[victim]) )
967 // if pool entry is empty
968 // or any pages are pinned
971 if( !(pool = bt_findlru(bt, bt->mgr->hash[victim])) ) {
972 bt_releasewrite (&bt->mgr->latch[victim]);
976 // unlink victim pool node from hash table
978 if( node = pool->hashprev )
979 node->hashnext = pool->hashnext;
980 else if( node = pool->hashnext )
981 bt->mgr->hash[victim] = node->slot;
983 bt->mgr->hash[victim] = 0;
985 if( node = pool->hashnext )
986 node->hashprev = pool->hashprev;
988 bt_releasewrite (&bt->mgr->latch[victim]);
990 // remove old file mapping
992 munmap (pool->map, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
994 FlushViewOfFile(pool->map, 0);
995 UnmapViewOfFile(pool->map);
996 CloseHandle(pool->hmap);
1000 // create new pool mapping
1001 // and link into hash table
1003 if( bt_mapsegment(bt, pool, page_no) )
1006 bt_linkhash(bt, pool, page_no, idx);
1008 __sync_fetch_and_add(&pool->pin, 1);
1010 _InterlockedIncrement (&pool->pin);
1012 bt_releasewrite (&bt->mgr->latch[idx]);
1017 // place write, read, or parent lock on requested page_no.
1018 // pin to buffer pool and return page pointer
1020 BTERR bt_lockpage(BtDb *bt, uid page_no, BtLock mode, BtPage *pageptr)
1026 // find/create maping in pool table
1027 // and pin our pool slot
1029 if( pool = bt_pinpage(bt, page_no) )
1030 subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping
1034 page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits));
1037 uint idx = subpage / 8;
1038 uint bit = subpage % 8;
1040 if( ~((bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8)/8))[idx] >> bit) & 1 ) {
1041 madvise (page, bt->mgr->page_size, MADV_WILLNEED);
1042 (bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8)/8))[idx] |= 1 << bit;
1049 bt_readlock (page->latch->readwr);
1052 bt_writelock (page->latch->readwr);
1055 bt_readlock (page->latch->access);
1058 bt_writelock (page->latch->access);
1061 bt_writelock (page->latch->parent);
1064 return bt->err = BTERR_lock;
1072 // remove write, read, or parent lock on requested page
1074 BTERR bt_unlockpage(BtDb *bt, uid page_no, BtLock mode)
1080 // since page is pinned
1081 // it should still be in the buffer pool
1082 // and is in no danger of being a victim for reuse
1084 idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
1085 bt_readlock (&bt->mgr->latch[idx]);
1087 if( pool = bt_findpool(bt, page_no, idx) )
1088 subpage = (uint)(page_no & bt->mgr->poolmask);
1090 return bt->err = BTERR_hash;
1092 bt_releaseread (&bt->mgr->latch[idx]);
1093 page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits));
1097 bt_releaseread (page->latch->readwr);
1100 bt_releasewrite (page->latch->readwr);
1103 bt_releaseread (page->latch->access);
1106 bt_releasewrite (page->latch->access);
1109 bt_releasewrite (page->latch->parent);
1112 return bt->err = BTERR_lock;
1116 __sync_fetch_and_add(&pool->pin, -1);
1118 _InterlockedDecrement (&pool->pin);
1123 // deallocate a deleted page
1124 // place on free chain out of allocator page
1126 BTERR bt_freepage(BtDb *bt, uid page_no)
1128 // obtain delete lock on deleted page
1130 if( bt_lockpage(bt, page_no, BtLockDelete, NULL) )
1133 // obtain write lock on deleted page
1135 if( bt_lockpage(bt, page_no, BtLockWrite, &bt->temp) )
1138 // lock allocation page
1140 if ( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) )
1143 // store chain in second right
1144 bt_putid(bt->temp->right, bt_getid(bt->alloc[1].right));
1145 bt_putid(bt->alloc[1].right, page_no);
1149 if( bt_unlockpage(bt, ALLOC_page, BtLockWrite) )
1152 // remove write lock on deleted node
1154 if( bt_unlockpage(bt, page_no, BtLockWrite) )
1157 // remove delete lock on deleted node
1159 if( bt_unlockpage(bt, page_no, BtLockDelete) )
1165 // allocate a new page and write page into it
1167 uid bt_newpage(BtDb *bt, BtPage page)
1177 if ( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) )
1180 // use empty chain first
1181 // else allocate empty page
1183 if( new_page = bt_getid(bt->alloc[1].right) ) {
1184 if( bt_lockpage (bt, new_page, BtLockWrite, &bt->temp) )
1186 bt_putid(bt->alloc[1].right, bt_getid(bt->temp->right));
1187 if( bt_unlockpage (bt, new_page, BtLockWrite) )
1191 new_page = bt_getid(bt->alloc->right);
1192 bt_putid(bt->alloc->right, new_page+1);
1197 memset(bt->zero, 0, sizeof(BtLatchSet)); // clear locks
1198 memcpy((char *)bt->zero + sizeof(BtLatchSet), (char *)page + sizeof(BtLatchSet), bt->mgr->page_size - sizeof(BtLatchSet));
1200 if ( pwrite(bt->mgr->idx, bt->zero, bt->mgr->page_size, new_page << bt->mgr->page_bits) < bt->mgr->page_size )
1201 return bt->err = BTERR_wrt, 0;
1203 // if writing first page of pool block, zero last page in the block
1205 if ( !reuse && bt->mgr->poolmask > 0 && (new_page & bt->mgr->poolmask) == 0 )
1207 // use zero buffer to write zeros
1208 memset(bt->zero, 0, bt->mgr->page_size);
1209 if ( pwrite(bt->mgr->idx,bt->zero, bt->mgr->page_size, (new_page | bt->mgr->poolmask) << bt->mgr->page_bits) < bt->mgr->page_size )
1210 return bt->err = BTERR_wrt, 0;
1213 // bring new page into pool and copy page.
1214 // this will extend the file into the new pages.
1216 if( bt_lockpage(bt, new_page, BtLockWrite, &pmap) )
1219 memcpy(pmap, page, bt->mgr->page_size);
1221 if( bt_unlockpage (bt, new_page, BtLockWrite) )
1224 // unlock page allocation page
1226 if ( bt_unlockpage(bt, ALLOC_page, BtLockWrite) )
1232 // find slot in page for given key at a given level
1234 int bt_findslot (BtDb *bt, unsigned char *key, uint len)
1236 uint diff, higher = bt->page->cnt, low = 1, slot;
1239 // make stopper key an infinite fence value
1241 if( bt_getid (bt->page->right) )
1246 // low is the next candidate, higher is already
1247 // tested as .ge. the given key, loop ends when they meet
1249 while( diff = higher - low ) {
1250 slot = low + ( diff >> 1 );
1251 if( keycmp (keyptr(bt->page, slot), key, len) < 0 )
1254 higher = slot, good++;
1257 // return zero if key is on right link page
1259 return good ? higher : 0;
1262 // find and load page at given level for given key
1263 // leave page rd or wr locked as requested
1265 int bt_loadpage (BtDb *bt, unsigned char *key, uint len, uint lvl, uint lock)
1267 uid page_no = ROOT_page, prevpage = 0;
1268 uint drill = 0xff, slot;
1269 uint mode, prevmode;
1271 // start at root of btree and drill down
1274 // determine lock mode of drill level
1275 mode = (lock == BtLockWrite) && (drill == lvl) ? BtLockWrite : BtLockRead;
1277 bt->page_no = page_no;
1279 // obtain access lock using lock chaining with Access mode
1281 if( page_no > ROOT_page )
1282 if( bt_lockpage(bt, page_no, BtLockAccess, NULL) )
1286 if( bt_unlockpage(bt, prevpage, prevmode) )
1289 // obtain read lock using lock chaining
1290 // and pin page contents
1292 if( bt_lockpage(bt, page_no, mode, &bt->page) )
1295 if( page_no > ROOT_page )
1296 if( bt_unlockpage(bt, page_no, BtLockAccess) )
1299 // re-read and re-lock root after determining actual level of root
1301 if( bt->page->lvl != drill) {
1302 if ( bt->page_no != ROOT_page )
1303 return bt->err = BTERR_struct, 0;
1305 drill = bt->page->lvl;
1307 if( lock == BtLockWrite && drill == lvl )
1308 if( bt_unlockpage(bt, page_no, mode) )
1314 // find key on page at this level
1315 // and descend to requested level
1317 if( !bt->page->kill && (slot = bt_findslot (bt, key, len)) ) {
1321 while( slotptr(bt->page, slot)->dead )
1322 if( slot++ < bt->page->cnt )
1325 page_no = bt_getid(bt->page->right);
1329 page_no = bt_getid(slotptr(bt->page, slot)->id);
1333 // or slide right into next page
1334 // (slide left from deleted page)
1337 page_no = bt_getid(bt->page->right);
1339 // continue down / right using overlapping locks
1340 // to protect pages being killed or split.
1343 prevpage = bt->page_no;
1347 // return error on end of right chain
1349 bt->err = BTERR_struct;
1350 return 0; // return error
1353 // find and delete key on page by marking delete flag bit
1354 // when page becomes empty, delete it
1356 BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl)
1358 unsigned char lowerkey[256], higherkey[256];
1363 if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) )
1364 ptr = keyptr(bt->page, slot);
1368 // if key is found delete it, otherwise ignore request
1370 if( !keycmp (ptr, key, len) )
1371 if( slotptr(bt->page, slot)->dead == 0 ) {
1372 slotptr(bt->page,slot)->dead = 1;
1373 if( slot < bt->page->cnt )
1374 bt->page->dirty = 1;
1378 // return if page is not empty, or it has no right sibling
1380 right = bt_getid(bt->page->right);
1381 page_no = bt->page_no;
1383 if( !right || bt->page->act )
1384 return bt_unlockpage(bt, page_no, BtLockWrite);
1386 // obtain Parent lock over write lock
1388 if( bt_lockpage(bt, page_no, BtLockParent, NULL) )
1391 // keep copy of key to delete
1393 ptr = keyptr(bt->page, bt->page->cnt);
1394 memcpy(lowerkey, ptr, ptr->len + 1);
1396 // lock and map right page
1398 if ( bt_lockpage(bt, right, BtLockWrite, &bt->temp) )
1401 // pull contents of next page into current empty page
1402 memcpy((char *)bt->page + sizeof(BtLatchSet), (char *)bt->temp + sizeof(BtLatchSet), bt->mgr->page_size - sizeof(BtLatchSet));
1404 // keep copy of key to update
1405 ptr = keyptr(bt->temp, bt->temp->cnt);
1406 memcpy(higherkey, ptr, ptr->len + 1);
1408 // Mark right page as deleted and point it to left page
1409 // until we can post updates at higher level.
1411 bt_putid(bt->temp->right, page_no);
1415 if( bt_unlockpage(bt, right, BtLockWrite) )
1417 if( bt_unlockpage(bt, page_no, BtLockWrite) )
1420 // delete old lower key to consolidated node
1422 if( bt_deletekey (bt, lowerkey + 1, *lowerkey, lvl + 1) )
1425 // redirect higher key directly to consolidated node
1427 tod = (uint)time(NULL);
1429 if( bt_insertkey (bt, higherkey+1, *higherkey, lvl + 1, page_no, tod) )
1432 // obtain write lock and
1433 // add right block to free chain
1435 if( bt_freepage (bt, right) )
1438 // remove ParentModify lock
1440 if( bt_unlockpage(bt, page_no, BtLockParent) )
1446 // find key in leaf level and return row-id
1448 uid bt_findkey (BtDb *bt, unsigned char *key, uint len)
1454 if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) )
1455 ptr = keyptr(bt->page, slot);
1459 // if key exists, return row-id
1460 // otherwise return 0
1462 if( ptr->len == len && !memcmp (ptr->key, key, len) )
1463 id = bt_getid(slotptr(bt->page,slot)->id);
1467 if ( bt_unlockpage(bt, bt->page_no, BtLockRead) )
1473 // check page for space available,
1474 // clean if necessary and return
1475 // 0 - page needs splitting
1478 uint bt_cleanpage(BtDb *bt, uint amt)
1480 uint nxt = bt->mgr->page_size;
1481 BtPage page = bt->page;
1482 uint cnt = 0, idx = 0;
1483 uint max = page->cnt;
1486 if( page->min >= (page->cnt+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1489 // skip cleanup if nothing to reclaim
1494 memcpy (bt->frame, page, bt->mgr->page_size);
1496 // skip page info and set rest of page to zero
1498 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1502 while( cnt++ < max ) {
1503 // always leave fence key in list
1504 if( cnt < max && slotptr(bt->frame,cnt)->dead )
1508 key = keyptr(bt->frame, cnt);
1509 nxt -= key->len + 1;
1510 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1513 memcpy(slotptr(page, ++idx)->id, slotptr(bt->frame, cnt)->id, BtId);
1514 if( !(slotptr(page, idx)->dead = slotptr(bt->frame, cnt)->dead) )
1516 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1517 slotptr(page, idx)->off = nxt;
1522 if( page->min >= (page->cnt+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1528 // split the root and raise the height of the btree
// bt_splitroot: the root is full — move its current contents to a fresh
// page, then rebuild the root with exactly two separator keys:
//   slot 1 -> `newkey`  pointing at the copied page (new_page)
//   slot 2 -> `oldkey`  pointing at page_no2 (the right sibling produced
//                       by the split in the caller)
// Keys are length-prefixed: byte 0 is the length, so *key + 1 bytes total.
// NOTE(review): elided excerpt — the `nxt -= *key + 1;` adjustments and
// the lvl/cnt updates implied by the trailing comments are on lines not
// visible here.
1530 BTERR bt_splitroot(BtDb *bt, unsigned char *newkey, unsigned char *oldkey, uid page_no2)
1532 uint nxt = bt->mgr->page_size;
1533 BtPage root = bt->page;
1536 // Obtain an empty page to use, and copy the current
1537 // root contents into it
1539 if( !(new_page = bt_newpage(bt, root)) )
1542 // preserve the page info at the bottom
1543 // and set rest to zero
1545 memset(root+1, 0, bt->mgr->page_size - sizeof(*root));
1547 // insert first key on newroot page
1550 memcpy ((unsigned char *)root + nxt, newkey, *newkey + 1);
1551 bt_putid(slotptr(root, 1)->id, new_page);
1552 slotptr(root, 1)->off = nxt;
1554 // insert second key on newroot page
1555 // and increase the root height
1558 memcpy ((unsigned char *)root + nxt, oldkey, *oldkey + 1);
1559 bt_putid(slotptr(root, 2)->id, page_no2);
1560 slotptr(root, 2)->off = nxt;
// the root never has a right sibling
1562 bt_putid(root->right, 0);
1563 root->min = nxt; // reset lowest used offset and key count
1568 // release root (bt->page)
// the root page stays at ROOT_page; only its write latch is dropped
1570 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1573 // split already locked full node
// bt_splitpage: split the write-locked page bt->page in half.  The upper
// half of the keys moves to a newly allocated right sibling; the lower
// half stays in place.  The two fence keys are then posted to the parent
// level (lvl + 1) via bt_insertkey, or — when the page is the root — the
// whole split is delegated to bt_splitroot.
// NOTE(review): elided excerpt — closing braces, error `return` bodies
// after the lock/insert calls, and some locals are not visible here.
1576 BTERR bt_splitpage (BtDb *bt)
1578 uint cnt = 0, idx = 0, max, nxt = bt->mgr->page_size;
// key images are length-prefixed and at most 256 bytes including prefix
1579 unsigned char oldkey[256], lowerkey[256];
1580 uid page_no = bt->page_no, right;
1581 BtPage page = bt->page;
1582 uint lvl = page->lvl;
1587 // split higher half of keys to bt->frame
1588 // the last key (fence key) might be dead
1590 tod = (uint)time(NULL);
1592 memset (bt->frame, 0, bt->mgr->page_size);
1593 max = (int)page->cnt;
// copy slots/keys into the scratch frame; cnt presumably starts at the
// upper half (the initializer line is elided — TODO confirm)
1597 while( cnt++ < max ) {
1598 key = keyptr(page, cnt);
1599 nxt -= key->len + 1;
1600 memcpy ((unsigned char *)bt->frame + nxt, key, key->len + 1);
1601 memcpy(slotptr(bt->frame,++idx)->id, slotptr(page,cnt)->id, BtId);
1602 if( !(slotptr(bt->frame, idx)->dead = slotptr(page, cnt)->dead) )
1604 slotptr(bt->frame, idx)->tod = slotptr(page, cnt)->tod;
1605 slotptr(bt->frame, idx)->off = nxt;
1608 // remember existing fence key for new page to the right
// `key` still points at the last (fence) key after the loop
1610 memcpy (oldkey, key, key->len + 1);
// finish the new right page's header
1612 bt->frame->bits = bt->mgr->page_bits;
1613 bt->frame->min = nxt;
1614 bt->frame->cnt = idx;
1615 bt->frame->lvl = lvl;
// link the new page into the right-sibling chain; the root (and pages at
// or below ROOT_page) have no right link to preserve
1619 if( page_no > ROOT_page ) {
1620 right = bt_getid (page->right);
1621 bt_putid(bt->frame->right, right);
1624 // get new free page and write frame to it.
1626 if( !(new_page = bt_newpage(bt, bt->frame)) )
1629 // update lower keys to continue in old page
// snapshot the full page, clear it, and re-pack just the lower half
1631 memcpy (bt->frame, page, bt->mgr->page_size);
1632 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1633 nxt = bt->mgr->page_size;
1638 // assemble page of smaller keys
1639 // (they're all active keys)
1641 while( cnt++ < max / 2 ) {
1642 key = keyptr(bt->frame, cnt);
1643 nxt -= key->len + 1;
1644 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1645 memcpy(slotptr(page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId);
1646 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1647 slotptr(page, idx)->off = nxt;
1651 // remember fence key for old page
1653 memcpy(lowerkey, key, key->len + 1);
// old page's right sibling is now the freshly written page
1654 bt_putid(page->right, new_page);
1658 // if current page is the root page, split it
1660 if( page_no == ROOT_page )
1661 return bt_splitroot (bt, lowerkey, oldkey, new_page);
1663 // obtain Parent/Write locks
1664 // for left and right node pages
// Parent latches serialize concurrent parent-key maintenance (lock set 3
// in the header comment)
1666 if( bt_lockpage (bt, new_page, BtLockParent, NULL) )
1669 if( bt_lockpage (bt, page_no, BtLockParent, NULL) )
1672 // release wr lock on left page
1674 if( bt_unlockpage (bt, page_no, BtLockWrite) )
1677 // insert new fence for reformulated left block
// key image is length-prefixed: skip byte 0 (length) and pass it as len
1679 if( bt_insertkey (bt, lowerkey+1, *lowerkey, lvl + 1, page_no, tod) )
1682 // fix old fence for newly allocated right block page
1684 if( bt_insertkey (bt, oldkey+1, *oldkey, lvl + 1, new_page, tod) )
1687 // release Parent & Write locks
1689 if( bt_unlockpage (bt, new_page, BtLockParent) )
1692 if( bt_unlockpage (bt, page_no, BtLockParent) )
1698 // Insert new key into the btree at requested level.
1699 // Level zero pages are leaf pages and are unlocked at exit.
1700 // Interior pages remain locked.
// bt_insertkey: insert (key, id) at tree level `lvl` with timestamp
// `tod`.  An existing key is updated in place (id/tod refreshed, dead
// flag cleared).  When the target page is full it is cleaned or split
// and the search presumably retried (the enclosing retry loop and the
// error `return` bodies sit on elided lines — TODO confirm).
1702 BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod)
// locate the target page under a write latch; slot == 0 => error
1709 if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) )
1710 ptr = keyptr(bt->page, slot);
1714 bt->err = BTERR_ovflw;
1718 // if key already exists, update id and return
1722 if( !keycmp (ptr, key, len) ) {
1723 slotptr(page, slot)->dead = 0;
1724 slotptr(page, slot)->tod = tod;
1725 bt_putid(slotptr(page,slot)->id, id);
1726 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1729 // check if page has enough space
// bt_cleanpage != 0 means space is available; otherwise split the page
1731 if( bt_cleanpage (bt, len) )
1734 if( bt_splitpage (bt) )
1738 // calculate next available slot and copy key into page
// keys grow downward: write the 1-byte length prefix then the key bytes
1740 page->min -= len + 1; // reset lowest used offset
1741 ((unsigned char *)page)[page->min] = len;
1742 memcpy ((unsigned char *)page + page->min +1, key, len );
// look for a dead slot at or after the insertion point to reuse
1744 for( idx = slot; idx < page->cnt; idx++ )
1745 if( slotptr(page, idx)->dead )
1748 // now insert key into array before slot
1749 // preserving the fence slot
// no dead slot found: the slot array grows by one (increment elided)
1751 if( idx == page->cnt )
// shift slots right to open a hole at `slot`
1757 *slotptr(page, idx) = *slotptr(page, idx -1), idx--;
1759 bt_putid(slotptr(page,slot)->id, id);
1760 slotptr(page, slot)->off = page->min;
1761 slotptr(page, slot)->tod = tod;
1762 slotptr(page, slot)->dead = 0;
1764 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1767 // cache page of keys into cursor and return starting slot for given key
// bt_startkey: position a scan — copy the leaf page containing `key`
// into bt->cursor and return the slot where iteration should begin.
// NOTE(review): elided excerpt — the function's final return and error
// handling are not visible here.
1769 uint bt_startkey (BtDb *bt, unsigned char *key, uint len)
1773 // cache page for retrieval
// read-latch the leaf, snapshot it into the cursor buffer, then unlatch
// so the scan proceeds against the private copy
1774 if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) )
1775 memcpy (bt->cursor, bt->page, bt->mgr->page_size);
1776 bt->cursor_page = bt->page_no;
1777 if ( bt_unlockpage(bt, bt->page_no, BtLockRead) )
1783 // return next slot for cursor page
1784 // or slide cursor right into next page
// bt_nextkey: advance the scan.  Steps through live slots of the cached
// cursor page; when the page is exhausted it follows the right-sibling
// link, caches that page, and continues.  A right link of 0 presumably
// ends the scan (the return-0 path is on elided lines — TODO confirm).
1786 uint bt_nextkey (BtDb *bt, uint slot)
1791 right = bt_getid(bt->cursor->right);
// skip dead slots; return the next live slot while any remain
1792 while( slot++ < bt->cursor->cnt )
1793 if( slotptr(bt->cursor,slot)->dead )
1795 else if( right || (slot < bt->cursor->cnt))
// slide right: read-latch the sibling, snapshot it into the cursor
// buffer, and release the latch
1803 bt->cursor_page = right;
1805 if( bt_lockpage(bt, right, BtLockRead, &bt->page) )
1808 memcpy (bt->cursor, bt->page, bt->mgr->page_size);
1810 if ( bt_unlockpage(bt, right, BtLockRead) )
// bt_key: return a pointer to the key stored at `slot` of the cached
// cursor page (valid only until the cursor is advanced to another page).
1819 BtKey bt_key(BtDb *bt, uint slot)
1821 return keyptr(bt->cursor, slot);
// bt_uid: return the row-id stored in `slot` of the cached cursor page.
1824 uid bt_uid(BtDb *bt, uint slot)
1826 return bt_getid(slotptr(bt->cursor,slot)->id);
// bt_tod: return the time-of-day stamp stored in `slot` of the cached
// cursor page.
1829 uint bt_tod(BtDb *bt, uint slot)
1831 return slotptr(bt->cursor,slot)->tod;
1844 // standalone program to index file of keys
1845 // then list them onto std-out
// index_file: per-thread worker.  Opens its own BtDb handle on the shared
// manager and dispatches on args->type (case-folded via `| 0x20`):
// write/insert, delete, find, scan (cursor walk to stdout), and a count
// pass over the leaf chain.  Input keys are newline-separated lines of
// args->infile; with args->num set, a 9-digit line number is appended.
// NOTE(review): both the unix and Windows signatures appear adjacent
// below because the surrounding #ifdef unix/#else lines are elided, as
// are the case labels, braces, and per-case cleanup.
1848 void *index_file (void *arg)
1850 uint __stdcall index_file (void *arg)
1853 int line = 0, found = 0, cnt = 0;
1854 uid next, page_no = LEAF_page; // start on first page of leaves
1855 unsigned char key[256];
1856 ThreadArg *args = arg;
1857 int ch, len = 0, slot;
1864 bt = bt_open (args->mgr);
// fold the command letter to lower case before dispatching
1867 switch(args->type | 0x20)
// --- insert/index pass ---
1870 fprintf(stderr, "started indexing for %s\n", args->infile);
1871 if( in = fopen (args->infile, "rb") )
1872 while( ch = getc(in), ch != EOF )
// num == 1: append a descending 9-digit sequence number;
// num > 1: append an ascending per-thread-disjoint sequence number
1877 if( args->num == 1 )
1878 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
1879 else if( args->num )
1880 sprintf((char *)key+len, "%.9d", line+args->idx * args->num), len += 9;
1882 if( bt_insertkey (bt, key, len, 0, line, *tod) )
1883 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
// accumulate line bytes into key, capped at 255 (1-byte length prefix)
1886 else if( len < 255 )
1888 fprintf(stderr, "finished %s for %d keys\n", args->infile, line);
// --- delete pass (same key construction as insert) ---
1892 fprintf(stderr, "started deleting keys for %s\n", args->infile);
1893 if( in = fopen (args->infile, "rb") )
1894 while( ch = getc(in), ch != EOF )
1898 if( args->num == 1 )
1899 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
1900 else if( args->num )
1901 sprintf((char *)key+len, "%.9d", line+args->idx * args->num), len += 9;
1903 if( bt_deletekey (bt, key, len, 0) )
1904 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
1907 else if( len < 255 )
1909 fprintf(stderr, "finished %s for keys, %d \n", args->infile, line);
// --- find pass: counts hits in `found` ---
1913 fprintf(stderr, "started finding keys for %s\n", args->infile);
1914 if( in = fopen (args->infile, "rb") )
1915 while( ch = getc(in), ch != EOF )
1919 if( args->num == 1 )
1920 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
1921 else if( args->num )
1922 sprintf((char *)key+len, "%.9d", line+args->idx * args->num), len += 9;
1924 if( bt_findkey (bt, key, len) )
1927 fprintf(stderr, "Error %d Syserr %d Line: %d\n", bt->err, errno, line), exit(0);
1930 else if( len < 255 )
1932 fprintf(stderr, "finished %s for %d keys, found %d\n", args->infile, line, found);
// --- scan pass: cursor walk, keys written to stdout one per line ---
1938 fprintf(stderr, "started reading\n");
1940 if( slot = bt_startkey (bt, key, len) )
1943 fprintf(stderr, "Error %d in StartKey. Syserror: %d\n", bt->err, errno), exit(0);
1945 while( slot = bt_nextkey (bt, slot) ) {
1946 ptr = bt_key(bt, slot);
1947 fwrite (ptr->key, ptr->len, 1, stdout);
1948 fputc ('\n', stdout);
// --- count pass: follow the leaf right-sibling chain under read latches
1954 fprintf(stderr, "started reading\n");
1957 bt_lockpage (bt, page_no, BtLockRead, &page);
1959 next = bt_getid (page->right);
1960 bt_unlockpage (bt, page_no, BtLockRead);
1961 } while( page_no = next );
1963 cnt--; // remove stopper key
1964 fprintf(stderr, " Total keys read %d\n", cnt);
// convenience alias used by the (elided) unix timing code
1976 typedef struct timeval timer;
// main: standalone driver.  Usage (see the fprintf below): parses page
// bits, buffer-pool segment count/size and line-number flag, opens the
// buffer manager, then spawns one index_file worker thread per input
// file and reports elapsed time.
// NOTE(review): elided excerpt — the #ifdef unix/#else platform guards
// are missing, which is why both pthread and Win32 thread code, and both
// the timeval- and time_t-based timing variants, appear interleaved.
1978 int main (int argc, char **argv)
1980 int idx, cnt, len, slot, err;
1981 int segsize, bits = 16;
// start/stop declared as one-element arrays so &start/*start idioms work
1986 time_t start[1], stop[1];
1999 fprintf (stderr, "Usage: %s idx_file Read/Write/Scan/Delete/Find [page_bits mapped_segments seg_bits line_numbers src_file1 src_file2 ... ]\n", argv[0]);
2000 fprintf (stderr, " where page_bits is the page size in bits\n");
2001 fprintf (stderr, " mapped_segments is the number of mmap segments in buffer pool\n");
2002 fprintf (stderr, " seg_bits is the size of individual segments in buffer pool in pages in bits\n");
2003 fprintf (stderr, " line_numbers set to 1 to append line numbers to input lines\n");
2004 fprintf (stderr, " src_file1 thru src_filen are files of keys separated by newline\n");
2009 gettimeofday(&start, NULL);
// optional positional arguments; defaults apply when absent (elided)
2015 bits = atoi(argv[3]);
2018 poolsize = atoi(argv[4]);
2021 fprintf (stderr, "Warning: mapped_pool has no segments\n");
// pool segment count is stored in a 16-bit field — warn past 65535
2023 if( poolsize > 65535 )
2024 fprintf (stderr, "Warning: mapped_pool > 65535 segments\n");
2027 segsize = atoi(argv[5]);
2029 segsize = 4; // 16 pages per mmap segment
2032 num = atoi(argv[6]);
// one worker thread per source file (unix / Windows allocations)
2036 threads = malloc (cnt * sizeof(pthread_t));
2038 threads = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, cnt * sizeof(HANDLE));
2040 args = malloc (cnt * sizeof(ThreadArg));
2042 mgr = bt_mgr ((argv[1]), BT_rw, bits, poolsize, segsize, poolsize / 8);
2045 fprintf(stderr, "Index Open Error %s\n", argv[1]);
// fire off the threads: input files start at argv[7]
2051 for( idx = 0; idx < cnt; idx++ ) {
2052 args[idx].infile = argv[idx + 7];
2053 args[idx].type = argv[2][0];
2054 args[idx].mgr = mgr;
2055 args[idx].num = num;
2056 args[idx].idx = idx;
2058 if( err = pthread_create (threads + idx, NULL, index_file, args + idx) )
2059 fprintf(stderr, "Error creating thread %d\n", err);
2061 threads[idx] = (HANDLE)_beginthreadex(NULL, 65536, index_file, args + idx, 0, NULL);
2065 // wait for termination
// unix join + millisecond wall-clock from gettimeofday
2068 for( idx = 0; idx < cnt; idx++ )
2069 pthread_join (threads[idx], NULL);
2070 gettimeofday(&stop, NULL);
2071 real_time = 1000.0 * ( stop.tv_sec - start.tv_sec ) + 0.001 * (stop.tv_usec - start.tv_usec );
// Windows wait + handle cleanup, seconds-resolution time_t fallback
2073 WaitForMultipleObjects (cnt, threads, TRUE, INFINITE);
2075 for( idx = 0; idx < cnt; idx++ )
2076 CloseHandle(threads[idx]);
2079 real_time = 1000 * (*stop - *start);
2081 fprintf(stderr, " Time to complete: %.2f seconds\n", real_time/1000);