1 // btree version threads2g sched_yield version
4 // author: karl malbrain, malbrain@cal.berkeley.edu
7 This work, including the source code, documentation
8 and related data, is placed into the public domain.
10 The original author is Karl Malbrain.
12 THIS SOFTWARE IS PROVIDED AS-IS WITHOUT WARRANTY
13 OF ANY KIND, NOT EVEN THE IMPLIED WARRANTY OF
14 MERCHANTABILITY. THE AUTHOR OF THIS SOFTWARE,
15 ASSUMES _NO_ RESPONSIBILITY FOR ANY CONSEQUENCE
16 RESULTING FROM THE USE, MODIFICATION, OR
17 REDISTRIBUTION OF THIS SOFTWARE.
20 // Please see the project home page for documentation
21 // code.google.com/p/high-concurrency-btree
23 #define _FILE_OFFSET_BITS 64
24 #define _LARGEFILE64_SOURCE
40 #define WIN32_LEAN_AND_MEAN
53 typedef unsigned long long uid;
56 typedef unsigned long long off64_t;
57 typedef unsigned short ushort;
58 typedef unsigned int uint;
61 #define BT_ro 0x6f72 // ro
62 #define BT_rw 0x7772 // rw
64 #define BT_maxbits 24 // maximum page size in bits
65 #define BT_minbits 9 // minimum page size in bits
66 #define BT_minpage (1 << BT_minbits) // minimum page size
67 #define BT_maxpage (1 << BT_maxbits) // maximum page size
70 There are five lock types for each node in three independent sets:
71 1. (set 1) AccessIntent: Sharable. Going to Read the node. Incompatible with NodeDelete.
72 2. (set 1) NodeDelete: Exclusive. About to release the node. Incompatible with AccessIntent.
73 3. (set 2) ReadLock: Sharable. Read the node. Incompatible with WriteLock.
74 4. (set 2) WriteLock: Exclusive. Modify the node. Incompatible with ReadLock and other WriteLocks.
75 5. (set 3) ParentModification: Exclusive. Change the node's parent keys. Incompatible with another ParentModification.
86 // mode & definition for latch implementation
95 // exclusive is set for write access
96 // share is count of read accessors
97 // grant write lock when share == 0
// 16-bit latch word: mutex serializes updates to the other fields,
// exclusive is set while a writer holds the latch, pending stalls new
// readers while a writer is waiting, share counts current readers.
100 volatile ushort mutex:1;
101 volatile ushort exclusive:1;
102 volatile ushort pending:1;
103 volatile ushort share:13;
// per-page latch set: three independent latches, matching the three
// lock sets described above (read/write, access/delete, parent)
107 BtLatch readwr[1]; // read/write page lock
108 BtLatch access[1]; // Access Intent/Page delete
109 BtLatch parent[1]; // Parent modification
112 // Define the length of the page and key pointers
116 // Page key slot definition.
118 // If BT_maxbits is 15 or less, you can save 4 bytes
119 // for each key stored by making the first two uints
120 // into ushorts. You can also save 4 bytes by removing
121 // the tod field from the key.
123 // Keys are marked dead, but remain on the page until
124 cleanup is called. The fence key (highest key) for
125 // the page is always present, even after cleanup.
// slot entry in the page's slot array: locates the key text on the page
// and carries its state and associated row-id
128 uint off:BT_maxbits; // page offset for key start
129 uint dead:1; // set for deleted key
130 uint tod; // time-stamp for key
131 unsigned char id[BtId]; // id associated with key
134 // The key structure occupies space at the upper end of
135 // each page. It's a length byte followed by the value
// variable-length key bytes; [1] is the old-style flexible-array idiom,
// actual length is given by the preceding len byte
140 unsigned char key[1];
143 // The first part of an index page.
144 // It is immediately followed
145 // by the BtSlot array of keys.
// page header; the BtSlot array follows immediately, key text grows
// down from the top of the page (see bt_cleanpage/bt_splitpage)
147 typedef struct Page {
148 BtLatchSet latch[1]; // Set of three latches
149 uint cnt; // count of keys in page
150 uint act; // count of active keys
151 uint min; // next key offset
152 unsigned char bits; // page size in bits
153 unsigned char lvl:6; // level of page
154 unsigned char kill:1; // page is being deleted
155 unsigned char dirty:1; // page has deleted keys
156 unsigned char right[BtId]; // page number to right
159 // The memory mapping pool table buffer manager entry
// one mapped segment of the index file; entries are chained per
// hash-table bucket via hashprev/hashnext (see bt_linkhash)
162 unsigned long long int lru; // number of times accessed
163 uid basepage; // mapped base page number
164 char *map; // mapped memory pointer
165 uint slot; // slot index in this array
166 volatile uint pin; // mapped page pin counter
167 void *hashprev; // previous pool entry for the same hash idx
168 void *hashnext; // next pool entry for the same hash idx
174 // The object structure for Btree access
// buffer manager shared by all threads: file handle, pool of mapped
// segments, and the hash table + latches that index the pool
177 uint page_size; // page size
178 uint page_bits; // page size in bits
179 uint seg_bits; // seg size in pages in bits
180 uint mode; // read-write mode
// NOTE(review): pooladvise is unix-only (bitmap of madvise'd pages,
// see bt_mapsegment/bt_lockpage)
182 char *pooladvise; // bit maps for pool page advisements
187 uint poolcnt; // highest page pool node in use
188 uint poolmax; // highest page pool node allocated
189 uint poolmask; // total size of pages in mmap segment - 1
190 uint hashsize; // size of Hash Table for pool entries
191 volatile uint evicted; // last evicted hash table slot
192 ushort *hash; // pool index for hash entries
193 BtLatch *latch; // latches for hash table slots
194 BtPool *pool; // memory pool page segments
// per-thread handle: private page frames over the shared manager
198 BtMgr *mgr; // buffer manager for thread
199 BtPage temp; // temporary frame buffer (memory mapped/file IO)
200 BtPage alloc; // frame buffer for alloc page ( page 0 )
201 BtPage cursor; // cached frame for start/next (never mapped)
202 BtPage frame; // spare frame for the page split (never mapped)
203 BtPage zero; // page frame for zeroes at end of file
204 BtPage page; // current page
205 uid page_no; // current page number
206 uid cursor_page; // current cursor page number
// mem backs frame/zero/cursor (3 page_size frames, see bt_open)
207 unsigned char *mem; // frame, cursor, page memory buffer
208 int err; // last error
222 extern void bt_close (BtDb *bt);
223 extern BtDb *bt_open (BtMgr *mgr);
224 extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod);
225 extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl);
226 extern uid bt_findkey (BtDb *bt, unsigned char *key, uint len);
227 extern uint bt_startkey (BtDb *bt, unsigned char *key, uint len);
228 extern uint bt_nextkey (BtDb *bt, uint slot);
231 extern BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolsize, uint segsize, uint hashsize);
232 void bt_mgrclose (BtMgr *mgr);
234 // Helper functions to return slot values
236 extern BtKey bt_key (BtDb *bt, uint slot);
237 extern uid bt_uid (BtDb *bt, uint slot);
238 extern uint bt_tod (BtDb *bt, uint slot);
240 // BTree page number constants
245 // Number of levels to create in a new BTree
249 // The page is allocated from low and hi ends.
250 // The key offsets and row-id's are allocated
251 // from the bottom, while the text of the key
252 // is allocated from the top. When the two
253 // areas meet, the page is split into two.
255 // A key consists of a length byte, two bytes of
256 // index number (0 - 65534), and up to 253 bytes
257 // of key value. Duplicate keys are discarded.
258 // Associated with each key is a 48 bit row-id.
260 // The b-tree root is always located at page 1.
261 // The first leaf page of level zero is always
262 // located on page 2.
264 // The b-tree pages are linked with next
265 // pointers to facilitate enumerators,
266 // and provide for concurrency.
268 // When the root page fills, it is split in two and
269 // the tree height is raised by a new root at page
270 // one with two keys.
272 // Deleted keys are marked with a dead bit until
273 // page cleanup. The fence key for a node is always
274 // present, even after deletion and cleanup.
276 // Groups of pages called segments from the btree are optionally
277 // cached with a memory mapped pool. A hash table is used to keep
278 // track of the cached segments. This behaviour is controlled
279 // by the cache block size parameter to bt_open.
281 // To achieve maximum concurrency one page is locked at a time
282 as the tree is traversed to find the leaf key in question. The right
283 // page numbers are used in cases where the page is being split,
286 // Page 0 is dedicated to lock for new page extensions,
287 // and chains empty pages together for reuse.
289 // The ParentModification lock on a node is obtained to prevent resplitting
290 // or deleting a node before its fence is posted into its upper level.
292 // Empty pages are chained together through the ALLOC page and reused.
294 // Access macros to address slot and key values from the page
// Access macros to address slot and key values from the page.
// slotptr: the slot-th BtSlot (1-based) in the slot array that begins
// immediately after the page header.
// keyptr: the BtKey stored at that slot's byte offset within the page.
// Arguments are fully parenthesized so expression arguments expand safely.
#define slotptr(page, slot) (((BtSlot *)((page)+1)) + ((slot)-1))
#define keyptr(page, slot) ((BtKey)((unsigned char*)(page) + slotptr(page, slot)->off))
299 void bt_putid(unsigned char *dest, uid id)
304 dest[i] = (unsigned char)id, id >>= 8;
307 uid bt_getid(unsigned char *src)
312 for( i = 0; i < BtId; i++ )
313 id <<= 8, id |= *src++;
// tear down the buffer manager: unmap every pooled segment, then
// release the file handle and manager allocations
318 void bt_mgrclose (BtMgr *mgr)
323 // release mapped pages
324 // note that slot zero is never used
326 for( slot = 1; slot < mgr->poolmax; slot++ ) {
327 pool = mgr->pool + slot;
// unix variant
330 munmap (pool->map, (mgr->poolmask+1) << mgr->page_bits);
// Windows variant (platform #ifdef lines not visible here — confirm)
333 FlushViewOfFile(pool->map, 0);
334 UnmapViewOfFile(pool->map);
335 CloseHandle(pool->hmap);
345 free (mgr->pooladvise);
// Windows cleanup: flush and close the index file, free manager arrays
348 FlushFileBuffers(mgr->idx);
349 CloseHandle(mgr->idx);
350 GlobalFree (mgr->pool);
351 GlobalFree (mgr->hash);
352 GlobalFree (mgr->latch);
357 // close and release memory
// release a thread's BtDb handle and its page-frame memory
// (bt->mem holds the frame/zero/cursor buffers allocated in bt_open)
359 void bt_close (BtDb *bt)
366 VirtualFree (bt->mem, 0, MEM_RELEASE);
371 // open/create new btree buffer manager
373 // call with file_name, BT_openmode, bits in page size (e.g. 16),
374 // size of mapped page pool (e.g. 8192)
// open/create the btree buffer manager.
//   name     - index file path
//   mode     - BT_ro or BT_rw
//   bits     - page size in bits (BT_minbits..BT_maxbits)
//   poolmax  - number of mapped-segment pool entries
//   segsize  - log2 of pages per mapped segment
//   hashsize - buckets in the pool hash table
// returns NULL on any failure (file, allocation, or initial page writes).
// Both unix and Windows paths appear below; the selecting #ifdef lines
// are not visible in this listing.
376 BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolmax, uint segsize, uint hashsize)
378 uint lvl, attr, cacheblk, last;
387 SYSTEM_INFO sysinfo[1];
390 // determine sanity of page size and buffer pool
392 if( bits > BT_maxbits )
394 else if( bits < BT_minbits )
398 return NULL; // must have buffer pool
// unix open path
401 mgr = calloc (1, sizeof(BtMgr));
403 switch (mode & 0x7fff)
406 mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666);
412 mgr->idx = open ((char*)name, O_RDONLY);
417 return free(mgr), NULL;
419 cacheblk = 4096; // minimum mmap segment size for unix
// Windows open path
422 mgr = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, sizeof(BtMgr));
423 attr = FILE_ATTRIBUTE_NORMAL;
424 switch (mode & 0x7fff)
427 mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL);
433 mgr->idx = CreateFile(name, GENERIC_READ, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_EXISTING, attr, NULL);
437 if( mgr->idx == INVALID_HANDLE_VALUE )
438 return GlobalFree(mgr), NULL;
440 // normalize cacheblk to multiple of sysinfo->dwAllocationGranularity
441 GetSystemInfo(sysinfo);
442 cacheblk = sysinfo->dwAllocationGranularity;
446 alloc = malloc (BT_maxpage);
449 // read minimum page size to get root info
// nonzero file size means an existing index: read its header page;
// a zero-size file cannot be opened read-only
451 if( size = lseek (mgr->idx, 0L, 2) ) {
452 if( pread(mgr->idx, alloc, BT_minpage, 0) == BT_minpage )
455 return free(mgr), free(alloc), NULL;
456 } else if( mode == BT_ro )
457 return bt_mgrclose (mgr), NULL;
459 alloc = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE);
460 size = GetFileSize(mgr->idx, amt);
463 if( !ReadFile(mgr->idx, (char *)alloc, BT_minpage, amt, NULL) )
464 return bt_mgrclose (mgr), NULL;
466 } else if( mode == BT_ro )
467 return bt_mgrclose (mgr), NULL;
470 mgr->page_size = 1 << bits;
471 mgr->page_bits = bits;
473 mgr->poolmax = poolmax;
476 if( cacheblk < mgr->page_size )
477 cacheblk = mgr->page_size;
479 // mask for partial memmaps
481 mgr->poolmask = (cacheblk >> bits) - 1;
483 // see if requested size of pages per memmap is greater
485 if( (1 << segsize) > mgr->poolmask )
486 mgr->poolmask = (1 << segsize) - 1;
// derive seg_bits = log2(pages per segment)
490 while( (1 << mgr->seg_bits) <= mgr->poolmask )
493 mgr->hashsize = hashsize;
// allocate pool, hash table, bucket latches (unix then Windows variant)
496 mgr->pool = calloc (poolmax, sizeof(BtPool));
497 mgr->hash = calloc (hashsize, sizeof(ushort));
498 mgr->latch = calloc (hashsize, sizeof(BtLatch));
499 mgr->pooladvise = calloc (poolmax, (mgr->poolmask + 8) / 8);
501 mgr->pool = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, poolmax * sizeof(BtPool));
502 mgr->hash = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(ushort));
503 mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtLatch));
509 // initializes an empty b-tree with root page and page of leaves
// write the alloc page (page 0): next-free page number in its right field
511 memset (alloc, 0, 1 << bits);
512 bt_putid(alloc->right, MIN_lvl+1);
513 alloc->bits = mgr->page_bits;
516 if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size )
517 return bt_mgrclose (mgr), NULL;
519 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
520 return bt_mgrclose (mgr), NULL;
522 if( *amt < mgr->page_size )
523 return bt_mgrclose (mgr), NULL;
// write one page per initial level, each holding only a stopper key
526 memset (alloc, 0, 1 << bits);
527 alloc->bits = mgr->page_bits;
529 for( lvl=MIN_lvl; lvl--; ) {
530 slotptr(alloc, 1)->off = mgr->page_size - 3;
531 bt_putid(slotptr(alloc, 1)->id, lvl ? MIN_lvl - lvl + 1 : 0); // next(lower) page number
532 key = keyptr(alloc, 1);
533 key->len = 2; // create stopper key
536 alloc->min = mgr->page_size - 3;
541 if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size )
542 return bt_mgrclose (mgr), NULL;
544 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
545 return bt_mgrclose (mgr), NULL;
547 if( *amt < mgr->page_size )
548 return bt_mgrclose (mgr), NULL;
552 // create empty page area by writing last page of first
553 // segment area (other pages are zeroed by O/S)
555 if( mgr->poolmask ) {
556 memset(alloc, 0, mgr->page_size);
557 last = mgr->poolmask;
// make sure the extension covers the pages written above
559 while( last < MIN_lvl + 1 )
560 last += mgr->poolmask + 1;
563 pwrite(mgr->idx, alloc, mgr->page_size, last << mgr->page_bits);
565 SetFilePointer (mgr->idx, last << mgr->page_bits, NULL, FILE_BEGIN);
566 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
567 return bt_mgrclose (mgr), NULL;
568 if( *amt < mgr->page_size )
569 return bt_mgrclose (mgr), NULL;
577 VirtualFree (alloc, 0, MEM_RELEASE);
582 // open BTree access method
583 // based on buffer manager
// create a per-thread access handle over a shared manager.
// allocates three private page frames out of one contiguous buffer:
// frame (split scratch), zero (zero page for file extension), cursor.
585 BtDb *bt_open (BtMgr *mgr)
587 BtDb *bt = malloc (sizeof(*bt));
589 memset (bt, 0, sizeof(*bt));
// unix then Windows allocation variant
592 bt->mem = malloc (3 *mgr->page_size);
594 bt->mem = VirtualAlloc(NULL, 3 * mgr->page_size, MEM_COMMIT, PAGE_READWRITE);
596 bt->frame = (BtPage)bt->mem;
597 bt->zero = (BtPage)(bt->mem + 1 * mgr->page_size);
598 bt->cursor = (BtPage)(bt->mem + 2 * mgr->page_size);
600 memset (bt->zero, 0, mgr->page_size);
604 // compare two keys, returning > 0, = 0, or < 0
605 // as the comparison value
607 int keycmp (BtKey key1, unsigned char *key2, uint len2)
609 uint len1 = key1->len;
612 if( ans = memcmp (key1->key, key2, len1 > len2 ? len2 : len1) )
625 // wait until write lock mode is clear
626 // and add 1 to the share count
// acquire a shared (read) latch: spin taking the mutex bit, admit the
// reader only when no writer holds or awaits the latch, then bump share.
// unix (__sync_*) and Windows (_Interlocked*) variants both appear;
// the selecting #ifdef lines are not visible in this listing.
628 void bt_readlock(BtLatch *latch)
633 // obtain latch mutex
635 if( __sync_fetch_and_or((ushort *)latch, Mutex) & Mutex )
638 if( prev = _InterlockedOr16((ushort *)latch, Mutex) & Mutex )
641 // see if exclusive request is granted or pending
643 if( prev = !(latch->exclusive | latch->pending) )
645 __sync_fetch_and_add((ushort *)latch, Share);
647 _InterlockedExchangeAdd16 ((ushort *)latch, Share);
// drop the mutex bit; retry (yielding) if the read grant failed
651 __sync_fetch_and_and ((ushort *)latch, ~Mutex);
653 _InterlockedAnd16((ushort *)latch, ~Mutex);
659 } while( sched_yield(), 1 );
661 } while( SwitchToThread(), 1 );
665 // wait for other read and write latches to relinquish
// acquire an exclusive (write) latch: set Pending with the mutex so new
// readers stall, then take Write once all readers and writers are gone.
667 void bt_writelock(BtLatch *latch)
673 if( __sync_fetch_and_or((ushort *)latch, Mutex | Pending) & Mutex )
676 if( _InterlockedOr16((ushort *)latch, Mutex | Pending) & Mutex )
// grant only when no sharers and no other writer
679 if( prev = !(latch->share | latch->exclusive) )
681 __sync_fetch_and_or((ushort *)latch, Write);
683 _InterlockedOr16((ushort *)latch, Write);
// release mutex (and pending); retry with a yield if not granted
687 __sync_fetch_and_and ((ushort *)latch, ~(Mutex | Pending));
689 _InterlockedAnd16((ushort *)latch, ~(Mutex | Pending));
695 } while( sched_yield(), 1 );
697 } while( SwitchToThread(), 1 );
701 // try to obtain write lock
703 // return 1 if obtained,
// non-blocking write-latch attempt: returns 1 if the exclusive latch
// was obtained, 0 otherwise (also fails if the mutex bit is busy)
706 int bt_writetry(BtLatch *latch)
711 if( prev = __sync_fetch_and_or((ushort *)latch, Mutex), prev & Mutex )
714 if( prev = _InterlockedOr16((ushort *)latch, Mutex), prev & Mutex )
717 // take write access if all bits are clear
721 __sync_fetch_and_or ((ushort *)latch, Write);
723 _InterlockedOr16((ushort *)latch, Write);
// drop the mutex bit before returning
727 __sync_fetch_and_and ((ushort *)latch, ~Mutex);
729 _InterlockedAnd16((ushort *)latch, ~Mutex);
// release an exclusive latch by clearing the Write bit
// (unix __sync / Windows _Interlocked variants)
736 void bt_releasewrite(BtLatch *latch)
739 __sync_fetch_and_and ((ushort *)latch, ~Write);
741 _InterlockedAnd16((ushort *)latch, ~Write);
745 // decrement reader count
// release a shared latch by subtracting one Share unit
// from the latch word's reader count
747 void bt_releaseread(BtLatch *latch)
750 __sync_fetch_and_add((ushort *)latch, -Share);
752 _InterlockedExchangeAdd16 ((ushort *)latch, -Share);
758 // find segment in pool
759 // must be called with hashslot idx locked
760 // return NULL if not there
761 // otherwise return node
// look up the pool entry mapping page_no's segment under hash bucket idx.
// caller must hold the bucket latch. returns the entry, or NULL when the
// segment is not cached (NULL paths are in lines elided from this listing).
763 BtPool *bt_findpool(BtDb *bt, uid page_no, uint idx)
768 // compute start of hash chain in pool
770 if( slot = bt->mgr->hash[idx] )
771 pool = bt->mgr->pool + slot;
// reduce page_no to its segment base before chain comparison
775 page_no &= ~bt->mgr->poolmask;
777 while( pool->basepage != page_no )
778 if( pool = pool->hashnext )
786 // add segment to hash table
// insert a pool entry at the head of hash bucket idx.
// caller must hold the bucket's write latch.
788 void bt_linkhash(BtDb *bt, BtPool *pool, uid page_no, int idx)
793 pool->hashprev = pool->hashnext = NULL;
794 pool->basepage = page_no & ~bt->mgr->poolmask;
// push in front of the existing chain head, if any
797 if( slot = bt->mgr->hash[idx] ) {
798 node = bt->mgr->pool + slot;
799 pool->hashnext = node;
800 node->hashprev = pool;
803 bt->mgr->hash[idx] = pool->slot;
806 // find best segment to evict from buffer pool
// pick the best eviction victim on one hash chain: scan the bucket's
// entries and remember the qualifying node; returns NULL when the
// bucket is empty or nothing qualifies.
// NOTE(review): the lru/pin qualification test sits in lines elided
// from this listing — confirm against the full source.
808 BtPool *bt_findlru (BtDb *bt, uint hashslot)
810 unsigned long long int target = ~0LL;
811 BtPool *pool = NULL, *node;
816 node = bt->mgr->pool + hashslot;
818 // scan pool entries under hash table slot
823 if( node->lru > target )
827 } while( node = node->hashnext );
832 // map new buffer pool segment to virtual memory
// map the file segment containing page_no into pool->map.
// returns 0 on success or BTERR_map (also stored in bt->err).
// unix path uses mmap; Windows path uses CreateFileMapping/MapViewOfFile.
834 BTERR bt_mapsegment(BtDb *bt, BtPool *pool, uid page_no)
836 off64_t off = (page_no & ~bt->mgr->poolmask) << bt->mgr->page_bits;
837 off64_t limit = off + ((bt->mgr->poolmask+1) << bt->mgr->page_bits);
841 flag = PROT_READ | ( bt->mgr->mode == BT_ro ? 0 : PROT_WRITE );
842 pool->map = mmap (0, (bt->mgr->poolmask+1) << bt->mgr->page_bits, flag, MAP_SHARED, bt->mgr->idx, off);
843 if( pool->map == MAP_FAILED )
844 return bt->err = BTERR_map;
846 // clear out madvise issued bits
847 memset (bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8) / 8), 0, (bt->mgr->poolmask + 8)/8);
// Windows path: mapping object sized to the segment's end offset
849 flag = ( bt->mgr->mode == BT_ro ? PAGE_READONLY : PAGE_READWRITE );
850 pool->hmap = CreateFileMapping(bt->mgr->idx, NULL, flag, (DWORD)(limit >> 32), (DWORD)limit, NULL);
852 return bt->err = BTERR_map;
854 flag = ( bt->mgr->mode == BT_ro ? FILE_MAP_READ : FILE_MAP_WRITE );
855 pool->map = MapViewOfFile(pool->hmap, flag, (DWORD)(off >> 32), (DWORD)off, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
857 return bt->err = BTERR_map;
862 // find or place requested page in segment-pool
863 // return pool table entry, incrementing pin
// find (or create) the pool entry mapping page_no's segment and pin it.
// fast path: shared bucket latch + hash lookup. slow path: upgrade to the
// bucket write latch, re-check, then either claim a fresh pool slot or
// evict an unpinned LRU segment from some bucket and remap it.
// returns the pinned entry (error/NULL paths are in elided lines).
865 BtPool *bt_pinpage(BtDb *bt, uid page_no)
867 BtPool *pool, *node, *next;
868 uint slot, idx, victim;
870 // lock hash table chain
872 idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
873 bt_readlock (&bt->mgr->latch[idx]);
875 // look up in hash table
877 if( pool = bt_findpool(bt, page_no, idx) ) {
// pin before dropping the bucket latch so the entry can't be evicted
879 __sync_fetch_and_add(&pool->pin, 1);
881 _InterlockedIncrement (&pool->pin);
883 bt_releaseread (&bt->mgr->latch[idx]);
888 // upgrade to write lock
890 bt_releaseread (&bt->mgr->latch[idx]);
891 bt_writelock (&bt->mgr->latch[idx]);
893 // try to find page in pool with write lock
// another thread may have mapped the segment during the latch upgrade
895 if( pool = bt_findpool(bt, page_no, idx) ) {
897 __sync_fetch_and_add(&pool->pin, 1);
899 _InterlockedIncrement (&pool->pin);
901 bt_releasewrite (&bt->mgr->latch[idx]);
906 // allocate a new pool node
907 // and add to hash table
910 slot = __sync_fetch_and_add(&bt->mgr->poolcnt, 1);
912 slot = _InterlockedIncrement (&bt->mgr->poolcnt) - 1;
// slot zero is reserved, hence the pre-increment before the bound check
915 if( ++slot < bt->mgr->poolmax ) {
916 pool = bt->mgr->pool + slot;
919 if( bt_mapsegment(bt, pool, page_no) )
922 bt_linkhash(bt, pool, page_no, idx);
924 __sync_fetch_and_add(&pool->pin, 1);
926 _InterlockedIncrement (&pool->pin);
928 bt_releasewrite (&bt->mgr->latch[idx]);
932 // pool table is full
933 // find best pool entry to evict
// undo the speculative poolcnt bump taken above
936 __sync_fetch_and_add(&bt->mgr->poolcnt, -1);
938 _InterlockedDecrement (&bt->mgr->poolcnt);
// rotate through hash buckets starting after the last eviction point
943 victim = __sync_fetch_and_add(&bt->mgr->evicted, 1);
945 victim = _InterlockedIncrement (&bt->mgr->evicted) - 1;
947 victim %= bt->mgr->hashsize;
949 // try to get write lock
950 // skip entry if not obtained
952 if( !bt_writetry (&bt->mgr->latch[victim]) )
955 // if pool entry is empty
956 // or any pages are pinned
959 if( !(pool = bt_findlru(bt, bt->mgr->hash[victim])) ) {
960 bt_releasewrite (&bt->mgr->latch[victim]);
964 // unlink victim pool node from hash table
966 if( node = pool->hashprev )
967 node->hashnext = pool->hashnext;
968 else if( node = pool->hashnext )
969 bt->mgr->hash[victim] = node->slot;
971 bt->mgr->hash[victim] = 0;
973 if( node = pool->hashnext )
974 node->hashprev = pool->hashprev;
976 bt_releasewrite (&bt->mgr->latch[victim]);
978 // remove old file mapping
980 munmap (pool->map, (bt->mgr->poolmask+1) << bt->mgr->page_bits);
982 FlushViewOfFile(pool->map, 0);
983 UnmapViewOfFile(pool->map);
984 CloseHandle(pool->hmap);
988 // create new pool mapping
989 // and link into hash table
991 if( bt_mapsegment(bt, pool, page_no) )
994 bt_linkhash(bt, pool, page_no, idx);
996 __sync_fetch_and_add(&pool->pin, 1);
998 _InterlockedIncrement (&pool->pin);
1000 bt_releasewrite (&bt->mgr->latch[idx]);
1005 // place write, read, or parent lock on requested page_no.
1006 // pin to buffer pool and return page pointer
// pin page_no's segment, apply the requested latch (read/write/access/
// delete/parent), and return the in-map page pointer through pageptr.
// returns 0 or an error stored in bt->err (BTERR_lock for a bad mode).
1008 BTERR bt_lockpage(BtDb *bt, uid page_no, BtLock mode, BtPage *pageptr)
1014 // find/create maping in pool table
1015 // and pin our pool slot
1017 if( pool = bt_pinpage(bt, page_no) )
1018 subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping
1022 page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits));
// unix-only: issue madvise(WILLNEED) at most once per page per mapping,
// tracked by a bitmap slice in mgr->pooladvise
1025 uint idx = subpage / 8;
1026 uint bit = subpage % 8;
1028 if( ~((bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8)/8))[idx] >> bit) & 1 ) {
1029 madvise (page, bt->mgr->page_size, MADV_WILLNEED);
1030 (bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8)/8))[idx] |= 1 << bit;
// dispatch on lock mode (switch/case lines partly elided here)
1037 bt_readlock (page->latch->readwr);
1040 bt_writelock (page->latch->readwr);
1043 bt_readlock (page->latch->access);
1046 bt_writelock (page->latch->access);
1049 bt_writelock (page->latch->parent);
1052 return bt->err = BTERR_lock;
1060 // remove write, read, or parent lock on requested page
// release the given latch mode on page_no and unpin its pool entry.
// the page was pinned by the matching bt_lockpage, so its segment is
// still resident; returns 0, BTERR_hash, or BTERR_lock via bt->err.
1062 BTERR bt_unlockpage(BtDb *bt, uid page_no, BtLock mode)
1068 // since page is pinned
1069 // it should still be in the buffer pool
1070 // and is in no danger of being a victim for reuse
1072 idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
1073 bt_readlock (&bt->mgr->latch[idx]);
1075 if( pool = bt_findpool(bt, page_no, idx) )
1076 subpage = (uint)(page_no & bt->mgr->poolmask);
1078 return bt->err = BTERR_hash;
1080 bt_releaseread (&bt->mgr->latch[idx]);
1081 page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits));
// release the latch matching the mode taken in bt_lockpage
1085 bt_releaseread (page->latch->readwr);
1088 bt_releasewrite (page->latch->readwr);
1091 bt_releaseread (page->latch->access);
1094 bt_releasewrite (page->latch->access);
1097 bt_releasewrite (page->latch->parent);
1100 return bt->err = BTERR_lock;
// drop this thread's pin on the segment
1104 __sync_fetch_and_add(&pool->pin, -1);
1106 _InterlockedDecrement (&pool->pin);
1111 // deallocate a deleted page
1112 // place on free chain out of allocator page
// return a deleted page to the free chain headed on the alloc page.
// lock order: Delete then Write on the dead page, then Write on the
// ALLOC page; chain head lives in the second alloc page's right field.
1114 BTERR bt_freepage(BtDb *bt, uid page_no)
1116 // obtain delete lock on deleted page
1118 if( bt_lockpage(bt, page_no, BtLockDelete, NULL) )
1121 // obtain write lock on deleted page
1123 if( bt_lockpage(bt, page_no, BtLockWrite, &bt->temp) )
1126 // lock allocation page
1128 if ( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) )
1131 // store chain in second right
1132 bt_putid(bt->temp->right, bt_getid(bt->alloc[1].right));
1133 bt_putid(bt->alloc[1].right, page_no);
// unwind the three locks in reverse acquisition order
1137 if( bt_unlockpage(bt, ALLOC_page, BtLockWrite) )
1140 // remove write lock on deleted node
1142 if( bt_unlockpage(bt, page_no, BtLockWrite) )
1145 // remove delete lock on deleted node
1147 if( bt_unlockpage(bt, page_no, BtLockDelete) )
1153 // allocate a new page and write page into it
// allocate a page number (reusing the free chain when possible, else
// extending the file via the alloc page counter), copy the given page
// image into it, and return the new page number (0 on error).
1155 uid bt_newpage(BtDb *bt, BtPage page)
1164 if( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) )
1167 // use empty chain first
1168 // else allocate empty page
1170 if( new_page = bt_getid(bt->alloc[1].right) ) {
// pop the head of the free chain
1171 if( bt_lockpage (bt, new_page, BtLockWrite, &bt->temp) )
1173 bt_putid(bt->alloc[1].right, bt_getid(bt->temp->right));
1174 if( bt_unlockpage (bt, new_page, BtLockWrite) )
// no free page: take the next-page counter from the alloc page
1178 new_page = bt_getid(bt->alloc->right);
1179 bt_putid(bt->alloc->right, new_page+1);
1184 // if writing first page of pool block
1185 // expand file thru last page in the block
1187 if( !reuse && (new_page & bt->mgr->poolmask) == 0 )
1188 if( pwrite(bt->mgr->idx, bt->zero, bt->mgr->page_size, (new_page | bt->mgr->poolmask) << bt->mgr->page_bits) < bt->mgr->page_size )
1189 return bt->err = BTERR_wrt, 0;
1191 // unlock page allocation page
1193 if( bt_unlockpage(bt, ALLOC_page, BtLockWrite) )
1196 // bring new page into pool and copy page.
1197 // on Windows, this will extend the file into the new page.
1199 if( bt_lockpage(bt, new_page, BtLockWrite, &pmap) )
1202 // copy source page but leave latch area intact
1204 memcpy((char *)pmap + sizeof(BtLatchSet), (char *)page + sizeof(BtLatchSet), bt->mgr->page_size - sizeof(BtLatchSet));
1206 if( bt_unlockpage (bt, new_page, BtLockWrite) )
1212 // find slot in page for given key at a given level
// binary-search bt->page for the first slot whose key is >= the given
// key. returns that slot, or 0 when the key belongs on the right-link
// page (past this page's fence key).
1214 int bt_findslot (BtDb *bt, unsigned char *key, uint len)
1216 uint diff, higher = bt->page->cnt, low = 1, slot;
1219 // make stopper key an infinite fence value
// only the right-most page on a level has a zero right link; its fence
// compares as +infinity (handling elided here)
1221 if( bt_getid (bt->page->right) )
1226 // low is the next candidate, higher is already
1227 // tested as .ge. the given key, loop ends when they meet
1229 while( diff = higher - low ) {
1230 slot = low + ( diff >> 1 );
1231 if( keycmp (keyptr(bt->page, slot), key, len) < 0 )
1234 higher = slot, good++;
1237 // return zero if key is on right link page
1239 return good ? higher : 0;
1242 // find and load page at given level for given key
1243 // leave page rd or wr locked as requested
// descend from the root to the page at the requested level for the
// given key, using latch-coupling (Access latch bridges each hop) so
// concurrent splits/deletes are safe. leaves the target page latched
// in the requested mode and returns the slot, or 0 with bt->err set.
1245 int bt_loadpage (BtDb *bt, unsigned char *key, uint len, uint lvl, uint lock)
1247 uid page_no = ROOT_page, prevpage = 0;
1248 uint drill = 0xff, slot;
1249 uint mode, prevmode;
1251 // start at root of btree and drill down
1254 // determine lock mode of drill level
// only the target level gets the caller's write latch; all upper
// levels are read-latched
1255 mode = (lock == BtLockWrite) && (drill == lvl) ? BtLockWrite : BtLockRead;
1257 bt->page_no = page_no;
1259 // obtain access lock using lock chaining with Access mode
1261 if( page_no > ROOT_page )
1262 if( bt_lockpage(bt, page_no, BtLockAccess, NULL) )
// release the previous page only after the Access latch is held
1266 if( bt_unlockpage(bt, prevpage, prevmode) )
1269 // obtain read lock using lock chaining
1270 // and pin page contents
1272 if( bt_lockpage(bt, page_no, mode, &bt->page) )
1275 if( page_no > ROOT_page )
1276 if( bt_unlockpage(bt, page_no, BtLockAccess) )
1279 // re-read and re-lock root after determining actual level of root
1281 if( bt->page->lvl != drill) {
1282 if ( bt->page_no != ROOT_page )
1283 return bt->err = BTERR_struct, 0;
1285 drill = bt->page->lvl;
// root needed a write latch but was read-latched first: restart it
1287 if( lock == BtLockWrite && drill == lvl )
1288 if( bt_unlockpage(bt, page_no, mode) )
1294 // find key on page at this level
1295 // and descend to requested level
1297 if( !bt->page->kill && (slot = bt_findslot (bt, key, len)) ) {
// skip dead slots; fall through to the right sibling past the last slot
1301 while( slotptr(bt->page, slot)->dead )
1302 if( slot++ < bt->page->cnt )
1305 page_no = bt_getid(bt->page->right);
1309 page_no = bt_getid(slotptr(bt->page, slot)->id);
1313 // or slide right into next page
1314 // (slide left from deleted page)
1317 page_no = bt_getid(bt->page->right);
1319 // continue down / right using overlapping locks
1320 // to protect pages being killed or split.
1323 prevpage = bt->page_no;
1327 // return error on end of right chain
1329 bt->err = BTERR_struct;
1330 return 0; // return error
1333 // find and delete key on page by marking delete flag bit
1334 // when page becomes empty, delete it
// mark the key dead on its page at the given level; when the page
// becomes empty (and has a right sibling), merge the sibling's contents
// into it and recursively fix the parent level: delete the old fence
// key, re-point the sibling's fence at this page, then free the sibling.
1336 BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl)
1338 unsigned char lowerkey[256], higherkey[256];
1343 if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) )
1344 ptr = keyptr(bt->page, slot);
1348 // if key is found delete it, otherwise ignore request
1350 if( !keycmp (ptr, key, len) )
1351 if( slotptr(bt->page, slot)->dead == 0 ) {
1352 slotptr(bt->page,slot)->dead = 1;
// the fence key (last slot) is never marked dirty for cleanup
1353 if( slot < bt->page->cnt )
1354 bt->page->dirty = 1;
1358 // return if page is not empty, or it has no right sibling
1360 right = bt_getid(bt->page->right);
1361 page_no = bt->page_no;
1363 if( !right || bt->page->act )
1364 return bt_unlockpage(bt, page_no, BtLockWrite);
1366 // obtain Parent lock over write lock
1368 if( bt_lockpage(bt, page_no, BtLockParent, NULL) )
1371 // keep copy of key to delete
// lowerkey/higherkey hold length-prefixed copies (len byte + bytes)
1373 ptr = keyptr(bt->page, bt->page->cnt);
1374 memcpy(lowerkey, ptr, ptr->len + 1);
1376 // lock and map right page
1378 if ( bt_lockpage(bt, right, BtLockWrite, &bt->temp) )
1381 // pull contents of next page into current empty page
1382 memcpy((char *)bt->page + sizeof(BtLatchSet), (char *)bt->temp + sizeof(BtLatchSet), bt->mgr->page_size - sizeof(BtLatchSet));
1384 // keep copy of key to update
1385 ptr = keyptr(bt->temp, bt->temp->cnt);
1386 memcpy(higherkey, ptr, ptr->len + 1);
1388 // Mark right page as deleted and point it to left page
1389 // until we can post updates at higher level.
1391 bt_putid(bt->temp->right, page_no);
1395 if( bt_unlockpage(bt, right, BtLockWrite) )
1397 if( bt_unlockpage(bt, page_no, BtLockWrite) )
1400 // delete old lower key to consolidated node
1402 if( bt_deletekey (bt, lowerkey + 1, *lowerkey, lvl + 1) )
1405 // redirect higher key directly to consolidated node
1407 tod = (uint)time(NULL);
1409 if( bt_insertkey (bt, higherkey+1, *higherkey, lvl + 1, page_no, tod) )
1412 // obtain write lock and
1413 // add right block to free chain
1415 if( bt_freepage (bt, right) )
1418 // remove ParentModify lock
1420 if( bt_unlockpage(bt, page_no, BtLockParent) )
1426 // find key in leaf level and return row-id
// look up key at the leaf level and return its row-id (0 if absent).
// takes only a read latch on the leaf, released before returning.
1428 uid bt_findkey (BtDb *bt, unsigned char *key, uint len)
1434 if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) )
1435 ptr = keyptr(bt->page, slot);
1439 // if key exists, return row-id
1440 // otherwise return 0
// exact match required: same length and same bytes
1442 if( ptr->len == len && !memcmp (ptr->key, key, len) )
1443 id = bt_getid(slotptr(bt->page,slot)->id);
1447 if ( bt_unlockpage(bt, bt->page_no, BtLockRead) )
1453 // check page for space available,
1454 // clean if necessary and return
1455 // 0 - page needs splitting
// ensure bt->page has room for a new key of size amt: if the slot array
// and key area would not collide, return nonzero immediately; otherwise
// rebuild the page in bt->frame dropping dead slots (fence excepted) and
// re-test. returns 0 when the page still must be split.
1458 uint bt_cleanpage(BtDb *bt, uint amt)
1460 uint nxt = bt->mgr->page_size;
1461 BtPage page = bt->page;
1462 uint cnt = 0, idx = 0;
1463 uint max = page->cnt;
// low end grows by slots, high end by key text: fits if min clears both
1466 if( page->min >= (page->cnt+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1469 // skip cleanup if nothing to reclaim
1474 memcpy (bt->frame, page, bt->mgr->page_size);
1476 // skip page info and set rest of page to zero
1478 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
// repack surviving keys from the saved copy back onto the page
1482 while( cnt++ < max ) {
1483 // always leave fence key in list
1484 if( cnt < max && slotptr(bt->frame,cnt)->dead )
1488 key = keyptr(bt->frame, cnt);
1489 nxt -= key->len + 1;
1490 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1493 memcpy(slotptr(page, ++idx)->id, slotptr(bt->frame, cnt)->id, BtId);
1494 if( !(slotptr(page, idx)->dead = slotptr(bt->frame, cnt)->dead) )
1496 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1497 slotptr(page, idx)->off = nxt;
// re-check available space after compaction
1502 if( page->min >= (page->cnt+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 )
1508 // split the root and raise the height of the btree
// raise the tree height: copy the old root into a fresh page, then
// rebuild the root with exactly two keys — newkey -> copied page and
// oldkey (old fence) -> page_no2 (the split's right page). keys arrive
// length-prefixed (len byte followed by bytes).
1510 BTERR bt_splitroot(BtDb *bt, unsigned char *newkey, unsigned char *oldkey, uid page_no2)
1512 uint nxt = bt->mgr->page_size;
1513 BtPage root = bt->page;
1516 // Obtain an empty page to use, and copy the current
1517 // root contents into it
1519 if( !(new_page = bt_newpage(bt, root)) )
1522 // preserve the page info at the bottom
1523 // and set rest to zero
1525 memset(root+1, 0, bt->mgr->page_size - sizeof(*root));
1527 // insert first key on newroot page
1530 memcpy ((unsigned char *)root + nxt, newkey, *newkey + 1);
1531 bt_putid(slotptr(root, 1)->id, new_page);
1532 slotptr(root, 1)->off = nxt;
1534 // insert second key on newroot page
1535 // and increase the root height
1538 memcpy ((unsigned char *)root + nxt, oldkey, *oldkey + 1);
1539 bt_putid(slotptr(root, 2)->id, page_no2);
1540 slotptr(root, 2)->off = nxt;
// the root never has a right sibling
1542 bt_putid(root->right, 0);
1543 root->min = nxt; // reset lowest used offset and key count
1548 // release root (bt->page)
1550 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1553 // split already locked full node
1556 BTERR bt_splitpage (BtDb *bt)
// Split the already write-locked full page at bt->page_no: move the
// upper half of its keys to a newly allocated right sibling, keep the
// lower half in place, then post both fence keys at the parent level.
// For the root, delegates to bt_splitroot instead.
// NOTE(review): lines between the original line numbers are elided,
// including some loop/branch bodies and error returns.
1558 uint cnt = 0, idx = 0, max, nxt = bt->mgr->page_size;
1559 unsigned char oldkey[256], lowerkey[256];
1560 uid page_no = bt->page_no, right;
1561 BtPage page = bt->page;
1562 uint lvl = page->lvl;
1567 // split higher half of keys to bt->frame
1568 // the last key (fence key) might be dead
1570 tod = (uint)time(NULL);
1572 memset (bt->frame, 0, bt->mgr->page_size);
1573 max = (int)page->cnt;
// copy keys into bt->frame (the starting value of cnt for the upper
// half is set on an elided line)
1577 while( cnt++ < max ) {
1578 key = keyptr(page, cnt);
1579 nxt -= key->len + 1;
1580 memcpy ((unsigned char *)bt->frame + nxt, key, key->len + 1);
1581 memcpy(slotptr(bt->frame,++idx)->id, slotptr(page,cnt)->id, BtId);
1582 if( !(slotptr(bt->frame, idx)->dead = slotptr(page, cnt)->dead) )
1584 slotptr(bt->frame, idx)->tod = slotptr(page, cnt)->tod;
1585 slotptr(bt->frame, idx)->off = nxt;
1588 // remember existing fence key for new page to the right
1590 memcpy (oldkey, key, key->len + 1);
// finish the new right sibling's header
1592 bt->frame->bits = bt->mgr->page_bits;
1593 bt->frame->min = nxt;
1594 bt->frame->cnt = idx;
1595 bt->frame->lvl = lvl;
// carry over the old page's right-sibling link, except for the root
1599 if( page_no > ROOT_page ) {
1600 right = bt_getid (page->right);
1601 bt_putid(bt->frame->right, right);
1604 // get new free page and write frame to it.
1606 if( !(new_page = bt_newpage(bt, bt->frame)) )
1609 // update lower keys to continue in old page
// re-snapshot the full old page, then rebuild its lower half in place
1611 memcpy (bt->frame, page, bt->mgr->page_size);
1612 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1613 nxt = bt->mgr->page_size;
1618 // assemble page of smaller keys
1619 // (they're all active keys)
1621 while( cnt++ < max / 2 ) {
1622 key = keyptr(bt->frame, cnt);
1623 nxt -= key->len + 1;
1624 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1625 memcpy(slotptr(page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId);
1626 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1627 slotptr(page, idx)->off = nxt;
1631 // remember fence key for old page
1633 memcpy(lowerkey, key, key->len + 1);
// link the trimmed old page to its new right sibling
1634 bt_putid(page->right, new_page);
1638 // if current page is the root page, split it
1640 if( page_no == ROOT_page )
1641 return bt_splitroot (bt, lowerkey, oldkey, new_page);
1643 // obtain Parent/Write locks
1644 // for left and right node pages
1646 if( bt_lockpage (bt, new_page, BtLockParent, NULL) )
1649 if( bt_lockpage (bt, page_no, BtLockParent, NULL) )
1652 // release wr lock on left page
1654 if( bt_unlockpage (bt, page_no, BtLockWrite) )
1657 // insert new fence for reformulated left block
// keys are length-prefixed: skip the length byte, post at level lvl+1
1659 if( bt_insertkey (bt, lowerkey+1, *lowerkey, lvl + 1, page_no, tod) )
1662 // fix old fence for newly allocated right block page
1664 if( bt_insertkey (bt, oldkey+1, *oldkey, lvl + 1, new_page, tod) )
1667 // release Parent & Write locks
1669 if( bt_unlockpage (bt, new_page, BtLockParent) )
1672 if( bt_unlockpage (bt, page_no, BtLockParent) )
1678 // Insert new key into the btree at requested level.
1679 // Level zero pages are leaf pages and are unlocked at exit.
1680 // Interior pages remain locked.
1682 BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod)
// Insert (key,len) -> id at the requested level with timestamp tod.
// If the key already exists its id/tod are updated in place; otherwise
// the page is cleaned or split until the new key fits, then the slot
// array is shifted and the key added before `slot`.
// NOTE(review): lines between the original line numbers are elided
// (the retry loop around cleanpage/splitpage is not fully visible).
// find the target page at lvl and write-lock it
1689 if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) )
1690 ptr = keyptr(bt->page, slot);
1694 bt->err = BTERR_ovflw;
1698 // if key already exists, update id and return
1702 if( !keycmp (ptr, key, len) ) {
1703 slotptr(page, slot)->dead = 0;
1704 slotptr(page, slot)->tod = tod;
1705 bt_putid(slotptr(page,slot)->id, id);
1706 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1709 // check if page has enough space
1711 if( bt_cleanpage (bt, len) )
1714 if( bt_splitpage (bt) )
1718 // calculate next available slot and copy key into page
// store the key length-prefixed in the key heap (grows downward)
1720 page->min -= len + 1; // reset lowest used offset
1721 ((unsigned char *)page)[page->min] = len;
1722 memcpy ((unsigned char *)page + page->min +1, key, len );
// look for a dead slot at or after the insertion point to reuse
1724 for( idx = slot; idx < page->cnt; idx++ )
1725 if( slotptr(page, idx)->dead )
1728 // now insert key into array before slot
1729 // preserving the fence slot
1731 if( idx == page->cnt )
// shift slots right to open a hole at `slot`
1737 *slotptr(page, idx) = *slotptr(page, idx -1), idx--;
// fill in the new slot
1739 bt_putid(slotptr(page,slot)->id, id);
1740 slotptr(page, slot)->off = page->min;
1741 slotptr(page, slot)->tod = tod;
1742 slotptr(page, slot)->dead = 0;
1744 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1747 // cache page of keys into cursor and return starting slot for given key
1749 uint bt_startkey (BtDb *bt, unsigned char *key, uint len)
// Position the cursor: load the leaf page containing key, copy it into
// bt->cursor, and return the starting slot (0 on error — per the
// elided return path).
// NOTE(review): lines between the original line numbers are elided.
1753 // cache page for retrieval
1754 if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) )
1755 memcpy (bt->cursor, bt->page, bt->mgr->page_size);
// remember which page the cursor copy came from, then drop the lock
1756 bt->cursor_page = bt->page_no;
1757 if ( bt_unlockpage(bt, bt->page_no, BtLockRead) )
1763 // return next slot for cursor page
1764 // or slide cursor right into next page
1766 uint bt_nextkey (BtDb *bt, uint slot)
// Advance the cursor: return the next live slot on the cached cursor
// page, or slide right into the next sibling page when this one is
// exhausted.
// NOTE(review): lines between the original line numbers are elided
// (loop body and end-of-scan return are not fully visible).
1771 right = bt_getid(bt->cursor->right);
// scan forward on the cached page, skipping dead slots
1772 while( slot++ < bt->cursor->cnt )
1773 if( slotptr(bt->cursor,slot)->dead )
1775 else if( right || (slot < bt->cursor->cnt))
// move to the right sibling: lock, re-cache, unlock
1783 bt->cursor_page = right;
1785 if( bt_lockpage(bt, right, BtLockRead, &bt->page) )
1788 memcpy (bt->cursor, bt->page, bt->mgr->page_size);
1790 if ( bt_unlockpage(bt, right, BtLockRead) )
// return the key stored at the given slot of the cached cursor page
1799 BtKey bt_key(BtDb *bt, uint slot)
1801 return keyptr(bt->cursor, slot);
// return the row/page id stored at the given slot of the cursor page
1804 uid bt_uid(BtDb *bt, uint slot)
1806 return bt_getid(slotptr(bt->cursor,slot)->id);
// return the timestamp stored at the given slot of the cursor page
1809 uint bt_tod(BtDb *bt, uint slot)
1811 return slotptr(bt->cursor,slot)->tod;
1824 // standalone program to index a file of keys
1825 // then list them onto stdout
// Per-thread worker: runs one operation (Write/Delete/Find/Scan/Count,
// selected by args->type) against the shared BtMgr using keys read
// line-by-line from args->infile.
// NOTE(review): the two signatures below are the unix (pthread) and
// Windows (_beginthreadex) variants — the surrounding #ifdef lines are
// elided in this excerpt, as are the switch/case labels and braces.
1828 void *index_file (void *arg)
1830 uint __stdcall index_file (void *arg)
1833 int line = 0, found = 0, cnt = 0;
1834 uid next, page_no = LEAF_page; // start on first page of leaves
1835 unsigned char key[256];
1836 ThreadArg *args = arg;
1837 int ch, len = 0, slot;
// each thread opens its own BtDb handle over the shared manager
1844 bt = bt_open (args->mgr);
// force the command letter to lower case
1847 switch(args->type | 0x20)
// --- 'w': insert each input line as a key ---
1850 fprintf(stderr, "started indexing for %s\n", args->infile);
1851 if( in = fopen (args->infile, "rb") )
1852 while( ch = getc(in), ch != EOF )
// optionally append a 9-digit line number to make keys unique
1857 if( args->num == 1 )
1858 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
1859 else if( args->num )
1860 sprintf((char *)key+len, "%.9d", line+args->idx * args->num), len += 9;
1862 if( bt_insertkey (bt, key, len, 0, line, *tod) )
1863 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
// accumulate key bytes until newline, capped below 256
1866 else if( len < 255 )
1868 fprintf(stderr, "finished %s for %d keys\n", args->infile, line);
// --- 'd': delete each input line's key ---
1872 fprintf(stderr, "started deleting keys for %s\n", args->infile);
1873 if( in = fopen (args->infile, "rb") )
1874 while( ch = getc(in), ch != EOF )
1878 if( args->num == 1 )
1879 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
1880 else if( args->num )
1881 sprintf((char *)key+len, "%.9d", line+args->idx * args->num), len += 9;
1883 if( bt_deletekey (bt, key, len, 0) )
1884 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
1887 else if( len < 255 )
1889 fprintf(stderr, "finished %s for keys, %d \n", args->infile, line);
// --- 'f': look up each input line's key, counting hits ---
1893 fprintf(stderr, "started finding keys for %s\n", args->infile);
1894 if( in = fopen (args->infile, "rb") )
1895 while( ch = getc(in), ch != EOF )
1899 if( args->num == 1 )
1900 sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9;
1901 else if( args->num )
1902 sprintf((char *)key+len, "%.9d", line+args->idx * args->num), len += 9;
1904 if( bt_findkey (bt, key, len) )
1907 fprintf(stderr, "Error %d Syserr %d Line: %d\n", bt->err, errno, line), exit(0);
1910 else if( len < 255 )
1912 fprintf(stderr, "finished %s for %d keys, found %d\n", args->infile, line, found);
// --- 's': scan all keys in order via the cursor API ---
1918 fprintf(stderr, "started reading\n");
1920 if( slot = bt_startkey (bt, key, len) )
1923 fprintf(stderr, "Error %d in StartKey. Syserror: %d\n", bt->err, errno), exit(0);
1925 while( slot = bt_nextkey (bt, slot) ) {
1926 ptr = bt_key(bt, slot);
1927 fwrite (ptr->key, ptr->len, 1, stdout);
1928 fputc ('\n', stdout);
// --- 'c': count keys by walking the leaf chain via right links ---
1934 fprintf(stderr, "started reading\n");
1937 bt_lockpage (bt, page_no, BtLockRead, &page);
1939 next = bt_getid (page->right);
1940 bt_unlockpage (bt, page_no, BtLockRead);
1941 } while( page_no = next );
1943 cnt--; // remove stopper key
1944 fprintf(stderr, " Total keys read %d\n", cnt);
1956 typedef struct timeval timer;
1958 int main (int argc, char **argv)
// Driver: parse command-line options, open the B-tree manager, launch
// one worker thread per source file, wait for them, and report timing.
// NOTE(review): unix (pthread/gettimeofday) and Windows
// (_beginthreadex/WaitForMultipleObjects) code paths are interleaved
// here because the #if/#else guard lines are elided in this excerpt.
1960 int idx, cnt, len, slot, err;
1961 int segsize, bits = 16;
// NOTE(review): declared time_t for one platform path; the unix path
// presumably uses struct timeval for gettimeofday — guards are elided.
1966 time_t start[1], stop[1];
1979 fprintf (stderr, "Usage: %s idx_file Read/Write/Scan/Delete/Find [page_bits mapped_segments seg_bits line_numbers src_file1 src_file2 ... ]\n", argv[0]);
1980 fprintf (stderr, " where page_bits is the page size in bits\n");
1981 fprintf (stderr, " mapped_segments is the number of mmap segments in buffer pool\n");
1982 fprintf (stderr, " seg_bits is the size of individual segments in buffer pool in pages in bits\n");
1983 fprintf (stderr, " line_numbers set to 1 to append line numbers to input lines\n");
1984 fprintf (stderr, " src_file1 thru src_filen are files of keys separated by newline\n");
// start the wall-clock timer (unix path)
1989 gettimeofday(&start, NULL);
// optional positional arguments, each guarded by argc checks (elided)
1995 bits = atoi(argv[3]);
1998 poolsize = atoi(argv[4]);
2001 fprintf (stderr, "Warning: mapped_pool has no segments\n");
2003 if( poolsize > 65535 )
2004 fprintf (stderr, "Warning: mapped_pool > 65535 segments\n");
2007 segsize = atoi(argv[5]);
2009 segsize = 4; // 16 pages per mmap segment
2012 num = atoi(argv[6]);
// one thread per remaining src_file argument (cnt set on elided line)
2016 threads = malloc (cnt * sizeof(pthread_t));
2018 threads = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, cnt * sizeof(HANDLE));
2020 args = malloc (cnt * sizeof(ThreadArg));
2022 mgr = bt_mgr ((argv[1]), BT_rw, bits, poolsize, segsize, poolsize / 8);
2025 fprintf(stderr, "Index Open Error %s\n", argv[1]);
// fire off the threads, one per input file
2031 for( idx = 0; idx < cnt; idx++ ) {
2032 args[idx].infile = argv[idx + 7];
2033 args[idx].type = argv[2][0];
2034 args[idx].mgr = mgr;
2035 args[idx].num = num;
2036 args[idx].idx = idx;
2038 if( err = pthread_create (threads + idx, NULL, index_file, args + idx) )
2039 fprintf(stderr, "Error creating thread %d\n", err);
2041 threads[idx] = (HANDLE)_beginthreadex(NULL, 65536, index_file, args + idx, 0, NULL);
2045 // wait for termination
// unix: join each thread, then compute elapsed milliseconds
2048 for( idx = 0; idx < cnt; idx++ )
2049 pthread_join (threads[idx], NULL);
2050 gettimeofday(&stop, NULL);
2051 real_time = 1000.0 * ( stop.tv_sec - start.tv_sec ) + 0.001 * (stop.tv_usec - start.tv_usec );
// Windows: wait on all handles, close them, use time_t difference
2053 WaitForMultipleObjects (cnt, threads, TRUE, INFINITE);
2055 for( idx = 0; idx < cnt; idx++ )
2056 CloseHandle(threads[idx]);
2059 real_time = 1000 * (*stop - *start);
2061 fprintf(stderr, " Time to complete: %.2f seconds\n", real_time/1000);