4 // author: karl malbrain, malbrain@cal.berkeley.edu
7 This work, including the source code, documentation
8 and related data, is placed into the public domain.
10 The original author is Karl Malbrain.
12 THIS SOFTWARE IS PROVIDED AS-IS WITHOUT WARRANTY
13 OF ANY KIND, NOT EVEN THE IMPLIED WARRANTY OF
14 MERCHANTABILITY. THE AUTHOR OF THIS SOFTWARE,
15 ASSUMES _NO_ RESPONSIBILITY FOR ANY CONSEQUENCE
16 RESULTING FROM THE USE, MODIFICATION, OR
17 REDISTRIBUTION OF THIS SOFTWARE.
20 // Please see the project home page for documentation
21 // code.google.com/p/high-concurrency-btree
23 #define _FILE_OFFSET_BITS 64
24 #define _LARGEFILE64_SOURCE
40 #define WIN32_LEAN_AND_MEAN
52 typedef unsigned long long uid;
55 typedef unsigned long long off64_t;
56 typedef unsigned short ushort;
57 typedef unsigned int uint;
60 #define BT_ro 0x6f72 // ro
61 #define BT_rw 0x7772 // rw
63 #define BT_maxbits 24 // maximum page size in bits
64 #define BT_minbits 9 // minimum page size in bits
65 #define BT_minpage (1 << BT_minbits) // minimum page size
66 #define BT_maxpage (1 << BT_maxbits) // maximum page size
69 There are five lock types for each node in three independent sets:
70 1. (set 1) AccessIntent: Sharable. Going to Read the node. Incompatible with NodeDelete.
71 2. (set 1) NodeDelete: Exclusive. About to release the node. Incompatible with AccessIntent.
72 3. (set 2) ReadLock: Sharable. Read the node. Incompatible with WriteLock.
73 4. (set 2) WriteLock: Exclusive. Modify the node. Incompatible with ReadLock and other WriteLocks.
74 5. (set 3) ParentLock: Exclusive. Have parent adopt/delete maximum foster child from the node.
85 // Define the length of the page and key pointers
89 // Page key slot definition.
91 // If BT_maxbits is 15 or less, you can save 4 bytes
92 // for each key stored by making the first two uints
93 // into ushorts. You can also save 4 bytes by removing
94 // the tod field from the key.
96 // Keys are marked dead, but remain on the page until
97 cleanup is called. The fence key (highest key) for
98 // the page is always present, even after cleanup.
101 uint off:BT_maxbits; // page offset for key start
102 uint dead:1; // set for deleted key
103 uint tod; // time-stamp for key
104 unsigned char id[BtId]; // id associated with key
107 // The key structure occupies space at the upper end of
108 // each page. It's a length byte followed by the value
113 unsigned char key[1];
116 // The first part of an index page.
117 // It is immediately followed
118 // by the BtSlot array of keys.
120 typedef struct Page {
121 uint cnt; // count of keys in page
122 uint act; // count of active keys
123 uint min; // next key offset
124 uint foster; // count of foster children
125 unsigned char bits:7; // page size in bits
126 unsigned char kill:1; // page is being deleted
127 unsigned char lvl; // level of page
128 unsigned char right[BtId]; // page number to right
131 // mode & definition for latch table implementation
138 // latch table lock structure
140 // mode is set for write access
141 // share is count of read accessors
142 // grant write lock when share == 0
150 BtLatch readwr[1]; // read/write page lock
151 BtLatch access[1]; // Access Intent/Page delete
152 BtLatch parent[1]; // adoption of foster children
155 // The memory mapping hash table buffer manager entry
158 unsigned long long int lru; // number of times accessed
159 uid basepage; // mapped base page number
160 char *map; // mapped memory pointer
161 uint pin; // mapped page pin counter
162 uint slot; // slot index in this array
163 void *hashprev; // previous cache block for the same hash idx
164 void *hashnext; // next cache block for the same hash idx
168 // array of page latch sets, one for each page in map segment
169 BtLatchSet pagelatch[0];
172 // The object structure for Btree access
175 uint page_size; // page size
176 uint page_bits; // page size in bits
177 uint seg_bits; // seg size in pages in bits
178 uint mode; // read-write mode
184 uint nodecnt; // highest page cache node in use
185 uint nodemax; // highest page cache node allocated
186 uint hashmask; // number of pages in mmap segment
187 uint hashsize; // size of Hash Table
188 uint evicted; // last evicted hash slot
189 ushort *cache; // hash index for memory pool
190 BtLatch *latch; // latches for hash table slots
191 char *nodes; // memory pool page hash nodes
195 BtMgr *mgr; // buffer manager for thread
196 BtPage temp; // temporary frame buffer (memory mapped/file IO)
197 BtPage alloc; // frame buffer for alloc page ( page 0 )
198 BtPage cursor; // cached frame for start/next (never mapped)
199 BtPage frame; // spare frame for the page split (never mapped)
200 BtPage zero; // page frame for zeroes at end of file
201 BtPage page; // current page
202 uid page_no; // current page number
203 uid cursor_page; // current cursor page number
204 unsigned char *mem; // frame, cursor, page memory buffer
205 int err; // last error
220 extern void bt_close (BtDb *bt);
221 extern BtDb *bt_open (BtMgr *mgr);
222 extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uid id, uint tod);
223 extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl);
224 extern uid bt_findkey (BtDb *bt, unsigned char *key, uint len);
225 extern uint bt_startkey (BtDb *bt, unsigned char *key, uint len);
226 extern uint bt_nextkey (BtDb *bt, uint slot);
229 extern BtMgr *bt_mgr (char *name, uint mode, uint bits, uint cacheblk, uint segsize, uint hashsize);
230 void bt_mgrclose (BtMgr *mgr);
232 // Helper functions to return cursor slot values
234 extern BtKey bt_key (BtDb *bt, uint slot);
235 extern uid bt_uid (BtDb *bt, uint slot);
236 extern uint bt_tod (BtDb *bt, uint slot);
238 // BTree page number constants
242 // Number of levels to create in a new BTree
246 // The page is allocated from low and hi ends.
247 // The key offsets and row-id's are allocated
248 // from the bottom, while the text of the key
249 // is allocated from the top. When the two
250 // areas meet, the page is split into two.
252 // A key consists of a length byte, two bytes of
253 // index number (0 - 65534), and up to 253 bytes
254 // of key value. Duplicate keys are discarded.
255 // Associated with each key is a 48 bit row-id.
257 // The b-tree root is always located at page 1.
258 // The first leaf page of level zero is always
259 // located on page 2.
261 //  When the root page fills, it is split in two and
262 // the tree height is raised by a new root at page
263 // one with two keys.
265 // Deleted keys are marked with a dead bit until
266 //  page cleanup. The fence key for a node is always
267 // present, even after deletion and cleanup.
269 // Groups of pages called segments from the btree are
270 // cached with memory mapping. A hash table is used to keep
271 // track of the cached segments. This behaviour is controlled
272 // by the cache block size parameter to bt_open.
274 // To achieve maximum concurrency one page is locked at a time
275 //  as the tree is traversed to find the leaf key in question.
277 // An adoption traversal leaves the parent node locked as the
278 //  tree is traversed to the level in question.
280 // Page 0 is dedicated to lock for new page extensions,
281 // and chains empty pages together for reuse.
283 // Empty pages are chained together through the ALLOC page and reused.
285 // Access macros to address slot and key values from the page
287 #define slotptr(page, slot) (((BtSlot *)(page+1)) + (slot-1))
288 #define keyptr(page, slot) ((BtKey)((unsigned char*)(page) + slotptr(page, slot)->off))
// store a page id into a byte array, one byte per iteration,
// emitting the current low byte then shifting right.
// NOTE(review): the loop header is elided in this listing; the shift
// order implies i counts down from BtId-1 (big-endian layout) — confirm.
290 void bt_putid(unsigned char *dest, uid id)
295 dest[i] = (unsigned char)id, id >>= 8;
// reconstruct a page id from BtId bytes: shift the accumulator
// left 8 bits and OR in each successive byte (big-endian decode).
298 uid bt_getid(unsigned char *src)
303 for( i = 0; i < BtId; i++ )
304 id <<= 8, id |= *src++;
// tear down a buffer manager: unmap every cached segment,
// close the index file, and release the manager's arrays.
309 void bt_mgrclose (BtMgr *mgr)
314 // release mapped pages
// walk every cache node slot; each node is sizeof(BtHash) plus one
// BtLatchSet per page in the mapped segment (hashmask+1 pages)
316 for( slot = 0; slot < mgr->nodemax; slot++ ) {
317 hash = (BtHash *)(mgr->nodes + slot * (sizeof(BtHash) + (mgr->hashmask + 1) * sizeof(BtLatchSet)));
// POSIX branch: unmap the segment (platform #ifdef elided in this listing)
320 munmap (hash->map, (mgr->hashmask+1) << mgr->page_bits);
// Win32 branch: flush, unmap the view, and close the mapping handle
323 FlushViewOfFile(hash->map, 0);
324 UnmapViewOfFile(hash->map);
325 CloseHandle(hash->hmap);
// Win32 cleanup of the file handle and manager allocations
// (the corresponding unix close/free lines are elided here)
336 FlushFileBuffers(mgr->idx);
337 CloseHandle(mgr->idx);
338 GlobalFree (mgr->nodes);
339 GlobalFree (mgr->cache);
340 GlobalFree (mgr->latch);
344 // close and release memory
// release a thread's BtDb handle; frees the per-thread page buffer
// (Win32 branch shown; the unix free() counterpart is elided here)
346 void bt_close (BtDb *bt)
354 VirtualFree (bt->mem, 0, MEM_RELEASE);
359 // open/create new btree buffer manager
361 // call with file_name, BT_openmode, bits in page size (e.g. 16),
362 // size of mapped page cache (e.g. 8192)
// create/open the shared buffer manager for a btree file.
//   name:     index file path
//   mode:     BT_rw or BT_ro
//   bits:     page size in bits (BT_minbits..BT_maxbits)
//   nodemax:  number of mapped-segment cache nodes
//   segsize:  log2 of pages per mmap segment
//   hashsize: number of hash-table buckets for the segment cache
// returns NULL on any failure (file, allocation, or initial write).
364 BtMgr *bt_mgr (char *name, uint mode, uint bits, uint nodemax, uint segsize, uint hashsize)
366 uint lvl, attr, cacheblk, last;
375 SYSTEM_INFO sysinfo[1];
378 // determine sanity of page size and buffer pool
// clamp bits into [BT_minbits, BT_maxbits] (assignments elided here)
380 if( bits > BT_maxbits )
382 else if( bits < BT_minbits )
386 return NULL; // must have buffer pool
// unix path: open (and possibly create) the index file
389 mgr = calloc (1, sizeof(BtMgr));
391 switch (mode & 0x7fff)
394 mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666);
400 mgr->idx = open ((char*)name, O_RDONLY);
405 return free(mgr), NULL;
407 cacheblk = 4096; // minimum mmap segment size for unix
// Win32 path: same steps via CreateFile / GlobalAlloc
410 mgr = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, sizeof(BtMgr));
411 attr = FILE_ATTRIBUTE_NORMAL;
412 switch (mode & 0x7fff)
415 mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL);
421 mgr->idx = CreateFile(name, GENERIC_READ, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_EXISTING, attr, NULL);
425 if( mgr->idx == INVALID_HANDLE_VALUE )
426 return GlobalFree(mgr), NULL;
428 // normalize cacheblk to multiple of sysinfo->dwAllocationGranularity
429 GetSystemInfo(sysinfo);
430 cacheblk = sysinfo->dwAllocationGranularity;
434 alloc = malloc (BT_maxpage);
437 // read minimum page size to get root info
// a non-empty file supplies its own page-size bits from page 0;
// an empty file opened read-only is an error
439 if( size = lseek (mgr->idx, 0L, 2) ) {
440 if( pread(mgr->idx, alloc, BT_minpage, 0) == BT_minpage )
443 return free(mgr), free(alloc), NULL;
444 } else if( mode == BT_ro )
445 return bt_mgrclose (mgr), NULL;
// Win32 equivalent of the size/first-page probe
447 alloc = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE);
448 size = GetFileSize(mgr->idx, amt);
451 if( !ReadFile(mgr->idx, (char *)alloc, BT_minpage, amt, NULL) )
452 return bt_mgrclose (mgr), NULL;
454 } else if( mode == BT_ro )
455 return bt_mgrclose (mgr), NULL;
458 mgr->page_size = 1 << bits;
459 mgr->page_bits = bits;
461 mgr->nodemax = nodemax;
// a segment must hold at least one full page
464 if( cacheblk < mgr->page_size )
465 cacheblk = mgr->page_size;
467 // mask for partial memmaps
469 mgr->hashmask = (cacheblk >> bits) - 1;
471 // see if requested number of pages per memmap is greater
473 if( (1 << segsize) > mgr->hashmask )
474 mgr->hashmask = (1 << segsize) - 1;
// seg_bits = smallest shift covering hashmask+1 pages
478 while( (1 << mgr->seg_bits) <= mgr->hashmask )
481 mgr->hashsize = hashsize;
// allocate cache-node pool, bucket heads, and bucket latches
// NOTE(review): the node pool is sized by cacheblk entries here, but
// bt_mgrclose/bt_hashpage index it with slots up to nodemax — confirm
// cacheblk >= nodemax is guaranteed, or this should use nodemax.
484 mgr->nodes = calloc (cacheblk, (sizeof(BtHash) + (mgr->hashmask + 1) * sizeof(BtLatchSet)));
485 mgr->cache = calloc (hashsize, sizeof(ushort));
486 mgr->latch = calloc (hashsize, sizeof(BtLatch));
488 mgr->nodes = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, cacheblk * (sizeof(BtHash) + (mgr->hashmask + 1) * sizeof(BtLatchSet)));
489 mgr->cache = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(ushort));
490 mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtLatch));
496 // initializes an empty b-tree with root page and page of leaves
// write the alloc page (page 0): slot 2 id holds the next free page number
498 memset (alloc, 0, 1 << bits);
499 bt_putid(slotptr(alloc, 2)->id, MIN_lvl+1);
500 alloc->bits = mgr->page_bits;
503 if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size )
504 return bt_mgrclose (mgr), NULL;
506 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
507 return bt_mgrclose (mgr), NULL;
509 if( *amt < mgr->page_size )
510 return bt_mgrclose (mgr), NULL;
// write one page per initial level, each holding only a stopper key
513 memset (alloc, 0, 1 << bits);
514 alloc->bits = mgr->page_bits;
516 for( lvl=MIN_lvl; lvl--; ) {
517 slotptr(alloc, 1)->off = mgr->page_size - 3;
518 bt_putid(slotptr(alloc, 1)->id, lvl ? MIN_lvl - lvl + 1 : 0); // next(lower) page number
519 key = keyptr(alloc, 1);
520 key->len = 2; // create stopper key
523 alloc->min = mgr->page_size - 3;
528 if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size )
529 return bt_mgrclose (mgr), NULL;
531 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
532 return bt_mgrclose (mgr), NULL;
534 if( *amt < mgr->page_size )
535 return bt_mgrclose (mgr), NULL;
539 // create empty page area by writing last page of first
540 // cache area (other pages are zeroed by O/S)
542 if( mgr->hashmask ) {
543 memset(alloc, 0, mgr->page_size);
544 last = mgr->hashmask;
// advance to the last page of the segment holding the initial pages
546 while( last < MIN_lvl + 1 )
547 last += mgr->hashmask + 1;
550 pwrite(mgr->idx, alloc, mgr->page_size, last << mgr->page_bits);
// NOTE(review): SetFilePointer is given a 32-bit offset; for large
// (last << page_bits) values this truncates — confirm SetFilePointerEx
// or a 64-bit-safe call is intended here.
552 SetFilePointer (mgr->idx, last << mgr->page_bits, NULL, FILE_BEGIN);
553 if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) )
554 return bt_mgrclose (mgr), NULL;
555 if( *amt < mgr->page_size )
556 return bt_mgrclose (mgr), NULL;
// release the temporary page buffer (unix free() counterpart elided)
564 VirtualFree (alloc, 0, MEM_RELEASE);
569 // open BTree access method
570 // based on buffer manager
// create a per-thread BtDb handle over a shared buffer manager.
// allocates one 3-page scratch buffer and carves it into the
// frame / zero / cursor page buffers (these are never memory-mapped).
// NOTE(review): malloc/VirtualAlloc results are not checked here —
// confirm whether OOM is handled by elided lines or simply assumed.
572 BtDb *bt_open (BtMgr *mgr)
574 BtDb *bt = malloc (sizeof(*bt));
576 memset (bt, 0, sizeof(*bt));
579 bt->mem = malloc (3 *mgr->page_size);
581 bt->mem = VirtualAlloc(NULL, 3 * mgr->page_size, MEM_COMMIT, PAGE_READWRITE);
583 bt->frame = (BtPage)bt->mem;
584 bt->zero = (BtPage)(bt->mem + 1 * mgr->page_size);
585 bt->cursor = (BtPage)(bt->mem + 2 * mgr->page_size);
589 // compare two keys, returning > 0, = 0, or < 0
590 // as the comparison value
// compare a stored key (length-prefixed BtKey) against a raw
// key/len pair; memcmp over the shorter length decides first,
// with the tail comparison on lengths elided in this listing.
592 int keycmp (BtKey key1, unsigned char *key2, uint len2)
594 uint len1 = key1->len;
597 if( ans = memcmp (key1->key, key2, len1 > len2 ? len2 : len1) )
610 // wait until write lock mode is clear
611 // and add 1 to the share count
// acquire a shared (read) latch: optimistically add Share to the
// counter, then test the Write bit of the combined word; if a writer
// holds or requested the latch, back the increment out and retry
// (retry loop elided in this listing). Adding Share never changes the
// Write bit, so the old-value (__sync) vs new-value (Interlocked)
// return difference is harmless here.
613 void bt_readlock(BtLatch *latch)
616 // add one to counter, check write bit
619 if( ~__sync_fetch_and_add((int *)latch, Share) & Write )
622 if( ~InterlockedAdd((int *)latch, Share) & Write )
625 // didn't get latch, reset counter by one
628 __sync_fetch_and_add((int *)latch, -Share);
630 InterlockedAdd ((int *)latch, -Share);
642 // wait for other read and write latches to relinquish
// acquire the exclusive (write) latch: atomically OR in the Write
// bit, then wait until no readers remain (prev >> 1 is the share
// count) and this thread owns the Write bit ("ours" is computed in
// lines elided from this listing).
644 void bt_writelock(BtLatch *latch)
649 // see if we can get write access
652 prev = __sync_fetch_and_or((int *)latch, Write);
654 prev = InterlockedOr((int *)latch, Write);
// granted only when no share count remains and the Write bit is ours
660 if( !(prev >> 1) && ours )
673 // try to obtain write lock
675 // return 1 if obtained,
676 // 0 if already write locked
// non-blocking attempt at the write latch; same OR-then-test protocol
// as bt_writelock but returns immediately: 1 if obtained, 0 otherwise
// (the return statements are elided in this listing).
678 int bt_writetry(BtLatch *latch)
683 // see if we can get write access
686 prev = __sync_fetch_and_or((int *)latch, Write);
688 prev = InterlockedOr((int *)latch, Write);
// success requires zero share count and ownership of the Write bit
697 if( !(prev >> 1) && ours )
// release the write latch by atomically clearing the Write bit
711 void bt_releasewrite(BtLatch *latch)
714 __sync_fetch_and_and((int *)latch, ~Write);
716 InterlockedAnd ((int *)latch, ~Write);
720 // decrement reader count
// release a shared latch by atomically subtracting one Share unit
722 void bt_releaseread(BtLatch *latch)
725 __sync_fetch_and_add((int *)latch, -Share);
727 InterlockedAdd((int *)latch, -Share);
733 // find segment in cache
734 // return NULL if not there
735 // otherwise return node
// look up the cache node mapping the segment containing page_no in
// hash bucket idx; caller must hold the bucket latch. Returns the
// node or NULL (return paths elided in this listing).
737 BtHash *bt_findhash(BtDb *bt, uid page_no, uint idx)
742 // compute cache block first page and hash idx
744 if( slot = bt->mgr->cache[idx] )
745 hash = (BtHash *)(bt->mgr->nodes + slot * (sizeof(BtHash) + (bt->mgr->hashmask + 1) * sizeof(BtLatchSet)));
// reduce page_no to the segment's base page before chain walk
749 page_no &= ~bt->mgr->hashmask;
751 while( hash->basepage != page_no )
752 if( hash = hash->hashnext )
760 // add segment to hash table
// insert cache node `hash` at the head of bucket idx's chain;
// caller must hold the bucket's write latch.
762 void bt_linkhash(BtDb *bt, BtHash *hash, uid page_no, int idx)
767 hash->hashprev = hash->hashnext = NULL;
768 hash->basepage = page_no & ~bt->mgr->hashmask;
// link ahead of the existing chain head, if any
772 if( slot = bt->mgr->cache[idx] ) {
773 node = (BtHash *)(bt->mgr->nodes + slot * (sizeof(BtHash) + (bt->mgr->hashmask + 1) * sizeof(BtLatchSet)));
774 hash->hashnext = node;
775 node->hashprev = hash;
// bucket head now points at this node's slot index
778 bt->mgr->cache[idx] = hash->slot;
781 // find best segment to evict from buffer pool
// scan the chain starting at cache-node index `slot` for the best
// eviction candidate; pin checks and the lru/target update that
// selects the winner are elided in this listing — confirm against
// the full source before relying on the exact policy.
783 BtHash *bt_findlru (BtDb *bt, uint slot)
785 unsigned long long int target = ~0LL;
786 BtHash *hash = NULL, *node;
791 node = (BtHash *)(bt->mgr->nodes + slot * (sizeof(BtHash) + (bt->mgr->hashmask + 1) * sizeof(BtLatchSet)));
796 if( node->lru > target )
800 } while( node = node->hashnext );
805 // map new segment to virtual memory
// memory-map the file segment containing page_no into hash->map.
// off = byte offset of the segment's base page; a segment spans
// hashmask+1 pages. Returns 0 or sets/returns bt->err = BTERR_map.
807 BTERR bt_mapsegment(BtDb *bt, BtHash *hash, uid page_no)
809 off64_t off = (page_no & ~bt->mgr->hashmask) << bt->mgr->page_bits;
810 off64_t limit = off + ((bt->mgr->hashmask+1) << bt->mgr->page_bits);
// POSIX branch: shared mapping, write access only in BT_rw mode
814 flag = PROT_READ | ( bt->mgr->mode == BT_ro ? 0 : PROT_WRITE );
815 hash->map = mmap (0, (bt->mgr->hashmask+1) << bt->mgr->page_bits, flag, MAP_SHARED, bt->mgr->idx, off);
816 if( hash->map == MAP_FAILED )
817 return bt->err = BTERR_map;
// Win32 branch: file mapping object sized to the segment's end,
// then a view over just this segment
819 flag = ( bt->mgr->mode == BT_ro ? PAGE_READONLY : PAGE_READWRITE );
820 hash->hmap = CreateFileMapping(bt->mgr->idx, NULL, flag, (DWORD)(limit >> 32), (DWORD)limit, NULL);
822 return bt->err = BTERR_map;
824 flag = ( bt->mgr->mode == BT_ro ? FILE_MAP_READ : FILE_MAP_WRITE );
825 hash->map = MapViewOfFile(hash->hmap, flag, (DWORD)(off >> 32), (DWORD)off, (bt->mgr->hashmask+1) << bt->mgr->page_bits);
827 return bt->err = BTERR_map;
832 // find or place requested page in segment-cache
833 // return hash table entry
// find (or create) the cache node for page_no's segment and pin it.
// fast path: shared bucket latch + lookup; slow path: upgrade to the
// bucket write latch, re-check, then either claim a fresh node slot
// or evict an LRU victim and remap it.
835 BtHash *bt_hashpage(BtDb *bt, uid page_no)
837 BtHash *hash, *node, *next;
838 uint slot, idx, victim;
841 // lock hash table chain
843 idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
844 bt_readlock (&bt->mgr->latch[idx]);
846 // look up in hash table
// hit under shared latch: pin and return
848 if( hash = bt_findhash(bt, page_no, idx) ) {
850 __sync_fetch_and_add(&hash->pin, 1);
852 InterlockedIncrement (&hash->pin);
854 bt_releaseread (&bt->mgr->latch[idx]);
859 // upgrade to write lock
// note: the latch is dropped before re-acquiring, so the lookup
// must be repeated under the write latch (done just below)
861 bt_releaseread (&bt->mgr->latch[idx]);
862 bt_writelock (&bt->mgr->latch[idx]);
864 // try to find page in cache with write lock
866 if( hash = bt_findhash(bt, page_no, idx) ) {
868 __sync_fetch_and_add(&hash->pin, 1);
870 InterlockedIncrement (&hash->pin);
872 bt_releasewrite (&bt->mgr->latch[idx]);
877 // allocate a new hash node
878 // and add to hash table
881 slot = __sync_fetch_and_add(&bt->mgr->nodecnt, 1);
883 slot = InterlockedIncrement (&bt->mgr->nodecnt) - 1;
// slot 0 is unused (bucket heads store 0 for "empty"), hence ++slot
886 if( ++slot < bt->mgr->nodemax ) {
887 hash = (BtHash *)(bt->mgr->nodes + slot * (sizeof(BtHash) + (bt->mgr->hashmask + 1) * sizeof(BtLatchSet)));
890 if( bt_mapsegment(bt, hash, page_no) )
893 bt_linkhash(bt, hash, page_no, idx);
894 bt_releasewrite (&bt->mgr->latch[idx]);
898 // hash table is full
899 // find best cache entry to evict
// undo the over-count of nodecnt before entering the eviction loop
// (the loop construct itself is elided in this listing)
902 __sync_fetch_and_add(&bt->mgr->nodecnt, -1);
904 InterlockedDecrement (&bt->mgr->nodecnt);
// round-robin starting bucket for the eviction search
909 victim = __sync_fetch_and_add(&bt->mgr->evicted, 1);
911 victim = InterlockedIncrement (&bt->mgr->evicted) - 1;
913 victim %= bt->mgr->hashsize;
915 // try to get write lock
916 // skip entry if not obtained
918 if( !bt_writetry (&bt->mgr->latch[victim]) )
921 // if cache entry is empty
922 // or no slots are unpinned
925 if( !(hash = bt_findlru(bt, bt->mgr->cache[victim])) ) {
926 bt_releasewrite (&bt->mgr->latch[victim]);
930 // unlink victim hash node from hash table
932 if( node = hash->hashprev )
933 node->hashnext = hash->hashnext;
934 else if( node = hash->hashnext )
935 bt->mgr->cache[victim] = node->slot;
937 bt->mgr->cache[victim] = 0;
939 if( node = hash->hashnext )
940 node->hashprev = hash->hashprev;
942 // remove old file mapping
944 munmap (hash->map, (bt->mgr->hashmask+1) << bt->mgr->page_bits);
946 FlushViewOfFile(hash->map, 0);
947 UnmapViewOfFile(hash->map);
948 CloseHandle(hash->hmap);
951 bt_releasewrite (&bt->mgr->latch[victim]);
953 // create new file mapping
954 // and link into hash table
// remap the reclaimed node for the requested segment and publish it
956 if( bt_mapsegment(bt, hash, page_no) )
959 bt_linkhash(bt, hash, page_no, idx);
960 bt_releasewrite (&bt->mgr->latch[idx]);
965 // place write, read, or parent lock on requested page_no.
966 // pin to buffer pool
// pin page_no's segment, take the requested per-page latch, and
// (when page != NULL) return a pointer to the mapped page image.
// lock chaining relies on the pin taken by bt_hashpage.
968 BTERR bt_lockpage(BtDb *bt, uid page_no, BtLock mode, BtPage *page)
974 // find/create mapping in hash table
976 if( hash = bt_hashpage(bt, page_no) )
977 subpage = (uint)(page_no & bt->mgr->hashmask); // page within mapping
981 set = hash->pagelatch + subpage;
// dispatch on mode (switch/case labels elided in this listing):
// Read/Write use the readwr pair, Access/Delete the access pair,
// Parent its own exclusive latch
985 bt_readlock (set->readwr);
988 bt_writelock (set->readwr);
991 bt_readlock (set->access);
994 bt_writelock (set->access);
997 bt_writelock (set->parent);
1000 return bt->err = BTERR_lock;
// hand the caller the in-map page image
1004 *page = (BtPage)(hash->map + (subpage << bt->mgr->page_bits));
1009 // remove write, read, or parent lock on requested page_no.
// release the per-page latch taken by bt_lockpage and unpin the
// segment's cache node. The page must still be pinned, so the
// hash lookup is expected to succeed.
1011 BTERR bt_unlockpage(BtDb *bt, uid page_no, BtLock mode)
1017 // since page is pinned
1018 // it should still be in the buffer pool
1020 idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize;
1021 bt_readlock (&bt->mgr->latch[idx]);
1023 if( hash = bt_findhash(bt, page_no, idx) )
1024 subpage = (uint)(page_no & bt->mgr->hashmask);
1026 return bt->err = BTERR_hash;
1028 bt_releaseread (&bt->mgr->latch[idx]);
1029 set = hash->pagelatch + subpage;
// release the latch matching the original mode (case labels elided)
1033 bt_releaseread (set->readwr);
1036 bt_releasewrite (set->readwr);
1039 bt_releaseread (set->access);
1042 bt_releasewrite (set->access);
1045 bt_releasewrite (set->parent);
1048 return bt->err = BTERR_lock;
// drop the pin taken when the page was locked
1052 __sync_fetch_and_add(&hash->pin, -1);
1054 InterlockedDecrement (&hash->pin);
1059 // deallocate a deleted page that has no tree pointers
1060 // place on free chain out of allocator page
// return an empty, unreferenced page to the free chain rooted in the
// ALLOC page. Lock order: Delete latch, then Write latch on the dead
// page, then Write latch on the ALLOC page.
1062 BTERR bt_freepage(BtDb *bt, uid page_no)
1064 // obtain delete lock on deleted page
1066 if( bt_lockpage(bt, page_no, BtLockDelete, NULL) )
1069 // obtain write lock on deleted page
1071 if( bt_lockpage(bt, page_no, BtLockWrite, &bt->temp) )
1074 // lock allocation page
1076 if ( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) )
1079 // store chain in first key
// freed page's slot 1 id -> old chain head; ALLOC slot 1 id -> freed page
1080 bt_putid(slotptr(bt->temp, 1)->id, bt_getid(slotptr(bt->alloc, 1)->id));
1081 bt_putid(slotptr(bt->alloc, 1)->id, page_no);
1085 if( bt_unlockpage(bt, ALLOC_page, BtLockWrite) )
1088 // remove write lock on deleted node
1090 if( bt_unlockpage(bt, page_no, BtLockWrite) )
1093 // remove delete lock on deleted node
1095 if( bt_unlockpage(bt, page_no, BtLockDelete) )
1101 // allocate a new page and write page into it
// allocate a page number (reusing the free chain when possible),
// write `page` into it, then pull it through the page cache so the
// mapped image matches. Returns the new page number, or 0 on error.
1103 uid bt_newpage(BtDb *bt, BtPage page)
1111 if ( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) )
1114 // use empty chain first
1115 // else allocate empty page
// pop the free-chain head (ALLOC slot 1) if non-zero
1117 if( new_page = bt_getid(slotptr(bt->alloc, 1)->id) ) {
1118 if( bt_lockpage (bt, new_page, BtLockWrite, &bt->temp) )
1120 bt_putid(slotptr(bt->alloc, 1)->id, bt_getid(slotptr(bt->temp, 1)->id));
1121 if( bt_unlockpage (bt, new_page, BtLockWrite) )
// otherwise extend the file: ALLOC slot 2 holds the next-new page number
1125 new_page = bt_getid(slotptr(bt->alloc, 2)->id);
1126 bt_putid(slotptr(bt->alloc, 2)->id, new_page+1);
1130 if ( pwrite(bt->mgr->idx, page, bt->mgr->page_size, new_page << bt->mgr->page_bits) < bt->mgr->page_size )
1131 return bt->err = BTERR_wrt, 0;
1133 // if writing first page of hash block, zero last page in the block
// pre-extends the file to a full segment so mmap of the segment succeeds
1135 if ( !reuse && bt->mgr->hashmask > 0 && (new_page & bt->mgr->hashmask) == 0 )
1137 // use zero buffer to write zeros
1138 memset(bt->zero, 0, bt->mgr->page_size);
1139 if ( pwrite(bt->mgr->idx,bt->zero, bt->mgr->page_size, (new_page | bt->mgr->hashmask) << bt->mgr->page_bits) < bt->mgr->page_size )
1140 return bt->err = BTERR_wrt, 0;
1143 // bring new page into page-cache and copy page.
1144 // this will extend the file into the new pages.
1146 if( bt_lockpage(bt, new_page, BtLockWrite, &pmap) )
1149 memcpy(pmap, page, bt->mgr->page_size);
1151 if( bt_unlockpage (bt, new_page, BtLockWrite) )
1156 if ( bt_unlockpage(bt, ALLOC_page, BtLockWrite) )
1162 // find slot in page for given key at a given level
// binary-search the current page (bt->page) for the smallest slot
// whose key is >= the given key; slots are 1-based.
1164 int bt_findslot (BtDb *bt, unsigned char *key, uint len)
1166 uint diff, higher = bt->page->cnt, low = 1, slot;
1168 // low is the lowest candidate, higher is already
1169 // tested as .ge. the given key, loop ends when they meet
1171 while( diff = higher - low ) {
1172 slot = low + ( diff >> 1 );
1173 if( keycmp (keyptr(bt->page, slot), key, len) < 0 )
1182 // find and load page at given level for given key
1183 // leave page rd or wr locked as requested
// descend from the root to the page at level `lvl` covering `key`,
// leaving that page locked in `lock` mode (Read or Write) and cached
// in bt->page / bt->page_no. Returns the slot for the key, or 0 with
// bt->err set on structural failure.
1185 int bt_loadpage (BtDb *bt, unsigned char *key, uint len, uint lvl, uint lock)
1187 uid page_no = ROOT_page, prevpage = 0;
1188 uint drill = 0xff, slot;
1189 uint mode, prevmode;
1191 // start at root of btree and drill down
1194 // determine lock mode of drill level
// only the target level gets the caller's Write lock; all other
// levels are traversed under Read locks
1195 mode = (lock == BtLockWrite) && (drill == lvl) ? BtLockWrite : BtLockRead;
1197 bt->page_no = page_no;
1199 // obtain access lock using lock chaining with Access mode
1201 if( page_no > ROOT_page )
1202 if( bt_lockpage(bt, page_no, BtLockAccess, NULL) )
// release the previous level only after the Access lock is held
1206 if( bt_unlockpage(bt, prevpage, prevmode) )
1209 // obtain read lock using lock chaining
1210 // and pin page contents
1212 if( bt_lockpage(bt, page_no, mode, &bt->page) )
1215 if( page_no > ROOT_page )
1216 if( bt_unlockpage(bt, page_no, BtLockAccess) )
1219 // re-read and re-lock root after determining actual level of root
1221 if( bt->page_no == ROOT_page )
1222 if( bt->page->lvl != drill) {
1223 drill = bt->page->lvl;
// if the root itself is the target write level, restart with the
// correct lock mode
1225 if( lock == BtLockWrite && drill == lvl )
1226 if( bt_unlockpage(bt, page_no, mode) )
1232 // if page is being deleted,
1233 // move back to preceding page
1235 if( bt->page->kill ) {
1236 page_no = bt_getid (bt->page->right);
1240 // find key on page at this level
1241 // and descend to requested level
1243 slot = bt_findslot (bt, key, len);
1245 // is this slot a foster child?
1247 if( slot <= bt->page->cnt - bt->page->foster )
// skip dead slots; falling off the end is a structural error
1253 while( slotptr(bt->page, slot)->dead )
1254 if( slot++ < bt->page->cnt )
1257 return bt->err = BTERR_struct, 0;
1259 // continue down / right using overlapping locks
1260 // to protect pages being killed or split.
1263 prevpage = bt->page_no;
1264 page_no = bt_getid(slotptr(bt->page, slot)->id);
1267 // return error on end of chain
1269 bt->err = BTERR_struct;
1270 return 0; // return error
1273 // find and delete key on page by marking delete flag bit
1274 // when page becomes empty, delete it from the btree
// mark a key dead on its page at level `lvl`; if the page becomes
// empty and has a right sibling, merge the sibling's contents into
// this page and repair the parent level (delete the old left fence,
// redirect the right fence to the consolidated page), then free the
// right page. Fence-key buffers are length-prefixed (len byte first).
1276 BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl)
1278 unsigned char leftkey[256], rightkey[256];
1283 if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) )
1284 ptr = keyptr(bt->page, slot);
1288 // if key is found delete it, otherwise ignore request
1290 if( !keycmp (ptr, key, len) )
1291 if( slotptr(bt->page, slot)->dead == 0 )
1292 slotptr(bt->page,slot)->dead = 1, bt->page->act--;
1294 // return if page is not empty, or it has no right sibling
1296 right = bt_getid(bt->page->right);
1297 page_no = bt->page_no;
1299 if( !right || bt->page->act )
1300 return bt_unlockpage(bt, page_no, BtLockWrite);
1302 // obtain Parent lock over write lock
1304 if( bt_lockpage(bt, page_no, BtLockParent, NULL) )
1307 // cache copy of key to delete
// the page's fence key (slot cnt) identifies it in the parent
1309 ptr = keyptr(bt->page, bt->page->cnt);
1310 memcpy(leftkey, ptr, ptr->len + 1);
1312 // lock and map right page
1314 if ( bt_lockpage(bt, right, BtLockWrite, &bt->temp) )
1317 // pull contents of next page into current empty page
1318 memcpy (bt->page, bt->temp, bt->mgr->page_size);
1320 // cache copy of key to update
1321 ptr = keyptr(bt->temp, bt->temp->cnt);
1322 memcpy(rightkey, ptr, ptr->len + 1);
1324 // Mark right page as deleted and point it to left page
1325 // until we can post updates at higher level.
// lets concurrent readers arriving at the dead right page back up
// to the consolidated left page (see the kill check in bt_loadpage)
1327 bt_putid(bt->temp->right, page_no);
1331 if( bt_unlockpage(bt, right, BtLockWrite) )
1333 if( bt_unlockpage(bt, page_no, BtLockWrite) )
1336 // delete old lower key to consolidated node
1338 if( bt_deletekey (bt, leftkey + 1, *leftkey, lvl + 1) )
1341 // redirect higher key directly to consolidated node
1343 if( slot = bt_loadpage (bt, rightkey+1, *rightkey, lvl+1, BtLockWrite) )
1344 ptr = keyptr(bt->page, slot);
1348 // since key already exists, update id
1350 if( keycmp (ptr, rightkey+1, *rightkey) )
1351 return bt->err = BTERR_struct;
1353 slotptr(bt->page, slot)->dead = 0;
1354 bt_putid(slotptr(bt->page,slot)->id, page_no);
1355 bt_unlockpage(bt, bt->page_no, BtLockWrite);
1357 // obtain write lock and
1358 // add right block to free chain
1360 if( bt_freepage (bt, right) )
1363 // remove ParentModify lock
1365 if( bt_unlockpage(bt, page_no, BtLockParent) )
1371 // find key in leaf level and return row-id
// point lookup at leaf level: load the leaf covering `key` under a
// read lock, return the associated row-id on an exact match
// (same length and bytes), 0 otherwise.
1373 uid bt_findkey (BtDb *bt, unsigned char *key, uint len)
1379 if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) )
1380 ptr = keyptr(bt->page, slot);
1384 // if key exists, return row-id
1385 // otherwise return 0
1387 if( ptr->len == len && !memcmp (ptr->key, key, len) )
1388 id = bt_getid(slotptr(bt->page,slot)->id);
1392 if ( bt_unlockpage(bt, bt->page_no, BtLockRead) )
// compact the current page in place: copy it to the spare frame,
// zero the slot/key area, then re-add only live keys (the fence key
// at slot max is kept even if dead), rebuilding offsets from the top.
1398 void bt_cleanpage(BtDb *bt)
1400 uint nxt = bt->mgr->page_size;
1401 BtPage page = bt->page;
1402 uint cnt = 0, idx = 0;
1403 uint max = page->cnt;
1406 memcpy (bt->frame, page, bt->mgr->page_size);
1408 // skip page info and set rest of page to zero
1409 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1412 // try cleaning up page first
1414 while( cnt++ < max ) {
1415 // always leave fence key in list
1416 if( cnt < max && slotptr(bt->frame,cnt)->dead )
// copy the key bytes (len byte + body) down from the top of the page
1420 key = keyptr(bt->frame, cnt);
1421 nxt -= key->len + 1;
1422 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
// rebuild the slot; the active count bump for live slots is on an
// elided line following the dead-bit test below
1425 memcpy(slotptr(page, ++idx)->id, slotptr(bt->frame, cnt)->id, BtId);
1426 if( !(slotptr(page, idx)->dead = slotptr(bt->frame, cnt)->dead) )
1428 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1429 slotptr(page, idx)->off = nxt;
1436 // return with page unlocked
// insert key/id/tod into the current page at `slot`, assuming space
// was verified by the caller; reuses the first dead slot at or after
// `slot` if one exists, otherwise shifts slots up. Releases the
// page's write lock before returning.
1438 BTERR bt_addkeytopage (BtDb *bt, uint slot, unsigned char *key, uint len, uid id, uint tod)
1440 BtPage page = bt->page;
1443 // calculate next available slot and copy key into page
// key text grows down from page->min: length byte, then key bytes
1445 page->min -= len + 1;
1446 ((unsigned char *)page)[page->min] = len;
1447 memcpy ((unsigned char *)page + page->min +1, key, len );
// look for a dead slot to absorb the shift
1449 for( idx = slot; idx < page->cnt; idx++ )
1450 if( slotptr(page, idx)->dead )
1453 // now insert key into array before slot
1454 // preserving the fence slot
1456 if( idx == page->cnt )
// shift slots up one position until the target slot is open
1462 *slotptr(page, idx) = *slotptr(page, idx -1), idx--;
1464 bt_putid(slotptr(page,slot)->id, id);
1465 slotptr(page, slot)->off = page->min;
1466 slotptr(page, slot)->tod = tod;
1467 slotptr(page, slot)->dead = 0;
1469 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1472 // split the root and raise the height of the btree
// raise the tree height: move the root's contents to a fresh page,
// then rebuild the root with exactly two slots — the old root's
// fence key pointing at the new page, and a stopper key pointing at
// `right` (the new sibling). fencekey is length-prefixed.
1474 BTERR bt_splitroot(BtDb *bt, uid right)
1476 uint nxt = bt->mgr->page_size;
1477 unsigned char fencekey[256];
1478 BtPage root = bt->page;
1482 // Obtain an empty page to use, and copy the left page
1483 // contents into it. Strip foster child key.
1484 // Save left fence key.
1489 key = keyptr(bt->page, bt->page->cnt);
1490 memcpy (fencekey, key, key->len + 1);
1492 if( !(new_page = bt_newpage(bt, bt->page)) )
1495 // preserve the page info at the bottom
1496 // and set rest to zero
1498 memset (root+1, 0, bt->mgr->page_size - sizeof(*root));
1500 // insert left fence key on newroot page
1502 nxt -= *fencekey + 1;
1503 memcpy ((unsigned char *)root + nxt, fencekey, *fencekey + 1);
1504 bt_putid(slotptr(root, 1)->id, new_page);
1505 slotptr(root, 1)->off = nxt;
1507 // insert stopper key on newroot page
1508 // and increase the root height
// the stopper-key construction into fencekey is on elided lines
1514 memcpy ((unsigned char *)root + nxt, fencekey, *fencekey + 1);
1515 bt_putid(slotptr(root, 2)->id, right);
1516 slotptr(root, 2)->off = nxt;
// the root has no right sibling
1518 bt_putid(root->right, 0);
1519 root->min = nxt; // reset lowest used offset and key count
1524 // release root (bt->page)
1526 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1529 // split already locked full node
// bt_splitpage: bt->page arrives write-locked and (possibly) too full to
// accept `len` more key bytes. The upper half of its keys moves to a newly
// allocated right page, which is linked in as the smallest foster child of
// the old page; the parent is then fixed up (fence-key id swap + new fence
// insert) under Parent/Delete/Write locks. Recurses upward through
// bt_loadpage/bt_splitpage when the parent is itself full.
// Returns 0 on success, nonzero BTERR otherwise.
// NOTE(review): many original lines are elided in this listing (local
// declarations, error-return bodies, loop initializers such as cnt/idx
// resets) — read alongside the complete source.
1532 BTERR bt_splitpage (BtDb *bt, uint len)
1534 uint slot, cnt, idx, max, nxt = bt->mgr->page_size;
1535 unsigned char fencekey[256]; // length-prefixed copy of old fence key
1536 uid page_no = bt->page_no;
1537 BtPage page = bt->page;
1538 uint tod = time(NULL);
1539 uint lvl = page->lvl;
1540 uid new_page, right;
1547 // return if enough space now
// another thread may have split this page while we waited for the write lock
1549 if( page->min >= (page->cnt + 1) * sizeof(BtSlot) + sizeof(*page) + len + 1)
1550 return bt_unlockpage(bt, page_no, BtLockWrite);
1552 // initialize frame buffer
1554 memset (bt->frame, 0, bt->mgr->page_size);
1555 max = page->cnt - page->foster; // count of real (non-foster) keys
1556 tod = (uint)time(NULL);
1560 // split higher half of keys to bt->frame
1561 // leaving foster children in the left node.
// NOTE(review): the elided initializer presumably sets cnt to the midpoint
// and idx to 0 before this loop — confirm in full source
1563 while( cnt++ < max ) {
1564 key = keyptr(page, cnt);
1565 nxt -= key->len + 1;
1566 memcpy ((unsigned char *)bt->frame + nxt, key, key->len + 1);
1567 memcpy(slotptr(bt->frame,++idx)->id, slotptr(page,cnt)->id, BtId);
1568 slotptr(bt->frame, idx)->tod = slotptr(page, cnt)->tod;
1569 slotptr(bt->frame, idx)->off = nxt;
1573 // transfer right link node
// the root has no right sibling; other pages pass their right link along
1575 if( page_no > ROOT_page ) {
1576 right = bt_getid (page->right);
1577 bt_putid(bt->frame->right, right);
1580 bt->frame->bits = bt->mgr->page_bits;
1581 bt->frame->min = nxt;
1582 bt->frame->cnt = idx;
1583 bt->frame->lvl = lvl;
1585 // get new free page and write frame to it.
1587 if( !(new_page = bt_newpage(bt, bt->frame)) )
1590 // update lower keys and foster children to continue in old page
// snapshot the original page into bt->frame, then rebuild page in place
1592 memcpy (bt->frame, page, bt->mgr->page_size);
1593 memset (page+1, 0, bt->mgr->page_size - sizeof(*page));
1594 nxt = bt->mgr->page_size;
1599 // assemble page of smaller keys
1600 // to remain in the old page
1602 while( cnt++ < max / 2 ) {
1603 key = keyptr(bt->frame, cnt);
1604 nxt -= key->len + 1;
1605 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1606 memcpy (slotptr(page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId);
1607 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1608 slotptr(page, idx)->off = nxt;
1612 // assemble old foster child keys
1613 // add new foster child fence
// the - 1 includes the old fence key, which becomes the new foster fence
1615 cnt = bt->frame->cnt - bt->frame->foster - 1;
1617 while( cnt++ < bt->frame->cnt ) {
1618 key = keyptr(bt->frame, cnt);
1619 nxt -= key->len + 1;
1620 memcpy ((unsigned char *)page + nxt, key, key->len + 1);
1621 memcpy (slotptr(page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId);
1622 slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod;
1623 slotptr(page, idx)->off = nxt;
1627 // link new right page
1629 bt_putid (page->right, new_page);
1631 // put new page as smallest foster child key
// the slot keeps its key bytes; only its id is redirected to new_page
1634 cnt = page->cnt - page->foster++;
1635 bt_putid (slotptr(page,cnt)->id, new_page);
1637 // if current page is the root page, split it
1639 if( page_no == ROOT_page )
1640 return bt_splitroot (bt, new_page);
1642 // release wr lock on page
1644 if( bt_unlockpage (bt, page_no, BtLockWrite) )
1647 // obtain ParentModification lock for current page
1648 // to fix highest foster child on page
1650 if( bt_lockpage (bt, page_no, BtLockParent, NULL) )
1653 if( bt_lockpage (bt, page_no, BtLockRead, &page) )
1656 // get our old fence key
// length byte + key bytes, as elsewhere in this file
1658 key = keyptr(page, page->cnt);
1659 memcpy (fencekey, key, key->len+1);
1661 // get our new fence key length
1663 key = keyptr(page, page->cnt - 1);
1666 if( bt_unlockpage (bt, page_no, BtLockRead) )
// locate and write-lock the parent slot covering our old fence key
1670 slot = bt_loadpage (bt, fencekey + 1, *fencekey, lvl + 1, BtLockWrite);
1675 // check if parent page has enough space
// parent full: split it first (recursive), then retry the fixup
1677 if( bt->page->min < (bt->page->cnt + 1) * sizeof(BtSlot) + sizeof(*bt->page) + len + 1)
1678 if( bt_splitpage (bt, len) )
1686 // wait for readers from parent get their locks
1688 if( bt_lockpage (bt, page_no, BtLockDelete, NULL) )
1691 if( bt_lockpage (bt, page_no, BtLockWrite, &page) )
1694 // switch parent fence key to foster child
// propagate the dead flag along with the redirected page id
1696 if( slotptr(page, page->cnt)->dead )
1697 slotptr(bt->page, slot)->dead = 1;
1699 bt_putid (slotptr(bt->page, slot)->id, bt_getid(slotptr(page, page->cnt)->id));
1701 // remove foster child from our page
1702 // add our new fence key to parent
1707 key = keyptr(page, page->cnt);
1709 if( bt_addkeytopage (bt, slot, key->key, key->len, page_no, tod) )
1712 if( bt_unlockpage (bt, page_no, BtLockDelete) )
1715 if( bt_unlockpage (bt, page_no, BtLockParent) )
1718 return bt_unlockpage (bt, page_no, BtLockWrite);
1721 // Insert new key into the btree at leaf level.
// bt_insertkey: upsert of (key,len) -> id at level 0. If the key already
// exists it is revived (dead = 0) and its id/tod updated in place under the
// write lock; otherwise the page is split until it has room and the key is
// added via bt_addkeytopage. Returns 0 on success, nonzero BTERR otherwise.
// NOTE(review): elided lines include local declarations, the condition that
// sets BTERR_ovflw (presumably an oversize-key check), and the retry loop
// around the split — confirm in full source.
1723 BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uid id, uint tod)
1730 if( slot = bt_loadpage (bt, key, len, 0, BtLockWrite) )
1731 ptr = keyptr(bt->page, slot);
1735 bt->err = BTERR_ovflw;
1739 // if key already exists, update id and return
1743 if( !keycmp (ptr, key, len) ) {
1744 slotptr(page, slot)->dead = 0;
1745 slotptr(page, slot)->tod = tod;
1746 bt_putid(slotptr(page,slot)->id, id);
1747 return bt_unlockpage(bt, bt->page_no, BtLockWrite);
1750 // check if page has enough space
1752 if( page->min >= (page->cnt + 1) * sizeof(BtSlot) + sizeof(*page) + len + 1)
1755 if( bt_splitpage (bt, len) )
1759 return bt_addkeytopage (bt, slot, key, len, id, tod);
1762 // cache page of keys into cursor and return starting slot for given key
// bt_startkey: read-locks the leaf containing `key`, snapshots the whole
// page into bt->cursor, records its page number for bt_nextkey, drops the
// lock, and returns the starting slot (elided paths return 0 on error).
1764 uint bt_startkey (BtDb *bt, unsigned char *key, uint len)
1768 // cache page for retrieval
1769 if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) )
1770 memcpy (bt->cursor, bt->page, bt->mgr->page_size);
1771 bt->cursor_page = bt->page_no;
1772 if ( bt_unlockpage(bt, bt->page_no, BtLockRead) )
1778 // return next slot for cursor page
1779 // or slide cursor right into next page
// bt_nextkey: advance the cursor past `slot`. Dead slots are skipped; while
// live slots remain in the cached page (excluding foster children) the next
// one is returned. At the end of the page the right link is followed: that
// page is read-locked, copied into bt->cursor, unlocked, and the scan
// continues there. Terminates (returns 0) at the rightmost page.
// NOTE(review): the return statements, the right == 0 termination, and the
// loop-restart lines are elided from this listing — confirm in full source.
1781 uint bt_nextkey (BtDb *bt, uint slot)
1787 right = bt_getid(bt->cursor->right);
1788 while( slot++ < bt->cursor->cnt - bt->cursor->foster )
1789 if( slotptr(bt->cursor,slot)->dead )
1791 else if( right || (slot < bt->cursor->cnt - bt->cursor->foster) )
1799 bt->cursor_page = right;
1801 if( bt_lockpage(bt, right, BtLockRead, &page) )
1804 memcpy (bt->cursor, page, bt->mgr->page_size);
1806 if ( bt_unlockpage(bt, right, BtLockRead) )
// bt_key: pointer to the key stored at `slot` of the cached cursor page
1815 BtKey bt_key(BtDb *bt, uint slot)
1817 return keyptr(bt->cursor, slot);
// bt_uid: row id stored at `slot` of the cached cursor page
1820 uid bt_uid(BtDb *bt, uint slot)
1822 return bt_getid(slotptr(bt->cursor,slot)->id);
// bt_tod: time-of-day stamp stored at `slot` of the cached cursor page
1825 uint bt_tod(BtDb *bt, uint slot)
1827 return slotptr(bt->cursor,slot)->tod;
1839 // standalone program to index file of keys
1840 // then list them onto std-out
// index_file: per-thread worker for the demo driver. Opens its own BtDb
// handle over the shared manager and dispatches on args->type (case-folded
// via | 0x20):
//   write:  insert each newline-terminated input line as a key, with the
//           line number as its id,
//   delete: remove each key,
//   find:   look up each key, counting hits,
//   scan:   iterate the whole tree and print every key to stdout.
// NOTE(review): both the unix (void *) and Windows (uint __stdcall)
// signatures are listed; the #ifdef lines selecting between them are elided,
// as are the per-character key-accumulation lines, case labels, fclose
// calls, and the function's return/bt_close — confirm in full source.
1843 void *index_file (void *arg)
1845 uint __stdcall index_file (void *arg)
1848 int line = 0, found = 0;
1849 unsigned char key[256]; // current key bytes (keys capped at 255 bytes)
1850 ThreadArg *args = arg;
1851 int ch, len = 0, slot;
1857 bt = bt_open (args->mgr); // per-thread handle over the shared BtMgr
1860 switch(args->type | 0x20)
1863 fprintf(stderr, "started indexing for %s\n", args->infile);
1864 if( in = fopen (args->infile, "rb") )
1865 while( ch = getc(in), ch != EOF )
// newline ends a key: insert it keyed to its 1-based line number
1868 if( bt_insertkey (bt, key, len, ++line, *tod) )
1869 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
1872 else if( len < 255 )
1874 fprintf(stderr, "finished %s for %d keys\n", args->infile, line);
1878 fprintf(stderr, "started deleting keys for %s\n", args->infile);
1879 if( in = fopen (args->infile, "rb") )
1880 while( ch = getc(in), ch != EOF )
1884 if( bt_deletekey (bt, key, len, 0) )
1885 fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0);
1888 else if( len < 255 )
1890 fprintf(stderr, "finished %s for keys, %d \n", args->infile, line);
1894 fprintf(stderr, "started finding keys for %s\n", args->infile);
1895 if( in = fopen (args->infile, "rb") )
1896 while( ch = getc(in), ch != EOF )
1900 if( bt_findkey (bt, key, len) )
1903 fprintf(stderr, "Error %d Syserr %d Line: %d\n", bt->err, errno, line), exit(0);
1906 else if( len < 255 )
1908 fprintf(stderr, "finished %s for %d keys, found %d\n", args->infile, line, found);
1914 fprintf(stderr, "started reading\n");
// full scan: position at the lowest key, then walk bt_nextkey to the end
1916 if( slot = bt_startkey (bt, key, len) )
1919 fprintf(stderr, "Error %d in StartKey. Syserror: %d\n", bt->err, errno), exit(0);
1921 while( slot = bt_nextkey (bt, slot) ) {
1922 ptr = bt_key(bt, slot);
1923 fwrite (ptr->key, ptr->len, 1, stdout);
1924 fputc ('\n', stdout);
// timer: alias for struct timeval — used with gettimeofday for unix
// elapsed-time measurement in main (see the gettimeofday calls below)
1936 typedef struct timeval timer;
1938 int main (int argc, char **argv)
1940 int idx, cnt, len, slot, err;
1941 int segsize, bits = 16;
1946 time_t start[1], stop[1];
1958 fprintf (stderr, "Usage: %s idx_file Read/Write/Scan/Delete/Find [page_bits mapped_segments seg_bits hash_size src_file1 src_file2 ... ]\n", argv[0]);
1959 fprintf (stderr, " where page_bits is the page size in bits\n");
1960 fprintf (stderr, " mapped_segments is the number of mmap segments in buffer pool\n");
1961 fprintf (stderr, " seg_bits is the size of individual segments in buffer pool in pages in bits\n");
1962 fprintf (stderr, " hash_size is the size of buffer pool hash table\n");
1963 fprintf (stderr, " src_file1 thru src_filen are files of keys separated by newline\n");
1968 gettimeofday(&start, NULL);
1974 bits = atoi(argv[3]);
1977 map = atoi(argv[4]);
1980 fprintf (stderr, "Warning: mapped_pool > 65536 segments\n");
1983 segsize = atoi(argv[5]);
1985 segsize = 4; // 16 pages per mmap segment
1989 threads = malloc (cnt * sizeof(pthread_t));
1991 threads = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, cnt * sizeof(HANDLE));
1993 args = malloc (cnt * sizeof(ThreadArg));
1995 mgr = bt_mgr ((argv[1]), BT_rw, bits, map, segsize, map / 8);
1998 fprintf(stderr, "Index Open Error %s\n", argv[1]);
2004 for( idx = 0; idx < cnt; idx++ ) {
2005 args[idx].infile = argv[idx + 6];
2006 args[idx].type = argv[2][0];
2007 args[idx].mgr = mgr;
2009 if( err = pthread_create (threads + idx, NULL, index_file, args + idx) )
2010 fprintf(stderr, "Error creating thread %d\n", err);
2012 threads[idx] = (HANDLE)_beginthreadex(NULL, 65536, index_file, args + idx, 0, NULL);
2016 // wait for termination
2019 for( idx = 0; idx < cnt; idx++ )
2020 pthread_join (threads[idx], NULL);
2021 gettimeofday(&stop, NULL);
2022 real_time = 1000.0 * ( stop.tv_sec - start.tv_sec ) + 0.001 * (stop.tv_usec - start.tv_usec );
2024 WaitForMultipleObjects (cnt, threads, TRUE, INFINITE);
2026 for( idx = 0; idx < cnt; idx++ )
2027 CloseHandle(threads[idx]);
2030 real_time = 1000 * (*stop - *start);
2032 fprintf(stderr, " Time to complete: %.2f seconds\n", real_time/1000);
2038 fprintf(stderr, "started reading\n");
2040 if( slot = bt_startkey (bt, key, len) )
2043 fprintf(stderr, "Error %d in StartKey. Syserror: %d\n", bt->err, errno), exit(0);
2045 while( slot = bt_nextkey (bt, slot) )
2048 fprintf(stderr, " Total keys read %d\n", cnt);