X-Git-Url: https://pd.if.org/git/?p=btree;a=blobdiff_plain;f=fosterbtreeg.c;h=e43f57677abd410f569741dd144f7f7f31b682b0;hp=fdd42be4f507186b1c078104ce34a2b3525d9f18;hb=0756819e460acb958eeb073f4d5ca9ddf66ec6c7;hpb=81d755dfab3eb03bf41b888a1e64629c3e396854 diff --git a/fosterbtreeg.c b/fosterbtreeg.c index fdd42be..e43f576 100644 --- a/fosterbtreeg.c +++ b/fosterbtreeg.c @@ -1,5 +1,5 @@ // foster btree version g -// 02 JAN 2014 +// 29 JAN 2014 // author: karl malbrain, malbrain@cal.berkeley.edu @@ -25,6 +25,8 @@ REDISTRIBUTION OF THIS SOFTWARE. #ifdef linux #define _GNU_SOURCE +#include +#define SYS_futex 202 #endif #ifdef unix @@ -36,6 +38,7 @@ REDISTRIBUTION OF THIS SOFTWARE. #include #include #include +#include #else #define WIN32_LEAN_AND_MEAN #include @@ -61,6 +64,8 @@ typedef unsigned int uint; #define BT_ro 0x6f72 // ro #define BT_rw 0x7772 // rw +#define BT_latchtable 128 // number of latch manager slots + #define BT_maxbits 24 // maximum page size in bits #define BT_minbits 9 // minimum page size in bits #define BT_minpage (1 << BT_minbits) // minimum page size @@ -95,7 +100,7 @@ typedef enum{ // the tod field from the key. // Keys are marked dead, but remain on the page until -// it cleanup is called. The fence key (highest key) for +// cleanup is called. The fence key (highest key) for // the page is always present, even after cleanup. typedef struct { @@ -114,52 +119,75 @@ typedef struct { unsigned char key[1]; } *BtKey; -// mode & definition for spin latch implementation - -enum { - Mutex = 1, - Write = 2, - Pending = 4, - Share = 8 -} LockMode; - -// mutex locks the other fields -// exclusive is set for write access -// share is count of read accessors - -typedef struct { - volatile ushort mutex:1; - volatile ushort exclusive:1; - volatile ushort pending:1; - volatile ushort share:13; -} BtSpinLatch; - // The first part of an index page. // It is immediately followed // by the BtSlot array of keys. 
typedef struct Page { - BtSpinLatch readwr[1]; // read/write lock - BtSpinLatch access[1]; // access intent lock - BtSpinLatch parent[1]; // parent SMO lock - ushort foster; // count of foster children - uint cnt; // count of keys in page - uint act; // count of active keys - uint min; // next key offset + volatile uint cnt; // count of keys in page + volatile uint act; // count of active keys + volatile uint min; // next key offset + volatile uint foster; // count of foster children unsigned char bits; // page size in bits - unsigned char lvl:6; // level of page - unsigned char kill:1; // page is being deleted + unsigned char lvl:7; // level of page unsigned char dirty:1; // page needs to be cleaned unsigned char right[BtId]; // page number to right } *BtPage; +// mode & definition for latch implementation + +enum { + Mutex = 1 << 0, // the mutex bit + Write = 1 << 1, // the writers bit + Share = 1 << 2, // reader count + PendRd = 1 << 12, // reader contended count + PendWr = 1 << 22 // writer contended count +} LockMode; + +enum { + QueRd = 1, // reader queue + QueWr = 2 // writer queue +} RWQueue; + +// share is count of read accessors +// grant write lock when share == 0 + +typedef struct { + volatile uint mutex:1; // 1 = busy + volatile uint write:1; // 1 = exclusive + volatile uint share:10; // count of readers holding locks + volatile uint readwait:10; // count of readers waiting + volatile uint writewait:10; // count of writers waiting +} BtLatch; + +// hash table entries + +typedef struct { + BtLatch latch[1]; + volatile ushort slot; // Latch table entry at head of chain +} BtHashEntry; + +// latch manager table structure + +typedef struct { + BtLatch readwr[1]; // read/write page lock + BtLatch access[1]; // Access Intent/Page delete + BtLatch parent[1]; // adoption of foster children + BtLatch busy[1]; // slot is being moved between chains + volatile ushort next; // next entry in hash table chain + volatile ushort prev; // prev entry in hash table chain + volatile ushort pin; // number of outstanding locks + volatile ushort hash; // hash slot entry is under + volatile uid page_no; // latch set page number +} BtLatchSet; + // The memory mapping pool table buffer manager entry typedef struct { unsigned long long int lru; // number of times accessed uid basepage; // mapped base page number char *map; // mapped memory pointer - volatile ushort pin; // mapped page pin counter + ushort pin; // mapped page pin counter ushort slot; // slot index in this array void *hashprev; // previous pool entry for the same hash idx void *hashnext; // next pool entry for the same hash idx @@ -168,6 +196,19 @@ typedef struct { #endif } BtPool; +// structure for latch manager on ALLOC_page + +typedef struct { + struct Page alloc[2]; // next & free page_nos in right ptr + BtLatch lock[1]; // allocation area lite latch + ushort latchdeployed; // highest number of latch entries deployed + ushort nlatchpage; // number of latch pages at BT_latch + ushort latchtotal; // number of page latch entries + ushort latchhash; // number of latch hash table slots + ushort latchvictim; // next latch entry to examine + BtHashEntry table[0]; // the hash table +} BtLatchMgr; + // The object structure for Btree access typedef struct { @@ -177,34 +218,37 @@ typedef struct { uint mode; // read-write mode #ifdef unix int idx; - char *pooladvise; // bit maps for pool page advisements #else HANDLE idx; #endif - volatile ushort poolcnt; // highest page pool node in use - volatile ushort evicted; // last evicted hash table slot + 
ushort poolcnt; // highest page pool node in use ushort poolmax; // highest page pool node allocated - ushort poolmask; // total size of pages in mmap segment - 1 + ushort poolmask; // total number of pages in mmap segment - 1 ushort hashsize; // size of Hash Table for pool entries + ushort evicted; // last evicted hash table slot ushort *hash; // hash table of pool entries BtPool *pool; // memory pool page segments - BtSpinLatch *latch; // latches for pool hash slots + BtLatch *latch; // latches for pool hash slots + BtLatchMgr *latchmgr; // mapped latch page from allocation page + BtLatchSet *latchsets; // mapped latch set from latch pages #ifndef unix - HANDLE halloc, hlatch; // allocation and latch table handles + HANDLE halloc; // allocation and latch table handle #endif } BtMgr; typedef struct { BtMgr *mgr; // buffer manager for thread - BtPage temp; // temporary frame buffer (memory mapped/file IO) - BtPage alloc; // frame buffer for alloc page ( page 0 ) BtPage cursor; // cached frame for start/next (never mapped) BtPage frame; // spare frame for the page split (never mapped) BtPage zero; // page frame for zeroes at end of file BtPage page; // current page uid page_no; // current page number uid cursor_page; // current cursor page number + BtLatchSet *set; // current page latch set + BtPool *pool; // current page pool unsigned char *mem; // frame, cursor, page memory buffer + int foster; // last search was to foster child + int found; // last delete was found int err; // last error } BtDb; @@ -222,12 +266,17 @@ typedef enum { // B-Tree functions extern void bt_close (BtDb *bt); extern BtDb *bt_open (BtMgr *mgr); -extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uid id, uint tod); -extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl); +extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uid id, uint tod, uint lvl); +extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len); extern uid bt_findkey (BtDb *bt, unsigned char *key, uint len); extern uint bt_startkey (BtDb *bt, unsigned char *key, uint len); extern uint bt_nextkey (BtDb *bt, uint slot); +// internal functions +BTERR bt_splitpage (BtDb *bt, BtPage page, BtPool *pool, BtLatchSet *set, uid page_no); +uint bt_cleanpage(BtDb *bt, BtPage page, uint amt, uint slot); +BTERR bt_mergeleft (BtDb *bt, BtPage page, BtPool *pool, BtLatchSet *set, uid page_no, uint lvl); + // manager functions extern BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolsize, uint segsize, uint hashsize); void bt_mgrclose (BtMgr *mgr); @@ -239,9 +288,10 @@ extern uid bt_uid (BtDb *bt, uint slot); extern uint bt_tod (BtDb *bt, uint slot); // BTree page number constants -#define ALLOC_page 0 // allocation of new pages +#define ALLOC_page 0 // allocation & lock manager hash table #define ROOT_page 1 // root of the btree #define LEAF_page 2 // first page of leaves +#define LATCH_page 3 // pages for lock manager // Number of levels to create in a new BTree @@ -310,129 +360,336 @@ int i; return id; } +// Latch Manager + +int sys_futex(void *addr1, int op, int val1, struct timespec *timeout, void *addr2, int val3) +{ + return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3); +} + // wait until write lock mode is clear // and add 1 to the share count -void bt_spinreadlock(BtSpinLatch *latch) +void bt_spinreadlock(BtLatch *latch, int private) { -ushort prev; +uint prev; - do { -#ifdef unix - while( __sync_fetch_and_or((ushort *)latch, Mutex) & Mutex ) + if( private ) + private = 
FUTEX_PRIVATE_FLAG; + + while( 1 ) { + // obtain latch mutex + if( __sync_fetch_and_or((uint *)latch, Mutex) & Mutex ) { sched_yield(); -#else - while( _InterlockedOr16((ushort *)latch, Mutex) & Mutex ) - SwitchToThread(); -#endif - // see if exclusive request is granted or pending + continue; + } - if( prev = !(latch->exclusive | latch->pending) ) -#ifdef unix - __sync_fetch_and_add((ushort *)latch, Share); -#else - _InterlockedExchangeAdd16 ((ushort *)latch, Share); -#endif + // wait for writers to clear + // increment read waiters and wait -#ifdef unix - __sync_fetch_and_and ((ushort *)latch, ~Mutex); -#else - _InterlockedAnd16((ushort *)latch, ~Mutex); -#endif + if( latch->write || latch->writewait ) { + __sync_fetch_and_add ((uint *)latch, PendRd); + prev = __sync_fetch_and_and ((uint *)latch, ~Mutex) & ~Mutex; + sys_futex( (uint *)latch, FUTEX_WAIT_BITSET | private, prev, NULL, NULL, QueRd ); + __sync_fetch_and_sub ((uint *)latch, PendRd); + continue; + } + + // increment reader lock count + // and release latch mutex - if( prev ) - return; -#ifdef unix - } while( sched_yield(), 1 ); -#else - } while( SwitchToThread(), 1 ); -#endif + __sync_fetch_and_add ((uint *)latch, Share); + __sync_fetch_and_and ((uint *)latch, ~Mutex); + return; + } } // wait for other read and write latches to relinquish -void bt_spinwritelock(BtSpinLatch *latch) +void bt_spinwritelock(BtLatch *latch, int private) { -ushort prev; +uint prev; - do { -#ifdef unix - while( __sync_fetch_and_or((ushort *)latch, Mutex | Pending) & Mutex ) + if( private ) + private = FUTEX_PRIVATE_FLAG; + + while( 1 ) { + // obtain latch mutex + if( __sync_fetch_and_or((uint *)latch, Mutex) & Mutex ) { sched_yield(); -#else - while( _InterlockedOr16((ushort *)latch, Mutex | Pending) & Mutex ) - SwitchToThread(); -#endif - if( prev = !(latch->share | latch->exclusive) ) + continue; + } + + // wait for write and reader count to clear + + if( latch->write || latch->share ) { + __sync_fetch_and_add ((uint *)latch, PendWr); + prev = __sync_fetch_and_and ((uint *)latch, ~Mutex) & ~Mutex; + sys_futex( (uint *)latch, FUTEX_WAIT_BITSET | private, prev, NULL, NULL, QueWr ); + __sync_fetch_and_sub ((uint *)latch, PendWr); + continue; + } + + // take write mutex + // release latch mutex + + __sync_fetch_and_or ((uint *)latch, Write); + __sync_fetch_and_and ((uint *)latch, ~Mutex); + return; + } +} + +// try to obtain write lock + +// return 1 if obtained, +// 0 otherwise + +int bt_spinwritetry(BtLatch *latch) +{ +int ans; + + // try for mutex, + // abandon request if not taken + + if( __sync_fetch_and_or((uint *)latch, Mutex) & Mutex ) + return 0; + + // see if write mode is available + + if( !latch->write && !latch->share ) { + __sync_fetch_and_or ((uint *)latch, Write); + ans = 1; + } else + ans = 0; + + // release latch mutex + + __sync_fetch_and_and ((uint *)latch, ~Mutex); + return ans; +} + +// clear write lock + +void bt_spinreleasewrite(BtLatch *latch, int private) +{ + if( private ) + private = FUTEX_PRIVATE_FLAG; + + // obtain latch mutex + + while( __sync_fetch_and_or((uint *)latch, Mutex) & Mutex ) + sched_yield(); + + __sync_fetch_and_and ((uint *)latch, ~Write); + + // favor writers + + if( latch->writewait ) + if( sys_futex( (uint *)latch, FUTEX_WAKE_BITSET | private, 1, NULL, NULL, QueWr ) ) + goto wakexit; + + if( latch->readwait ) + sys_futex( (uint *)latch, FUTEX_WAKE_BITSET | private, INT_MAX, NULL, NULL, QueRd ); + + // release latch mutex + +wakexit: + __sync_fetch_and_and ((uint *)latch, ~Mutex); +} + +// decrement reader 
count + +void bt_spinreleaseread(BtLatch *latch, int private) +{ + if( private ) + private = FUTEX_PRIVATE_FLAG; + + // obtain latch mutex + + while( __sync_fetch_and_or((uint *)latch, Mutex) & Mutex ) + sched_yield(); + + __sync_fetch_and_sub ((uint *)latch, Share); + + // wake waiting writers + + if( !latch->share && latch->writewait ) + sys_futex( (uint *)latch, FUTEX_WAKE_BITSET | private, 1, NULL, NULL, QueWr ); + + // release latch mutex + + __sync_fetch_and_and ((uint *)latch, ~Mutex); +} + +// link latch table entry into latch hash table + +void bt_latchlink (BtDb *bt, ushort hashidx, ushort victim, uid page_no) +{ +BtLatchSet *set = bt->mgr->latchsets + victim; + + if( set->next = bt->mgr->latchmgr->table[hashidx].slot ) + bt->mgr->latchsets[set->next].prev = victim; + + bt->mgr->latchmgr->table[hashidx].slot = victim; + set->page_no = page_no; + set->hash = hashidx; + set->prev = 0; +} + +// release latch pin + +void bt_unpinlatch (BtLatchSet *set) +{ #ifdef unix - __sync_fetch_and_or((ushort *)latch, Write); + __sync_fetch_and_add(&set->pin, -1); #else - _InterlockedOr16((ushort *)latch, Write); + _InterlockedDecrement16 (&set->pin); #endif +} + +// find existing latchset or inspire new one +// return with latchset pinned + +BtLatchSet *bt_pinlatch (BtDb *bt, uid page_no) +{ +ushort hashidx = page_no % bt->mgr->latchmgr->latchhash; +ushort slot, avail = 0, victim, idx; +BtLatchSet *set; + + // obtain read lock on hash table entry + + bt_spinreadlock(bt->mgr->latchmgr->table[hashidx].latch, 0); + + if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do + { + set = bt->mgr->latchsets + slot; + if( page_no == set->page_no ) + break; + } while( slot = set->next ); + if( slot ) { #ifdef unix - __sync_fetch_and_and ((ushort *)latch, ~(Mutex | Pending)); + __sync_fetch_and_add(&set->pin, 1); #else - _InterlockedAnd16((ushort *)latch, ~(Mutex | Pending)); + _InterlockedIncrement16 (&set->pin); #endif + } - if( prev ) - return; -#ifdef unix - sched_yield(); -#else - SwitchToThread(); -#endif - } while( 1 ); -} + bt_spinreleaseread (bt->mgr->latchmgr->table[hashidx].latch, 0); -// try to obtain write lock + if( slot ) + return set; -// return 1 if obtained, -// 0 otherwise + // try again, this time with write lock -int bt_spinwritetry(BtSpinLatch *latch) -{ -ushort prev; + bt_spinwritelock(bt->mgr->latchmgr->table[hashidx].latch, 0); + + if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do + { + set = bt->mgr->latchsets + slot; + if( page_no == set->page_no ) + break; + if( !set->pin && !avail ) + avail = slot; + } while( slot = set->next ); + + // found our entry, or take over an unpinned one + if( slot || (slot = avail) ) { + set = bt->mgr->latchsets + slot; #ifdef unix - if( prev = __sync_fetch_and_or((ushort *)latch, Mutex), prev & Mutex ) - return 0; + __sync_fetch_and_add(&set->pin, 1); #else - if( prev = _InterlockedOr16((ushort *)latch, Mutex), prev & Mutex ) - return 0; + _InterlockedIncrement16 (&set->pin); #endif - // take write access if all bits are clear + set->page_no = page_no; + bt_spinreleasewrite(bt->mgr->latchmgr->table[hashidx].latch, 0); + return set; + } - if( !prev ) + // see if there are any unused entries #ifdef unix - __sync_fetch_and_or ((ushort *)latch, Write); + victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, 1) + 1; #else - _InterlockedOr16((ushort *)latch, Write); + victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchdeployed); #endif - return !prev; -} -// clear write mode + if( victim < bt->mgr->latchmgr->latchtotal ) { + set = 
bt->mgr->latchsets + victim; +#ifdef unix + __sync_fetch_and_add(&set->pin, 1); +#else + _InterlockedIncrement16 (&set->pin); +#endif + bt_latchlink (bt, hashidx, victim, page_no); + bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch, 0); + return set; + } + +#ifdef unix + victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, -1); +#else + victim = _InterlockedDecrement16 (&bt->mgr->latchmgr->latchdeployed); +#endif + // find and reuse previous lock entry -void bt_spinreleasewrite(BtSpinLatch *latch) -{ + while( 1 ) { #ifdef unix - __sync_fetch_and_and ((ushort *)latch, ~Write); + victim = __sync_fetch_and_add(&bt->mgr->latchmgr->latchvictim, 1); #else - _InterlockedAnd16((ushort *)latch, ~Write); + victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchvictim) - 1; #endif -} + // we don't use slot zero -// decrement reader count + if( victim %= bt->mgr->latchmgr->latchtotal ) + set = bt->mgr->latchsets + victim; + else + continue; -void bt_spinreleaseread(BtSpinLatch *latch) -{ + // take control of our slot + // from other threads + + if( set->pin || !bt_spinwritetry (set->busy) ) + continue; + + idx = set->hash; + + // try to get write lock on hash chain + // skip entry if not obtained + // or has outstanding locks + + if( !bt_spinwritetry (bt->mgr->latchmgr->table[idx].latch) ) { + bt_spinreleasewrite (set->busy, 0); + continue; + } + + if( set->pin ) { + bt_spinreleasewrite (set->busy, 0); + bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch, 0); + continue; + } + + // unlink our available victim from its hash chain + + if( set->prev ) + bt->mgr->latchsets[set->prev].next = set->next; + else + bt->mgr->latchmgr->table[idx].slot = set->next; + + if( set->next ) + bt->mgr->latchsets[set->next].prev = set->prev; + + bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch, 0); #ifdef unix - __sync_fetch_and_add((ushort *)latch, -Share); + __sync_fetch_and_add(&set->pin, 1); #else - _InterlockedExchangeAdd16 ((ushort *)latch, -Share); + _InterlockedIncrement16 (&set->pin); #endif + bt_latchlink (bt, hashidx, victim, page_no); + bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch, 0); + bt_spinreleasewrite (set->busy, 0); + return set; + } } void bt_mgrclose (BtMgr *mgr) @@ -457,17 +714,26 @@ uint slot; #endif } +#ifdef unix + munmap (mgr->latchsets, mgr->latchmgr->nlatchpage * mgr->page_size); + munmap (mgr->latchmgr, mgr->page_size); +#else + FlushViewOfFile(mgr->latchmgr, 0); + UnmapViewOfFile(mgr->latchmgr); + CloseHandle(mgr->halloc); +#endif #ifdef unix close (mgr->idx); free (mgr->pool); free (mgr->hash); - free (mgr->pooladvise); + free (mgr->latch); free (mgr); #else FlushFileBuffers(mgr->idx); CloseHandle(mgr->idx); GlobalFree (mgr->pool); GlobalFree (mgr->hash); + GlobalFree (mgr->latch); GlobalFree (mgr); #endif } @@ -494,12 +760,13 @@ void bt_close (BtDb *bt) BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolmax, uint segsize, uint hashsize) { uint lvl, attr, cacheblk, last, slot, idx; -BtPage alloc; -int lockmode; +uint nlatchpage, latchhash; +BtLatchMgr *latchmgr; off64_t size; uint amt[1]; BtMgr* mgr; BtKey key; +int flag; #ifndef unix SYSTEM_INFO sysinfo[1]; @@ -518,19 +785,8 @@ SYSTEM_INFO sysinfo[1]; #ifdef unix mgr = calloc (1, sizeof(BtMgr)); - switch (mode & 0x7fff) - { - case BT_rw: - mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666); - lockmode = 1; - break; + mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666); - case BT_ro: - default: - mgr->idx = open ((char*)name, O_RDONLY); - lockmode = 0; - break; - } if( 
mgr->idx == -1 ) return free(mgr), NULL; @@ -539,19 +795,8 @@ SYSTEM_INFO sysinfo[1]; #else mgr = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, sizeof(BtMgr)); attr = FILE_ATTRIBUTE_NORMAL; - switch (mode & 0x7fff) - { - case BT_rw: - mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL); - lockmode = 1; - break; + mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL); - case BT_ro: - default: - mgr->idx = CreateFile(name, GENERIC_READ, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_EXISTING, attr, NULL); - lockmode = 0; - break; - } if( mgr->idx == INVALID_HANDLE_VALUE ) return GlobalFree(mgr), NULL; @@ -561,26 +806,26 @@ SYSTEM_INFO sysinfo[1]; #endif #ifdef unix - alloc = malloc (BT_maxpage); + latchmgr = malloc (BT_maxpage); *amt = 0; // read minimum page size to get root info if( size = lseek (mgr->idx, 0L, 2) ) { - if( pread(mgr->idx, alloc, BT_minpage, 0) == BT_minpage ) - bits = alloc->bits; + if( pread(mgr->idx, latchmgr, BT_minpage, 0) == BT_minpage ) + bits = latchmgr->alloc->bits; else - return free(mgr), free(alloc), NULL; + return free(mgr), free(latchmgr), NULL; } else if( mode == BT_ro ) - return bt_mgrclose (mgr), NULL; + return free(latchmgr), bt_mgrclose (mgr), NULL; #else - alloc = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE); + latchmgr = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE); size = GetFileSize(mgr->idx, amt); if( size || *amt ) { - if( !ReadFile(mgr->idx, (char *)alloc, BT_minpage, amt, NULL) ) + if( !ReadFile(mgr->idx, (char *)latchmgr, BT_minpage, amt, NULL) ) return bt_mgrclose (mgr), NULL; - bits = alloc->bits; + bits = latchmgr->alloc->bits; } else if( mode == BT_ro ) return bt_mgrclose (mgr), NULL; #endif @@ -613,52 +858,68 @@ SYSTEM_INFO sysinfo[1]; #ifdef unix mgr->pool = calloc (poolmax, sizeof(BtPool)); mgr->hash = calloc (hashsize, sizeof(ushort)); - mgr->latch = calloc (hashsize, sizeof(BtSpinLatch)); - mgr->pooladvise = calloc (poolmax, (mgr->poolmask + 8) / 8); + mgr->latch = calloc (hashsize, sizeof(BtLatch)); #else - mgr->pool = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, poolmax * (sizeof(BtPool))); + mgr->pool = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, poolmax * sizeof(BtPool)); mgr->hash = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(ushort)); - mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtSpinLatch)); + mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtLatch)); #endif + if( size || *amt ) - goto mgrxit; + goto mgrlatch; - // initializes an empty b-tree with root page and page of leaves + // initialize an empty b-tree with latch page, root page, page of leaves + // and page(s) of latches - memset (alloc, 0, 1 << bits); - bt_putid(alloc->right, MIN_lvl+1); - alloc->bits = mgr->page_bits; + memset (latchmgr, 0, 1 << bits); + nlatchpage = BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1; + bt_putid(latchmgr->alloc->right, MIN_lvl+1+nlatchpage); + latchmgr->alloc->bits = mgr->page_bits; + + latchmgr->nlatchpage = nlatchpage; + latchmgr->latchtotal = nlatchpage * (mgr->page_size / sizeof(BtLatchSet)); + + // initialize latch manager + + latchhash = (mgr->page_size - sizeof(BtLatchMgr)) / sizeof(BtHashEntry); + + // size of hash table = total number of latchsets + + if( latchhash > latchmgr->latchtotal ) + latchhash = latchmgr->latchtotal; + + latchmgr->latchhash = latchhash; #ifdef unix - if( write (mgr->idx, alloc, mgr->page_size) 
< mgr->page_size ) + if( write (mgr->idx, latchmgr, mgr->page_size) < mgr->page_size ) return bt_mgrclose (mgr), NULL; #else - if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) ) + if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) ) return bt_mgrclose (mgr), NULL; if( *amt < mgr->page_size ) return bt_mgrclose (mgr), NULL; #endif - memset (alloc, 0, 1 << bits); - alloc->bits = mgr->page_bits; + memset (latchmgr, 0, 1 << bits); + latchmgr->alloc->bits = mgr->page_bits; for( lvl=MIN_lvl; lvl--; ) { - slotptr(alloc, 1)->off = mgr->page_size - 3; - bt_putid(slotptr(alloc, 1)->id, lvl ? MIN_lvl - lvl + 1 : 0); // next(lower) page number - key = keyptr(alloc, 1); + slotptr(latchmgr->alloc, 1)->off = mgr->page_size - 3; + bt_putid(slotptr(latchmgr->alloc, 1)->id, lvl ? MIN_lvl - lvl + 1 : 0); // next(lower) page number + key = keyptr(latchmgr->alloc, 1); key->len = 2; // create stopper key key->key[0] = 0xff; key->key[1] = 0xff; - alloc->min = mgr->page_size - 3; - alloc->lvl = lvl; - alloc->cnt = 1; - alloc->act = 1; + latchmgr->alloc->min = mgr->page_size - 3; + latchmgr->alloc->lvl = lvl; + latchmgr->alloc->cnt = 1; + latchmgr->alloc->act = 1; #ifdef unix - if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size ) + if( write (mgr->idx, latchmgr, mgr->page_size) < mgr->page_size ) return bt_mgrclose (mgr), NULL; #else - if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) ) + if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) ) return bt_mgrclose (mgr), NULL; if( *amt < mgr->page_size ) @@ -666,32 +927,52 @@ SYSTEM_INFO sysinfo[1]; #endif } - // create empty page area by writing last page of first - // segment area (other pages are zeroed by O/S) + // clear out latch manager locks + // and rest of pages to round out segment - if( mgr->poolmask ) { - memset(alloc, 0, mgr->page_size); - last = mgr->poolmask; - - while( last < MIN_lvl + 1 ) - last += mgr->poolmask + 1; + memset(latchmgr, 0, mgr->page_size); + last = MIN_lvl + 1; + while( last <= ((MIN_lvl + 1 + nlatchpage) | mgr->poolmask) ) { #ifdef unix - pwrite(mgr->idx, alloc, mgr->page_size, last << mgr->page_bits); + pwrite(mgr->idx, latchmgr, mgr->page_size, last << mgr->page_bits); #else SetFilePointer (mgr->idx, last << mgr->page_bits, NULL, FILE_BEGIN); - if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) ) + if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) ) return bt_mgrclose (mgr), NULL; if( *amt < mgr->page_size ) return bt_mgrclose (mgr), NULL; #endif + last++; } -mgrxit: +mgrlatch: +#ifdef unix + flag = PROT_READ | PROT_WRITE; + mgr->latchmgr = mmap (0, mgr->page_size, flag, MAP_SHARED, mgr->idx, ALLOC_page * mgr->page_size); + if( mgr->latchmgr == MAP_FAILED ) + return bt_mgrclose (mgr), NULL; + mgr->latchsets = (BtLatchSet *)mmap (0, mgr->latchmgr->nlatchpage * mgr->page_size, flag, MAP_SHARED, mgr->idx, LATCH_page * mgr->page_size); + if( mgr->latchsets == MAP_FAILED ) + return bt_mgrclose (mgr), NULL; +#else + flag = PAGE_READWRITE; + mgr->halloc = CreateFileMapping(mgr->idx, NULL, flag, 0, (BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1 + LATCH_page) * mgr->page_size, NULL); + if( !mgr->halloc ) + return bt_mgrclose (mgr), NULL; + + flag = FILE_MAP_WRITE; + mgr->latchmgr = MapViewOfFile(mgr->halloc, flag, 0, 0, (BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1 + LATCH_page) * mgr->page_size); + if( !mgr->latchmgr ) + return GetLastError(), bt_mgrclose (mgr), NULL; + + mgr->latchsets = (void 
*)((char *)mgr->latchmgr + LATCH_page * mgr->page_size); +#endif + #ifdef unix - free (alloc); + free (latchmgr); #else - VirtualFree (alloc, 0, MEM_RELEASE); + VirtualFree (latchmgr, 0, MEM_RELEASE); #endif return mgr; } @@ -713,6 +994,8 @@ BtDb *bt = malloc (sizeof(*bt)); bt->frame = (BtPage)bt->mem; bt->zero = (BtPage)(bt->mem + 1 * mgr->page_size); bt->cursor = (BtPage)(bt->mem + 2 * mgr->page_size); + + memset(bt->zero, 0, mgr->page_size); return bt; } @@ -824,8 +1107,6 @@ int flag; pool->map = mmap (0, (bt->mgr->poolmask+1) << bt->mgr->page_bits, flag, MAP_SHARED, bt->mgr->idx, off); if( pool->map == MAP_FAILED ) return bt->err = BTERR_map; - // clear out madvise issued bits - memset (bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8) / 8), 0, (bt->mgr->poolmask + 8)/8); #else flag = ( bt->mgr->mode == BT_ro ? PAGE_READONLY : PAGE_READWRITE ); pool->hmap = CreateFileMapping(bt->mgr->idx, NULL, flag, (DWORD)(limit >> 32), (DWORD)limit, NULL); @@ -840,18 +1121,41 @@ int flag; return bt->err = 0; } +// calculate page within pool + +BtPage bt_page (BtDb *bt, BtPool *pool, uid page_no) +{ +uint subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping +BtPage page; + + page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits)); + return page; +} + +// release pool pin + +void bt_unpinpool (BtPool *pool) +{ +#ifdef unix + __sync_fetch_and_add(&pool->pin, -1); +#else + _InterlockedDecrement16 (&pool->pin); +#endif +} + // find or place requested page in segment-pool // return pool table entry, incrementing pin -BtPool *bt_pinpage(BtDb *bt, uid page_no) +BtPool *bt_pinpool(BtDb *bt, uid page_no) { BtPool *pool, *node, *next; uint slot, idx, victim; +BtLatchSet *set; // lock hash table chain idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize; - bt_spinreadlock (&bt->mgr->latch[idx]); + bt_spinreadlock (&bt->mgr->latch[idx], 1); // look up in hash table @@ -861,15 +1165,15 @@ uint slot, idx, victim; #else _InterlockedIncrement16 (&pool->pin); #endif - bt_spinreleaseread (&bt->mgr->latch[idx]); + bt_spinreleaseread (&bt->mgr->latch[idx], 1); pool->lru++; return pool; } // upgrade to write lock - bt_spinreleaseread (&bt->mgr->latch[idx]); - bt_spinwritelock (&bt->mgr->latch[idx]); + bt_spinreleaseread (&bt->mgr->latch[idx], 1); + bt_spinwritelock (&bt->mgr->latch[idx], 1); // try to find page in pool with write lock @@ -879,7 +1183,7 @@ uint slot, idx, victim; #else _InterlockedIncrement16 (&pool->pin); #endif - bt_spinreleasewrite (&bt->mgr->latch[idx]); + bt_spinreleasewrite (&bt->mgr->latch[idx], 1); pool->lru++; return pool; } @@ -906,7 +1210,7 @@ uint slot, idx, victim; #else _InterlockedIncrement16 (&pool->pin); #endif - bt_spinreleasewrite (&bt->mgr->latch[idx]); + bt_spinreleasewrite (&bt->mgr->latch[idx], 1); return pool; } @@ -938,7 +1242,7 @@ uint slot, idx, victim; // skip this entry if( !(pool = bt_findlru(bt, bt->mgr->hash[victim])) ) { - bt_spinreleasewrite (&bt->mgr->latch[victim]); + bt_spinreleasewrite (&bt->mgr->latch[victim], 1); continue; } @@ -954,7 +1258,7 @@ uint slot, idx, victim; if( node = pool->hashnext ) node->hashprev = pool->hashprev; - bt_spinreleasewrite (&bt->mgr->latch[victim]); + bt_spinreleasewrite (&bt->mgr->latch[victim], 1); // remove old file mapping #ifdef unix @@ -978,227 +1282,122 @@ uint slot, idx, victim; #else _InterlockedIncrement16 (&pool->pin); #endif - bt_spinreleasewrite (&bt->mgr->latch[idx]); + bt_spinreleasewrite (&bt->mgr->latch[idx], 1); return pool; } } // place write, read, or parent lock on requested 
page_no. -// pin to buffer pool and return page pointer +// pin to buffer pool and return latchset pointer -BTERR bt_lockpage(BtDb *bt, uid page_no, BtLock mode, BtPage *pageptr) +void bt_lockpage(BtLock mode, BtLatchSet *set) { -BtPool *pool; -uint subpage; -BtPage page; - - // find/create maping in pool table - // and pin our pool slot - - if( pool = bt_pinpage(bt, page_no) ) - subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping - else - return bt->err; - - page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits)); - -#ifdef unix - { - uint idx = subpage / 8; - uint bit = subpage % 8; - - if( ~((bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8)/8))[idx] >> bit) & 1 ) { - madvise (page, bt->mgr->page_size, MADV_WILLNEED); - (bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8)/8))[idx] |= 1 << bit; - } - } -#endif - switch( mode ) { case BtLockRead: - bt_spinreadlock (page->readwr); + bt_spinreadlock (set->readwr, 0); break; case BtLockWrite: - bt_spinwritelock (page->readwr); + bt_spinwritelock (set->readwr, 0); break; case BtLockAccess: - bt_spinreadlock (page->access); + bt_spinreadlock (set->access, 0); break; case BtLockDelete: - bt_spinwritelock (page->access); + bt_spinwritelock (set->access, 0); break; case BtLockParent: - bt_spinwritelock (page->parent); + bt_spinwritelock (set->parent, 0); break; - default: - return bt->err = BTERR_lock; } - - if( pageptr ) - *pageptr = page; - - return bt->err = 0; } // remove write, read, or parent lock on requested page_no. -BTERR bt_unlockpage(BtDb *bt, uid page_no, BtLock mode) +void bt_unlockpage(BtLock mode, BtLatchSet *set) { -BtPool *pool; -uint subpage; -BtPage page; -uint idx; - - // since page is pinned - // it should still be in the buffer pool - // and is in no danger of being a victim for reuse - - idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize; - bt_spinreadlock (&bt->mgr->latch[idx]); - - if( !(pool = bt_findpool(bt, page_no, idx)) ) - return bt->err = BTERR_hash; - - bt_spinreleaseread (&bt->mgr->latch[idx]); - - subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping - page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits)); - switch( mode ) { case BtLockRead: - bt_spinreleaseread (page->readwr); + bt_spinreleaseread (set->readwr, 0); break; case BtLockWrite: - bt_spinreleasewrite (page->readwr); + bt_spinreleasewrite (set->readwr, 0); break; case BtLockAccess: - bt_spinreleaseread (page->access); + bt_spinreleaseread (set->access, 0); break; case BtLockDelete: - bt_spinreleasewrite (page->access); + bt_spinreleasewrite (set->access, 0); break; case BtLockParent: - bt_spinreleasewrite (page->parent); + bt_spinreleasewrite (set->parent, 0); break; - default: - return bt->err = BTERR_lock; } - -#ifdef unix - __sync_fetch_and_add(&pool->pin, -1); -#else - _InterlockedDecrement16 (&pool->pin); -#endif - return bt->err = 0; } -// deallocate a deleted page -// place on free chain out of allocator page -// fence key must already be removed from parent +// allocate a new page and write page into it -BTERR bt_freepage(BtDb *bt, uid page_no) -{ - // obtain delete lock on deleted page - - if( bt_lockpage(bt, page_no, BtLockDelete, NULL) ) - return bt->err; - - // obtain write lock on deleted page - - if( bt_lockpage(bt, page_no, BtLockWrite, &bt->temp) ) - return bt->err; - - // lock allocation page - - if ( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) ) - return bt->err; - - // store free chain in allocation page second right - bt_putid(bt->temp->right, 
bt_getid(bt->alloc[1].right)); - bt_putid(bt->alloc[1].right, page_no); - - // unlock allocation page - - if( bt_unlockpage(bt, ALLOC_page, BtLockWrite) ) - return bt->err; - - // remove write lock on deleted node - - if( bt_unlockpage(bt, page_no, BtLockWrite) ) - return bt->err; - - // remove delete lock on deleted node - - if( bt_unlockpage(bt, page_no, BtLockDelete) ) - return bt->err; - - return 0; -} - -// allocate a new page and write page into it - -uid bt_newpage(BtDb *bt, BtPage page) +uid bt_newpage(BtDb *bt, BtPage page) { +BtLatchSet *set; +BtPool *pool; uid new_page; BtPage pmap; -int subpage; int reuse; - // lock page zero + // lock allocation page - if( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) ) - return 0; + bt_spinwritelock(bt->mgr->latchmgr->lock, 0); // use empty chain first // else allocate empty page - if( new_page = bt_getid(bt->alloc[1].right) ) { - if( bt_lockpage (bt, new_page, BtLockWrite, &bt->temp) ) - return 0; - bt_putid(bt->alloc[1].right, bt_getid(bt->temp->right)); - if( bt_unlockpage (bt, new_page, BtLockWrite) ) + if( new_page = bt_getid(bt->mgr->latchmgr->alloc[1].right) ) { + if( pool = bt_pinpool (bt, new_page) ) + pmap = bt_page (bt, pool, new_page); + else return 0; + bt_putid(bt->mgr->latchmgr->alloc[1].right, bt_getid(pmap->right)); + bt_unpinpool (pool); reuse = 1; } else { - new_page = bt_getid(bt->alloc->right); - bt_putid(bt->alloc->right, new_page+1); + new_page = bt_getid(bt->mgr->latchmgr->alloc->right); + bt_putid(bt->mgr->latchmgr->alloc->right, new_page+1); reuse = 0; } - #ifdef unix - memset(bt->zero, 0, 3 * sizeof(BtSpinLatch)); // clear locks - memcpy((char *)bt->zero + 3 * sizeof(BtSpinLatch), (char *)page + 3 * sizeof(BtSpinLatch), bt->mgr->page_size - 3 * sizeof(BtSpinLatch)); - if ( pwrite(bt->mgr->idx, bt->zero, bt->mgr->page_size, new_page << bt->mgr->page_bits) < bt->mgr->page_size ) - return bt->err = BTERR_wrt, 0; - // if writing first page of pool block, zero last page in the block if ( !reuse && bt->mgr->poolmask > 0 && (new_page & bt->mgr->poolmask) == 0 ) { // use zero buffer to write zeros - memset(bt->zero, 0, bt->mgr->page_size); if ( pwrite(bt->mgr->idx,bt->zero, bt->mgr->page_size, (new_page | bt->mgr->poolmask) << bt->mgr->page_bits) < bt->mgr->page_size ) return bt->err = BTERR_wrt, 0; } -#else - // bring new page into pool and copy page. - // this will extend the file into the new pages. - if( bt_lockpage(bt, new_page, BtLockWrite, &pmap) ) - return 0; + // unlock allocation latch - // copy source page, but leave latch area intact + bt_spinreleasewrite(bt->mgr->latchmgr->lock, 0); - memcpy((char *)pmap + 3 * sizeof(BtSpinLatch), (char *)page + 3 * sizeof(BtSpinLatch), bt->mgr->page_size - 3 * sizeof(BtSpinLatch)); + if ( pwrite(bt->mgr->idx, page, bt->mgr->page_size, new_page << bt->mgr->page_bits) < bt->mgr->page_size ) + return bt->err = BTERR_wrt, 0; - if( bt_unlockpage (bt, new_page, BtLockWrite) ) - return 0; -#endif - // unlock allocation latch and return new page no +#else + // unlock allocation latch + + bt_spinreleasewrite(bt->mgr->latchmgr->lock, 0); - if ( bt_unlockpage(bt, ALLOC_page, BtLockWrite) ) + // bring new page into pool and copy page. + // this will extend the file into the new pages. 
+ // NB -- no latch required + + if( pool = bt_pinpool (bt, new_page) ) + pmap = bt_page (bt, pool, new_page); + else return 0; + memcpy(pmap, page, bt->mgr->page_size); + bt_unpinpool (pool); +#endif return new_page; } @@ -1207,29 +1406,47 @@ int reuse; int bt_findslot (BtDb *bt, unsigned char *key, uint len) { uint diff, higher = bt->page->cnt, low = 1, slot; +uint good = 0; - // low is the lowest candidate, higher is already - // tested as .ge. the given key, loop ends when they meet + // if no right link + // make stopper key an infinite fence value + // by setting the good flag + + if( bt_getid (bt->page->right) ) + higher++; + else + good++; + + // low is the next candidate. + // loop ends when they meet + + // if good, higher is already + // tested as .ge. the given key. while( diff = higher - low ) { slot = low + ( diff >> 1 ); if( keycmp (keyptr(bt->page, slot), key, len) < 0 ) low = slot + 1; else - higher = slot; + higher = slot, good++; } - return higher; + // return zero if key is on right link page + + return good ? higher : 0; } // find and load page at given level for given key // leave page rd or wr locked as requested -int bt_loadpage (BtDb *bt, unsigned char *key, uint len, uint lvl, uint lock) +uint bt_loadpage (BtDb *bt, unsigned char *key, uint len, uint lvl, BtLock lock) { uid page_no = ROOT_page, prevpage = 0; +BtLatchSet *set, *prevset; uint drill = 0xff, slot; uint mode, prevmode; +BtPool *prevpool; +int foster = 0; // start at root of btree and drill down @@ -1237,84 +1454,104 @@ uint mode, prevmode; // determine lock mode of drill level mode = (lock == BtLockWrite) && (drill == lvl) ? BtLockWrite : BtLockRead; + // obtain latch set for this page + + bt->set = bt_pinlatch (bt, page_no); bt->page_no = page_no; + // pin page contents + + if( bt->pool = bt_pinpool (bt, page_no) ) + bt->page = bt_page (bt, bt->pool, page_no); + else + return 0; + // obtain access lock using lock chaining with Access mode if( page_no > ROOT_page ) - if( bt_lockpage(bt, page_no, BtLockAccess, NULL) ) - return 0; + bt_lockpage(BtLockAccess, bt->set); - // now unlock our (possibly foster) parent + // now unlock and unpin our (possibly foster) parent - if( prevpage ) - if( bt_unlockpage(bt, prevpage, prevmode) ) - return 0; - else - prevpage = 0; + if( prevpage ) { + bt_unlockpage(prevmode, prevset); + bt_unpinlatch (prevset); + bt_unpinpool (prevpool); + prevpage = 0; + } // obtain read lock using lock chaining - // and pin page contents - if( bt_lockpage(bt, page_no, mode, &bt->page) ) - return 0; + bt_lockpage(mode, bt->set); if( page_no > ROOT_page ) - if( bt_unlockpage(bt, page_no, BtLockAccess) ) - return 0; + bt_unlockpage(BtLockAccess, bt->set); // re-read and re-lock root after determining actual level of root - if( bt->page_no == ROOT_page ) + if( page_no == ROOT_page ) if( bt->page->lvl != drill) { drill = bt->page->lvl; - if( lock == BtLockWrite && drill == lvl ) - if( bt_unlockpage(bt, page_no, mode) ) - return 0; - else - continue; + if( lock == BtLockWrite && drill == lvl ) { + bt_unlockpage(mode, bt->set); + bt_unpinlatch (bt->set); + bt_unpinpool (bt->pool); + continue; + } } - prevpage = bt->page_no; - prevmode = mode; - - // if page is being deleted, - // move back to preceeding page - - if( bt->page->kill ) { - page_no = bt_getid (bt->page->right); - continue; - } - // find key on page at this level - // and descend to requested level + // and either descend to requested level + // or return key slot - slot = bt_findslot (bt, key, len); + if( slot = bt_findslot (bt, 
key, len) ) { + // is this slot < foster child area + // on the requested level? - // is this slot a foster child? + // if so, return actual slot even if dead - if( slot <= bt->page->cnt - bt->page->foster ) - if( drill == lvl ) - return slot; + if( slot <= bt->page->cnt - bt->page->foster ) + if( drill == lvl ) + return bt->foster = foster, slot; - while( slotptr(bt->page, slot)->dead ) - if( slot++ < bt->page->cnt ) + // find next active slot + // note: foster children are never dead + + while( slotptr(bt->page, slot)->dead ) + if( slot++ < bt->page->cnt ) continue; - else + else { + // we are waiting for fence key posting + page_no = bt_getid(bt->page->right); goto slideright; + } + + // is this slot < foster child area + // if so, drill to next level + + if( slot <= bt->page->cnt - bt->page->foster ) + foster = 0, drill--; + else + foster = 1; - if( slot <= bt->page->cnt - bt->page->foster ) - drill--; + // continue right onto foster child + // or down to next level. - // continue down / right using overlapping locks - // to protect pages being killed or split. + page_no = bt_getid(slotptr(bt->page, slot)->id); - page_no = bt_getid(slotptr(bt->page, slot)->id); - continue; + // or slide right into next page + + } else { + page_no = bt_getid(bt->page->right); + foster = 1; + } slideright: - page_no = bt_getid(bt->page->right); + prevpage = bt->page_no; + prevpool = bt->pool; + prevset = bt->set; + prevmode = mode; } while( page_no ); @@ -1324,108 +1561,386 @@ slideright: return 0; // return error } -// find and delete key on page by marking delete flag bit -// when page becomes empty, delete it from the btree +// remove empty page from the B-tree +// by pulling our right node left over ourselves + +// call with bt->page, etc, set to page's locked parent +// returns with page locked. -BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl) +BTERR bt_mergeright (BtDb *bt, BtPage page, BtPool *pool, BtLatchSet *set, uid page_no, uint lvl, uint slot) { -unsigned char leftkey[256], rightkey[256]; -uid page_no, right; -uint slot, tod; +BtLatchSet *rset, *pset, *rpset; +BtPool *rpool, *ppool, *rppool; +BtPage rpage, ppage, rppage; +uid right, parent, rparent; BtKey ptr; +uint idx; - if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) ) - ptr = keyptr(bt->page, slot); + // cache node's parent page + + parent = bt->page_no; + ppage = bt->page; + ppool = bt->pool; + pset = bt->set; + + // lock and map our right page + // note that it cannot be our foster child + // since the our node is empty + // and it cannot be NULL because of the stopper + // in the last right page + + bt_lockpage (BtLockWrite, set); + + // if we aren't dead yet + + if( page->act ) + goto rmergexit; + + if( right = bt_getid (page->right) ) + if( rpool = bt_pinpool (bt, right) ) + rpage = bt_page (bt, rpool, right); + else + return bt->err; else + return bt->err = BTERR_struct; + + rset = bt_pinlatch (bt, right); + + // find our right neighbor + + if( ppage->act > 1 ) { + for( idx = slot; idx++ < ppage->cnt; ) + if( !slotptr(ppage, idx)->dead ) + break; + + if( idx > ppage->cnt ) + return bt->err = BTERR_struct; + + // redirect right neighbor in parent to left node + + bt_putid(slotptr(ppage,idx)->id, page_no); + } + + // if parent has only our deleted page, e.g. 
no right neighbor + // prepare to merge parent itself + + if( ppage->act == 1 ) { + if( rparent = bt_getid (ppage->right) ) + if( rppool = bt_pinpool (bt, rparent) ) + rppage = bt_page (bt, rppool, rparent); + else return bt->err; + else + return bt->err = BTERR_struct; - // if key is found delete it, otherwise ignore request + rpset = bt_pinlatch (bt, rparent); + bt_lockpage (BtLockWrite, rpset); - if( !keycmp (ptr, key, len) ) - if( slotptr(bt->page, slot)->dead == 0 ) { - slotptr(bt->page,slot)->dead = 1; - if( slot < bt->page->cnt ) - bt->page->dirty = 1; - bt->page->act--; + // find our right neighbor on right parent page + + for( idx = 0; idx++ < rppage->cnt; ) + if( !slotptr(rppage, idx)->dead ) { + bt_putid (slotptr(rppage, idx)->id, page_no); + break; } - // return if page is not empty, or it has no right sibling + if( idx > rppage->cnt ) + return bt->err = BTERR_struct; + } - right = bt_getid(bt->page->right); - page_no = bt->page_no; + // now that there are no more pointers to our right node + // we can wait for delete lock on it - if( !right || bt->page->act ) - return bt_unlockpage(bt, page_no, BtLockWrite); + bt_lockpage(BtLockDelete, rset); + bt_lockpage(BtLockWrite, rset); - // obtain Parent lock over write lock + // pull contents of right page into our empty page - if( bt_lockpage(bt, page_no, BtLockParent, NULL) ) - return bt->err; + memcpy (page, rpage, bt->mgr->page_size); - // cache copy of key to delete + // ready to release right parent lock + // now that we have a new page in place - ptr = keyptr(bt->page, bt->page->cnt); - memcpy(leftkey, ptr, ptr->len + 1); + if( ppage->act == 1 ) { + bt_unlockpage (BtLockWrite, rpset); + bt_unpinlatch (rpset); + bt_unpinpool (rppool); + } - // lock and map right page + // add killed right block to free chain + // lock latch mgr - if( bt_lockpage(bt, right, BtLockWrite, &bt->temp) ) - return bt->err; + bt_spinwritelock(bt->mgr->latchmgr->lock, 0); - // pull contents of next page into current empty page - memcpy (bt->page, bt->temp, bt->mgr->page_size); + // store free chain in allocation page second right - // cache copy of key to update - ptr = keyptr(bt->temp, bt->temp->cnt); - memcpy(rightkey, ptr, ptr->len + 1); + bt_putid(rpage->right, bt_getid(bt->mgr->latchmgr->alloc[1].right)); + bt_putid(bt->mgr->latchmgr->alloc[1].right, right); - // Mark right page as deleted and point it to left page - // until we can post updates at higher level. + // unlock latch mgr and right page - bt_putid(bt->temp->right, page_no); - bt->temp->kill = 1; - bt->temp->cnt = 0; + bt_unlockpage(BtLockDelete, rset); + bt_unlockpage(BtLockWrite, rset); + bt_unpinlatch (rset); + bt_unpinpool (rpool); - if( bt_unlockpage(bt, right, BtLockWrite) ) - return bt->err; - if( bt_unlockpage(bt, page_no, BtLockWrite) ) + bt_spinreleasewrite(bt->mgr->latchmgr->lock, 0); + + // delete our obsolete fence key from our parent + + slotptr(ppage, slot)->dead = 1; + ppage->dirty = 1; + + // if our parent now empty + // remove it from the tree + + if( ppage->act-- == 1 ) + if( bt_mergeleft (bt, ppage, ppool, pset, parent, lvl+1) ) return bt->err; - // delete old lower key to consolidated node +rmergexit: + bt_unlockpage (BtLockWrite, pset); + bt_unpinlatch (pset); + bt_unpinpool (ppool); + + bt->found = 1; + return bt->err = 0; +} + +// remove empty page from the B-tree +// try merging left first. If no left +// sibling, then merge right. + +// call with page loaded and locked, +// return with page locked. 
+ +BTERR bt_mergeleft (BtDb *bt, BtPage page, BtPool *pool, BtLatchSet *set, uid page_no, uint lvl) +{ +unsigned char fencekey[256], postkey[256]; +uint slot, idx, postfence = 0; +BtLatchSet *lset, *pset; +BtPool *lpool, *ppool; +BtPage lpage, ppage; +uid left, parent; +BtKey ptr; + + ptr = keyptr(page, page->cnt); + memcpy(fencekey, ptr, ptr->len + 1); + bt_unlockpage (BtLockWrite, set); + + // load and lock our parent - if( bt_deletekey (bt, leftkey + 1, *leftkey, lvl + 1) ) +retry: + if( !(slot = bt_loadpage (bt, fencekey+1, *fencekey, lvl+1, BtLockWrite)) ) return bt->err; - // redirect higher key directly to consolidated node + parent = bt->page_no; + ppage = bt->page; + ppool = bt->pool; + pset = bt->set; - if( slot = bt_loadpage (bt, rightkey+1, *rightkey, lvl+1, BtLockWrite) ) - ptr = keyptr(bt->page, slot); + // wait until we are not a foster child + + if( bt->foster ) { + bt_unlockpage (BtLockWrite, pset); + bt_unpinlatch (pset); + bt_unpinpool (ppool); +#ifdef unix + sched_yield(); +#else + SwitchToThread(); +#endif + goto retry; + } + + // find our left neighbor in our parent page + + for( idx = slot; --idx; ) + if( !slotptr(ppage, idx)->dead ) + break; + + // if no left neighbor, do right merge + + if( !idx ) + return bt_mergeright (bt, page, pool, set, page_no, lvl, slot); + + // lock and map our left neighbor's page + + left = bt_getid (slotptr(ppage, idx)->id); + + if( lpool = bt_pinpool (bt, left) ) + lpage = bt_page (bt, lpool, left); else return bt->err; - // since key already exists, update id + lset = bt_pinlatch (bt, left); + bt_lockpage(BtLockWrite, lset); - if( keycmp (ptr, rightkey+1, *rightkey) ) - return bt->err = BTERR_struct; + // wait until foster sibling is in our parent + + if( bt_getid (lpage->right) != page_no ) { + bt_unlockpage (BtLockWrite, pset); + bt_unpinlatch (pset); + bt_unpinpool (ppool); + bt_unlockpage (BtLockWrite, lset); + bt_unpinlatch (lset); + bt_unpinpool (lpool); +#ifdef linux + sched_yield(); +#else + SwitchToThread(); +#endif + goto retry; + } - slotptr(bt->page, slot)->dead = 0; - bt_putid(slotptr(bt->page,slot)->id, page_no); + // since our page will have no more pointers to it, + // obtain Delete lock and wait for write locks to clear - if( bt_unlockpage(bt, bt->page_no, BtLockWrite) ) - return bt->err; + bt_lockpage(BtLockDelete, set); + bt_lockpage(BtLockWrite, set); + + // if we aren't dead yet, + // get ready for exit + + if( page->act ) { + bt_unlockpage(BtLockDelete, set); + bt_unlockpage(BtLockWrite, lset); + bt_unpinlatch (lset); + bt_unpinpool (lpool); + goto lmergexit; + } + + // are we are the fence key for our parent? 
+ // if so, grab our old fence key + + if( postfence = slot == ppage->cnt ) { + ptr = keyptr (ppage, ppage->cnt); + memcpy(fencekey, ptr, ptr->len + 1); + memset(slotptr(ppage, ppage->cnt), 0, sizeof(BtSlot)); + + // clear out other dead slots + + while( --ppage->cnt ) + if( slotptr(ppage, ppage->cnt)->dead ) + memset(slotptr(ppage, ppage->cnt), 0, sizeof(BtSlot)); + else + break; + + ptr = keyptr (ppage, ppage->cnt); + memcpy(postkey, ptr, ptr->len + 1); + } else + slotptr(ppage,slot)->dead = 1; + + ppage->dirty = 1; + ppage->act--; + + // push our right neighbor pointer to our left + + memcpy (lpage->right, page->right, BtId); + + // add ourselves to free chain + // lock latch mgr - // obtain write lock and - // add right block to free chain + bt_spinwritelock(bt->mgr->latchmgr->lock, 0); - if( bt_freepage (bt, right) ) + // store free chain in allocation page second right + bt_putid(page->right, bt_getid(bt->mgr->latchmgr->alloc[1].right)); + bt_putid(bt->mgr->latchmgr->alloc[1].right, page_no); + + // unlock latch mgr and pages + + bt_spinreleasewrite(bt->mgr->latchmgr->lock, 0); + bt_unlockpage(BtLockWrite, lset); + bt_unpinlatch (lset); + bt_unpinpool (lpool); + + // release our node's delete lock + + bt_unlockpage(BtLockDelete, set); + +lmergexit: + bt_unlockpage (BtLockWrite, pset); + bt_unpinpool (ppool); + + // do we need to post parent's fence key in its parent? + + if( !postfence || parent == ROOT_page ) { + bt_unpinlatch (pset); + bt->found = 1; + return bt->err = 0; + } + + // interlock parent fence post + + bt_lockpage (BtLockParent, pset); + + // load parent's parent page +posttry: + if( !(slot = bt_loadpage (bt, fencekey+1, *fencekey, lvl+2, BtLockWrite)) ) return bt->err; - // remove ParentModify lock + if( !(slot = bt_cleanpage (bt, bt->page, *fencekey, slot)) ) + if( bt_splitpage (bt, bt->page, bt->pool, bt->set, bt->page_no) ) + return bt->err; + else + goto posttry; + + page = bt->page; + + page->min -= *postkey + 1; + ((unsigned char *)page)[page->min] = *postkey; + memcpy ((unsigned char *)page + page->min +1, postkey + 1, *postkey ); + slotptr(page, slot)->off = page->min; + + bt_unlockpage (BtLockParent, pset); + bt_unpinlatch (pset); + + bt_unlockpage (BtLockWrite, bt->set); + bt_unpinlatch (bt->set); + bt_unpinpool (bt->pool); - if( bt_unlockpage(bt, page_no, BtLockParent) ) + bt->found = 1; + return bt->err = 0; +} + +// find and delete key on page by marking delete flag bit +// if page becomes empty, delete it from the btree + +BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len) +{ +BtLatchSet *set; +BtPool *pool; +BtPage page; +uid page_no; +BtKey ptr; +uint slot; + + if( !(slot = bt_loadpage (bt, key, len, 0, BtLockWrite)) ) return bt->err; - - return 0; + + page_no = bt->page_no; + page = bt->page; + pool = bt->pool; + set = bt->set; + + // if key is found delete it, otherwise ignore request + + ptr = keyptr(page, slot); + + if( bt->found = !keycmp (ptr, key, len) ) + if( bt->found = slotptr(page, slot)->dead == 0 ) { + slotptr(page,slot)->dead = 1; + if( slot < page->cnt ) + page->dirty = 1; + if( !--page->act ) + if( bt_mergeleft (bt, page, pool, set, page_no, 0) ) + return bt->err; + } + + bt_unlockpage(BtLockWrite, set); + bt_unpinlatch (set); + bt_unpinpool (pool); + return bt->err = 0; } // find key in leaf level and return row-id @@ -1444,32 +1959,32 @@ uid id; // if key exists, return row-id // otherwise return 0 - if( ptr->len == len && !memcmp (ptr->key, key, len) ) + if( slot <= bt->page->cnt && !keycmp (ptr, key, len) ) id = 
bt_getid(slotptr(bt->page,slot)->id); else id = 0; - if( bt_unlockpage (bt, bt->page_no, BtLockRead) ) - return 0; - + bt_unlockpage (BtLockRead, bt->set); + bt_unpinlatch (bt->set); + bt_unpinpool (bt->pool); return id; } // check page for space available, // clean if necessary and return // 0 - page needs splitting -// 1 - go ahead +// >0 new slot value -uint bt_cleanpage(BtDb *bt, uint amt) +uint bt_cleanpage(BtDb *bt, BtPage page, uint amt, uint slot) { uint nxt = bt->mgr->page_size; -BtPage page = bt->page; uint cnt = 0, idx = 0; uint max = page->cnt; +uint newslot = max; BtKey key; if( page->min >= (max+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 ) - return 1; + return slot; // skip cleanup if nothing to reclaim @@ -1486,12 +2001,19 @@ BtKey key; // try cleaning up page first + // always leave fence key in the array + // otherwise, remove deleted key + + // note: foster children are never dead + while( cnt++ < max ) { - // always leave fence key and foster children in list - if( cnt < max - page->foster && slotptr(bt->frame,cnt)->dead ) + if( cnt == slot ) + newslot = idx + 1; + if( cnt < max && slotptr(bt->frame,cnt)->dead ) continue; // copy key + key = keyptr(bt->frame, cnt); nxt -= key->len + 1; memcpy ((unsigned char *)page + nxt, key, key->len + 1); @@ -1510,7 +2032,7 @@ BtKey key; // see if page has enough space now, or does it need splitting? if( page->min >= (idx+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 ) - return 1; + return newslot; return 0; } @@ -1518,32 +2040,33 @@ BtKey key; // add key to current page // page must already be writelocked -void bt_addkeytopage (BtDb *bt, uint slot, unsigned char *key, uint len, uid id, uint tod) +void bt_addkeytopage (BtDb *bt, BtPage page, uint slot, unsigned char *key, uint len, uid id, uint tod) { -BtPage page = bt->page; uint idx; - // calculate next available slot and copy key into page + // find next available dead slot and copy key onto page + // note that foster children on the page are never dead - page->min -= len + 1; - ((unsigned char *)page)[page->min] = len; - memcpy ((unsigned char *)page + page->min +1, key, len ); + // look for next hole, but stay back from the fence key for( idx = slot; idx < page->cnt; idx++ ) if( slotptr(page, idx)->dead ) break; - // now insert key into array before slot - // preserving the fence slot - if( idx == page->cnt ) idx++, page->cnt++; page->act++; + // now insert key into array before slot + while( idx > slot ) *slotptr(page, idx) = *slotptr(page, idx -1), idx--; + page->min -= len + 1; + ((unsigned char *)page)[page->min] = len; + memcpy ((unsigned char *)page + page->min +1, key, len ); + bt_putid(slotptr(page,slot)->id, id); slotptr(page, slot)->off = page->min; slotptr(page, slot)->tod = tod; @@ -1566,9 +2089,11 @@ BtKey key; // contents into it from the root. Strip foster child key. // (it's the stopper key) + memset (slotptr(root, root->cnt), 0, sizeof(BtSlot)); + root->dirty = 1; + root->foster--; root->act--; root->cnt--; - root->foster--; // Save left fence key. @@ -1609,66 +2134,65 @@ BtKey key; root->act = 2; root->lvl++; - // release root (bt->page) + // release and unpin root (bt->page) - return bt_unlockpage(bt, ROOT_page, BtLockWrite); + bt_unlockpage(BtLockWrite, bt->set); + bt_unpinlatch (bt->set); + bt_unpinpool (bt->pool); + return 0; } // split already locked full node -// in current page variables -// return unlocked. +// return unlocked and unpinned. 
-BTERR bt_splitpage (BtDb *bt) +BTERR bt_splitpage (BtDb *bt, BtPage page, BtPool *pool, BtLatchSet *set, uid page_no) { uint slot, cnt, idx, max, nxt = bt->mgr->page_size; unsigned char fencekey[256]; -uid page_no = bt->page_no; -BtPage page = bt->page; uint tod = time(NULL); uint lvl = page->lvl; -uid new_page, right; +uid new_page; BtKey key; - // initialize frame buffer + // initialize frame buffer for right node memset (bt->frame, 0, bt->mgr->page_size); max = page->cnt - page->foster; - tod = (uint)time(NULL); cnt = max / 2; idx = 0; // split higher half of keys to bt->frame - // leaving foster children in the left node. + // leaving old foster children in the left node, + // and adding a new foster child there. while( cnt++ < max ) { key = keyptr(page, cnt); nxt -= key->len + 1; memcpy ((unsigned char *)bt->frame + nxt, key, key->len + 1); memcpy(slotptr(bt->frame,++idx)->id, slotptr(page,cnt)->id, BtId); + if( !(slotptr(bt->frame, idx)->dead = slotptr(page, cnt)->dead) ) + bt->frame->act++; slotptr(bt->frame, idx)->tod = slotptr(page, cnt)->tod; slotptr(bt->frame, idx)->off = nxt; - bt->frame->act++; } - // transfer right link node + // transfer right link node to new right node - if( page_no > ROOT_page ) { - right = bt_getid (page->right); - bt_putid(bt->frame->right, right); - } + if( page_no > ROOT_page ) + memcpy (bt->frame->right, page->right, BtId); bt->frame->bits = bt->mgr->page_bits; bt->frame->min = nxt; bt->frame->cnt = idx; bt->frame->lvl = lvl; - // get new free page and write frame to it. + // get new free page and write right frame to it. if( !(new_page = bt_newpage(bt, bt->frame)) ) return bt->err; - // remember fence key for new page to add - // as foster child + // remember fence key for new right page to add + // as foster child to the left node key = keyptr(bt->frame, idx); memcpy (fencekey, key, key->len + 1); @@ -1678,6 +2202,7 @@ BtKey key; memcpy (bt->frame, page, bt->mgr->page_size); memset (page+1, 0, bt->mgr->page_size - sizeof(*page)); nxt = bt->mgr->page_size; + page->dirty = 0; page->act = 0; cnt = 0; idx = 0; @@ -1690,22 +2215,26 @@ BtKey key; nxt -= key->len + 1; memcpy ((unsigned char *)page + nxt, key, key->len + 1); memcpy (slotptr(page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId); + if( !(slotptr(page, idx)->dead = slotptr(bt->frame, cnt)->dead) ) + page->act++; slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod; slotptr(page, idx)->off = nxt; - page->act++; } - // insert new foster child at beginning of the current foster children + // insert new foster child for right page in queue + // before any of the current foster children nxt -= *fencekey + 1; memcpy ((unsigned char *)page + nxt, fencekey, *fencekey + 1); + bt_putid (slotptr(page,++idx)->id, new_page); slotptr(page, idx)->tod = tod; slotptr(page, idx)->off = nxt; page->foster++; page->act++; - // continue with old foster child keys if any + // continue with old foster child keys + // note that none will be dead cnt = bt->frame->cnt - bt->frame->foster; @@ -1733,109 +2262,77 @@ BtKey key; // release wr lock on our page - if( bt_unlockpage (bt, page_no, BtLockWrite) ) - return bt->err; + bt_unlockpage (BtLockWrite, set); // obtain ParentModification lock for current page - // to fix fence key and highest foster child on page + // to fix new fence key and oldest foster child on page - if( bt_lockpage (bt, page_no, BtLockParent, NULL) ) - return bt->err; + bt_lockpage (BtLockParent, set); - // get our highest foster child key to find in parent node + // get our new fence key to insert 
in parent node
 
-	if( bt_lockpage (bt, page_no, BtLockRead, &page) )
-		return bt->err;
+	bt_lockpage (BtLockRead, set);
 
-	key = keyptr(page, page->cnt);
+	key = keyptr(page, page->cnt-1);
 	memcpy (fencekey, key, key->len+1);
 
-	if( bt_unlockpage (bt, page_no, BtLockRead) )
-		return bt->err;
-
-	// update our parent
-try_again:
+	bt_unlockpage (BtLockRead, set);
 
-	do {
-	  slot = bt_loadpage (bt, fencekey + 1, *fencekey, lvl + 1, BtLockWrite);
-
-	  if( !slot )
+	if( bt_insertkey (bt, fencekey + 1, *fencekey, page_no, tod, lvl + 1) )
 		return bt->err;
 
-	  // check if parent page has enough space for any possible key
-
-	  if( bt_cleanpage (bt, 256) )
-		break;
+	// lock our page for reading
 
-	  if( bt_splitpage (bt) )
-		return bt->err;
-	} while( 1 );
+	bt_lockpage (BtLockRead, set);
 
-	// see if we are still a foster child from another node
+	// switch old parent key from us to our oldest foster child
 
-	if( bt_getid (slotptr(bt->page, slot)->id) != page_no ) {
-		if( bt_unlockpage (bt, bt->page_no, BtLockWrite) )
-			return bt->err;
-#ifdef unix
-		sched_yield();
-#else
-		SwitchToThread();
-#endif
-		goto try_again;
-	}
-
-	// wait until readers from parent get their locks
-	// on our page
-
-	if( bt_lockpage (bt, page_no, BtLockDelete, NULL) )
-		return bt->err;
+	key = keyptr(page, page->cnt);
+	memcpy (fencekey, key, key->len+1);
 
-	// lock our page for writing
+	new_page = bt_getid (slotptr(page, page->cnt)->id);
+	bt_unlockpage (BtLockRead, set);
 
-	if( bt_lockpage (bt, page_no, BtLockWrite, &page) )
+	if( bt_insertkey (bt, fencekey + 1, *fencekey, new_page, tod, lvl + 1) )
 		return bt->err;
 
-	// switch parent fence key to foster child
-
-	if( slotptr(page, page->cnt)->dead )
-		slotptr(bt->page, slot)->dead = 1;
-	else
-		bt_putid (slotptr(bt->page, slot)->id, bt_getid(slotptr(page, page->cnt)->id));
-
-	// remove highest foster child from our page
+	// now that it has its own parent pointer,
+	// remove oldest foster child from our page
 
+	bt_lockpage (BtLockWrite, set);
+	memset (slotptr(page, page->cnt), 0, sizeof(BtSlot));
+	page->dirty = 1;
+	page->foster--;
 	page->cnt--;
 	page->act--;
-	page->foster--;
-	page->dirty = 1;
-	key = keyptr(page, page->cnt);
 
-	// add our new fence key for foster child to our parent
+	bt_unlockpage (BtLockParent, set);
 
-	bt_addkeytopage (bt, slot, key->key, key->len, page_no, tod);
+	// if this emptied our page,
+	// merge it into its left neighbor
 
-	if( bt_unlockpage (bt, bt->page_no, BtLockWrite) )
+	if( !page->act )
+	  if( bt_mergeleft (bt, page, pool, set, page_no, lvl) )
 		return bt->err;
 
-	if( bt_unlockpage (bt, page_no, BtLockDelete) )
-		return bt->err;
-
-	if( bt_unlockpage (bt, page_no, BtLockWrite) )
-		return bt->err;
+	// unlock and unpin
 
-	return bt_unlockpage (bt, page_no, BtLockParent);
+	bt_unlockpage (BtLockWrite, set);
+	bt_unpinlatch (set);
+	bt_unpinpool (pool);
+	return 0;
 }
 
 // Insert new key into the btree at leaf level.
-BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uid id, uint tod) +BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uid id, uint tod, uint lvl) { uint slot, idx; BtPage page; BtKey ptr; while( 1 ) { - if( slot = bt_loadpage (bt, key, len, 0, BtLockWrite) ) + if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) ) ptr = keyptr(bt->page, slot); else { @@ -1849,24 +2346,32 @@ BtKey ptr; page = bt->page; if( !keycmp (ptr, key, len) ) { + if( slotptr(page, slot)->dead ) + page->act++; slotptr(page, slot)->dead = 0; slotptr(page, slot)->tod = tod; bt_putid(slotptr(page,slot)->id, id); - return bt_unlockpage(bt, bt->page_no, BtLockWrite); + bt_unlockpage(BtLockWrite, bt->set); + bt_unpinlatch (bt->set); + bt_unpinpool (bt->pool); + return bt->err; } // check if page has enough space - if( bt_cleanpage (bt, len) ) + if( slot = bt_cleanpage (bt, bt->page, len, slot) ) break; - if( bt_splitpage (bt) ) + if( bt_splitpage (bt, bt->page, bt->pool, bt->set, bt->page_no) ) return bt->err; } - bt_addkeytopage (bt, slot, key, len, id, tod); + bt_addkeytopage (bt, bt->page, slot, key, len, id, tod); - return bt_unlockpage (bt, bt->page_no, BtLockWrite); + bt_unlockpage (BtLockWrite, bt->set); + bt_unpinlatch (bt->set); + bt_unpinpool (bt->pool); + return 0; } // cache page of keys into cursor and return starting slot for given key @@ -1878,10 +2383,12 @@ uint slot; // cache page for retrieval if( slot = bt_loadpage (bt, key, len, 0, BtLockRead) ) memcpy (bt->cursor, bt->page, bt->mgr->page_size); + bt->cursor_page = bt->page_no; - if ( bt_unlockpage(bt, bt->page_no, BtLockRead) ) - return 0; + bt_unlockpage(BtLockRead, bt->set); + bt_unpinlatch (bt->set); + bt_unpinpool (bt->pool); return slot; } @@ -1890,6 +2397,8 @@ uint slot; uint bt_nextkey (BtDb *bt, uint slot) { +BtLatchSet *set; +BtPool *pool; BtPage page; uid right; @@ -1907,15 +2416,19 @@ uid right; break; bt->cursor_page = right; - - if( bt_lockpage(bt, right, BtLockRead, &page) ) + if( pool = bt_pinpool (bt, right) ) + page = bt_page (bt, pool, right); + else return 0; - memcpy (bt->cursor, page, bt->mgr->page_size); + set = bt_pinlatch (bt, right); + bt_lockpage(BtLockRead, set); - if ( bt_unlockpage(bt, right, BtLockRead) ) - return 0; + memcpy (bt->cursor, page, bt->mgr->page_size); + bt_unlockpage(BtLockRead, set); + bt_unpinlatch (set); + bt_unpinpool (pool); slot = 0; } while( 1 ); @@ -1940,6 +2453,54 @@ uint bt_tod(BtDb *bt, uint slot) #ifdef STANDALONE +void bt_latchaudit (BtDb *bt) +{ +ushort idx, hashidx; +BtLatchSet *set; +BtPool *pool; +BtPage page; +uid page_no; + +#ifdef unix + for( idx = 1; idx < bt->mgr->latchmgr->latchdeployed; idx++ ) { + set = bt->mgr->latchsets + idx; + if( *(ushort *)set->readwr || *(ushort *)set->access || *(ushort *)set->parent ) { + fprintf(stderr, "latchset %d locked for page %6x\n", idx, set->page_no); + *(ushort *)set->readwr = 0; + *(ushort *)set->access = 0; + *(ushort *)set->parent = 0; + } + if( set->pin ) { + fprintf(stderr, "latchset %d pinned\n", idx); + set->pin = 0; + } + } + + for( hashidx = 0; hashidx < bt->mgr->latchmgr->latchhash; hashidx++ ) { + if( *(uint *)bt->mgr->latchmgr->table[hashidx].latch ) + fprintf(stderr, "latchmgr locked\n"); + if( idx = bt->mgr->latchmgr->table[hashidx].slot ) do { + set = bt->mgr->latchsets + idx; + if( *(uint *)set->readwr || *(ushort *)set->access || *(ushort *)set->parent ) + fprintf(stderr, "latchset %d locked\n", idx); + if( set->hash != hashidx ) + fprintf(stderr, "latchset %d wrong hashidx\n", idx); + if( set->pin ) + 
fprintf(stderr, "latchset %d pinned\n", idx); + } while( idx = set->next ); + } + page_no = bt_getid(bt->mgr->latchmgr->alloc[1].right); + + while( page_no ) { + fprintf(stderr, "free: %.6x\n", (uint)page_no); + pool = bt_pinpool (bt, page_no); + page = bt_page (bt, pool, page_no); + page_no = bt_getid(page->right); + bt_unpinpool (pool); + } +#endif +} + typedef struct { char type, idx; char *infile; @@ -1961,7 +2522,9 @@ uid next, page_no = LEAF_page; // start on first page of leaves unsigned char key[256]; ThreadArg *args = arg; int ch, len = 0, slot; +BtLatchSet *set; time_t tod[1]; +BtPool *pool; BtPage page; BtKey ptr; BtDb *bt; @@ -1972,6 +2535,12 @@ FILE *in; switch(args->type | 0x20) { + case 'a': + fprintf(stderr, "started latch mgr audit\n"); + bt_latchaudit (bt); + fprintf(stderr, "finished latch mgr audit\n"); + break; + case 'w': fprintf(stderr, "started indexing for %s\n", args->infile); if( in = fopen (args->infile, "rb") ) @@ -1986,7 +2555,7 @@ FILE *in; else if( args->num ) sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9; - if( bt_insertkey (bt, key, len, line, *tod) ) + if( bt_insertkey (bt, key, len, line, *tod, 0) ) fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0); len = 0; } @@ -2008,7 +2577,7 @@ FILE *in; else if( args->num ) sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9; - if( bt_deletekey (bt, key, len, 0) ) + if( bt_deletekey (bt, key, len) ) fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0); len = 0; } @@ -2063,10 +2632,17 @@ FILE *in; fprintf(stderr, "started reading\n"); do { - bt_lockpage (bt, page_no, BtLockRead, &page); + if( pool = bt_pinpool (bt, page_no) ) + page = bt_page (bt, pool, page_no); + else + break; + set = bt_pinlatch (bt, page_no); + bt_lockpage (BtLockRead, set); cnt += page->act; next = bt_getid (page->right); - bt_unlockpage (bt, page_no, BtLockRead); + bt_unlockpage (BtLockRead, set); + bt_unpinlatch (set); + bt_unpinpool (pool); } while( page_no = next ); cnt--; // remove stopper key
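
Every call site converted by this patch follows the same page-access protocol: pin the buffer-pool segment that maps the page, pin the page's latch set, take the page latch, do the work, then release everything in reverse order so the latch entry and the pool mapping stay valid while they are in use. The fragment below restates that protocol as a stand-alone helper; it is only a sketch, the helper name bt_countactive is hypothetical, and it assumes the BtDb, BtPool, BtLatchSet and BtPage declarations plus the pin and latch routines defined earlier in fosterbtreeg.c.

// sketch only: count the active keys on one page using the
// pin/latch calling convention introduced by this version

uint bt_countactive (BtDb *bt, uid page_no)
{
BtLatchSet *set;
BtPool *pool;
BtPage page;
uint act;

	// pin the pool segment and map the page

	if( pool = bt_pinpool (bt, page_no) )
		page = bt_page (bt, pool, page_no);
	else
		return 0;

	// pin the latch set, then take a read latch on the page

	set = bt_pinlatch (bt, page_no);
	bt_lockpage (BtLockRead, set);

	act = page->act;		// read under the latch

	// release in reverse order: page latch, latch pin, pool pin

	bt_unlockpage (BtLockRead, set);
	bt_unpinlatch (set);
	bt_unpinpool (pool);
	return act;
}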
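For callers, the visible API change in this version is that bt_insertkey now takes a level argument (0 for leaf-level inserts, as the thread code above passes) while bt_deletekey no longer takes one. A minimal usage fragment follows, assuming a BtDb handle named bt that was opened earlier in the program (the open call is outside this excerpt) and key/length/tod variables declared in the caller's scope:

	time (tod);
	len = sprintf ((char *)key, "%.9d", 12345);

	// insert at the leaf level; the level argument is new

	if( bt_insertkey (bt, key, len, 12345, *tod, 0) )
		fprintf (stderr, "insert error %d\n", bt->err);

	// delete by key only; the old level argument is gone

	if( bt_deletekey (bt, key, len) )
		fprintf (stderr, "delete error %d\n", bt->err);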