X-Git-Url: https://pd.if.org/git/?a=blobdiff_plain;f=threads2h.c;h=11fb8b457962a71e3ef6eeb75af24220f1295191;hb=07891352e2c84646ffe3071b4ebf708a97cf5c3e;hp=1b4d630a03a256b3c8a90a916cfe80575d61a116;hpb=b4a0a87a0f8a88ac689ea1b27f8fef3670a3d00a;p=btree diff --git a/threads2h.c b/threads2h.c index 1b4d630..11fb8b4 100644 --- a/threads2h.c +++ b/threads2h.c @@ -1,5 +1,6 @@ -// btree version threads2g sched_yield version -// 24 DEC 2013 +// btree version threads2h pthread rw lock version +// with reworked bt_deletekey code +// 12 FEB 2014 // author: karl malbrain, malbrain@cal.berkeley.edu @@ -49,6 +50,7 @@ REDISTRIBUTION OF THIS SOFTWARE. #include #include +#include typedef unsigned long long uid; @@ -58,6 +60,8 @@ typedef unsigned short ushort; typedef unsigned int uint; #endif +#define BT_latchtable 128 // number of latch manager slots + #define BT_ro 0x6f72 // ro #define BT_rw 0x7772 // rw @@ -86,9 +90,10 @@ typedef enum{ // mode & definition for latch implementation enum { - Write = 1, - Pending = 2, - Share = 4 + Mutex = 1, + Write = 2, + Pending = 4, + Share = 8 } LockMode; // exclusive is set for write access @@ -96,15 +101,39 @@ enum { // grant write lock when share == 0 typedef struct { - volatile uint exclusive:1; - volatile uint request:1; - volatile uint share:30; + volatile ushort mutex:1; + volatile ushort exclusive:1; + volatile ushort pending:1; + volatile ushort share:13; +} BtSpinLatch; + +// hash table entries + +typedef struct { + BtSpinLatch latch[1]; + volatile ushort slot; // Latch table entry at head of chain +} BtHashEntry; + +// latch manager table structure + +typedef struct { +#ifdef unix + pthread_rwlock_t lock[1]; +#else + SRWLOCK srw[1]; +#endif } BtLatch; typedef struct { - BtLatch readwr[1]; // read/write page lock - BtLatch access[1]; // Access Intent/Page delete - BtLatch parent[1]; // Parent modification + BtLatch readwr[1]; // read/write page lock + BtLatch access[1]; // Access Intent/Page delete + BtLatch parent[1]; // Posting of fence key in parent + BtSpinLatch busy[1]; // slot is being moved between chains + volatile ushort next; // next entry in hash table chain + volatile ushort prev; // prev entry in hash table chain + volatile ushort pin; // number of outstanding locks + volatile ushort hash; // hash slot entry is under + volatile uid page_no; // latch set page number } BtLatchSet; // Define the length of the page and key pointers @@ -142,16 +171,19 @@ typedef struct { // It is immediately followed // by the BtSlot array of keys. 
-typedef struct Page { - BtLatchSet latch[1]; // Set of three latches +typedef struct BtPage_ { uint cnt; // count of keys in page uint act; // count of active keys uint min; // next key offset - unsigned char bits; // page size in bits - unsigned char lvl:6; // level of page - unsigned char kill:1; // page is being deleted + unsigned char bits:7; // page size in bits + unsigned char free:1; // page is on free list + unsigned char lvl:4; // level of page + unsigned char kill:1; // page is being killed unsigned char dirty:1; // page has deleted keys + unsigned char posted:1; // page fence is posted + unsigned char goright:1; // page is being killed, continue to right unsigned char right[BtId]; // page number to right + unsigned char fence[256]; // page fence key } *BtPage; // The memory mapping pool table buffer manager entry @@ -160,15 +192,37 @@ typedef struct { unsigned long long int lru; // number of times accessed uid basepage; // mapped base page number char *map; // mapped memory pointer - uint slot; // slot index in this array - volatile uint pin; // mapped page pin counter + ushort slot; // slot index in this array + ushort pin; // mapped page pin counter void *hashprev; // previous pool entry for the same hash idx void *hashnext; // next pool entry for the same hash idx #ifndef unix - HANDLE hmap; + HANDLE hmap; // Windows memory mapping handle #endif } BtPool; +// The loadpage interface object + +typedef struct { + uid page_no; // current page number + BtPage page; // current page pointer + BtPool *pool; // current page pool + BtLatchSet *latch; // current page latch set +} BtPageSet; + +// structure for latch manager on ALLOC_page + +typedef struct { + struct BtPage_ alloc[2]; // next & free page_nos in right ptr + BtSpinLatch lock[1]; // allocation area lite latch + ushort latchdeployed; // highest number of latch entries deployed + ushort nlatchpage; // number of latch pages at BT_latch + ushort latchtotal; // number of page latch entries + ushort latchhash; // number of latch hash table slots + ushort latchvictim; // next latch entry to examine + BtHashEntry table[0]; // the hash table +} BtLatchMgr; + // The object structure for Btree access typedef struct { @@ -177,32 +231,33 @@ typedef struct { uint seg_bits; // seg size in pages in bits uint mode; // read-write mode #ifdef unix - char *pooladvise; // bit maps for pool page advisements int idx; #else HANDLE idx; #endif - uint poolcnt; // highest page pool node in use - uint poolmax; // highest page pool node allocated - uint poolmask; // total size of pages in mmap segment - 1 - uint hashsize; // size of Hash Table for pool entries + ushort poolcnt; // highest page pool node in use + ushort poolmax; // highest page pool node allocated + ushort poolmask; // total number of pages in mmap segment - 1 + ushort hashsize; // size of Hash Table for pool entries volatile uint evicted; // last evicted hash table slot ushort *hash; // pool index for hash entries - BtLatch *latch; // latches for hash table slots + BtSpinLatch *latch; // latches for hash table slots + BtLatchMgr *latchmgr; // mapped latch page from allocation page + BtLatchSet *latchsets; // mapped latch set from latch pages BtPool *pool; // memory pool page segments +#ifndef unix + HANDLE halloc; // allocation and latch table handle +#endif } BtMgr; typedef struct { BtMgr *mgr; // buffer manager for thread - BtPage temp; // temporary frame buffer (memory mapped/file IO) - BtPage alloc; // frame buffer for alloc page ( page 0 ) BtPage cursor; // cached frame for 
start/next (never mapped) BtPage frame; // spare frame for the page split (never mapped) BtPage zero; // page frame for zeroes at end of file - BtPage page; // current page - uid page_no; // current page number uid cursor_page; // current cursor page number unsigned char *mem; // frame, cursor, page memory buffer + int found; // last delete or insert was found int err; // last error } BtDb; @@ -219,12 +274,15 @@ typedef enum { // B-Tree functions extern void bt_close (BtDb *bt); extern BtDb *bt_open (BtMgr *mgr); -extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod); -extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl); +extern BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uid id, uint tod, uint lvl); +extern BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len); extern uid bt_findkey (BtDb *bt, unsigned char *key, uint len); extern uint bt_startkey (BtDb *bt, unsigned char *key, uint len); extern uint bt_nextkey (BtDb *bt, uint slot); +// internal functions +BTERR bt_removepage (BtDb *bt, BtPageSet *set, uint lvl, unsigned char *pagefence); + // manager functions extern BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolsize, uint segsize, uint hashsize); void bt_mgrclose (BtMgr *mgr); @@ -236,9 +294,10 @@ extern uid bt_uid (BtDb *bt, uint slot); extern uint bt_tod (BtDb *bt, uint slot); // BTree page number constants -#define ALLOC_page 0 -#define ROOT_page 1 -#define LEAF_page 2 +#define ALLOC_page 0 // allocation & lock manager hash table +#define ROOT_page 1 // root of the btree +#define LEAF_page 2 // first page of leaves +#define LATCH_page 3 // pages for lock manager // Number of levels to create in a new BTree @@ -268,8 +327,8 @@ extern uint bt_tod (BtDb *bt, uint slot); // one with two keys. // Deleted keys are marked with a dead bit until -// page cleanup The fence key for a node is always -// present, even after deletion and cleanup. +// page cleanup The fence key for a node is +// present in a special array. // Groups of pages called segments from the btree are optionally // cached with a memory mapped pool. A hash table is used to keep @@ -284,12 +343,13 @@ extern uint bt_tod (BtDb *bt, uint slot); // Page 0 is dedicated to lock for new page extensions, // and chains empty pages together for reuse. -// The ParentModification lock on a node is obtained to prevent resplitting -// or deleting a node before its fence is posted into its upper level. +// The ParentModification lock on a node is obtained to serialize posting +// or changing the fence key for a node. // Empty pages are chained together through the ALLOC page and reused. -// Access macros to address slot and key values from the page +// Access macros to address slot and key values from the page. +// Page slots use 1 based indexing. 
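// A minimal usage sketch, assuming the slotptr/keyptr macros defined
// just below: live_keys is a hypothetical helper (illustration only)
// that counts the keys on a page not marked dead, using the 1 based
// slot indexing described above.
//
//	uint live_keys (BtPage page)
//	{
//	uint slot, cnt = 0;
//
//		// slot 0 is never used; keyptr(page, slot) addresses the key bytes
//
//		for( slot = 1; slot <= page->cnt; slot++ )
//			if( !slotptr(page, slot)->dead )
//				cnt++;
//
//		return cnt;
//	}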
#define slotptr(page, slot) (((BtSlot *)(page+1)) + (slot-1)) #define keyptr(page, slot) ((BtKey)((unsigned char*)(page) + slotptr(page, slot)->off)) @@ -313,6 +373,389 @@ int i; return id; } +// Latch Manager + +// wait until write lock mode is clear +// and add 1 to the share count + +void bt_spinreadlock(BtSpinLatch *latch) +{ +ushort prev; + + do { + // obtain latch mutex +#ifdef unix + if( __sync_fetch_and_or((ushort *)latch, Mutex) & Mutex ) + continue; +#else + if( prev = _InterlockedOr16((ushort *)latch, Mutex) & Mutex ) + continue; +#endif + // see if exclusive request is granted or pending + + if( prev = !(latch->exclusive | latch->pending) ) +#ifdef unix + __sync_fetch_and_add((ushort *)latch, Share); +#else + _InterlockedExchangeAdd16 ((ushort *)latch, Share); +#endif + +#ifdef unix + __sync_fetch_and_and ((ushort *)latch, ~Mutex); +#else + _InterlockedAnd16((ushort *)latch, ~Mutex); +#endif + + if( prev ) + return; +#ifdef unix + } while( sched_yield(), 1 ); +#else + } while( SwitchToThread(), 1 ); +#endif +} + +// wait for other read and write latches to relinquish + +void bt_spinwritelock(BtSpinLatch *latch) +{ + do { +#ifdef unix + if( __sync_fetch_and_or((ushort *)latch, Mutex | Pending) & Mutex ) + continue; +#else + if( _InterlockedOr16((ushort *)latch, Mutex | Pending) & Mutex ) + continue; +#endif + if( !(latch->share | latch->exclusive) ) { +#ifdef unix + __sync_fetch_and_or((ushort *)latch, Write); + __sync_fetch_and_and ((ushort *)latch, ~(Mutex | Pending)); +#else + _InterlockedOr16((ushort *)latch, Write); + _InterlockedAnd16((ushort *)latch, ~(Mutex | Pending)); +#endif + return; + } + +#ifdef unix + __sync_fetch_and_and ((ushort *)latch, ~Mutex); +#else + _InterlockedAnd16((ushort *)latch, ~Mutex); +#endif + +#ifdef unix + } while( sched_yield(), 1 ); +#else + } while( SwitchToThread(), 1 ); +#endif +} + +// try to obtain write lock + +// return 1 if obtained, +// 0 otherwise + +int bt_spinwritetry(BtSpinLatch *latch) +{ +ushort prev; + +#ifdef unix + if( prev = __sync_fetch_and_or((ushort *)latch, Mutex), prev & Mutex ) + return 0; +#else + if( prev = _InterlockedOr16((ushort *)latch, Mutex), prev & Mutex ) + return 0; +#endif + // take write access if all bits are clear + + if( !prev ) +#ifdef unix + __sync_fetch_and_or ((ushort *)latch, Write); +#else + _InterlockedOr16((ushort *)latch, Write); +#endif + +#ifdef unix + __sync_fetch_and_and ((ushort *)latch, ~Mutex); +#else + _InterlockedAnd16((ushort *)latch, ~Mutex); +#endif + return !prev; +} + +// clear write mode + +void bt_spinreleasewrite(BtSpinLatch *latch) +{ +#ifdef unix + __sync_fetch_and_and ((ushort *)latch, ~Write); +#else + _InterlockedAnd16((ushort *)latch, ~Write); +#endif +} + +// decrement reader count + +void bt_spinreleaseread(BtSpinLatch *latch) +{ +#ifdef unix + __sync_fetch_and_add((ushort *)latch, -Share); +#else + _InterlockedExchangeAdd16 ((ushort *)latch, -Share); +#endif +} + +void bt_readlock(BtLatch *latch) +{ +#ifdef unix + pthread_rwlock_rdlock (latch->lock); +#else + AcquireSRWLockShared (latch->srw); +#endif +} + +// wait for other read and write latches to relinquish + +void bt_writelock(BtLatch *latch) +{ +#ifdef unix + pthread_rwlock_wrlock (latch->lock); +#else + AcquireSRWLockExclusive (latch->srw); +#endif +} + +// try to obtain write lock + +// return 1 if obtained, +// 0 if already write or read locked + +int bt_writetry(BtLatch *latch) +{ +int result = 0; + +#ifdef unix + result = !pthread_rwlock_trywrlock (latch->lock); +#else + result = TryAcquireSRWLockExclusive 
(latch->srw); +#endif + return result; +} + +// clear write mode + +void bt_releasewrite(BtLatch *latch) +{ +#ifdef unix + pthread_rwlock_unlock (latch->lock); +#else + ReleaseSRWLockExclusive (latch->srw); +#endif +} + +// decrement reader count + +void bt_releaseread(BtLatch *latch) +{ +#ifdef unix + pthread_rwlock_unlock (latch->lock); +#else + ReleaseSRWLockShared (latch->srw); +#endif +} + +void bt_initlockset (BtLatchSet *set) +{ +#ifdef unix +pthread_rwlockattr_t rwattr[1]; + + pthread_rwlockattr_init (rwattr); + pthread_rwlockattr_setpshared (rwattr, PTHREAD_PROCESS_SHARED); + + pthread_rwlock_init (set->readwr->lock, rwattr); + pthread_rwlock_init (set->access->lock, rwattr); + pthread_rwlock_init (set->parent->lock, rwattr); + pthread_rwlockattr_destroy (rwattr); +#else + InitializeSRWLock (set->readwr->srw); + InitializeSRWLock (set->access->srw); + InitializeSRWLock (set->parent->srw); +#endif +} + +// link latch table entry into latch hash table + +void bt_latchlink (BtDb *bt, ushort hashidx, ushort victim, uid page_no) +{ +BtLatchSet *set = bt->mgr->latchsets + victim; + + if( set->next = bt->mgr->latchmgr->table[hashidx].slot ) + bt->mgr->latchsets[set->next].prev = victim; + + bt->mgr->latchmgr->table[hashidx].slot = victim; + set->page_no = page_no; + set->hash = hashidx; + set->prev = 0; +} + +// release latch pin + +void bt_unpinlatch (BtLatchSet *set) +{ +#ifdef unix + __sync_fetch_and_add(&set->pin, -1); +#else + _InterlockedDecrement16 (&set->pin); +#endif +} + +// find existing latchset or inspire new one +// return with latchset pinned + +BtLatchSet *bt_pinlatch (BtDb *bt, uid page_no) +{ +ushort hashidx = page_no % bt->mgr->latchmgr->latchhash; +ushort slot, avail = 0, victim, idx; +BtLatchSet *set; + + // obtain read lock on hash table entry + + bt_spinreadlock(bt->mgr->latchmgr->table[hashidx].latch); + + if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do + { + set = bt->mgr->latchsets + slot; + if( page_no == set->page_no ) + break; + } while( slot = set->next ); + + if( slot ) { +#ifdef unix + __sync_fetch_and_add(&set->pin, 1); +#else + _InterlockedIncrement16 (&set->pin); +#endif + } + + bt_spinreleaseread (bt->mgr->latchmgr->table[hashidx].latch); + + if( slot ) + return set; + + // try again, this time with write lock + + bt_spinwritelock(bt->mgr->latchmgr->table[hashidx].latch); + + if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do + { + set = bt->mgr->latchsets + slot; + if( page_no == set->page_no ) + break; + if( !set->pin && !avail ) + avail = slot; + } while( slot = set->next ); + + // found our entry, or take over an unpinned one + + if( slot || (slot = avail) ) { + set = bt->mgr->latchsets + slot; +#ifdef unix + __sync_fetch_and_add(&set->pin, 1); +#else + _InterlockedIncrement16 (&set->pin); +#endif + set->page_no = page_no; + bt_spinreleasewrite(bt->mgr->latchmgr->table[hashidx].latch); + return set; + } + + // see if there are any unused entries +#ifdef unix + victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, 1) + 1; +#else + victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchdeployed); +#endif + + if( victim < bt->mgr->latchmgr->latchtotal ) { + set = bt->mgr->latchsets + victim; +#ifdef unix + __sync_fetch_and_add(&set->pin, 1); +#else + _InterlockedIncrement16 (&set->pin); +#endif + bt_initlockset (set); + bt_latchlink (bt, hashidx, victim, page_no); + bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch); + return set; + } + +#ifdef unix + victim = __sync_fetch_and_add 
(&bt->mgr->latchmgr->latchdeployed, -1); +#else + victim = _InterlockedDecrement16 (&bt->mgr->latchmgr->latchdeployed); +#endif + // find and reuse previous lock entry + + while( 1 ) { +#ifdef unix + victim = __sync_fetch_and_add(&bt->mgr->latchmgr->latchvictim, 1); +#else + victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchvictim) - 1; +#endif + // we don't use slot zero + + if( victim %= bt->mgr->latchmgr->latchtotal ) + set = bt->mgr->latchsets + victim; + else + continue; + + // take control of our slot + // from other threads + + if( set->pin || !bt_spinwritetry (set->busy) ) + continue; + + idx = set->hash; + + // try to get write lock on hash chain + // skip entry if not obtained + // or has outstanding locks + + if( !bt_spinwritetry (bt->mgr->latchmgr->table[idx].latch) ) { + bt_spinreleasewrite (set->busy); + continue; + } + + if( set->pin ) { + bt_spinreleasewrite (set->busy); + bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch); + continue; + } + + // unlink our available victim from its hash chain + + if( set->prev ) + bt->mgr->latchsets[set->prev].next = set->next; + else + bt->mgr->latchmgr->table[idx].slot = set->next; + + if( set->next ) + bt->mgr->latchsets[set->next].prev = set->prev; + + bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch); +#ifdef unix + __sync_fetch_and_add(&set->pin, 1); +#else + _InterlockedIncrement16 (&set->pin); +#endif + bt_latchlink (bt, hashidx, victim, page_no); + bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch); + bt_spinreleasewrite (set->busy); + return set; + } +} + void bt_mgrclose (BtMgr *mgr) { BtPool *pool; @@ -335,12 +778,19 @@ uint slot; #endif } +#ifdef unix + munmap (mgr->latchsets, mgr->latchmgr->nlatchpage * mgr->page_size); + munmap (mgr->latchmgr, mgr->page_size); +#else + FlushViewOfFile(mgr->latchmgr, 0); + UnmapViewOfFile(mgr->latchmgr); + CloseHandle(mgr->halloc); +#endif #ifdef unix close (mgr->idx); free (mgr->pool); free (mgr->hash); free (mgr->latch); - free (mgr->pooladvise); free (mgr); #else FlushFileBuffers(mgr->idx); @@ -373,13 +823,13 @@ void bt_close (BtDb *bt) BtMgr *bt_mgr (char *name, uint mode, uint bits, uint poolmax, uint segsize, uint hashsize) { -uint lvl, attr, cacheblk, last; -BtPage alloc; -int lockmode; +uint lvl, attr, cacheblk, last, slot, idx; +uint nlatchpage, latchhash; +BtLatchMgr *latchmgr; off64_t size; uint amt[1]; BtMgr* mgr; -BtKey key; +int flag; #ifndef unix SYSTEM_INFO sysinfo[1]; @@ -398,19 +848,8 @@ SYSTEM_INFO sysinfo[1]; #ifdef unix mgr = calloc (1, sizeof(BtMgr)); - switch (mode & 0x7fff) - { - case BT_rw: - mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666); - lockmode = 1; - break; + mgr->idx = open ((char*)name, O_RDWR | O_CREAT, 0666); - case BT_ro: - default: - mgr->idx = open ((char*)name, O_RDONLY); - lockmode = 0; - break; - } if( mgr->idx == -1 ) return free(mgr), NULL; @@ -419,19 +858,8 @@ SYSTEM_INFO sysinfo[1]; #else mgr = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, sizeof(BtMgr)); attr = FILE_ATTRIBUTE_NORMAL; - switch (mode & 0x7fff) - { - case BT_rw: - mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL); - lockmode = 1; - break; + mgr->idx = CreateFile(name, GENERIC_READ| GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, attr, NULL); - case BT_ro: - default: - mgr->idx = CreateFile(name, GENERIC_READ, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_EXISTING, attr, NULL); - lockmode = 0; - break; - } if( mgr->idx == INVALID_HANDLE_VALUE ) return 
GlobalFree(mgr), NULL; @@ -441,26 +869,26 @@ SYSTEM_INFO sysinfo[1]; #endif #ifdef unix - alloc = malloc (BT_maxpage); + latchmgr = malloc (BT_maxpage); *amt = 0; // read minimum page size to get root info if( size = lseek (mgr->idx, 0L, 2) ) { - if( pread(mgr->idx, alloc, BT_minpage, 0) == BT_minpage ) - bits = alloc->bits; + if( pread(mgr->idx, latchmgr, BT_minpage, 0) == BT_minpage ) + bits = latchmgr->alloc->bits; else - return free(mgr), free(alloc), NULL; + return free(mgr), free(latchmgr), NULL; } else if( mode == BT_ro ) - return bt_mgrclose (mgr), NULL; + return free(latchmgr), bt_mgrclose (mgr), NULL; #else - alloc = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE); + latchmgr = VirtualAlloc(NULL, BT_maxpage, MEM_COMMIT, PAGE_READWRITE); size = GetFileSize(mgr->idx, amt); if( size || *amt ) { - if( !ReadFile(mgr->idx, (char *)alloc, BT_minpage, amt, NULL) ) + if( !ReadFile(mgr->idx, (char *)latchmgr, BT_minpage, amt, NULL) ) return bt_mgrclose (mgr), NULL; - bits = alloc->bits; + bits = latchmgr->alloc->bits; } else if( mode == BT_ro ) return bt_mgrclose (mgr), NULL; #endif @@ -493,53 +921,67 @@ SYSTEM_INFO sysinfo[1]; #ifdef unix mgr->pool = calloc (poolmax, sizeof(BtPool)); mgr->hash = calloc (hashsize, sizeof(ushort)); - mgr->latch = calloc (hashsize, sizeof(BtLatch)); - mgr->pooladvise = calloc (poolmax, (mgr->poolmask + 8) / 8); + mgr->latch = calloc (hashsize, sizeof(BtSpinLatch)); #else mgr->pool = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, poolmax * sizeof(BtPool)); mgr->hash = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(ushort)); - mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtLatch)); + mgr->latch = GlobalAlloc (GMEM_FIXED|GMEM_ZEROINIT, hashsize * sizeof(BtSpinLatch)); #endif if( size || *amt ) - goto mgrxit; + goto mgrlatch; - // initializes an empty b-tree with root page and page of leaves + // initialize an empty b-tree with latch page, root page, page of leaves + // and page(s) of latches - memset (alloc, 0, 1 << bits); - bt_putid(alloc->right, MIN_lvl+1); - alloc->bits = mgr->page_bits; + memset (latchmgr, 0, 1 << bits); + nlatchpage = BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1; + bt_putid(latchmgr->alloc->right, MIN_lvl+1+nlatchpage); + latchmgr->alloc->bits = mgr->page_bits; + + latchmgr->nlatchpage = nlatchpage; + latchmgr->latchtotal = nlatchpage * (mgr->page_size / sizeof(BtLatchSet)); + + // initialize latch manager + + latchhash = (mgr->page_size - sizeof(BtLatchMgr)) / sizeof(BtHashEntry); + + // size of hash table = total number of latchsets + + if( latchhash > latchmgr->latchtotal ) + latchhash = latchmgr->latchtotal; + + latchmgr->latchhash = latchhash; #ifdef unix - if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size ) + if( write (mgr->idx, latchmgr, mgr->page_size) < mgr->page_size ) return bt_mgrclose (mgr), NULL; #else - if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) ) + if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) ) return bt_mgrclose (mgr), NULL; if( *amt < mgr->page_size ) return bt_mgrclose (mgr), NULL; #endif - memset (alloc, 0, 1 << bits); - alloc->bits = mgr->page_bits; + memset (latchmgr, 0, 1 << bits); + latchmgr->alloc->bits = mgr->page_bits; for( lvl=MIN_lvl; lvl--; ) { - slotptr(alloc, 1)->off = mgr->page_size - 3; - bt_putid(slotptr(alloc, 1)->id, lvl ? 
MIN_lvl - lvl + 1 : 0); // next(lower) page number - key = keyptr(alloc, 1); - key->len = 2; // create stopper key - key->key[0] = 0xff; - key->key[1] = 0xff; - alloc->min = mgr->page_size - 3; - alloc->lvl = lvl; - alloc->cnt = 1; - alloc->act = 1; + slotptr(latchmgr->alloc, 1)->off = offsetof(struct BtPage_, fence); + bt_putid(slotptr(latchmgr->alloc, 1)->id, lvl ? MIN_lvl - lvl + 1 : 0); // next(lower) page number + latchmgr->alloc->fence[0] = 2; + latchmgr->alloc->fence[1] = 0xff; + latchmgr->alloc->fence[2] = 0xff; + latchmgr->alloc->min = mgr->page_size; + latchmgr->alloc->lvl = lvl; + latchmgr->alloc->cnt = 1; + latchmgr->alloc->act = 1; #ifdef unix - if( write (mgr->idx, alloc, mgr->page_size) < mgr->page_size ) + if( write (mgr->idx, latchmgr, mgr->page_size) < mgr->page_size ) return bt_mgrclose (mgr), NULL; #else - if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) ) + if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) ) return bt_mgrclose (mgr), NULL; if( *amt < mgr->page_size ) @@ -547,32 +989,52 @@ SYSTEM_INFO sysinfo[1]; #endif } - // create empty page area by writing last page of first - // segment area (other pages are zeroed by O/S) + // clear out latch manager locks + // and rest of pages to round out segment - if( mgr->poolmask ) { - memset(alloc, 0, mgr->page_size); - last = mgr->poolmask; - - while( last < MIN_lvl + 1 ) - last += mgr->poolmask + 1; + memset(latchmgr, 0, mgr->page_size); + last = MIN_lvl + 1; + while( last <= ((MIN_lvl + 1 + nlatchpage) | mgr->poolmask) ) { #ifdef unix - pwrite(mgr->idx, alloc, mgr->page_size, last << mgr->page_bits); + pwrite(mgr->idx, latchmgr, mgr->page_size, last << mgr->page_bits); #else SetFilePointer (mgr->idx, last << mgr->page_bits, NULL, FILE_BEGIN); - if( !WriteFile (mgr->idx, (char *)alloc, mgr->page_size, amt, NULL) ) + if( !WriteFile (mgr->idx, (char *)latchmgr, mgr->page_size, amt, NULL) ) return bt_mgrclose (mgr), NULL; if( *amt < mgr->page_size ) return bt_mgrclose (mgr), NULL; #endif + last++; } -mgrxit: +mgrlatch: #ifdef unix - free (alloc); + flag = PROT_READ | PROT_WRITE; + mgr->latchmgr = mmap (0, mgr->page_size, flag, MAP_SHARED, mgr->idx, ALLOC_page * mgr->page_size); + if( mgr->latchmgr == MAP_FAILED ) + return bt_mgrclose (mgr), NULL; + mgr->latchsets = (BtLatchSet *)mmap (0, mgr->latchmgr->nlatchpage * mgr->page_size, flag, MAP_SHARED, mgr->idx, LATCH_page * mgr->page_size); + if( mgr->latchsets == MAP_FAILED ) + return bt_mgrclose (mgr), NULL; #else - VirtualFree (alloc, 0, MEM_RELEASE); + flag = PAGE_READWRITE; + mgr->halloc = CreateFileMapping(mgr->idx, NULL, flag, 0, (BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1 + LATCH_page) * mgr->page_size, NULL); + if( !mgr->halloc ) + return bt_mgrclose (mgr), NULL; + + flag = FILE_MAP_WRITE; + mgr->latchmgr = MapViewOfFile(mgr->halloc, flag, 0, 0, (BT_latchtable / (mgr->page_size / sizeof(BtLatchSet)) + 1 + LATCH_page) * mgr->page_size); + if( !mgr->latchmgr ) + return GetLastError(), bt_mgrclose (mgr), NULL; + + mgr->latchsets = (void *)((char *)mgr->latchmgr + LATCH_page * mgr->page_size); +#endif + +#ifdef unix + free (latchmgr); +#else + VirtualFree (latchmgr, 0, MEM_RELEASE); #endif return mgr; } @@ -594,6 +1056,8 @@ BtDb *bt = malloc (sizeof(*bt)); bt->frame = (BtPage)bt->mem; bt->zero = (BtPage)(bt->mem + 1 * mgr->page_size); bt->cursor = (BtPage)(bt->mem + 2 * mgr->page_size); + + memset (bt->zero, 0, mgr->page_size); return bt; } @@ -616,155 +1080,6 @@ int ans; return 0; } -// Latch Manager - -// wait until 
write lock mode is clear -// and add 1 to the share count - -void bt_readlock(BtLatch *latch) -{ - do { - // see if exclusive request is pending, or granted - - if( !(volatile int)latch->request && !(volatile int)latch->exclusive ) { - // add one to counter, check write bit -#ifdef unix - if( ~__sync_fetch_and_add((volatile int *)latch, Share) & Write ) - return; -#else - if( ~_InterlockedExchangeAdd((volatile int *)latch, Share) & Write ) - return; -#endif - // didn't get latch, reduce counter by one - -#ifdef unix - __sync_fetch_and_add((volatile int *)latch, -Share); -#else - _InterlockedExchangeAdd ((volatile int *)latch, -Share); -#endif - } - - // and yield -#ifdef unix - sched_yield(); -#else - SwitchToThread(); -#endif - } while( 1 ); -} - -// wait for other read and write latches to relinquish - -void bt_writelock(BtLatch *latch) -{ -int prev; - - do { - // set exclusive access pending - -#ifdef unix - __sync_fetch_and_or((int *)latch, Pending); -#else - _InterlockedOr((int *)latch, Pending); -#endif - - // see if we can get write access - // with no readers -#ifdef unix - prev = __sync_fetch_and_or((volatile int *)latch, Write); -#else - prev = _InterlockedOr((volatile int *)latch, Write); -#endif - - // did we get exclusive access? - // if so, clear write pending - - if( !(prev & ~Pending) ) { -#ifdef unix - __sync_fetch_and_and((volatile int *)latch, ~Pending); -#else - _InterlockedAnd((volatile int *)latch, ~Pending); -#endif - return; - } - - // reset our Write mode if it was clear before - - if( !(prev & Write) ) { -#ifdef unix - __sync_fetch_and_and((volatile int *)latch, ~Write); -#else - _InterlockedAnd((volatile int *)latch, ~Write); -#endif - } - - // otherwise yield - -#ifdef unix - sched_yield(); -#else - SwitchToThread(); -#endif - } while( 1 ); -} - -// try to obtain write lock - -// return 1 if obtained, -// 0 otherwise - -int bt_writetry(BtLatch *latch) -{ -int prev; - - // see if we can get write access - // with no readers -#ifdef unix - prev = __sync_fetch_and_or((volatile int *)latch, Write); -#else - prev = _InterlockedOr((volatile int *)latch, Write); -#endif - - // did we get exclusive access? - // if so, return OK - - if( !(prev & ~Pending) ) - return 1; - - // reset our Write mode if it was clear before - - if( !(prev & Write) ) { -#ifdef unix - __sync_fetch_and_and((volatile int *)latch, ~Write); -#else - _InterlockedAnd((volatile int *)latch, ~Write); -#endif - } - return 0; -} - -// clear write mode - -void bt_releasewrite(BtLatch *latch) -{ -#ifdef unix - __sync_fetch_and_and((int *)latch, ~Write); -#else - _InterlockedAnd ((int *)latch, ~Write); -#endif -} - -// decrement reader count - -void bt_releaseread(BtLatch *latch) -{ -#ifdef unix - __sync_fetch_and_add((int *)latch, -Share); -#else - _InterlockedExchangeAdd((int *)latch, -Share); -#endif -} - // Buffer Pool mgr // find segment in pool @@ -851,12 +1166,9 @@ int flag; #ifdef unix flag = PROT_READ | ( bt->mgr->mode == BT_ro ? 0 : PROT_WRITE ); - pool->map = mmap (0, (bt->mgr->poolmask+1) << bt->mgr->page_bits, flag, MAP_SHARED, bt->mgr->idx, off); + pool->map = mmap (0, (bt->mgr->poolmask+1) << bt->mgr->page_bits, flag, MAP_SHARED | MAP_POPULATE, bt->mgr->idx, off); if( pool->map == MAP_FAILED ) return bt->err = BTERR_map; - - // clear out madvise issued bits - memset (bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8) / 8), 0, (bt->mgr->poolmask + 8)/8); #else flag = ( bt->mgr->mode == BT_ro ? 
PAGE_READONLY : PAGE_READWRITE ); pool->hmap = CreateFileMapping(bt->mgr->idx, NULL, flag, (DWORD)(limit >> 32), (DWORD)limit, NULL); @@ -871,10 +1183,32 @@ int flag; return bt->err = 0; } +// calculate page within pool + +BtPage bt_page (BtDb *bt, BtPool *pool, uid page_no) +{ +uint subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping +BtPage page; + + page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits)); + return page; +} + +// release pool pin + +void bt_unpinpool (BtPool *pool) +{ +#ifdef unix + __sync_fetch_and_add(&pool->pin, -1); +#else + _InterlockedDecrement16 (&pool->pin); +#endif +} + // find or place requested page in segment-pool // return pool table entry, incrementing pin -BtPool *bt_pinpage(BtDb *bt, uid page_no) +BtPool *bt_pinpool(BtDb *bt, uid page_no) { BtPool *pool, *node, *next; uint slot, idx, victim; @@ -882,7 +1216,7 @@ uint slot, idx, victim; // lock hash table chain idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize; - bt_readlock (&bt->mgr->latch[idx]); + bt_spinreadlock (&bt->mgr->latch[idx]); // look up in hash table @@ -890,17 +1224,17 @@ uint slot, idx, victim; #ifdef unix __sync_fetch_and_add(&pool->pin, 1); #else - _InterlockedIncrement (&pool->pin); + _InterlockedIncrement16 (&pool->pin); #endif - bt_releaseread (&bt->mgr->latch[idx]); + bt_spinreleaseread (&bt->mgr->latch[idx]); pool->lru++; return pool; } // upgrade to write lock - bt_releaseread (&bt->mgr->latch[idx]); - bt_writelock (&bt->mgr->latch[idx]); + bt_spinreleaseread (&bt->mgr->latch[idx]); + bt_spinwritelock (&bt->mgr->latch[idx]); // try to find page in pool with write lock @@ -908,9 +1242,9 @@ uint slot, idx, victim; #ifdef unix __sync_fetch_and_add(&pool->pin, 1); #else - _InterlockedIncrement (&pool->pin); + _InterlockedIncrement16 (&pool->pin); #endif - bt_releasewrite (&bt->mgr->latch[idx]); + bt_spinreleasewrite (&bt->mgr->latch[idx]); pool->lru++; return pool; } @@ -921,7 +1255,7 @@ uint slot, idx, victim; #ifdef unix slot = __sync_fetch_and_add(&bt->mgr->poolcnt, 1); #else - slot = _InterlockedIncrement (&bt->mgr->poolcnt) - 1; + slot = _InterlockedIncrement16 (&bt->mgr->poolcnt) - 1; #endif if( ++slot < bt->mgr->poolmax ) { @@ -935,9 +1269,9 @@ uint slot, idx, victim; #ifdef unix __sync_fetch_and_add(&pool->pin, 1); #else - _InterlockedIncrement (&pool->pin); + _InterlockedIncrement16 (&pool->pin); #endif - bt_releasewrite (&bt->mgr->latch[idx]); + bt_spinreleasewrite (&bt->mgr->latch[idx]); return pool; } @@ -947,7 +1281,7 @@ uint slot, idx, victim; #ifdef unix __sync_fetch_and_add(&bt->mgr->poolcnt, -1); #else - _InterlockedDecrement (&bt->mgr->poolcnt); + _InterlockedDecrement16 (&bt->mgr->poolcnt); #endif while( 1 ) { @@ -961,7 +1295,7 @@ uint slot, idx, victim; // try to get write lock // skip entry if not obtained - if( !bt_writetry (&bt->mgr->latch[victim]) ) + if( !bt_spinwritetry (&bt->mgr->latch[victim]) ) continue; // if pool entry is empty @@ -969,7 +1303,7 @@ uint slot, idx, victim; // skip this entry if( !(pool = bt_findlru(bt, bt->mgr->hash[victim])) ) { - bt_releasewrite (&bt->mgr->latch[victim]); + bt_spinreleasewrite (&bt->mgr->latch[victim]); continue; } @@ -985,7 +1319,7 @@ uint slot, idx, victim; if( node = pool->hashnext ) node->hashprev = pool->hashprev; - bt_releasewrite (&bt->mgr->latch[victim]); + bt_spinreleasewrite (&bt->mgr->latch[victim]); // remove old file mapping #ifdef unix @@ -1007,439 +1341,793 @@ uint slot, idx, victim; #ifdef unix __sync_fetch_and_add(&pool->pin, 1); #else - _InterlockedIncrement 
(&pool->pin); + _InterlockedIncrement16 (&pool->pin); #endif - bt_releasewrite (&bt->mgr->latch[idx]); + bt_spinreleasewrite (&bt->mgr->latch[idx]); return pool; } } // place write, read, or parent lock on requested page_no. -// pin to buffer pool and return page pointer -BTERR bt_lockpage(BtDb *bt, uid page_no, BtLock mode, BtPage *pageptr) +void bt_lockpage(BtLock mode, BtLatchSet *set) { -uint subpage; -BtPool *pool; -BtPage page; - - // find/create maping in pool table - // and pin our pool slot - - if( pool = bt_pinpage(bt, page_no) ) - subpage = (uint)(page_no & bt->mgr->poolmask); // page within mapping - else - return bt->err; - - page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits)); -#ifdef unix - { - uint idx = subpage / 8; - uint bit = subpage % 8; - - if( ~((bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8)/8))[idx] >> bit) & 1 ) { - madvise (page, bt->mgr->page_size, MADV_WILLNEED); - (bt->mgr->pooladvise + pool->slot * ((bt->mgr->poolmask + 8)/8))[idx] |= 1 << bit; - } - } -#endif - switch( mode ) { case BtLockRead: - bt_readlock (page->latch->readwr); + bt_readlock (set->readwr); break; case BtLockWrite: - bt_writelock (page->latch->readwr); + bt_writelock (set->readwr); break; case BtLockAccess: - bt_readlock (page->latch->access); + bt_readlock (set->access); break; case BtLockDelete: - bt_writelock (page->latch->access); + bt_writelock (set->access); break; case BtLockParent: - bt_writelock (page->latch->parent); + bt_writelock (set->parent); break; - default: - return bt->err = BTERR_lock; } - - if( pageptr ) - *pageptr = page; - return bt->err = 0; } // remove write, read, or parent lock on requested page -BTERR bt_unlockpage(BtDb *bt, uid page_no, BtLock mode) +void bt_unlockpage(BtLock mode, BtLatchSet *set) { -uint subpage, idx; -BtPool *pool; -BtPage page; - - // since page is pinned - // it should still be in the buffer pool - // and is in no danger of being a victim for reuse - - idx = (uint)(page_no >> bt->mgr->seg_bits) % bt->mgr->hashsize; - bt_readlock (&bt->mgr->latch[idx]); - - if( pool = bt_findpool(bt, page_no, idx) ) - subpage = (uint)(page_no & bt->mgr->poolmask); - else - return bt->err = BTERR_hash; - - bt_releaseread (&bt->mgr->latch[idx]); - page = (BtPage)(pool->map + (subpage << bt->mgr->page_bits)); - switch( mode ) { case BtLockRead: - bt_releaseread (page->latch->readwr); + bt_releaseread (set->readwr); break; case BtLockWrite: - bt_releasewrite (page->latch->readwr); + bt_releasewrite (set->readwr); break; case BtLockAccess: - bt_releaseread (page->latch->access); + bt_releaseread (set->access); break; case BtLockDelete: - bt_releasewrite (page->latch->access); + bt_releasewrite (set->access); break; case BtLockParent: - bt_releasewrite (page->latch->parent); + bt_releasewrite (set->parent); break; - default: - return bt->err = BTERR_lock; } +} -#ifdef unix - __sync_fetch_and_add(&pool->pin, -1); +// allocate a new page and write page into it + +uid bt_newpage(BtDb *bt, BtPage page) +{ +BtPageSet set[1]; +uid new_page; +int reuse; + + // lock allocation page + + bt_spinwritelock(bt->mgr->latchmgr->lock); + + // use empty chain first + // else allocate empty page + + if( new_page = bt_getid(bt->mgr->latchmgr->alloc[1].right) ) { + if( set->pool = bt_pinpool (bt, new_page) ) + set->page = bt_page (bt, set->pool, new_page); + else + return 0; + + bt_putid(bt->mgr->latchmgr->alloc[1].right, bt_getid(set->page->right)); + bt_unpinpool (set->pool); + reuse = 1; + } else { + new_page = bt_getid(bt->mgr->latchmgr->alloc->right); + 
bt_putid(bt->mgr->latchmgr->alloc->right, new_page+1); + reuse = 0; + } +#ifdef unix + if ( pwrite(bt->mgr->idx, page, bt->mgr->page_size, new_page << bt->mgr->page_bits) < bt->mgr->page_size ) + return bt->err = BTERR_wrt, 0; + + // if writing first page of pool block, zero last page in the block + + if ( !reuse && bt->mgr->poolmask > 0 && (new_page & bt->mgr->poolmask) == 0 ) + { + // use zero buffer to write zeros + if ( pwrite(bt->mgr->idx,bt->zero, bt->mgr->page_size, (new_page | bt->mgr->poolmask) << bt->mgr->page_bits) < bt->mgr->page_size ) + return bt->err = BTERR_wrt, 0; + } #else - _InterlockedDecrement (&pool->pin); + // bring new page into pool and copy page. + // this will extend the file into the new pages. + + if( set->pool = bt_pinpool (bt, new_page) ) + set->page = bt_page (bt, set->pool, new_page); + else + return 0; + + memcpy(set->page, page, bt->mgr->page_size); + bt_unpinpool (set->pool); #endif - return bt->err = 0; + // unlock allocation latch and return new page no + + bt_spinreleasewrite(bt->mgr->latchmgr->lock); + return new_page; +} + +// find slot in page for given key at a given level + +int bt_findslot (BtPageSet *set, unsigned char *key, uint len) +{ +uint diff, higher = set->page->cnt, low = 1, slot; + + // make stopper key an infinite fence value + + if( bt_getid (set->page->right) ) + higher++; + + // low is the lowest candidate. + // loop ends when they meet + + // higher is already + // tested as .ge. the given key. + + while( diff = higher - low ) { + slot = low + ( diff >> 1 ); + if( keycmp (keyptr(set->page, slot), key, len) < 0 ) + low = slot + 1; + else + higher = slot; + } + + if( higher <= set->page->cnt ) + return higher; + + // if leaf page, compare against fence value + + // return zero if key is on right link page + // or return slot beyond last key + + if( set->page->lvl || keycmp ((BtKey)set->page->fence, key, len) < 0 ) + return 0; + + return higher; } -// deallocate a deleted page -// place on free chain out of allocator page +// find and load page at given level for given key +// leave page rd or wr locked as requested -BTERR bt_freepage(BtDb *bt, uid page_no) +int bt_loadpage (BtDb *bt, BtPageSet *set, unsigned char *key, uint len, uint lvl, uint lock) { - // obtain delete lock on deleted page +uid page_no = ROOT_page, prevpage = 0; +uint drill = 0xff, slot; +BtLatchSet *prevlatch; +uint mode, prevmode; +BtPool *prevpool; + + // start at root of btree and drill down + + do { + // determine lock mode of drill level + mode = (lock == BtLockWrite) && (drill == lvl) ? 
BtLockWrite : BtLockRead; + + set->latch = bt_pinlatch (bt, page_no); + set->page_no = page_no; - if( bt_lockpage(bt, page_no, BtLockDelete, NULL) ) + // pin page contents + + if( set->pool = bt_pinpool (bt, page_no) ) + set->page = bt_page (bt, set->pool, page_no); + else + return 0; + + // obtain access lock using lock chaining with Access mode + + if( page_no > ROOT_page ) + bt_lockpage(BtLockAccess, set->latch); + + // release & unpin parent page + + if( prevpage ) { + bt_unlockpage(prevmode, prevlatch); + bt_unpinlatch (prevlatch); + bt_unpinpool (prevpool); + prevpage = 0; + } + + // obtain read lock using lock chaining + + bt_lockpage(mode, set->latch); + + if( page_no > ROOT_page ) + bt_unlockpage(BtLockAccess, set->latch); + + // re-read and re-lock root after determining actual level of root + + if( set->page->lvl != drill) { + if ( set->page_no != ROOT_page ) + return bt->err = BTERR_struct, 0; + + drill = set->page->lvl; + + if( lock == BtLockWrite && drill == lvl ) { + bt_unlockpage(mode, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); + continue; + } + } + + prevpage = set->page_no; + prevlatch = set->latch; + prevpool = set->pool; + prevmode = mode; + + // if page is being deleted and we should continue right + + if( set->page->kill && set->page->goright ) { + page_no = bt_getid (set->page->right); + continue; + } + + // otherwise, wait for deleted node to clear + + if( set->page->kill ) { + bt_unlockpage(mode, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); + page_no = ROOT_page; + prevpage = 0; + drill = 0xff; +#ifdef unix + sched_yield(); +#else + SwitchToThread(); +#endif + continue; + } + + // find key on page at this level + // and descend to requested level + + if( slot = bt_findslot (set, key, len) ) { + if( drill == lvl ) + return slot; + + if( slot > set->page->cnt ) + return bt->err = BTERR_struct; + + while( slotptr(set->page, slot)->dead ) + if( slot++ < set->page->cnt ) + continue; + else + return bt->err = BTERR_struct, 0; + + page_no = bt_getid(slotptr(set->page, slot)->id); + drill--; + continue; + } + + // or slide right into next page + + page_no = bt_getid(set->page->right); + + } while( page_no ); + + // return error on end of right chain + + bt->err = BTERR_struct; + return 0; // return error +} + +// drill down fixing fence values for left sibling tree + +// call with set write locked +// return with set unlocked & unpinned. 
+ +BTERR bt_fixfences (BtDb *bt, BtPageSet *set, unsigned char *newfence) +{ +unsigned char oldfence[256]; +BtPageSet next[1]; +int chk; + + memcpy (oldfence, set->page->fence, 256); + next->page_no = bt_getid(slotptr(set->page, set->page->cnt)->id); + + while( !set->page->kill && set->page->lvl ) { + next->latch = bt_pinlatch (bt, next->page_no); + bt_lockpage (BtLockParent, next->latch); + bt_lockpage (BtLockAccess, next->latch); + bt_lockpage (BtLockWrite, next->latch); + bt_unlockpage (BtLockAccess, next->latch); + + if( next->pool = bt_pinpool (bt, next->page_no) ) + next->page = bt_page (bt, next->pool, next->page_no); + else return bt->err; - // obtain write lock on deleted page + chk = keycmp ((BtKey)next->page->fence, oldfence + 1, *oldfence); + + if( chk < 0 ) { + next->page_no = bt_getid (next->page->right); + bt_unlockpage (BtLockWrite, next->latch); + bt_unlockpage (BtLockParent, next->latch); + bt_unpinlatch (next->latch); + bt_unpinpool (next->pool); + continue; + } - if( bt_lockpage(bt, page_no, BtLockWrite, &bt->temp) ) + if( chk > 0 ) + return bt->err = BTERR_struct; + + if( bt_fixfences (bt, next, newfence) ) return bt->err; + break; + } + + memcpy (set->page->fence, newfence, 256); + + bt_unlockpage (BtLockWrite, set->latch); + bt_unlockpage (BtLockParent, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); + return 0; +} + +// return page to free list +// page must be delete & write locked + +void bt_freepage (BtDb *bt, BtPageSet *set) +{ // lock allocation page - if ( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) ) - return bt->err; + bt_spinwritelock (bt->mgr->latchmgr->lock); + + // store chain in second right + bt_putid(set->page->right, bt_getid(bt->mgr->latchmgr->alloc[1].right)); + bt_putid(bt->mgr->latchmgr->alloc[1].right, set->page_no); + set->page->free = 1; + + // unlock released page + + bt_unlockpage (BtLockDelete, set->latch); + bt_unlockpage (BtLockWrite, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); + + // unlock allocation page + + bt_spinreleasewrite (bt->mgr->latchmgr->lock); +} + +// remove the root level by promoting its only child +// call with parent and child pages + +BTERR bt_removeroot (BtDb *bt, BtPageSet *root, BtPageSet *child) +{ +uid next = 0; + + do { + if( next ) { + child->latch = bt_pinlatch (bt, next); + bt_lockpage (BtLockDelete, child->latch); + bt_lockpage (BtLockWrite, child->latch); + + if( child->pool = bt_pinpool (bt, next) ) + child->page = bt_page (bt, child->pool, next); + else + return bt->err; + + child->page_no = next; + } + + memcpy (root->page, child->page, bt->mgr->page_size); + next = bt_getid (slotptr(child->page, child->page->cnt)->id); + bt_freepage (bt, child); + } while( root->page->lvl > 1 && root->page->cnt == 1 ); + + bt_unlockpage (BtLockWrite, root->latch); + bt_unpinlatch (root->latch); + bt_unpinpool (root->pool); + return 0; +} + +// pull right page over ourselves in simple merge + +BTERR bt_mergeright (BtDb *bt, BtPageSet *set, BtPageSet *parent, BtPageSet *right, uint slot, uint idx) +{ + // install ourselves as child page + // and delete ourselves from parent - // store chain in second right - bt_putid(bt->temp->right, bt_getid(bt->alloc[1].right)); - bt_putid(bt->alloc[1].right, page_no); + bt_putid (slotptr(parent->page, idx)->id, set->page_no); + slotptr(parent->page, slot)->dead = 1; + parent->page->act--; - // unlock page zero + // collapse any empty slots - if( bt_unlockpage(bt, ALLOC_page, BtLockWrite) ) - return bt->err; + while( idx = 
parent->page->cnt - 1 ) + if( slotptr(parent->page, idx)->dead ) { + *slotptr(parent->page, idx) = *slotptr(parent->page, idx + 1); + memset (slotptr(parent->page, parent->page->cnt--), 0, sizeof(BtSlot)); + } else + break; - // remove write lock on deleted node + memcpy (set->page, right->page, bt->mgr->page_size); + bt_unlockpage (BtLockParent, right->latch); - if( bt_unlockpage(bt, page_no, BtLockWrite) ) - return bt->err; + bt_freepage (bt, right); - // remove delete lock on deleted node + // do we need to remove a btree level? + // (leave the first page of leaves alone) - if( bt_unlockpage(bt, page_no, BtLockDelete) ) - return bt->err; + if( parent->page_no == ROOT_page && parent->page->cnt == 1 ) + if( set->page->lvl ) + return bt_removeroot (bt, parent, set); + bt_unlockpage (BtLockWrite, parent->latch); + bt_unlockpage (BtLockDelete, set->latch); + bt_unlockpage (BtLockWrite, set->latch); + bt_unpinlatch (parent->latch); + bt_unpinpool (parent->pool); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); return 0; } -// allocate a new page and write page into it +// remove both child and parent from the btree +// from the fence position in the parent +// call with both pages locked for writing -uid bt_newpage(BtDb *bt, BtPage page) +BTERR bt_removeparent (BtDb *bt, BtPageSet *child, BtPageSet *parent, BtPageSet *right, BtPageSet *rparent, uint lvl) { -BtPool *pool; -uid new_page; -BtPage pmap; -int subpage; -int reuse; +unsigned char pagefence[256]; +uint idx; - // lock page zero + // pull right sibling over ourselves and unlock - if ( bt_lockpage(bt, ALLOC_page, BtLockWrite, &bt->alloc) ) - return 0; + memcpy (child->page, right->page, bt->mgr->page_size); - // use empty chain first - // else allocate empty page + bt_unlockpage (BtLockWrite, child->latch); + bt_unpinlatch (child->latch); + bt_unpinpool (child->pool); - if( new_page = bt_getid(bt->alloc[1].right) ) { - if( bt_lockpage (bt, new_page, BtLockWrite, &bt->temp) ) - return 0; - bt_putid(bt->alloc[1].right, bt_getid(bt->temp->right)); - if( bt_unlockpage (bt, new_page, BtLockWrite) ) - return 0; - reuse = 1; - } else { - new_page = bt_getid(bt->alloc->right); - bt_putid(bt->alloc->right, new_page+1); - reuse = 0; - } + // install ourselves into right link of old right page -#ifdef unix - memset(bt->zero, 0, sizeof(BtLatchSet)); // clear locks - memcpy((char *)bt->zero + sizeof(BtLatchSet), (char *)page + sizeof(BtLatchSet), bt->mgr->page_size - sizeof(BtLatchSet)); + bt_putid (right->page->right, child->page_no); + right->page->goright = 1; // tell bt_loadpage to go right to us + right->page->kill = 1; - if ( pwrite(bt->mgr->idx, bt->zero, bt->mgr->page_size, new_page << bt->mgr->page_bits) < bt->mgr->page_size ) - return bt->err = BTERR_wrt, 0; + bt_unlockpage (BtLockWrite, right->latch); - // if writing first page of pool block, zero last page in the block + // remove our slot from our parent + // signal to move right - if ( !reuse && bt->mgr->poolmask > 0 && (new_page & bt->mgr->poolmask) == 0 ) - { - // use zero buffer to write zeros - memset(bt->zero, 0, bt->mgr->page_size); - if ( pwrite(bt->mgr->idx,bt->zero, bt->mgr->page_size, (new_page | bt->mgr->poolmask) << bt->mgr->page_bits) < bt->mgr->page_size ) - return bt->err = BTERR_wrt, 0; - } -#else - // bring new page into pool and copy page. - // this will extend the file into the new pages. 
+ parent->page->goright = 1; // tell bt_loadpage to go right to rparent + parent->page->kill = 1; + parent->page->act--; - if( bt_lockpage(bt, new_page, BtLockWrite, &pmap) ) - return 0; + // redirect right page pointer in right parent to us - memcpy(pmap, page, bt->mgr->page_size); + for( idx = 0; idx++ < rparent->page->cnt; ) + if( !slotptr(rparent->page, idx)->dead ) + break; - if( bt_unlockpage (bt, new_page, BtLockWrite) ) - return 0; -#endif - // unlock page allocation page + if( bt_getid (slotptr(rparent->page, idx)->id) != right->page_no ) + return bt->err = BTERR_struct; - if ( bt_unlockpage(bt, ALLOC_page, BtLockWrite) ) - return 0; + bt_putid (slotptr(rparent->page, idx)->id, child->page_no); + bt_unlockpage (BtLockWrite, rparent->latch); + bt_unpinlatch (rparent->latch); + bt_unpinpool (rparent->pool); - return new_page; + // free the right page + + bt_lockpage (BtLockDelete, right->latch); + bt_lockpage (BtLockWrite, right->latch); + bt_freepage (bt, right); + + // save parent page fence value + + memcpy (pagefence, parent->page->fence, 256); + bt_unlockpage (BtLockWrite, parent->latch); + + return bt_removepage (bt, parent, lvl, pagefence); } -// find slot in page for given key at a given level +// remove page from btree +// call with page unlocked +// returns with page on free list -int bt_findslot (BtDb *bt, unsigned char *key, uint len) +BTERR bt_removepage (BtDb *bt, BtPageSet *set, uint lvl, unsigned char *pagefence) { -uint diff, higher = bt->page->cnt, low = 1, slot; -uint good = 0; +BtPageSet parent[1], sibling[1], rparent[1]; +unsigned char newfence[256]; +uint slot, idx; +BtKey ptr; - // make stopper key an infinite fence value + // load and lock our parent - if( bt_getid (bt->page->right) ) - higher++; - else - good++; + while( 1 ) { + if( !(slot = bt_loadpage (bt, parent, pagefence+1, *pagefence, lvl+1, BtLockWrite)) ) + return bt->err; - // low is the next candidate, higher is already - // tested as .ge. the given key, loop ends when they meet + // do we show up in our parent yet? - while( diff = higher - low ) { - slot = low + ( diff >> 1 ); - if( keycmp (keyptr(bt->page, slot), key, len) < 0 ) - low = slot + 1; - else - higher = slot, good++; + if( set->page_no != bt_getid (slotptr (parent->page, slot)->id) ) { + bt_unlockpage (BtLockWrite, parent->latch); + bt_unpinlatch (parent->latch); + bt_unpinpool (parent->pool); +#ifdef linux + sched_yield(); +#else + SwitchToThread(); +#endif + continue; } - // return zero if key is on right link page + // can we do a simple merge entirely + // between siblings on the parent page? - return good ? higher : 0; -} + if( slot < parent->page->cnt ) { + // find our right neighbor + // right must exist because the stopper prevents + // the rightmost page from deleting -// find and load page at given level for given key -// leave page rd or wr locked as requested + for( idx = slot; idx++ < parent->page->cnt; ) + if( !slotptr(parent->page, idx)->dead ) + break; -int bt_loadpage (BtDb *bt, unsigned char *key, uint len, uint lvl, uint lock) -{ -uid page_no = ROOT_page, prevpage = 0; -uint drill = 0xff, slot; -uint mode, prevmode; + sibling->page_no = bt_getid (slotptr (parent->page, idx)->id); - // start at root of btree and drill down + bt_lockpage (BtLockDelete, set->latch); + bt_lockpage (BtLockWrite, set->latch); - do { - // determine lock mode of drill level - mode = (lock == BtLockWrite) && (drill == lvl) ? 
BtLockWrite : BtLockRead; + // merge right if sibling shows up in + // our parent and is not being killed - bt->page_no = page_no; + if( sibling->page_no == bt_getid (set->page->right) ) { + sibling->latch = bt_pinlatch (bt, sibling->page_no); + bt_lockpage (BtLockParent, sibling->latch); + bt_lockpage (BtLockDelete, sibling->latch); + bt_lockpage (BtLockWrite, sibling->latch); - // obtain access lock using lock chaining with Access mode + if( sibling->pool = bt_pinpool (bt, sibling->page_no) ) + sibling->page = bt_page (bt, sibling->pool, sibling->page_no); + else + return bt->err; - if( page_no > ROOT_page ) - if( bt_lockpage(bt, page_no, BtLockAccess, NULL) ) - return 0; + if( !sibling->page->kill ) + return bt_mergeright(bt, set, parent, sibling, slot, idx); - if( prevpage ) - if( bt_unlockpage(bt, prevpage, prevmode) ) - return 0; + // try again later - // obtain read lock using lock chaining - // and pin page contents + bt_unlockpage (BtLockWrite, sibling->latch); + bt_unlockpage (BtLockParent, sibling->latch); + bt_unlockpage (BtLockDelete, sibling->latch); + bt_unpinlatch (sibling->latch); + bt_unpinpool (sibling->pool); + } - if( bt_lockpage(bt, page_no, mode, &bt->page) ) - return 0; + bt_unlockpage (BtLockDelete, set->latch); + bt_unlockpage (BtLockWrite, set->latch); + bt_unlockpage (BtLockWrite, parent->latch); + bt_unpinlatch (parent->latch); + bt_unpinpool (parent->pool); +#ifdef linux + sched_yield(); +#else + SwitchToThread(); +#endif + continue; + } - if( page_no > ROOT_page ) - if( bt_unlockpage(bt, page_no, BtLockAccess) ) - return 0; + // find our left neighbor in our parent page - // re-read and re-lock root after determining actual level of root + for( idx = slot; --idx; ) + if( !slotptr(parent->page, idx)->dead ) + break; - if( bt->page->lvl != drill) { - if ( bt->page_no != ROOT_page ) - return bt->err = BTERR_struct, 0; - - drill = bt->page->lvl; + // if no left neighbor, delete ourselves and our parent - if( lock == BtLockWrite && drill == lvl ) - if( bt_unlockpage(bt, page_no, mode) ) - return 0; - else - continue; - } + if( !idx ) { + bt_lockpage (BtLockAccess, set->latch); + bt_lockpage (BtLockWrite, set->latch); + bt_unlockpage (BtLockAccess, set->latch); - // find key on page at this level - // and descend to requested level + rparent->page_no = bt_getid (parent->page->right); + rparent->latch = bt_pinlatch (bt, rparent->page_no); - if( !bt->page->kill && (slot = bt_findslot (bt, key, len)) ) { - if( drill == lvl ) - return slot; + bt_lockpage (BtLockAccess, rparent->latch); + bt_lockpage (BtLockWrite, rparent->latch); + bt_unlockpage (BtLockAccess, rparent->latch); - while( slotptr(bt->page, slot)->dead ) - if( slot++ < bt->page->cnt ) - continue; - else { - page_no = bt_getid(bt->page->right); - goto slideright; - } + if( rparent->pool = bt_pinpool (bt, rparent->page_no) ) + rparent->page = bt_page (bt, rparent->pool, rparent->page_no); + else + return bt->err; - page_no = bt_getid(slotptr(bt->page, slot)->id); - drill--; - } + if( !rparent->page->kill ) { + sibling->page_no = bt_getid (set->page->right); + sibling->latch = bt_pinlatch (bt, sibling->page_no); - // or slide right into next page - // (slide left from deleted page) + bt_lockpage (BtLockAccess, sibling->latch); + bt_lockpage (BtLockWrite, sibling->latch); + bt_unlockpage (BtLockAccess, sibling->latch); - else - page_no = bt_getid(bt->page->right); + if( sibling->pool = bt_pinpool (bt, sibling->page_no) ) + sibling->page = bt_page (bt, sibling->pool, sibling->page_no); + else + return bt->err; 
- // continue down / right using overlapping locks - // to protect pages being killed or split. + if( !sibling->page->kill ) + return bt_removeparent (bt, set, parent, sibling, rparent, lvl+1); -slideright: - prevpage = bt->page_no; - prevmode = mode; - } while( page_no ); + // try again later - // return error on end of right chain + bt_unlockpage (BtLockWrite, sibling->latch); + bt_unpinlatch (sibling->latch); + bt_unpinpool (sibling->pool); + } - bt->err = BTERR_struct; - return 0; // return error -} + bt_unlockpage (BtLockWrite, set->latch); + bt_unlockpage (BtLockWrite, rparent->latch); + bt_unpinlatch (rparent->latch); + bt_unpinpool (rparent->pool); -// find and delete key on page by marking delete flag bit -// when page becomes empty, delete it + bt_unlockpage (BtLockWrite, parent->latch); + bt_unpinlatch (parent->latch); + bt_unpinpool (parent->pool); +#ifdef linux + sched_yield(); +#else + SwitchToThread(); +#endif + continue; + } -BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len, uint lvl) -{ -unsigned char lowerkey[256], higherkey[256]; -uid page_no, right; -uint slot, tod; -BtKey ptr; + // redirect parent to our left sibling + // lock and map our left sibling's page - if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) ) - ptr = keyptr(bt->page, slot); + sibling->page_no = bt_getid (slotptr(parent->page, idx)->id); + sibling->latch = bt_pinlatch (bt, sibling->page_no); + + // wait our turn on fence key maintenance + + bt_lockpage(BtLockParent, sibling->latch); + bt_lockpage(BtLockAccess, sibling->latch); + bt_lockpage(BtLockWrite, sibling->latch); + bt_unlockpage(BtLockAccess, sibling->latch); + + if( sibling->pool = bt_pinpool (bt, sibling->page_no) ) + sibling->page = bt_page (bt, sibling->pool, sibling->page_no); else return bt->err; - // if key is found delete it, otherwise ignore request + // wait until left sibling is in our parent - if( !keycmp (ptr, key, len) ) - if( slotptr(bt->page, slot)->dead == 0 ) { - slotptr(bt->page,slot)->dead = 1; - if( slot < bt->page->cnt ) - bt->page->dirty = 1; - bt->page->act--; - } + if( bt_getid (sibling->page->right) != set->page_no ) { + bt_unlockpage (BtLockWrite, parent->latch); + bt_unlockpage (BtLockWrite, sibling->latch); + bt_unlockpage (BtLockParent, sibling->latch); + bt_unpinlatch (parent->latch); + bt_unpinpool (parent->pool); + bt_unpinlatch (sibling->latch); + bt_unpinpool (sibling->pool); +#ifdef linux + sched_yield(); +#else + SwitchToThread(); +#endif + continue; + } - // return if page is not empty, or it has no right sibling + // delete our left sibling from parent - right = bt_getid(bt->page->right); - page_no = bt->page_no; + slotptr(parent->page,idx)->dead = 1; + parent->page->dirty = 1; + parent->page->act--; - if( !right || bt->page->act ) - return bt_unlockpage(bt, page_no, BtLockWrite); + // redirect our parent slot to our left sibling - // obtain Parent lock over write lock + bt_putid (slotptr(parent->page, slot)->id, sibling->page_no); + memcpy (sibling->page->right, set->page->right, BtId); - if( bt_lockpage(bt, page_no, BtLockParent, NULL) ) - return bt->err; + // collapse dead slots from parent - // keep copy of key to delete + while( idx = parent->page->cnt - 1 ) + if( slotptr(parent->page, idx)->dead ) { + *slotptr(parent->page, idx) = *slotptr(parent->page, parent->page->cnt); + memset (slotptr(parent->page, parent->page->cnt--), 0, sizeof(BtSlot)); + } else + break; - ptr = keyptr(bt->page, bt->page->cnt); - memcpy(lowerkey, ptr, ptr->len + 1); + // free our original page - // lock and 
map right page + bt_lockpage (BtLockDelete, set->latch); + bt_lockpage (BtLockWrite, set->latch); + bt_freepage (bt, set); - if ( bt_lockpage(bt, right, BtLockWrite, &bt->temp) ) - return bt->err; + // go down the left node's fence keys to the leaf level + // and update the fence keys in each page - // pull contents of next page into current empty page - memcpy((char *)bt->page + sizeof(BtLatchSet), (char *)bt->temp + sizeof(BtLatchSet), bt->mgr->page_size - sizeof(BtLatchSet)); + memcpy (newfence, parent->page->fence, 256); - // keep copy of key to update - ptr = keyptr(bt->temp, bt->temp->cnt); - memcpy(higherkey, ptr, ptr->len + 1); + if( bt_fixfences (bt, sibling, newfence) ) + return bt->err; - // Mark right page as deleted and point it to left page - // until we can post updates at higher level. + // promote sibling as new root? - bt_putid(bt->temp->right, page_no); - bt->temp->kill = 1; - bt->temp->cnt = 0; + if( parent->page_no == ROOT_page && parent->page->cnt == 1 ) + if( sibling->page->lvl ) { + sibling->latch = bt_pinlatch (bt, sibling->page_no); + bt_lockpage (BtLockDelete, sibling->latch); + bt_lockpage (BtLockWrite, sibling->latch); - if( bt_unlockpage(bt, right, BtLockWrite) ) - return bt->err; - if( bt_unlockpage(bt, page_no, BtLockWrite) ) + if( sibling->pool = bt_pinpool (bt, sibling->page_no) ) + sibling->page = bt_page (bt, sibling->pool, sibling->page_no); + else return bt->err; - // delete old lower key to consolidated node + return bt_removeroot (bt, parent, sibling); + } - if( bt_deletekey (bt, lowerkey + 1, *lowerkey, lvl + 1) ) - return bt->err; + bt_unlockpage (BtLockWrite, parent->latch); + bt_unpinlatch (parent->latch); + bt_unpinpool (parent->pool); + + return 0; + } +} - // redirect higher key directly to consolidated node +// find and delete key on page by marking delete flag bit +// if page becomes empty, delete it from the btree - tod = (uint)time(NULL); +BTERR bt_deletekey (BtDb *bt, unsigned char *key, uint len) +{ +unsigned char pagefence[256]; +uint slot, idx, found; +BtPageSet set[1]; +BtKey ptr; - if( bt_insertkey (bt, higherkey+1, *higherkey, lvl + 1, page_no, tod) ) + if( slot = bt_loadpage (bt, set, key, len, 0, BtLockWrite) ) + ptr = keyptr(set->page, slot); + else return bt->err; - // obtain write lock and - // add right block to free chain + // if key is found delete it, otherwise ignore request - if( bt_freepage (bt, right) ) - return bt->err; + if( found = slot <= set->page->cnt ) + if( found = !keycmp (ptr, key, len) ) + if( found = slotptr(set->page, slot)->dead == 0 ) { + slotptr(set->page,slot)->dead = 1; + set->page->dirty = 1; + set->page->act--; + + // collapse empty slots + + while( idx = set->page->cnt - 1 ) + if( slotptr(set->page, idx)->dead ) { + *slotptr(set->page, idx) = *slotptr(set->page, idx + 1); + memset (slotptr(set->page, set->page->cnt--), 0, sizeof(BtSlot)); + } else + break; + } + + if( set->page->act ) { + bt_unlockpage(BtLockWrite, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); + return bt->found = found, 0; + } + + memcpy (pagefence, set->page->fence, 256); + set->page->kill = 1; - // remove ParentModify lock + bt_unlockpage (BtLockWrite, set->latch); - if( bt_unlockpage(bt, page_no, BtLockParent) ) + if( bt_removepage (bt, set, 0, pagefence) ) return bt->err; - + + bt->found = found; return 0; } @@ -1447,44 +2135,44 @@ BtKey ptr; uid bt_findkey (BtDb *bt, unsigned char *key, uint len) { +BtPageSet set[1]; uint slot; +uid id = 0; BtKey ptr; -uid id; - if( slot = bt_loadpage (bt, key, len, 0, 
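
The reworked bt_deletekey never moves key bytes on the spot: it tombstones the slot, marks the page dirty so bt_cleanpage knows there is space to reclaim, and only escalates to page removal once the active count reaches zero. A self-contained toy of just that bookkeeping (ToySlot/ToyPage are illustrative, not the file's structures):

typedef struct {
	unsigned dead:1;
} ToySlot;

typedef struct {
	unsigned cnt, act, dirty;
	ToySlot slot[32];		// 1-based, like the real slot array
} ToyPage;

//	returns 1 when the page just became empty and should be unlinked

int toy_delete (ToyPage *page, unsigned slot)
{
	if( slot > page->cnt || page->slot[slot].dead )
		return 0;			// not found, or already deleted

	page->slot[slot].dead = 1;		// key bytes stay where they are
	page->dirty = 1;			// cleanpage has work to do later
	return --page->act == 0;
}
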
BtLockRead) ) - ptr = keyptr(bt->page, slot); + if( slot = bt_loadpage (bt, set, key, len, 0, BtLockRead) ) + ptr = keyptr(set->page, slot); else return 0; // if key exists, return row-id // otherwise return 0 - if( ptr->len == len && !memcmp (ptr->key, key, len) ) - id = bt_getid(slotptr(bt->page,slot)->id); - else - id = 0; - - if ( bt_unlockpage(bt, bt->page_no, BtLockRead) ) - return 0; + if( slot <= set->page->cnt ) + if( !keycmp (ptr, key, len) ) + id = bt_getid(slotptr(set->page,slot)->id); + bt_unlockpage (BtLockRead, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); return id; } // check page for space available, // clean if necessary and return // 0 - page needs splitting -// 1 - go ahead +// >0 new slot value -uint bt_cleanpage(BtDb *bt, uint amt) +uint bt_cleanpage(BtDb *bt, BtPage page, uint amt, uint slot) { -uint nxt = bt->mgr->page_size; -BtPage page = bt->page; +uint nxt = bt->mgr->page_size, off; uint cnt = 0, idx = 0; uint max = page->cnt; +uint newslot = max; BtKey key; - if( page->min >= (page->cnt+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 ) - return 1; + if( page->min >= (max+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 ) + return slot; // skip cleanup if nothing to reclaim @@ -1499,116 +2187,134 @@ BtKey key; page->dirty = 0; page->act = 0; + // try cleaning up page first + // by removing deleted keys + while( cnt++ < max ) { - // always leave fence key in list - if( cnt < max && slotptr(bt->frame,cnt)->dead ) + if( cnt == slot ) + newslot = idx + 1; + if( slotptr(bt->frame,cnt)->dead ) continue; - // copy key - key = keyptr(bt->frame, cnt); - nxt -= key->len + 1; - memcpy ((unsigned char *)page + nxt, key, key->len + 1); + // if its not the fence key, + // copy the key across + + off = slotptr(bt->frame,cnt)->off; + + if( off >= sizeof(*page) ) { + key = keyptr(bt->frame, cnt); + off = nxt -= key->len + 1; + memcpy ((unsigned char *)page + nxt, key, key->len + 1); + } // copy slot + memcpy(slotptr(page, ++idx)->id, slotptr(bt->frame, cnt)->id, BtId); - if( !(slotptr(page, idx)->dead = slotptr(bt->frame, cnt)->dead) ) - page->act++; slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod; - slotptr(page, idx)->off = nxt; + slotptr(page, idx)->off = off; + page->act++; } + page->min = nxt; page->cnt = idx; - if( page->min >= (page->cnt+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 ) - return 1; + // see if page has enough space now, or does it need splitting? + + if( page->min >= (idx+1) * sizeof(BtSlot) + sizeof(*page) + amt + 1 ) + return newslot; return 0; } // split the root and raise the height of the btree -BTERR bt_splitroot(BtDb *bt, unsigned char *newkey, unsigned char *oldkey, uid page_no2) +BTERR bt_splitroot(BtDb *bt, BtPageSet *root, uid page_no2) { uint nxt = bt->mgr->page_size; -BtPage root = bt->page; +unsigned char leftkey[256]; uid new_page; // Obtain an empty page to use, and copy the current - // root contents into it + // root contents into it, e.g. 
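
bt_cleanpage rebuilds the page from the saved copy in bt->frame: key bytes are re-laid from the end of the page downward, the slot array is rebuilt from the front, and dead entries are simply not copied. A stripped-down sketch of that two-ended compaction, with made-up field names and a fixed toy page size:

#include <string.h>

#define TOY_PAGE_SIZE	4096

typedef struct {
	unsigned off;		// key offset within the page
	unsigned len;		// key length
	unsigned dead;		// tombstone flag
} MiniSlot;

//	copy live keys from the saved image (frame/old) into page/slot;
//	returns the new slot count

unsigned compact (unsigned char *page, MiniSlot *slot,
	unsigned char *frame, MiniSlot *old, unsigned oldcnt)
{
	unsigned nxt = TOY_PAGE_SIZE, idx = 0, cnt;

	for( cnt = 0; cnt < oldcnt; cnt++ ) {
		if( old[cnt].dead )
			continue;			// space reclaimed by skipping
		nxt -= old[cnt].len;			// keys grow down from the end
		memcpy (page + nxt, frame + old[cnt].off, old[cnt].len);
		slot[idx].off = nxt;			// slots grow up from the front
		slot[idx].len = old[cnt].len;
		slot[idx++].dead = 0;
	}

	return idx;
}
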
lower keys + + memcpy (leftkey, root->page->fence, 256); + root->page->posted = 1; - if( !(new_page = bt_newpage(bt, root)) ) + if( !(new_page = bt_newpage(bt, root->page)) ) return bt->err; // preserve the page info at the bottom - // and set rest to zero + // of higher keys and set rest to zero - memset(root+1, 0, bt->mgr->page_size - sizeof(*root)); + memset(root->page+1, 0, bt->mgr->page_size - sizeof(*root->page)); + memset(root->page->fence, 0, 256); + root->page->fence[0] = 2; + root->page->fence[1] = 0xff; + root->page->fence[2] = 0xff; - // insert first key on newroot page + // insert lower keys page fence key on newroot page - nxt -= *newkey + 1; - memcpy ((unsigned char *)root + nxt, newkey, *newkey + 1); - bt_putid(slotptr(root, 1)->id, new_page); - slotptr(root, 1)->off = nxt; + nxt -= *leftkey + 1; + memcpy ((unsigned char *)root->page + nxt, leftkey, *leftkey + 1); + bt_putid(slotptr(root->page, 1)->id, new_page); + slotptr(root->page, 1)->off = nxt; - // insert second key on newroot page + // insert stopper key on newroot page // and increase the root height - nxt -= *oldkey + 1; - memcpy ((unsigned char *)root + nxt, oldkey, *oldkey + 1); - bt_putid(slotptr(root, 2)->id, page_no2); - slotptr(root, 2)->off = nxt; + bt_putid(slotptr(root->page, 2)->id, page_no2); + slotptr(root->page, 2)->off = offsetof(struct BtPage_, fence); - bt_putid(root->right, 0); - root->min = nxt; // reset lowest used offset and key count - root->cnt = 2; - root->act = 2; - root->lvl++; + bt_putid(root->page->right, 0); + root->page->min = nxt; // reset lowest used offset and key count + root->page->cnt = 2; + root->page->act = 2; + root->page->lvl++; - // release root (bt->page) + // release and unpin root - return bt_unlockpage(bt, bt->page_no, BtLockWrite); + bt_unlockpage(BtLockWrite, root->latch); + bt_unpinlatch (root->latch); + bt_unpinpool (root->pool); + return 0; } // split already locked full node // return unlocked. 
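
The root split leaves ROOT_page where it is: the old root contents move to a freshly allocated page, and the root is rewritten with exactly two entries, the left page's fence key and an all-0xff stopper, which is how the tree gains a level without touching any parent pointers. A schematic with toy types (not the real BtPage layout):

typedef unsigned long long uid;

typedef struct {
	unsigned lvl, cnt;
	uid child[3];			// 1-based: child[1] = left half, child[2] = stopper
} ToyRoot;

//	the root's own page number never changes, so nothing else in the
//	tree needs to be re-pointed when the height grows

void split_root (ToyRoot *root, uid left_page, uid right_page)
{
	root->child[1] = left_page;	// keys up to the old fence key
	root->child[2] = right_page;	// everything beyond, behind the stopper
	root->cnt = 2;
	root->lvl++;
}
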
-BTERR bt_splitpage (BtDb *bt) +BTERR bt_splitpage (BtDb *bt, BtPageSet *set) { -uint cnt = 0, idx = 0, max, nxt = bt->mgr->page_size; -unsigned char oldkey[256], lowerkey[256]; -uid page_no = bt->page_no, right; -BtPage page = bt->page; -uint lvl = page->lvl; -uid new_page; +uint cnt = 0, idx = 0, max, nxt = bt->mgr->page_size, off; +unsigned char fencekey[256]; +uint lvl = set->page->lvl; +uid right; BtKey key; -uint tod; // split higher half of keys to bt->frame - // the last key (fence key) might be dead - - tod = (uint)time(NULL); memset (bt->frame, 0, bt->mgr->page_size); - max = (int)page->cnt; + max = set->page->cnt; cnt = max / 2; idx = 0; while( cnt++ < max ) { - key = keyptr(page, cnt); - nxt -= key->len + 1; - memcpy ((unsigned char *)bt->frame + nxt, key, key->len + 1); - memcpy(slotptr(bt->frame,++idx)->id, slotptr(page,cnt)->id, BtId); - if( !(slotptr(bt->frame, idx)->dead = slotptr(page, cnt)->dead) ) - bt->frame->act++; - slotptr(bt->frame, idx)->tod = slotptr(page, cnt)->tod; - slotptr(bt->frame, idx)->off = nxt; + if( !lvl || cnt < max ) { + key = keyptr(set->page, cnt); + off = nxt -= key->len + 1; + memcpy ((unsigned char *)bt->frame + nxt, key, key->len + 1); + } else + off = offsetof(struct BtPage_, fence); + + memcpy(slotptr(bt->frame,++idx)->id, slotptr(set->page,cnt)->id, BtId); + slotptr(bt->frame, idx)->tod = slotptr(set->page, cnt)->tod; + slotptr(bt->frame, idx)->off = off; + bt->frame->act++; } - // remember existing fence key for new page to the right - - memcpy (oldkey, key, key->len + 1); + if( set->page_no == ROOT_page ) + bt->frame->posted = 1; + memcpy (bt->frame->fence, set->page->fence, 256); bt->frame->bits = bt->mgr->page_bits; bt->frame->min = nxt; bt->frame->cnt = idx; @@ -1616,167 +2322,191 @@ uint tod; // link right node - if( page_no > ROOT_page ) { - right = bt_getid (page->right); - bt_putid(bt->frame->right, right); - } + if( set->page_no > ROOT_page ) + memcpy (bt->frame->right, set->page->right, BtId); - // get new free page and write frame to it. + // get new free page and write higher keys to it. 
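
bt_splitpage sends the upper half of the slots (everything above cnt/2) to bt->frame, which becomes the new right sibling, and keeps the lower half in place. The midpoint arithmetic on its own, reduced to plain integer arrays (0-based here purely for brevity; the real slots are 1-based):

//	entries 1..max/2 stay on the original page, entries max/2+1..max
//	move to the new right sibling

void split_halves (int *keys, int max, int *left, int *nleft, int *right, int *nright)
{
	int cnt;

	*nleft = *nright = 0;

	for( cnt = 1; cnt <= max; cnt++ )
		if( cnt <= max / 2 )
			left[(*nleft)++] = keys[cnt - 1];
		else
			right[(*nright)++] = keys[cnt - 1];
}
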
- if( !(new_page = bt_newpage(bt, bt->frame)) ) + if( !(right = bt_newpage(bt, bt->frame)) ) return bt->err; // update lower keys to continue in old page - memcpy (bt->frame, page, bt->mgr->page_size); - memset (page+1, 0, bt->mgr->page_size - sizeof(*page)); + memcpy (bt->frame, set->page, bt->mgr->page_size); + memset (set->page+1, 0, bt->mgr->page_size - sizeof(*set->page)); nxt = bt->mgr->page_size; - page->act = 0; + set->page->posted = 0; + set->page->dirty = 0; + set->page->act = 0; cnt = 0; idx = 0; // assemble page of smaller keys - // (they're all active keys) while( cnt++ < max / 2 ) { key = keyptr(bt->frame, cnt); - nxt -= key->len + 1; - memcpy ((unsigned char *)page + nxt, key, key->len + 1); - memcpy(slotptr(page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId); - slotptr(page, idx)->tod = slotptr(bt->frame, cnt)->tod; - slotptr(page, idx)->off = nxt; - page->act++; + + if( !lvl || cnt < max / 2 ) { + off = nxt -= key->len + 1; + memcpy ((unsigned char *)set->page + nxt, key, key->len + 1); + } else + off = offsetof(struct BtPage_, fence); + + memcpy(slotptr(set->page,++idx)->id, slotptr(bt->frame,cnt)->id, BtId); + slotptr(set->page, idx)->tod = slotptr(bt->frame, cnt)->tod; + slotptr(set->page, idx)->off = off; + set->page->act++; } - // remember fence key for old page + // install fence key for smaller key page - memcpy(lowerkey, key, key->len + 1); - bt_putid(page->right, new_page); - page->min = nxt; - page->cnt = idx; + memset(set->page->fence, 0, 256); + memcpy(set->page->fence, key, key->len + 1); - // if current page is the root page, split it + bt_putid(set->page->right, right); + set->page->min = nxt; + set->page->cnt = idx; - if( page_no == ROOT_page ) - return bt_splitroot (bt, lowerkey, oldkey, new_page); + // if current page is the root page, split it - // obtain Parent/Write locks - // for left and right node pages + if( set->page_no == ROOT_page ) + return bt_splitroot (bt, set, right); - if( bt_lockpage (bt, new_page, BtLockParent, NULL) ) - return bt->err; + bt_unlockpage (BtLockWrite, set->latch); - if( bt_lockpage (bt, page_no, BtLockParent, NULL) ) - return bt->err; + // insert new fences in their parent pages - // release wr lock on left page + while( 1 ) { + bt_lockpage (BtLockParent, set->latch); + bt_lockpage (BtLockWrite, set->latch); - if( bt_unlockpage (bt, page_no, BtLockWrite) ) - return bt->err; + memcpy (fencekey, set->page->fence, 256); + right = bt_getid (set->page->right); - // insert new fence for reformulated left block + if( set->page->posted ) { + bt_unlockpage (BtLockParent, set->latch); + bt_unlockpage (BtLockWrite, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); + return 0; + } - if( bt_insertkey (bt, lowerkey+1, *lowerkey, lvl + 1, page_no, tod) ) - return bt->err; + set->page->posted = 1; + bt_unlockpage (BtLockWrite, set->latch); - // fix old fence for newly allocated right block page + if( bt_insertkey (bt, fencekey+1, *fencekey, set->page_no, time(NULL), lvl+1) ) + return bt->err; - if( bt_insertkey (bt, oldkey+1, *oldkey, lvl + 1, new_page, tod) ) - return bt->err; + bt_unlockpage (BtLockParent, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); - // release Parent & Write locks + if( !(set->page_no = right) ) + break; - if( bt_unlockpage (bt, new_page, BtLockParent) ) - return bt->err; + set->latch = bt_pinlatch (bt, right); - if( bt_unlockpage (bt, page_no, BtLockParent) ) - return bt->err; + if( set->pool = bt_pinpool (bt, right) ) + set->page = bt_page (bt, set->pool, right); + else 
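
After the physical split the fence keys still have to be inserted one level up; the loop above follows the right-sibling chain doing exactly that, and the posted bit lets a later splitter stop as soon as it reaches a page whose fence is already in its parent. A compilable toy of that idempotent walk, with an array-backed page table standing in for pinned pool pages:

typedef struct {
	int right;			// toy page number of right sibling, 0 = none
	int posted;			// fence key already inserted in parent
} ToyNode;

ToyNode toy_pages[16];			// toy page table indexed by page number

//	walk right from page_no, posting each unposted fence exactly once

void post_fences (int page_no)
{
	while( page_no ) {
		ToyNode *node = toy_pages + page_no;

		if( node->posted )	// an earlier walk already covered the rest
			return;

		node->posted = 1;	// the real code calls bt_insertkey(..., lvl+1) here
		page_no = node->right;
	}
}
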
+ return bt->err; + } return 0; } -// Insert new key into the btree at requested level. -// Level zero pages are leaf pages and are unlocked at exit. -// Interior pages remain locked. +// Insert new key into the btree at given level. -BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uint lvl, uid id, uint tod) +BTERR bt_insertkey (BtDb *bt, unsigned char *key, uint len, uid id, uint tod, uint lvl) { +BtPageSet set[1]; uint slot, idx; -BtPage page; BtKey ptr; - while( 1 ) { - if( slot = bt_loadpage (bt, key, len, lvl, BtLockWrite) ) - ptr = keyptr(bt->page, slot); - else - { - if ( !bt->err ) - bt->err = BTERR_ovflw; - return bt->err; - } - - // if key already exists, update id and return - - page = bt->page; + while( 1 ) { + if( slot = bt_loadpage (bt, set, key, len, lvl, BtLockWrite) ) + ptr = keyptr(set->page, slot); + else + { + if ( !bt->err ) + bt->err = BTERR_ovflw; + return bt->err; + } - if( !keycmp (ptr, key, len) ) { - slotptr(page, slot)->dead = 0; - slotptr(page, slot)->tod = tod; - bt_putid(slotptr(page,slot)->id, id); - return bt_unlockpage(bt, bt->page_no, BtLockWrite); - } + // if key already exists, update id and return + + if( slot <= set->page->cnt ) + if( !keycmp (ptr, key, len) ) { + if( slotptr(set->page, slot)->dead ) + set->page->act++; + slotptr(set->page, slot)->dead = 0; + slotptr(set->page, slot)->tod = tod; + bt_putid(slotptr(set->page,slot)->id, id); + bt_unlockpage(BtLockWrite, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); + return 0; + } - // check if page has enough space + // check if page has enough space - if( bt_cleanpage (bt, len) ) - break; + if( slot = bt_cleanpage (bt, set->page, len, slot) ) + break; - if( bt_splitpage (bt) ) - return bt->err; - } + if( bt_splitpage (bt, set) ) + return bt->err; + } - // calculate next available slot and copy key into page + // calculate next available slot and copy key into page - page->min -= len + 1; // reset lowest used offset - ((unsigned char *)page)[page->min] = len; - memcpy ((unsigned char *)page + page->min +1, key, len ); + set->page->min -= len + 1; // reset lowest used offset + ((unsigned char *)set->page)[set->page->min] = len; + memcpy ((unsigned char *)set->page + set->page->min +1, key, len ); - for( idx = slot; idx < page->cnt; idx++ ) - if( slotptr(page, idx)->dead ) + for( idx = slot; idx <= set->page->cnt; idx++ ) + if( slotptr(set->page, idx)->dead ) break; - // now insert key into array before slot - // preserving the fence slot + // now insert key into array before slot - if( idx == page->cnt ) - idx++, page->cnt++; + if( idx > set->page->cnt ) + set->page->cnt++; - page->act++; + set->page->act++; - while( idx > slot ) - *slotptr(page, idx) = *slotptr(page, idx -1), idx--; + while( idx > slot ) + *slotptr(set->page, idx) = *slotptr(set->page, idx -1), idx--; - bt_putid(slotptr(page,slot)->id, id); - slotptr(page, slot)->off = page->min; - slotptr(page, slot)->tod = tod; - slotptr(page, slot)->dead = 0; + bt_putid(slotptr(set->page,slot)->id, id); + slotptr(set->page, slot)->off = set->page->min; + slotptr(set->page, slot)->tod = tod; + slotptr(set->page, slot)->dead = 0; - return bt_unlockpage(bt, bt->page_no, BtLockWrite); + bt_unlockpage (BtLockWrite, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); + return 0; } // cache page of keys into cursor and return starting slot for given key uint bt_startkey (BtDb *bt, unsigned char *key, uint len) { +BtPageSet set[1]; uint slot; // cache page for retrieval - if( slot = bt_loadpage (bt, 
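
The tail of bt_insertkey opens a hole at the target slot by shifting higher slots up one place, reusing the first dead slot above the target when there is one so the slot count only grows when it must. The same shuffle on a toy slot array (1-based, as in the page; InsSlot and insert_at are illustrative names):

typedef struct {
	int key;
	int dead;
} InsSlot;

//	insert key at position where, shifting slots up to the first dead
//	slot (or the end of the array) one place to the right

void insert_at (InsSlot *slot, unsigned *cnt, unsigned where, int key)
{
	unsigned idx;

	for( idx = where; idx <= *cnt; idx++ )
		if( slot[idx].dead )
			break;			// reuse this tombstone instead of growing

	if( idx > *cnt )
		(*cnt)++;			// no dead slot found: extend the array

	while( idx > where )
		slot[idx] = slot[idx - 1], idx--;

	slot[where].key = key;
	slot[where].dead = 0;
}
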
key, len, 0, BtLockRead) ) - memcpy (bt->cursor, bt->page, bt->mgr->page_size); - bt->cursor_page = bt->page_no; - if ( bt_unlockpage(bt, bt->page_no, BtLockRead) ) - return 0; + if( slot = bt_loadpage (bt, set, key, len, 0, BtLockRead) ) + memcpy (bt->cursor, set->page, bt->mgr->page_size); + else + return 0; + + bt->cursor_page = set->page_no; + + bt_unlockpage(BtLockRead, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); return slot; } @@ -1785,14 +2515,15 @@ uint slot; uint bt_nextkey (BtDb *bt, uint slot) { -off64_t right; +BtPageSet set[1]; +uid right; do { right = bt_getid(bt->cursor->right); while( slot++ < bt->cursor->cnt ) if( slotptr(bt->cursor,slot)->dead ) continue; - else if( right || (slot < bt->cursor->cnt)) + else if( right || (slot < bt->cursor->cnt) ) // skip infinite stopper return slot; else break; @@ -1802,14 +2533,19 @@ off64_t right; bt->cursor_page = right; - if( bt_lockpage(bt, right, BtLockRead, &bt->page) ) + if( set->pool = bt_pinpool (bt, right) ) + set->page = bt_page (bt, set->pool, right); + else return 0; - memcpy (bt->cursor, bt->page, bt->mgr->page_size); + set->latch = bt_pinlatch (bt, right); + bt_lockpage(BtLockRead, set->latch); - if ( bt_unlockpage(bt, right, BtLockRead) ) - return 0; + memcpy (bt->cursor, set->page, bt->mgr->page_size); + bt_unlockpage(BtLockRead, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); slot = 0; } while( 1 ); @@ -1834,6 +2570,46 @@ uint bt_tod(BtDb *bt, uint slot) #ifdef STANDALONE +void bt_latchaudit (BtDb *bt) +{ +ushort idx, hashidx; +BtPageSet set[1]; + +#ifdef unix + for( idx = 1; idx < bt->mgr->latchmgr->latchdeployed; idx++ ) { + set->latch = bt->mgr->latchsets + idx; + if( set->latch->pin ) { + fprintf(stderr, "latchset %d pinned for page %.6x\n", idx, set->latch->page_no); + set->latch->pin = 0; + } + } + + for( hashidx = 0; hashidx < bt->mgr->latchmgr->latchhash; hashidx++ ) { + if( idx = bt->mgr->latchmgr->table[hashidx].slot ) do { + set->latch = bt->mgr->latchsets + idx; + if( set->latch->hash != hashidx ) + fprintf(stderr, "latchset %d wrong hashidx\n", idx); + if( set->latch->pin ) + fprintf(stderr, "latchset %d pinned for page %.8x\n", idx, set->latch->page_no); + } while( idx = set->latch->next ); + } + + set->page_no = bt_getid(bt->mgr->latchmgr->alloc[1].right); + + while( set->page_no ) { + fprintf(stderr, "free: %.6x\n", (uint)set->page_no); + + if( set->pool = bt_pinpool (bt, set->page_no) ) + set->page = bt_page (bt, set->pool, set->page_no); + else + return; + + set->page_no = bt_getid(set->page->right); + bt_unpinpool (set->pool); + } +#endif +} + typedef struct { char type, idx; char *infile; @@ -1855,8 +2631,8 @@ uid next, page_no = LEAF_page; // start on first page of leaves unsigned char key[256]; ThreadArg *args = arg; int ch, len = 0, slot; +BtPageSet set[1]; time_t tod[1]; -BtPage page; BtKey ptr; BtDb *bt; FILE *in; @@ -1866,6 +2642,12 @@ FILE *in; switch(args->type | 0x20) { + case 'a': + fprintf(stderr, "started latch mgr audit\n"); + bt_latchaudit (bt); + fprintf(stderr, "finished latch mgr audit\n"); + break; + case 'w': fprintf(stderr, "started indexing for %s\n", args->infile); if( in = fopen (args->infile, "rb") ) @@ -1876,10 +2658,11 @@ FILE *in; if( args->num == 1 ) sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9; + else if( args->num ) - sprintf((char *)key+len, "%.9d", line+args->idx * args->num), len += 9; + sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9; - if( bt_insertkey (bt, key, len, 0, 
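
bt_startkey and bt_nextkey never hold a leaf latch across user code: the whole page is copied into the per-thread cursor, and when its live slots run out the scan hops to the right sibling and re-copies. The shape of that loop, with a hypothetical fetch_page() standing in for the pin / read-lock / copy / release sequence:

typedef struct {
	int cnt;			// number of slots in the copy
	int right;			// right sibling page number, 0 at the end
	int dead[32];			// tombstone flags
	int key[32];			// toy keys
} ToyCursor;

//	assumed helper: pins, read-locks, copies and releases one page
extern ToyCursor fetch_page (int page_no);

void toy_scan (int page_no, void (*emit)(int key))
{
	ToyCursor cursor;
	int slot;

	while( page_no ) {
		cursor = fetch_page (page_no);
		for( slot = 0; slot < cursor.cnt; slot++ )
			if( !cursor.dead[slot] )
				if( cursor.right || slot < cursor.cnt - 1 )	// skip the stopper key
					emit (cursor.key[slot]);
		page_no = cursor.right;		// no latch held while hopping right
	}
}
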
line, *tod) ) + if( bt_insertkey (bt, key, len, line, *tod, 0) ) fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0); len = 0; } @@ -1897,10 +2680,11 @@ FILE *in; line++; if( args->num == 1 ) sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9; + else if( args->num ) - sprintf((char *)key+len, "%.9d", line+args->idx * args->num), len += 9; + sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9; - if( bt_deletekey (bt, key, len, 0) ) + if( bt_deletekey (bt, key, len) ) fprintf(stderr, "Error %d Line: %d\n", bt->err, line), exit(0); len = 0; } @@ -1918,8 +2702,9 @@ FILE *in; line++; if( args->num == 1 ) sprintf((char *)key+len, "%.9d", 1000000000 - line), len += 9; + else if( args->num ) - sprintf((char *)key+len, "%.9d", line+args->idx * args->num), len += 9; + sprintf((char *)key+len, "%.9d", line + args->idx * args->num), len += 9; if( bt_findkey (bt, key, len) ) found++; @@ -1933,33 +2718,48 @@ FILE *in; break; case 's': - len = key[0] = 0; - - fprintf(stderr, "started reading\n"); - - if( slot = bt_startkey (bt, key, len) ) - slot--; - else - fprintf(stderr, "Error %d in StartKey. Syserror: %d\n", bt->err, errno), exit(0); - - while( slot = bt_nextkey (bt, slot) ) { - ptr = bt_key(bt, slot); - fwrite (ptr->key, ptr->len, 1, stdout); - fputc ('\n', stdout); - } + fprintf(stderr, "started scanning\n"); + do { + if( set->pool = bt_pinpool (bt, page_no) ) + set->page = bt_page (bt, set->pool, page_no); + else + break; + set->latch = bt_pinlatch (bt, page_no); + bt_lockpage (BtLockRead, set->latch); + next = bt_getid (set->page->right); + cnt += set->page->act; + + for( slot = 0; slot++ < set->page->cnt; ) + if( next || slot < set->page->cnt ) + if( !slotptr(set->page, slot)->dead ) { + ptr = keyptr(set->page, slot); + fwrite (ptr->key, ptr->len, 1, stdout); + fputc ('\n', stdout); + } + + bt_unlockpage (BtLockRead, set->latch); + bt_unpinlatch (set->latch); + bt_unpinpool (set->pool); + } while( page_no = next ); + cnt--; // remove stopper key + fprintf(stderr, " Total keys read %d\n", cnt); break; case 'c': - fprintf(stderr, "started reading\n"); - - do { - bt_lockpage (bt, page_no, BtLockRead, &page); - cnt += page->act; - next = bt_getid (page->right); - bt_unlockpage (bt, page_no, BtLockRead); - } while( page_no = next ); - + fprintf(stderr, "started counting\n"); + next = bt->mgr->latchmgr->nlatchpage + LATCH_page; + page_no = LEAF_page; + + while( page_no < bt_getid(bt->mgr->latchmgr->alloc->right) ) { + pread (bt->mgr->idx, bt->frame, bt->mgr->page_size, page_no << bt->mgr->page_bits); + if( !bt->frame->free && !bt->frame->lvl ) + cnt += bt->frame->act; + if( page_no > LEAF_page ) + next = page_no + 1; + page_no = next; + } + cnt--; // remove stopper key fprintf(stderr, " Total keys read %d\n", cnt); break; @@ -2000,7 +2800,7 @@ BtDb *bt; fprintf (stderr, " where page_bits is the page size in bits\n"); fprintf (stderr, " mapped_segments is the number of mmap segments in buffer pool\n"); fprintf (stderr, " seg_bits is the size of individual segments in buffer pool in pages in bits\n"); - fprintf (stderr, " line_numbers set to 1 to append line numbers to input lines\n"); + fprintf (stderr, " line_numbers = 1 to append line numbers to keys\n"); fprintf (stderr, " src_file1 thru src_filen are files of keys separated by newline\n"); exit(0); } @@ -2018,7 +2818,7 @@ BtDb *bt; poolsize = atoi(argv[4]); if( !poolsize ) - fprintf (stderr, "Warning: mapped_pool has no segments\n"); + fprintf (stderr, "Warning: no mapped_pool\n"); if( poolsize > 65535 ) 
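
The 'c' case bypasses the buffer pool entirely: it pread()s every page image straight from the file, skips free and interior pages, and sums the act counts of the leaves. A standalone sketch under assumed constants (TOY_PAGE_BITS, TOY_LEAF_PAGE) and a trimmed page header; the real header and LEAF_page value come from the file itself:

#include <unistd.h>

#define TOY_PAGE_BITS	12
#define TOY_PAGE_SIZE	(1 << TOY_PAGE_BITS)
#define TOY_LEAF_PAGE	2ULL

//	trimmed page header: only the fields the counting pass looks at

typedef struct {
	unsigned cnt, act, min;
	unsigned char bits:7, free:1;
	unsigned char lvl:4, kill:1;
} ToyHdr;

unsigned long long count_keys (int fd, unsigned long long last_page)
{
	unsigned char buf[TOY_PAGE_SIZE];
	unsigned long long page_no, cnt = 0;
	ToyHdr *page = (ToyHdr *)buf;

	for( page_no = TOY_LEAF_PAGE; page_no < last_page; page_no++ ) {
		if( pread (fd, buf, TOY_PAGE_SIZE, page_no << TOY_PAGE_BITS) < TOY_PAGE_SIZE )
			break;
		if( !page->free && !page->lvl )		// live leaf page
			cnt += page->act;
	}

	return cnt ? cnt - 1 : 0;			// drop the stopper key
}
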
fprintf (stderr, "Warning: mapped_pool > 65535 segments\n");