+// Latch Manager
+
+// wait until write lock mode is clear
+// and add 1 to the share count
+
+void bt_spinreadlock(BtSpinLatch *latch)
+{
+ushort prev;
+
+ do {
+ // obtain latch mutex
+#ifdef unix
+ if( __sync_lock_test_and_set(latch->mutex, 1) )
+ continue;
+#else
+ if( _InterlockedExchange8(latch->mutex, 1) )
+ continue;
+#endif
+ // see if exclusive request is granted or pending
+
+ if( prev = !(latch->exclusive | latch->pending) )
+ latch->share++;
+
+#ifdef unix
+ *latch->mutex = 0;
+#else
+ _InterlockedExchange8(latch->mutex, 0);
+#endif
+
+ if( prev )
+ return;
+
+#ifdef unix
+ } while( sched_yield(), 1 );
+#else
+ } while( SwitchToThread(), 1 );
+#endif
+}
+
+// wait for other read and write latches to relinquish
+
+void bt_spinwritelock(BtSpinLatch *latch)
+{
+uint prev;
+
+ do {
+#ifdef unix
+ if( __sync_lock_test_and_set(latch->mutex, 1) )
+ continue;
+#else
+ if( _InterlockedExchange8(latch->mutex, 1) )
+ continue;
+#endif
+ if( prev = !(latch->share | latch->exclusive) )
+ latch->exclusive = 1, latch->pending = 0;
+ else
+ latch->pending = 1;
+#ifdef unix
+ *latch->mutex = 0;
+#else
+ _InterlockedExchange8(latch->mutex, 0);
+#endif
+ if( prev )
+ return;
+#ifdef unix
+ } while( sched_yield(), 1 );
+#else
+ } while( SwitchToThread(), 1 );
+#endif
+}
+
+// try to obtain write lock
+
+// return 1 if obtained,
+// 0 otherwise
+
+int bt_spinwritetry(BtSpinLatch *latch)
+{
+uint prev;
+
+#ifdef unix
+ if( __sync_lock_test_and_set(latch->mutex, 1) )
+ return 0;
+#else
+ if( _InterlockedExchange8(latch->mutex, 1) )
+ return 0;
+#endif
+ // take write access if all bits are clear
+
+ if( prev = !(latch->exclusive | latch->share) )
+ latch->exclusive = 1;
+
+#ifdef unix
+ *latch->mutex = 0;
+#else
+ _InterlockedExchange8(latch->mutex, 0);
+#endif
+ return prev;
+}
+
+// clear write mode
+
+void bt_spinreleasewrite(BtSpinLatch *latch)
+{
+#ifdef unix
+ while( __sync_lock_test_and_set(latch->mutex, 1) )
+ sched_yield();
+#else
+ while( _InterlockedExchange8(latch->mutex, 1) )
+ SwitchToThread();
+#endif
+ latch->exclusive = 0;
+#ifdef unix
+ *latch->mutex = 0;
+#else
+ _InterlockedExchange8(latch->mutex, 0);
+#endif
+}
+
+// decrement reader count
+
+void bt_spinreleaseread(BtSpinLatch *latch)
+{
+#ifdef unix
+ while( __sync_lock_test_and_set(latch->mutex, 1) )
+ sched_yield();
+#else
+ while( _InterlockedExchange8(latch->mutex, 1) )
+ SwitchToThread();
+#endif
+ latch->share--;
+#ifdef unix
+ *latch->mutex = 0;
+#else
+ _InterlockedExchange8(latch->mutex, 0);
+#endif
+}
+
+// link latch table entry into latch hash table
+
+void bt_latchlink (BtDb *bt, ushort hashidx, ushort victim, uid page_no)
+{
+BtLatchSet *set = bt->mgr->latchsets + victim;
+
+ if( set->next = bt->mgr->latchmgr->table[hashidx].slot )
+ bt->mgr->latchsets[set->next].prev = victim;
+
+ bt->mgr->latchmgr->table[hashidx].slot = victim;
+ set->page_no = page_no;
+ set->hash = hashidx;
+ set->prev = 0;
+}
+
+// release latch pin
+
+void bt_unpinlatch (BtLatchSet *set)
+{
+#ifdef unix
+ __sync_fetch_and_add(&set->pin, -1);
+#else
+ _InterlockedDecrement16 (&set->pin);
+#endif
+}
+
+// find existing latchset or inspire new one
+// return with latchset pinned
+
//	bt_pinlatch: find the existing latch set for page_no, or claim
//	a fresh/victimized one, and return it with its pin count
//	incremented.  Caller must bt_unpinlatch() when finished.
//	Phases: (1) optimistic lookup under a read lock, (2) lookup +
//	chain-local reuse under a write lock, (3) deploy a never-used
//	entry, (4) clock-hand victimization of an unpinned entry from
//	some other chain.

BtLatchSet *bt_pinlatch (BtDb *bt, uid page_no)
{
ushort hashidx = page_no % bt->mgr->latchmgr->latchhash;
ushort slot, avail = 0, victim, idx;
BtLatchSet *set;

	// obtain read lock on hash table entry

	bt_spinreadlock(bt->mgr->latchmgr->table[hashidx].latch);

	// scan the hash chain for an existing entry for page_no

	if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do
	{
		set = bt->mgr->latchsets + slot;
		if( page_no == set->page_no )
			break;
	} while( slot = set->next );

	// found: pin it while still holding the chain lock so the
	// victimization path cannot reclaim it out from under us

	if( slot ) {
#ifdef unix
		__sync_fetch_and_add(&set->pin, 1);
#else
		_InterlockedIncrement16 (&set->pin);
#endif
	}

	bt_spinreleaseread (bt->mgr->latchmgr->table[hashidx].latch);

	if( slot )
		return set;

	// try again, this time with write lock
	// (held across all the remaining phases of this function)

	bt_spinwritelock(bt->mgr->latchmgr->table[hashidx].latch);

	// rescan: another thread may have inserted page_no while we
	// held no lock; also remember the first unpinned entry on
	// this chain as a reuse candidate

	if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do
	{
		set = bt->mgr->latchsets + slot;
		if( page_no == set->page_no )
			break;
		if( !set->pin && !avail )
			avail = slot;
	} while( slot = set->next );

	// found our entry, or take over an unpinned one
	// (a reused entry stays on this same chain, rebound to page_no)

	if( slot || (slot = avail) ) {
		set = bt->mgr->latchsets + slot;
#ifdef unix
		__sync_fetch_and_add(&set->pin, 1);
#else
		_InterlockedIncrement16 (&set->pin);
#endif
		set->page_no = page_no;
		bt_spinreleasewrite(bt->mgr->latchmgr->table[hashidx].latch);
		return set;
	}

	// see if there are any unused entries
	// (deploy the next never-used slot from the array tail)
#ifdef unix
	victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, 1) + 1;
#else
	victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchdeployed);
#endif

	if( victim < bt->mgr->latchmgr->latchtotal ) {
		set = bt->mgr->latchsets + victim;
#ifdef unix
		__sync_fetch_and_add(&set->pin, 1);
#else
		_InterlockedIncrement16 (&set->pin);
#endif
		bt_latchlink (bt, hashidx, victim, page_no);
		bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch);
		return set;
	}

	// table exhausted: back out the deployed count
	// (the value assigned to victim here is never used)
#ifdef unix
	victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, -1);
#else
	victim = _InterlockedDecrement16 (&bt->mgr->latchmgr->latchdeployed);
#endif
	// find and reuse previous lock entry
	// by advancing a shared clock hand round-robin

	while( 1 ) {
#ifdef unix
		victim = __sync_fetch_and_add(&bt->mgr->latchmgr->latchvictim, 1);
#else
		victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchvictim) - 1;
#endif
		// we don't use slot zero

		if( victim %= bt->mgr->latchmgr->latchtotal )
			set = bt->mgr->latchsets + victim;
		else
			continue;

		// take control of our slot
		// from other threads
		// (busy latch guards the entry during unlink/relink)

		if( set->pin || !bt_spinwritetry (set->busy) )
			continue;

		idx = set->hash;

		// try to get write lock on hash chain
		// skip entry if not obtained
		// or has outstanding locks
		// NOTE(review): if idx == hashidx this trylock always fails
		// since we already hold that chain's write lock, so entries
		// on our own chain are never victimized here -- confirm this
		// cannot spin forever when all other entries stay pinned

		if( !bt_spinwritetry (bt->mgr->latchmgr->table[idx].latch) ) {
			bt_spinreleasewrite (set->busy);
			continue;
		}

		// recheck the pin now that both locks are held

		if( set->pin ) {
			bt_spinreleasewrite (set->busy);
			bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch);
			continue;
		}

		// unlink our available victim from its hash chain

		if( set->prev )
			bt->mgr->latchsets[set->prev].next = set->next;
		else
			bt->mgr->latchmgr->table[idx].slot = set->next;

		if( set->next )
			bt->mgr->latchsets[set->next].prev = set->prev;

		bt_spinreleasewrite (bt->mgr->latchmgr->table[idx].latch);

		// pin before relinking so no other thread can steal it
#ifdef unix
		__sync_fetch_and_add(&set->pin, 1);
#else
		_InterlockedIncrement16 (&set->pin);
#endif
		bt_latchlink (bt, hashidx, victim, page_no);
		bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch);
		bt_spinreleasewrite (set->busy);
		return set;
	}
}
+