+// (review) fragment of a latch-set pin routine: maps page_no to a
+// BtLatchSet through the latch manager's hash table and pins the entry
+// so it cannot be reclaimed by another thread. The function header and
+// the tail of the body lie outside this hunk.
+// NOTE(review): `idx` is declared but unused in the visible span —
+// presumably used past L80; confirm against the full function.
+ushort hashidx = page_no % bt->mgr->latchmgr->latchhash;
+ushort slot, avail = 0, victim, idx;
+BtLatchSet *set;
+
+ // obtain read lock on hash table entry
+
+ bt_spinreadlock(bt->mgr->latchmgr->table[hashidx].latch, 0);
+
+ // fast path: walk the bucket's chain (linked through set->next)
+ // looking for an existing latch set already mapped to page_no
+
+ if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do
+ {
+	set = bt->mgr->latchsets + slot;
+	if( page_no == set->page_no )
+		break;
+ } while( slot = set->next );
+
+ // pin the entry BEFORE releasing the bucket lock, so it cannot be
+ // victimized between the unlock below and our return
+
+ if( slot ) {
+#ifdef unix
+	__sync_fetch_and_add(&set->pin, 1);
+#else
+	_InterlockedIncrement16 (&set->pin);
+#endif
+ }
+
+ bt_spinreleaseread (bt->mgr->latchmgr->table[hashidx].latch, 0);
+
+ if( slot )
+	return set;
+
+ // try again, this time with write lock
+
+ bt_spinwritelock(bt->mgr->latchmgr->table[hashidx].latch, 0);
+
+ // re-scan the chain: another thread may have installed page_no while
+ // we held no lock; also remember the first unpinned entry in this
+ // bucket as a takeover candidate (avail)
+
+ if( slot = bt->mgr->latchmgr->table[hashidx].slot ) do
+ {
+	set = bt->mgr->latchsets + slot;
+	if( page_no == set->page_no )
+		break;
+	if( !set->pin && !avail )
+		avail = slot;
+ } while( slot = set->next );
+
+ // found our entry, or take over an unpinned one
+
+ // NOTE(review): the takeover reassigns page_no without relinking the
+ // chain — safe only because `avail` was found in this same hash
+ // bucket, so its chain links remain valid; verify hash of the old
+ // page_no and the new page_no both map to hashidx by construction
+
+ if( slot || (slot = avail) ) {
+	set = bt->mgr->latchsets + slot;
+#ifdef unix
+	__sync_fetch_and_add(&set->pin, 1);
+#else
+	_InterlockedIncrement16 (&set->pin);
+#endif
+	set->page_no = page_no;
+	bt_spinreleasewrite(bt->mgr->latchmgr->table[hashidx].latch, 0);
+	return set;
+ }
+
+ // see if there are any unused entries
+ // atomically claim the next never-deployed latch set; the unix form
+ // adds 1 to the fetched (old) value so both branches yield the NEW
+ // count, matching _InterlockedIncrement16's return-new semantics
+#ifdef unix
+ victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, 1) + 1;
+#else
+ victim = _InterlockedIncrement16 (&bt->mgr->latchmgr->latchdeployed);
+#endif
+
+ if( victim < bt->mgr->latchmgr->latchtotal ) {
+	set = bt->mgr->latchsets + victim;
+#ifdef unix
+	__sync_fetch_and_add(&set->pin, 1);
+#else
+	_InterlockedIncrement16 (&set->pin);
+#endif
+	// bt_latchlink presumably installs entry `victim` for page_no into
+	// bucket hashidx's chain (we still hold the bucket write lock) —
+	// TODO confirm against bt_latchlink's definition
+	bt_latchlink (bt, hashidx, victim, page_no);
+	bt_spinreleasewrite (bt->mgr->latchmgr->table[hashidx].latch, 0);
+	return set;
+ }
+
+ // pool exhausted: back out the deployment count
+ // NOTE(review): here the unix form yields the OLD value while
+ // _InterlockedDecrement16 yields the NEW value — asymmetric with the
+ // increment pairing above; harmless only if `victim` is overwritten
+ // before its next read (it appears to be, per the comment below) —
+ // confirm in the continuation past this hunk
+#ifdef unix
+ victim = __sync_fetch_and_add (&bt->mgr->latchmgr->latchdeployed, -1);
+#else
+ victim = _InterlockedDecrement16 (&bt->mgr->latchmgr->latchdeployed);
+#endif
+ // find and reuse previous lock entry