cache.c
		bdb->bi_cache.c_eiused++;
		if ( ei2 && ( ei2->bei_kids || !ei2->bei_id ))
			bdb->bi_cache.c_leaves++;
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
		if ( addlru ) {
			ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
			bdb_cache_lru_add( bdb, ein );
		}
		addlru = 1;

		/* Got the parent, link in and we're done. */
		if ( ei2 ) {
			bdb_cache_entryinfo_lock( ei2 );
			ein->bei_parent = ei2;
			avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
				avl_dup_error);
			ei2->bei_ckids++;
			bdb_cache_entryinfo_unlock( ei2 );
			bdb_cache_entryinfo_lock( eir );

			/* Reset all the state info */
			for (ein = eir; ein != ei2; ein=ein->bei_parent)
				ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;
			*res = eir;
			break;
		}
		ei.bei_kids = NULL;
		ei.bei_id = eip.bei_id;
		ei.bei_ckids = 1;
		avl_insert( &ei.bei_kids, (caddr_t)ein, bdb_rdn_cmp,
			avl_dup_error );
	}
	return rc;
}

/* Used by hdb_dn2idl when loading the EntryInfo for all the children
 * of a given node
 */
int hdb_cache_load(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2;
	int rc;

	/* See if we already have this one */
	bdb_cache_entryinfo_lock( ei->bei_parent );
	ei2 = (EntryInfo *)avl_find( ei->bei_parent->bei_kids, ei, bdb_rdn_cmp );
	bdb_cache_entryinfo_unlock( ei->bei_parent );

	if ( !ei2 ) {
		/* Not found, add it */
		struct berval bv;

		/* bei_rdn was not malloc'd before, do it now */
		ber_dupbv( &bv, &ei->bei_rdn );
		ei->bei_rdn = bv;

		rc = bdb_entryinfo_add_internal( bdb, ei, res );
		bdb_cache_entryinfo_unlock( ei->bei_parent );
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
	} else {
		/* Found, return it */
		*res = ei2;
		return 0;
	}
	return rc;
}
#endif

/* caller must have lru_head_mutex locked. mutex
 * will be unlocked on return.
 */
static void
bdb_cache_lru_add(
	struct bdb_info *bdb,
	EntryInfo *ei )
{
	DB_LOCK		lock, *lockp;
	EntryInfo *elru, *elprev;
	int count = 0;

	LRU_ADD( &bdb->bi_cache, ei );
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );

	/* See if we're above the cache size limit */
	if ( bdb->bi_cache.c_cursize <= bdb->bi_cache.c_maxsize )
		return;

	if ( bdb->bi_cache.c_locker ) {
		lockp = &lock;
	} else {
		lockp = NULL;
	}

	/* Don't bother if we can't get the lock */
	if ( ldap_pvt_thread_mutex_trylock( &bdb->bi_cache.lru_tail_mutex ) )
		return;

	/* Look for an unused entry to remove */
	for ( elru = bdb->bi_cache.c_lrutail; elru; elru = elprev ) {
		elprev = elru->bei_lruprev;

		/* If we can successfully writelock it, then
		 * the object is idle.
		 */
		if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
				bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {

			/* If this node is in the process of linking into the cache,
			 * or this node is being deleted, skip it.
			 */
			if ( elru->bei_state &
				( CACHE_ENTRY_NOT_LINKED | CACHE_ENTRY_DELETED )) {
				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
				continue;
			}

			/* Free entry for this node if it's present */
			if ( elru->bei_e ) {
				elru->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
				bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
#else
				bdb_entry_return( elru->bei_e );
#endif
				elru->bei_e = NULL;
				count++;
			}

			/* ITS#4010 if we're in slapcat, and this node is a leaf
			 * node, free it.
			 *
			 * FIXME: we need to do this for slapd as well, (which is
			 * why we compute bi_cache.c_leaves now) but at the moment
			 * we can't because it causes unresolvable deadlocks.
			 */
			if ( slapMode & SLAP_TOOL_READONLY ) {
				if ( !elru->bei_kids ) {
					/* This does LRU_DELETE for us */
					bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
					bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
				}
				/* Leave node on LRU list for a future pass */
			} else {
				LRU_DELETE( &bdb->bi_cache, elru );
			}
			bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );

			if ( count >= bdb->bi_cache.c_minfree ) {
				ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
				bdb->bi_cache.c_cursize -= count;
				ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
				break;
			}
		}
	}

	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
}

EntryInfo *
bdb_cache_find_info( struct bdb_info *bdb, ID id )
{
	EntryInfo ei = { 0 }, *ei2;

	ei.bei_id = id;

	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
	ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
		(caddr_t) &ei, bdb_id_cmp );
	ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
	return ei2;
}

/*
 * cache_find_id - find an entry in the cache, given id.
 * The entry is locked for Read upon return. Call with islocked TRUE if
 * the supplied *eip was already locked.
 */
int
bdb_cache_find_id(
	Operation *op,
	DB_TXN	*tid,
	ID		id,
	EntryInfo **eip,
	int		islocked,
	u_int32_t	locker,
	DB_LOCK		*lock )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	Entry	*ep = NULL;
	int	rc = 0, load = 0;
	EntryInfo ei = { 0 };

	ei.bei_id = id;

#ifdef SLAP_ZONE_ALLOC
	slap_zh_rlock(bdb->bi_cache.c_zctx);
#endif

	/* If we weren't given any info, see if we have it already cached */
	if ( !*eip ) {
again:	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
		*eip = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
			(caddr_t) &ei, bdb_id_cmp );
		if ( *eip ) {
			/* If the lock attempt fails, the info is in use */
			if ( ldap_pvt_thread_mutex_trylock(
					&(*eip)->bei_kids_mutex )) {
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				/* If this node is being deleted, treat
				 * as if the delete has already finished
				 */
				if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
					return DB_NOTFOUND;
				}
				/* otherwise, wait for the info to free up */
				ldap_pvt_thread_yield();
				goto again;
			}
			/* If this info isn't hooked up to its parent yet,
			 * unlock and wait for it to be fully initialized
			 */
			if ( (*eip)->bei_state & CACHE_ENTRY_NOT_LINKED ) {
				bdb_cache_entryinfo_unlock( *eip );
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				ldap_pvt_thread_yield();
				goto again;
			}
			islocked = 1;
		}
		ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
	}

	/* See if the ID exists in the database; add it to the cache if so */
	if ( !*eip ) {
#ifndef BDB_HIER
		rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
		if ( rc == 0 ) {
			rc = bdb_cache_find_ndn( op, tid,
				&ep->e_nname, eip );
			if ( *eip ) islocked = 1;
			if ( rc ) {
				ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
				bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
				bdb_entry_return( ep );
#endif
				ep = NULL;
			}
		}
#else
		rc = hdb_cache_find_parent(op, tid, locker, id, eip );
		if ( rc == 0 ) islocked = 1;
#endif
	}

	/* Ok, we found the info, do we have the entry?
	 */
	if ( rc == 0 ) {
		if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
			rc = DB_NOTFOUND;
		} else {
			/* Make sure only one thread tries to load the entry */
load1:
#ifdef SLAP_ZONE_ALLOC
			if ((*eip)->bei_e && !slap_zn_validate(
					bdb->bi_cache.c_zctx, (*eip)->bei_e, (*eip)->bei_zseq)) {
				(*eip)->bei_e = NULL;
				(*eip)->bei_zseq = 0;
			}
#endif
			if ( !(*eip)->bei_e && !((*eip)->bei_state & CACHE_ENTRY_LOADING)) {
				load = 1;
				(*eip)->bei_state |= CACHE_ENTRY_LOADING;
			}

			if ( islocked ) {
				bdb_cache_entryinfo_unlock( *eip );
				islocked = 0;
			}
			rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, *eip, 0, 0, lock );
			if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
				rc = DB_NOTFOUND;
				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
			} else if ( rc == 0 ) {
				if ( load ) {
					/* Give up original read lock, obtain write lock
					 */
					if ( rc == 0 ) {
						rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
							*eip, 1, 0, lock );
					}
					if ( rc == 0 && !ep) {
						rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
					}
					if ( rc == 0 ) {
						ep->e_private = *eip;
#ifdef BDB_HIER
						bdb_fix_dn( ep, 0 );
#endif
						(*eip)->bei_e = ep;
#ifdef SLAP_ZONE_ALLOC
						(*eip)->bei_zseq = *((ber_len_t *)ep - 2);
#endif
						ep = NULL;
					}
					bdb_cache_entryinfo_lock( *eip );
					(*eip)->bei_state ^= CACHE_ENTRY_LOADING;
					bdb_cache_entryinfo_unlock( *eip );
					if ( rc == 0 ) {
						/* If we succeeded, downgrade back to a readlock. */
						rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
							*eip, 0, 0, lock );
					} else {
						/* Otherwise, release the lock. */
						bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
					}
				} else if ( !(*eip)->bei_e ) {
					/* Some other thread is trying to load the entry,
					 * give it a chance to finish.
					 */
					bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
					ldap_pvt_thread_yield();
					bdb_cache_entryinfo_lock( *eip );
					islocked = 1;
					goto load1;
#ifdef BDB_HIER
				} else {
					/* Check for subtree renames */
					rc = bdb_fix_dn( (*eip)->bei_e, 1 );
					if ( rc ) {
						bdb_cache_entry_db_relock( bdb->bi_dbenv,
							locker, *eip, 1, 0, lock );
						/* check again in case other modifier did it already */
						if ( bdb_fix_dn( (*eip)->bei_e, 1 ) )
							rc = bdb_fix_dn( (*eip)->bei_e, 2 );
						bdb_cache_entry_db_relock( bdb->bi_dbenv,
							locker, *eip, 0, 0, lock );
					}
#endif
				}
			}
		}
	}
	if ( islocked ) {
		bdb_cache_entryinfo_unlock( *eip );
	}
	if ( ep ) {
		ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
		bdb_entry_return( ep );
#endif
	}
	if ( rc == 0 ) {

		if ( load ) {
			ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
			bdb->bi_cache.c_cursize++;
			ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
		}

		ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );

		/* If the LRU list has only one entry and this is it, it
		 * doesn't need to be added again.
		 */
		if ( bdb->bi_cache.c_lruhead == bdb->bi_cache.c_lrutail &&
			bdb->bi_cache.c_lruhead == *eip ) {
			ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
		} else {
			/* if entry is on LRU list, remove from old spot */
			if ( (*eip)->bei_lrunext || (*eip)->bei_lruprev ) {
				ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex );
				LRU_DELETE( &bdb->bi_cache, *eip );
				ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
			}
			/* lru_head_mutex is unlocked for us */
			bdb_cache_lru_add( bdb, *eip );
		}
	}

#ifdef SLAP_ZONE_ALLOC
	if (rc == 0 && (*eip)->bei_e) {
		slap_zn_rlock(bdb->bi_cache.c_zctx, (*eip)->bei_e);
	}
	slap_zh_runlock(bdb->bi_cache.c_zctx);
#endif
	return rc;
}

int
bdb_cache_children(
	Operation *op,
	DB_TXN *txn,
	Entry *e )
{
	int rc;

	if ( BEI(e)->bei_kids ) {
		return 0;
	}
	if ( BEI(e)->bei_state & CACHE_ENTRY_NO_KIDS ) {
		return DB_NOTFOUND;
	}
	rc = bdb_dn2id_children( op, txn, e );
	if ( rc == DB_NOTFOUND ) {
		BEI(e)->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
	}
	return rc;
}

/* Update the cache after a successful database Add. */

int
bdb_cache_add(
	struct bdb_info *bdb,
	EntryInfo *eip,
	Entry *e,
	struct berval *nrdn,
	u_int32_t locker )
{
	EntryInfo *new, ei;
	DB_LOCK lock;
	int rc;
#ifdef BDB_HIER
	struct berval rdn = e->e_name;
#endif

	ei.bei_id = e->e_id;
	ei.bei_parent = eip;
	ei.bei_nrdn = *nrdn;
	ei.bei_lockpad = 0;

	/* Lock this entry so that bdb_add can run to completion.
	 * It can only fail if BDB has run out of lock resources.
	 */
	rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, &ei, 1, 0, &lock );
	if ( rc ) {
		bdb_cache_entryinfo_unlock( eip );
		return rc;
	}

#ifdef BDB_HIER
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr( &rdn, ',' );
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei.bei_rdn, &rdn );
	if ( eip->bei_dkids ) eip->bei_dkids++;
#endif

	rc = bdb_entryinfo_add_internal( bdb, &ei, &new );
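
/*
 * Editor's note -- illustrative sketch only, not part of cache.c. The listing
 * above relies on the LRU_ADD and LRU_DELETE macros, the bei_lrunext/bei_lruprev
 * pointers, and the cache's c_lruhead/c_lrutail fields, but the macro bodies are
 * not included in this excerpt. The minimal, self-contained example below (with
 * hypothetical lru_node/lru_list types, not OpenLDAP's) shows the usual way such
 * a doubly linked LRU list is maintained: new entries are pushed at the head, and
 * eviction walks backward from the tail, as bdb_cache_lru_add does above.
 */
#include <stdio.h>

typedef struct lru_node {
	int id;                           /* stand-in for an EntryInfo */
	struct lru_node *next, *prev;
} lru_node;

typedef struct lru_list {
	lru_node *head, *tail;            /* head = most recent, tail = eviction candidate */
} lru_list;

/* Link a node at the head of the list (analogous to LRU_ADD). */
static void lru_add( lru_list *l, lru_node *n )
{
	n->prev = NULL;
	n->next = l->head;
	if ( l->head ) l->head->prev = n;
	l->head = n;
	if ( !l->tail ) l->tail = n;
}

/* Unlink a node from wherever it sits (analogous to LRU_DELETE). */
static void lru_delete( lru_list *l, lru_node *n )
{
	if ( n->prev ) n->prev->next = n->next;
	else l->head = n->next;
	if ( n->next ) n->next->prev = n->prev;
	else l->tail = n->prev;
	n->next = n->prev = NULL;
}

int main( void )
{
	lru_list l = { NULL, NULL };
	lru_node a = { 1 }, b = { 2 }, c = { 3 };

	lru_add( &l, &a );
	lru_add( &l, &b );
	lru_add( &l, &c );                /* list is now c, b, a; tail = a */

	lru_delete( &l, l.tail );         /* evict the least recently added node: a */
	printf( "tail is now %d\n", l.tail->id );   /* prints 2 */
	return 0;
}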