// dhtdbimpl.java
				new DHTDBLookupResult()
				{
					public DHTDBValue[]
					getValues()
					{
						return( values );
					}
					public byte
					getDiversificationType()
					{
						return( mapping.getDiversificationType());
					}
				});
		}finally{
			this_mon.exit();
		}
	}
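	// Local get: returns the value held for 'key' as seen by our own contact
	// (local_contact), or null if the key is not stored here; access to
	// stored_values is guarded by this_mon.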
	public DHTDBValue
	get(
		HashWrapper		key )
	{
			// local get
		try{
			this_mon.enter();
			DHTDBMapping mapping = (DHTDBMapping)stored_values.get( key );
			if ( mapping != null ){
				return( mapping.get( local_contact ));
			}
			return( null );
		}finally{
			this_mon.exit();
		}
	}
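	// Local remove: deletes any value stored by 'originator' under 'key'; if one
	// was removed, a deletion record stamped with the next value version is
	// returned, otherwise null.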
	public DHTDBValue
	remove(
		DHTTransportContact		originator,
		HashWrapper				key )
	{
			// local remove
		try{
			this_mon.enter();
			DHTDBMapping mapping = (DHTDBMapping)stored_values.get( key );
			if ( mapping != null ){
				DHTDBValueImpl res = mapping.remove( originator );
				if ( res != null ){
					return( res.getValueForDeletion( getNextValueVersion()));
				}
				return( null );
			}
			return( null );
		}finally{
			this_mon.exit();
		}
	}
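	// Key-block request handling: requests received directly from another contact
	// (direct_sender != null) are only processed if the blocked key falls within
	// our k closest IDs and the sender passes contact verification; actual
	// processing is delegated to the storage adapter.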
	public DHTStorageBlock
	keyBlockRequest(
		DHTTransportContact		direct_sender,
		byte[]					request,
		byte[]					signature )
	{
		if ( adapter == null ){
			return( null );
		}
			// for block requests sent to us (as opposed to being returned from other operations)
			// make sure that the key is close enough to us
		if ( direct_sender != null ){
			byte[] key = adapter.getKeyForKeyBlock( request );
			List closest_contacts = control.getClosestKContactsList( key, true );
			boolean process_it = false;
			for (int i=0;i<closest_contacts.size();i++){
				if ( router.isID(((DHTTransportContact)closest_contacts.get(i)).getID())){
					process_it = true;
					break;
				}
			}
			if ( !process_it ){
				DHTLog.log( "Not processing key block for " + DHTLog.getString2(key) + " as key too far away" );
				return( null );
			}
			if ( !control.verifyContact( direct_sender, true )){
				DHTLog.log( "Not processing key block for " + DHTLog.getString2(key) + " as verification failed" );
				return( null );
			}
		}
		return( adapter.keyBlockRequest( direct_sender, request, signature ));
	}
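	// Key-block accessors: straightforward delegation to the storage adapter,
	// returning null / empty results when no adapter is configured.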
	public DHTStorageBlock
	getKeyBlockDetails(
		byte[]		key )
	{
		if ( adapter == null ){
			return( null );
		}
		return( adapter.getKeyBlockDetails( key ));
	}
	public boolean
	isKeyBlocked(
		byte[]		key )
	{
		return( getKeyBlockDetails( key ) != null );
	}
	public DHTStorageBlock[]
	getDirectKeyBlocks()
	{
		if ( adapter == null ){
			return( new DHTStorageBlock[0] );
		}
		return( adapter.getDirectKeyBlocks());
	}
	public boolean
	isEmpty()
	{
		return( total_keys == 0 );
	}
	public int
	getKeyCount()
	{
		return( (int)total_keys );
	}
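	// Aggregates per-mapping statistics (value count, local/direct/indirect sizes
	// and diversification counts) into the DHTDBStats value-details array.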
	public int[]
	getValueDetails()
	{
		try{
			this_mon.enter();
			int[] res = new int[6];
			Iterator it = stored_values.values().iterator();
			while( it.hasNext()){
				DHTDBMapping mapping = (DHTDBMapping)it.next();
				res[DHTDBStats.VD_VALUE_COUNT] += mapping.getValueCount();
				res[DHTDBStats.VD_LOCAL_SIZE] += mapping.getLocalSize();
				res[DHTDBStats.VD_DIRECT_SIZE] += mapping.getDirectSize();
				res[DHTDBStats.VD_INDIRECT_SIZE] += mapping.getIndirectSize();
				int dt = mapping.getDiversificationType();
				if ( dt == DHT.DT_FREQUENCY ){
					res[DHTDBStats.VD_DIV_FREQ]++;
				}else if ( dt == DHT.DT_SIZE ){
					res[DHTDBStats.VD_DIV_SIZE]++;
				}
			}
			return( res );
		}finally{
			this_mon.exit();
		}
	}
	public int
	getKeyBlockCount()
	{
		if ( adapter == null ){
			return( 0 );
		}
		return( adapter.getDirectKeyBlocks().length );
	}
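	// Returns an iterator over a copy of the stored key set; the copy is taken
	// while holding this_mon, so callers can iterate without the monitor.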
	public Iterator
	getKeys()
	{
		try{
			this_mon.enter();
			return( new ArrayList( stored_values.keySet()).iterator());
		}finally{
			this_mon.exit();
		}
	}
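	// Republishes locally originated values: under this_mon each local value is
	// collected per key (with its creation time reset), then, outside the monitor,
	// every collected value is re-put into the DHT via control.putEncodedKey.
	// Returns the number of values published.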
	protected int
	republishOriginalMappings()
	{
		int values_published = 0;
		Map republish = new HashMap();
		try{
			this_mon.enter();
			Iterator it = stored_values.entrySet().iterator();
			while( it.hasNext()){
				Map.Entry entry = (Map.Entry)it.next();
				HashWrapper key = (HashWrapper)entry.getKey();
				DHTDBMapping mapping = (DHTDBMapping)entry.getValue();
				Iterator it2 = mapping.getValues();
				List values = new ArrayList();
				while( it2.hasNext()){
					DHTDBValueImpl value = (DHTDBValueImpl)it2.next();
					if ( value != null && value.isLocal()){
							// we're republishing the data, reset the creation time
						value.setCreationTime();
						values.add( value );
					}
				}
				if ( values.size() > 0 ){
					republish.put( key, values );
				}
			}
		}finally{
			this_mon.exit();
		}
		Iterator it = republish.entrySet().iterator();
		while( it.hasNext()){
			Map.Entry entry = (Map.Entry)it.next();
			HashWrapper key = (HashWrapper)entry.getKey();
			List values = (List)entry.getValue();
				// no point in worrying about multi-value puts here as it is extremely
				// unlikely that more than one value will be stored locally, or that
				// more than one value will go to the same contact
			for (int i=0;i<values.size();i++){
				values_published++;
				control.putEncodedKey( key.getHash(), "Republish", (DHTDBValueImpl)values.get(i), 0, true );
			}
		}
		return( values_published );
	}
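	// Republishes cached (non-local) values to the current k closest contacts for
	// each key. Values stored within the last cache_republish_interval are skipped,
	// as are keys whose mapping has diversified; keys for which we are no longer
	// one of the k closest contacts get one final publish and are queued in
	// stop_caching. The values_published / keys_published / republish_ops counters
	// track the work performed.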
	protected int[]
	republishCachedMappings()
	{
			// first refresh any leaves that have not performed at least one lookup in the
			// last period
		router.refreshIdleLeaves( cache_republish_interval );
		final Map republish = new HashMap();
		long now = System.currentTimeMillis();
		try{
			this_mon.enter();
			checkCacheExpiration( true );
			Iterator it = stored_values.entrySet().iterator();
			while( it.hasNext()){
				Map.Entry entry = (Map.Entry)it.next();
				HashWrapper key = (HashWrapper)entry.getKey();
				DHTDBMapping mapping = (DHTDBMapping)entry.getValue();
					// assume that if we've diversified then the other k-1 locations are under similar
					// stress and will have done likewise - no point in republishing cache values to them.
					// New nodes joining will have had stuff forwarded to them regardless of diversification
					// status
				if ( mapping.getDiversificationType() != DHT.DT_NONE ){
					continue;
				}
				Iterator it2 = mapping.getValues();
				List values = new ArrayList();
				while( it2.hasNext()){
					DHTDBValueImpl value = (DHTDBValueImpl)it2.next();
					if ( !value.isLocal()){
							// if this value was stored < period ago then we assume that it was
							// also stored to the other k-1 locations at the same time and therefore
							// we don't need to re-store it
						if ( now < value.getStoreTime()){
								// deal with clock changes
							value.setStoreTime( now );
						}else if ( now - value.getStoreTime() <= cache_republish_interval ){
							// System.out.println( "skipping store" );
						}else{
							values.add( value );
						}
					}
				}
				if ( values.size() > 0 ){
					republish.put( key, values );
				}
			}
		}finally{
			this_mon.exit();
		}
		final int[] values_published = {0};
		final int[] keys_published = {0};
		final int[] republish_ops = {0};
		final HashSet anti_spoof_done = new HashSet();
		if ( republish.size() > 0 ){
			// System.out.println( "cache republish" );
				// The approach is to refresh all leaves in the smallest subtree, thus populating the
				// tree with sufficient information to directly know which nodes to republish the values
				// to.
				// However, I'm going to rely on the "refresh idle leaves" logic above
				// (that's required to keep the DHT alive in general) to ensure that all
				// k-buckets are reasonably up-to-date
			Iterator it = republish.entrySet().iterator();
			List stop_caching = new ArrayList();
				// build a map of contact -> list of keys to republish
			Map contact_map = new HashMap();
			while( it.hasNext()){
				Map.Entry entry = (Map.Entry)it.next();
				HashWrapper key = (HashWrapper)entry.getKey();
				byte[] lookup_id = key.getHash();
					// just use the closest contacts - if some have failed then they'll
					// get flushed out by this operation. Grabbing just the live ones
					// is a bad idea as failures may rack up against the live ones due
					// to network problems and kill them, leaving the dead ones!
				List contacts = control.getClosestKContactsList( lookup_id, false );
					// if we are no longer one of the K closest contacts then we shouldn't
					// cache the value
				boolean keep_caching = false;
				for (int j=0;j<contacts.size();j++){
					if ( router.isID(((DHTTransportContact)contacts.get(j)).getID())){
						keep_caching = true;
						break;
					}
				}
				if ( !keep_caching ){
					DHTLog.log( "Dropping cache entry for " + DHTLog.getString( lookup_id ) + " as now too far away" );
					stop_caching.add( key );
						// we carry on and do one last publish
				}
				for (int j=0;j<contacts.size();j++){
					DHTTransportContact contact = (DHTTransportContact)contacts.get(j);
					if ( router.isID( contact.getID())){
						continue;	// ignore ourselves
					}
					Object[] data = (Object[])contact_map.get( new HashWrapper(contact.getID()));
					if ( data == null ){
						data = new Object[]{ contact, new ArrayList()};
						contact_map.put( new HashWrapper(contact.getID()), data );