
📄 indexed.cpp

📁 eDonkey client written with wxWidgets
💻 CPP
📖 Page 1 of 3
			CKadSourcePtrList::iterator itNote = KeyHashNoteMap.begin();
			for (; itNote != KeyHashNoteMap.end(); ++itNote) {
				Source* currNote = *itNote;
				CKadEntryPtrList& NoteEntryList = currNote->entryList;
				CKadEntryPtrList::iterator itNoteEntry = NoteEntryList.begin();
				for (; itNoteEntry != NoteEntryList.end(); ++itNoteEntry) {
					delete *itNoteEntry;
				}
				delete currNote;
			}
			delete currNoteHash;
		}
		m_Notes_map.clear();
	} catch (const CIOException& ioe) {
		AddDebugLogLineM(false, logKadIndex, wxString::Format(wxT("Exception in CIndexed::~CIndexed (IO error(%i))"), ioe.m_cause));
	}
}

void CIndexed::clean(void)
{
	if (m_lastClean > time(NULL)) {
		return;
	}

	uint32 k_Removed = 0;
	uint32 s_Removed = 0;
	uint32 s_Total = 0;
	uint32 k_Total = 0;
	time_t tNow = time(NULL);

	KeyHashMap::iterator itKeyHash = m_Keyword_map.begin();
	while (itKeyHash != m_Keyword_map.end()) {
		KeyHashMap::iterator curr_itKeyHash = itKeyHash++; // Don't change this to a ++it!
		KeyHash* currKeyHash = curr_itKeyHash->second;

		CSourceKeyMap::iterator itSource = currKeyHash->m_Source_map.begin();
		for (; itSource != currKeyHash->m_Source_map.end(); ) {
			CSourceKeyMap::iterator curr_itSource = itSource++; // Don't change this to a ++it!
			Source* currSource = curr_itSource->second;

			CKadEntryPtrList::iterator itEntry = currSource->entryList.begin();
			while (itEntry != currSource->entryList.end()) {
				k_Total++;

				Kademlia::CEntry* currName = *itEntry;
				if (!currName->source && currName->lifetime < tNow) {
					k_Removed++;
					itEntry = currSource->entryList.erase(itEntry);
					delete currName;
				} else {
					++itEntry;
				}
			}

			if (currSource->entryList.empty()) {
				currKeyHash->m_Source_map.erase(curr_itSource);
				delete currSource;
			}
		}

		if (currKeyHash->m_Source_map.empty()) {
			m_Keyword_map.erase(curr_itKeyHash);
			delete currKeyHash;
		}
	}

	SrcHashMap::iterator itSrcHash = m_Sources_map.begin();
	while (itSrcHash != m_Sources_map.end()) {
		SrcHashMap::iterator curr_itSrcHash = itSrcHash++; // Don't change this to a ++it!
		SrcHash* currSrcHash = curr_itSrcHash->second;

		CKadSourcePtrList::iterator itSource = currSrcHash->m_Source_map.begin();
		while (itSource != currSrcHash->m_Source_map.end()) {
			Source* currSource = *itSource;

			CKadEntryPtrList::iterator itEntry = currSource->entryList.begin();
			while (itEntry != currSource->entryList.end()) {
				s_Total++;

				Kademlia::CEntry* currName = *itEntry;
				if (currName->lifetime < tNow) {
					s_Removed++;
					itEntry = currSource->entryList.erase(itEntry);
					delete currName;
				} else {
					++itEntry;
				}
			}

			if (currSource->entryList.empty()) {
				itSource = currSrcHash->m_Source_map.erase(itSource);
				delete currSource;
			} else {
				++itSource;
			}
		}

		if (currSrcHash->m_Source_map.empty()) {
			m_Sources_map.erase(curr_itSrcHash);
			delete currSrcHash;
		}
	}

	m_totalIndexSource = s_Total;
	m_totalIndexKeyword = k_Total;
	AddDebugLogLineM(false, logKadIndex, wxString::Format(wxT("Removed %u keyword out of %u and %u source out of %u"), k_Removed, k_Total, s_Removed, s_Total));
	m_lastClean = time(NULL) + MIN2S(30);
}

bool CIndexed::AddKeyword(const CUInt128& keyID, const CUInt128& sourceID, Kademlia::CEntry* entry, uint8& load)
{
	if (!entry) {
		return false;
	}

	if (m_totalIndexKeyword > KADEMLIAMAXENTRIES) {
		load = 100;
		return false;
	}

	if (entry->size == 0 || entry->fileName.IsEmpty() || entry->taglist.size() == 0 || entry->lifetime < time(NULL)) {
		return false;
	}

	KeyHashMap::iterator itKeyHash = m_Keyword_map.find(keyID);
	KeyHash* currKeyHash = NULL;
	if (itKeyHash == m_Keyword_map.end()) {
		Source* currSource = new Source;
		currSource->sourceID.setValue(sourceID);
		currSource->entryList.push_front(entry);

		currKeyHash = new KeyHash;
		currKeyHash->keyID.setValue(keyID);
		currKeyHash->m_Source_map[currSource->sourceID] = currSource;
		m_Keyword_map[currKeyHash->keyID] = currKeyHash;
		load = 1;
		m_totalIndexKeyword++;
		return true;
	} else {
		currKeyHash = itKeyHash->second;

		uint32 indexTotal = currKeyHash->m_Source_map.size();
		if (indexTotal > KADEMLIAMAXINDEX) {
			load = 100;
			//Too many entries for this Keyword..
			return false;
		}
		Source* currSource = NULL;
		CSourceKeyMap::iterator itSource = currKeyHash->m_Source_map.find(sourceID);
		if (itSource != currKeyHash->m_Source_map.end()) {
			currSource = itSource->second;
			if (currSource->entryList.size() > 0) {
				if (indexTotal > KADEMLIAMAXINDEX - 5000) {
					load = 100;
					//We are in a hot node.. If we continued to update all the publishes
					//while this index is full, popular files will be the only thing you index.
					return false;
				}
				delete currSource->entryList.front();
				currSource->entryList.pop_front();
			} else {
				m_totalIndexKeyword++;
			}
			load = (indexTotal*100)/KADEMLIAMAXINDEX;
			currSource->entryList.push_front(entry);
			return true;
		} else {
			currSource = new Source;
			currSource->sourceID.setValue(sourceID);
			currSource->entryList.push_front(entry);
			currKeyHash->m_Source_map[currSource->sourceID] = currSource;
			m_totalIndexKeyword++;
			load = (indexTotal*100)/KADEMLIAMAXINDEX;
			return true;
		}
	}

	return false;
}

bool CIndexed::AddSources(const CUInt128& keyID, const CUInt128& sourceID, Kademlia::CEntry* entry, uint8& load)
{
	if (!entry) {
		return false;
	}

	if (entry->ip == 0 || entry->tcpport == 0 || entry->udpport == 0 || entry->taglist.size() == 0 || entry->lifetime < time(NULL)) {
		return false;
	}

	SrcHash* currSrcHash = NULL;
	SrcHashMap::iterator itSrcHash = m_Sources_map.find(keyID);
	if (itSrcHash == m_Sources_map.end()) {
		Source* currSource = new Source;
		currSource->sourceID.setValue(sourceID);
		currSource->entryList.push_front(entry);

		currSrcHash = new SrcHash;
		currSrcHash->keyID.setValue(keyID);
		currSrcHash->m_Source_map.push_front(currSource);

		m_Sources_map[currSrcHash->keyID] = currSrcHash;
		m_totalIndexSource++;
		load = 1;
		return true;
	} else {
		currSrcHash = itSrcHash->second;
		uint32 size = currSrcHash->m_Source_map.size();

		CKadSourcePtrList::iterator itSource = currSrcHash->m_Source_map.begin();
		for (; itSource != currSrcHash->m_Source_map.end(); ++itSource) {
			Source* currSource = *itSource;

			if (currSource->entryList.size()) {
				CEntry* currEntry = currSource->entryList.front();
				wxASSERT(currEntry != NULL);
				if (currEntry->ip == entry->ip && (currEntry->tcpport == entry->tcpport || currEntry->udpport == entry->udpport)) {
					CEntry* currName = currSource->entryList.front();
					currSource->entryList.pop_front();
					delete currName;
					currSource->entryList.push_front(entry);
					load = (size*100)/KADEMLIAMAXSOUCEPERFILE;
					return true;
				}
			} else {
				//This should never happen!
				currSource->entryList.push_front(entry);
				wxASSERT(0);
				load = (size*100)/KADEMLIAMAXSOUCEPERFILE;
				return true;
			}
		}
		if (size > KADEMLIAMAXSOUCEPERFILE) {
			Source* currSource = currSrcHash->m_Source_map.back();
			currSrcHash->m_Source_map.pop_back();
			wxASSERT(currSource != NULL);
			Kademlia::CEntry* currName = currSource->entryList.back();
			currSource->entryList.pop_back();
			wxASSERT(currName != NULL);
			delete currName;
			currSource->sourceID.setValue(sourceID);
			currSource->entryList.push_front(entry);
			currSrcHash->m_Source_map.push_front(currSource);
			load = 100;
			return true;
		} else {
			Source* currSource = new Source;
			currSource->sourceID.setValue(sourceID);
			currSource->entryList.push_front(entry);
			currSrcHash->m_Source_map.push_front(currSource);
			m_totalIndexSource++;
			load = (size*100)/KADEMLIAMAXSOUCEPERFILE;
			return true;
		}
	}

	return false;
}

bool CIndexed::AddNotes(const CUInt128& keyID, const CUInt128& sourceID, Kademlia::CEntry* entry, uint8& load)
{
	if (!entry) {
		return false;
	}

	if (entry->ip == 0 || entry->taglist.size() == 0) {
		return false;
	}

	SrcHash* currNoteHash = NULL;
	SrcHashMap::iterator itNoteHash = m_Notes_map.find(keyID);
	if (itNoteHash == m_Notes_map.end()) {
		Source* currNote = new Source;
		currNote->sourceID.setValue(sourceID);
		currNote->entryList.push_front(entry);

		currNoteHash = new SrcHash;
		currNoteHash->keyID.setValue(keyID);
		currNoteHash->m_Source_map.push_front(currNote);
		m_Notes_map[currNoteHash->keyID] = currNoteHash;
		load = 1;
		return true;
	} else {
		currNoteHash = itNoteHash->second;
		uint32 size = currNoteHash->m_Source_map.size();

		CKadSourcePtrList::iterator itSource = currNoteHash->m_Source_map.begin();
		for (; itSource != currNoteHash->m_Source_map.end(); ++itSource) {
			Source* currNote = *itSource;

			if (currNote->entryList.size()) {
				CEntry* currEntry = currNote->entryList.front();
				wxASSERT(currEntry != NULL);
				if (currEntry->ip == entry->ip || !currEntry->sourceID.compareTo(entry->sourceID)) {
					CEntry* currName = currNote->entryList.front();
					currNote->entryList.pop_front();
					delete currName;
					currNote->entryList.push_front(entry);
					load = (size*100)/KADEMLIAMAXNOTESPERFILE;
					return true;
				}
			} else {
				//This should never happen!
				currNote->entryList.push_front(entry);
				wxASSERT(0);
				load = (size*100)/KADEMLIAMAXNOTESPERFILE;
				return true;
			}
		}
		if (size > KADEMLIAMAXNOTESPERFILE) {
			Source* currNote = currNoteHash->m_Source_map.back();
			currNoteHash->m_Source_map.pop_back();
			wxASSERT(currNote != NULL);
			CEntry* currName = currNote->entryList.back();
			currNote->entryList.pop_back();
			wxASSERT(currName != NULL);
			delete currName;
			currNote->sourceID.setValue(sourceID);
			currNote->entryList.push_front(entry);
			currNoteHash->m_Source_map.push_front(currNote);
			load = 100;
			return true;
		} else {
			Source* currNote = new Source;
			currNote->sourceID.setValue(sourceID);
			currNote->entryList.push_front(entry);
			currNoteHash->m_Source_map.push_front(currNote);
			load = (size*100)/KADEMLIAMAXNOTESPERFILE;
			return true;
		}
	}

	return false;
}

bool CIndexed::AddLoad(const CUInt128& keyID, uint32 timet)
{
	Load* load = NULL;

	if ((uint32)time(NULL) > timet) {
		return false;
	}

	LoadMap::iterator it = m_Load_map.find(keyID);
	if (it != m_Load_map.end()) {
		wxASSERT(0);
		return false;
	}

	load = new Load();
	load->keyID.setValue(keyID);
	load->time = timet;
	m_Load_map[load->keyID] = load;
	return true;
}

bool SearchTermsMatch(const SSearchTerm* pSearchTerm, const Kademlia::CEntry* item/*, CStringArray& astrFileNameTokens*/)
{
	// boolean operators
	if (pSearchTerm->type == SSearchTerm::AND) {
		return SearchTermsMatch(pSearchTerm->left, item/*, astrFileNameTokens*/) && SearchTermsMatch(pSearchTerm->right, item/*, astrFileNameTokens*/);
	}
	if (pSearchTerm->type == SSearchTerm::OR) {
		return SearchTermsMatch(pSearchTerm->left, item/*, astrFileNameTokens*/) || SearchTermsMatch(pSearchTerm->right, item/*, astrFileNameTokens*/);
	}
	if (pSearchTerm->type == SSearchTerm::NAND) {
		return SearchTermsMatch(pSearchTerm->left, item/*, astrFileNameTokens*/) && !SearchTermsMatch(pSearchTerm->right, item/*, astrFileNameTokens*/);
	}
	// word which is to be searched in the file name (and in additional meta data as done by some ed2k servers???)
	if (pSearchTerm->type == SSearchTerm::String) {
		int iStrSearchTerms = pSearchTerm->astr->GetCount();
		if (iStrSearchTerms == 0) {
			return false;
		}
#if 0
		//TODO: Use a pre-tokenized list for better performance.
		// tokenize the filename (very expensive) only once per search expression and only if really needed
		if (astrFileNameTokens.GetCount() == 0)
		{
			int iPosTok = 0;
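
The cleanup loops in CIndexed::clean() above rely on the post-increment-before-erase idiom for node-based containers, which is why the original comments warn "Don't change this to a ++it!". The following standalone sketch (using a plain std::map with made-up data, not the Kademlia types from this file) shows the same pattern in isolation:

// Standalone illustration only -- not part of Indexed.cpp.
// Demonstrates the erase-while-iterating idiom used in CIndexed::clean():
// copy the current position and advance the loop iterator *before* erasing,
// so the erased node never invalidates the iterator the loop keeps using.
#include <iostream>
#include <map>
#include <string>

int main()
{
	std::map<std::string, int> lifetimes = {
		{"expired-a", 0}, {"live-b", 100}, {"expired-c", 0}
	};

	std::map<std::string, int>::iterator it = lifetimes.begin();
	while (it != lifetimes.end()) {
		// Same shape as "curr_itKeyHash = itKeyHash++" in CIndexed::clean().
		std::map<std::string, int>::iterator curr = it++;
		if (curr->second == 0) {
			lifetimes.erase(curr);	// only 'curr' is invalidated; 'it' stays valid
		}
	}

	std::cout << "remaining entries: " << lifetimes.size() << std::endl;	// prints 1
	return 0;
}

The point is that erasing from std::map invalidates only the erased iterator, so taking a copy of the position and advancing first keeps the loop iterator usable for the next pass.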
