// sharedfilelist.cpp
else if (_aMetaTags[i].nName == FT_MEDIA_LENGTH && pTag->IsInt())
{
ASSERT( _aMetaTags[i].nED2KType == TAGTYPE_STRING );
// All 'eserver' versions and eMule versions >= 0.42.4 support the media length tag with type 'integer'
if ( (pServer!=NULL && (pServer->GetTCPFlags() & SRV_TCPFLG_COMPRESSION))
	|| uEmuleVer >= MAKE_CLIENT_VERSION(0,42,4))
{
if (pServer && (pServer->GetTCPFlags() & SRV_TCPFLG_NEWTAGS))
tags.Add(new CTag(_aMetaTags[i].nName, pTag->GetInt()));
else
tags.Add(new CTag(_aMetaTags[i].pszED2KName, pTag->GetInt()));
}
else
{
CString strValue;
SecToTimeLength(pTag->GetInt(), strValue);
tags.Add(new CTag(_aMetaTags[i].pszED2KName, strValue));
}
}
else
ASSERT(0);
}
}
}
EUtf8Str eStrEncode;
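// Choose the string encoding for the tag values that follow: raw UTF-8 (without BOM)
// when the peer has advertised Unicode support, plain ANSI otherwise.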
#ifdef _UNICODE
if (pServer != NULL && (pServer->GetTCPFlags() & SRV_TCPFLG_UNICODE)){
// eserver doesn't properly support searching with ASCII-7 strings in BOM-UTF8 published strings
//eStrEncode = utf8strOptBOM;
eStrEncode = utf8strRaw;
}
else if (pClient && !pClient->GetUnicodeSupport())
eStrEncode = utf8strNone;
else
eStrEncode = utf8strRaw;
#else
eStrEncode = utf8strNone;
#endif
files->WriteUInt32(tags.GetSize());
for (int i = 0; i < tags.GetSize(); i++)
{
const CTag* pTag = tags[i];
//TRACE(" %s\n", pTag->GetFullInfo());
if ((pServer && (pServer->GetTCPFlags() & SRV_TCPFLG_NEWTAGS)) || (uEmuleVer >= MAKE_CLIENT_VERSION(0,42,7)))
pTag->WriteNewEd2kTag(files, eStrEncode);
else
pTag->WriteTagToFile(files, eStrEncode);
delete pTag;
}
}
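// Illustrative sketch (not part of the build): the fallback branch above converts the media
// length from seconds to a display string for old servers/clients that only understand the
// string form of FT_MEDIA_LENGTH. The exact format produced by the real SecToTimeLength() is
// an assumption here (H:MM:SS, or M:SS below one hour).
#if 0
static void SecToTimeLengthSketch(UINT uSec, CString& rstrOut)
{
	if (uSec >= 3600)
		rstrOut.Format(_T("%u:%02u:%02u"), uSec / 3600, (uSec % 3600) / 60, uSec % 60);
	else
		rstrOut.Format(_T("%u:%02u"), uSec / 60, uSec % 60);
}
#endif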
// -khaos--+++> New param: pbytesLargest, a reference to a uint64.
// Various other changes to accommodate our new statistic...
// The point of this is to find the largest file currently shared.
uint64 CSharedFileList::GetDatasize(uint64 &pbytesLargest) const
{
pbytesLargest=0;
// <-----khaos-
uint64 fsize;
fsize=0;
CCKey bufKey;
CKnownFile* cur_file;
for (POSITION pos = m_Files_map.GetStartPosition();pos != 0;){
m_Files_map.GetNextAssoc(pos,bufKey,cur_file);
fsize+=cur_file->GetFileSize();
// -khaos--+++> If this file is bigger than all the others...well duh.
if (cur_file->GetFileSize() > pbytesLargest)
pbytesLargest = cur_file->GetFileSize();
// <-----khaos-
}
return fsize;
}
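// Usage sketch (illustrative only): how a caller, e.g. the statistics code, might use
// GetDatasize(). The helper and variable names here are hypothetical.
#if 0
static void PrintShareStatsSketch()
{
	uint64 uLargestShared = 0;
	uint64 uTotalShared = theApp.sharedfiles->GetDatasize(uLargestShared);
	TRACE(_T("Sharing %I64u bytes in total, largest shared file is %I64u bytes\n"), uTotalShared, uLargestShared);
}
#endif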
CKnownFile* CSharedFileList::GetFileByID(const uchar* hash) const
{
if (hash)
{
CKnownFile* found_file;
CCKey key(hash);
if (m_Files_map.Lookup(key, found_file))
return found_file;
}
return NULL;
}
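// Note: unlike GetFileByID(), IsFilePtrInList() cannot use the hash map lookup because it only
// has a (possibly stale) pointer; it therefore walks every map entry, which is O(n).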
bool CSharedFileList::IsFilePtrInList(const CKnownFile* file) const
{
if (file)
{
POSITION pos = m_Files_map.GetStartPosition();
while (pos)
{
CCKey key;
CKnownFile* cur_file;
m_Files_map.GetNextAssoc(pos, key, cur_file);
if (file == cur_file)
return true;
}
}
return false;
}
void CSharedFileList::HashNextFile(){
// SLUGFILLER: SafeHash
if (!theApp.emuledlg || !::IsWindow(theApp.emuledlg->m_hWnd)) // wait for the dialog to open
return;
if (theApp.emuledlg && theApp.emuledlg->IsRunning())
theApp.emuledlg->sharedfileswnd->sharedfilesctrl.ShowFilesCount();
if (!currentlyhashing_list.IsEmpty()) // one hash at a time
return;
// SLUGFILLER: SafeHash
if (waitingforhash_list.IsEmpty())
return;
UnknownFile_Struct* nextfile = waitingforhash_list.RemoveHead();
currentlyhashing_list.AddTail(nextfile); // SLUGFILLER: SafeHash - keep track
CAddFileThread* addfilethread = (CAddFileThread*) AfxBeginThread(RUNTIME_CLASS(CAddFileThread), THREAD_PRIORITY_BELOW_NORMAL,0, CREATE_SUSPENDED);
addfilethread->SetValues(this,nextfile->strDirectory,nextfile->strName);
addfilethread->ResumeThread();
// SLUGFILLER: SafeHash - nextfile deleting handled elsewhere
//delete nextfile;
}
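// Illustrative sketch (not part of the build): how a newly found file would typically enter the
// SafeHash pipeline above. The helper below is hypothetical; in the real code the enqueueing is
// done by the directory-scanning code, and the waiting list is a private member.
#if 0
void EnqueueForHashingSketch(CSharedFileList* pList, const CString& strDir, const CString& strName)
{
	if (pList->IsHashing(strDir, strName))
		return; // already waiting for, or currently being, hashed
	UnknownFile_Struct* pFile = new UnknownFile_Struct;
	pFile->strDirectory = strDir;
	pFile->strName = strName;
	// in the real code: waitingforhash_list.AddTail(pFile); (private member, shown only to illustrate the flow)
	pList->HashNextFile(); // spawns one CAddFileThread unless a hash is already in progress
}
#endif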
// SLUGFILLER: SafeHash
bool CSharedFileList::IsHashing(const CString& rstrDirectory, const CString& rstrName){
for (POSITION pos = waitingforhash_list.GetHeadPosition(); pos != 0; ){
const UnknownFile_Struct* pFile = waitingforhash_list.GetNext(pos);
if (!pFile->strName.CompareNoCase(rstrName) && !CompareDirectories(pFile->strDirectory, rstrDirectory))
return true;
}
for (POSITION pos = currentlyhashing_list.GetHeadPosition(); pos != 0; ){
const UnknownFile_Struct* pFile = currentlyhashing_list.GetNext(pos);
if (!pFile->strName.CompareNoCase(rstrName) && !CompareDirectories(pFile->strDirectory, rstrDirectory))
return true;
}
return false;
}
void CSharedFileList::RemoveFromHashing(CKnownFile* hashed){
for (POSITION pos = currentlyhashing_list.GetHeadPosition(); pos != 0; ){
POSITION posLast = pos;
const UnknownFile_Struct* pFile = currentlyhashing_list.GetNext(pos);
if (!pFile->strName.CompareNoCase(hashed->GetFileName()) && !CompareDirectories(pFile->strDirectory, hashed->GetPath())){
currentlyhashing_list.RemoveAt(posLast);
delete pFile;
HashNextFile(); // start next hash if possible, but only if a previous hash finished
return;
}
}
}
void CSharedFileList::HashFailed(UnknownFile_Struct* hashed){
for (POSITION pos = currentlyhashing_list.GetHeadPosition(); pos != 0; ){
POSITION posLast = pos;
const UnknownFile_Struct* pFile = currentlyhashing_list.GetNext(pos);
if (!pFile->strName.CompareNoCase(hashed->strName) && !CompareDirectories(pFile->strDirectory, hashed->strDirectory)){
currentlyhashing_list.RemoveAt(posLast);
delete pFile;
HashNextFile(); // start next hash if possible, but only if a previous hash finished
break;
}
}
delete hashed;
}
// SLUGFILLER: SafeHash
IMPLEMENT_DYNCREATE(CAddFileThread, CWinThread)
CAddFileThread::CAddFileThread()
{
m_pOwner = NULL;
m_partfile = NULL;
}
void CAddFileThread::SetValues(CSharedFileList* pOwner, LPCTSTR directory, LPCTSTR filename, CPartFile* partfile)
{
m_pOwner = pOwner;
m_strDirectory = directory;
m_strFilename = filename;
m_partfile = partfile;
}
BOOL CAddFileThread::InitInstance()
{
InitThreadLocale();
return TRUE;
}
int CAddFileThread::Run()
{
DbgSetThreadName("Hashing %s", m_strFilename);
if ( !(m_pOwner || m_partfile) || m_strFilename.IsEmpty() || !theApp.emuledlg->IsRunning() )
return 0;
CoInitialize(NULL);
// Locking the hashing thread is needed because we may create several of these threads at startup
// when rehashing potentially corrupted, partially downloaded files. If all those hash threads ran
// concurrently, the I/O system would be under very heavy load and progress only slowly.
CSingleLock sLock1(&theApp.hashing_mut); // only one filehash at a time
sLock1.Lock();
CString strFilePath;
_tmakepath(strFilePath.GetBuffer(MAX_PATH), NULL, m_strDirectory, m_strFilename, NULL);
strFilePath.ReleaseBuffer();
if (m_partfile)
theApp.QueueLogLine(false, GetResString(IDS_HASHINGFILE) + _T(" \"%s\" \"%s\""), m_partfile->GetFileName(), strFilePath);
else
theApp.QueueLogLine(false, GetResString(IDS_HASHINGFILE) + _T(" \"%s\""), strFilePath);
CKnownFile* newrecord = new CKnownFile();
if (newrecord->CreateFromFile(m_strDirectory, m_strFilename, m_partfile) && theApp.emuledlg && theApp.emuledlg->IsRunning()) // SLUGFILLER: SafeHash - in case of shutdown while still hashing
{
if (m_partfile && m_partfile->GetFileOp() == PFOP_HASHING)
m_partfile->SetFileOp(PFOP_NONE);
VERIFY( PostMessage(theApp.emuledlg->m_hWnd, TM_FINISHEDHASHING, (m_pOwner ? 0: (WPARAM)m_partfile), (LPARAM)newrecord) );
}
else
{
if (theApp.emuledlg && theApp.emuledlg->IsRunning())
{
if (m_partfile && m_partfile->GetFileOp() == PFOP_HASHING)
m_partfile->SetFileOp(PFOP_NONE);
}
// SLUGFILLER: SafeHash - inform main program of hash failure
if (m_pOwner && theApp.emuledlg && theApp.emuledlg->IsRunning())
{
UnknownFile_Struct* hashed = new UnknownFile_Struct;
hashed->strDirectory = m_strDirectory;
hashed->strName = m_strFilename;
VERIFY( PostMessage(theApp.emuledlg->m_hWnd,TM_HASHFAILED,0,(LPARAM)hashed) );
}
// SLUGFILLER: SafeHash
delete newrecord;
}
sLock1.Unlock();
CoUninitialize();
return 0;
}
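// Illustrative sketch (not part of the build): the messages posted above are handled on the main
// thread. Handler name and wiring below are hypothetical approximations of the CemuleDlg side.
#if 0
LRESULT CemuleDlg::OnHashingFinishedSketch(WPARAM wParam, LPARAM lParam)
{
	CKnownFile* pHashedFile = (CKnownFile*)lParam;
	if (wParam == 0)
		theApp.sharedfiles->FileHashingFinished(pHashedFile);    // hashed for the shared file list
	else
		((CPartFile*)wParam)->PartFileHashFinished(pHashedFile); // rehash of a downloading part file
	return 0;
}
#endif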
void CSharedFileList::UpdateFile(CKnownFile* toupdate)
{
output->UpdateFile(toupdate);
}
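// Process() is called periodically; it re-publishes the shared file list to the connected ed2k
// server at most once every ED2KREPUBLISHTIME milliseconds. The unsigned tick arithmetic below
// remains correct across a GetTickCount() wrap-around.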
void CSharedFileList::Process()
{
if( !m_lastPublishED2KFlag || ( ::GetTickCount() - m_lastPublishED2K < ED2KREPUBLISHTIME ) )
{
return;
}
SendListToServer();
m_lastPublishED2K = ::GetTickCount();
}
void CSharedFileList::Publish()
{
UINT tNow = time(NULL);
bool isFirewalled = theApp.IsFirewalled();
if( Kademlia::CKademlia::isConnected() && ( !isFirewalled || ( isFirewalled && theApp.clientlist->GetBuddyStatus() == 2)) && GetCount() && Kademlia::CKademlia::getPublish())
{
if( Kademlia::CKademlia::getTotalStoreKey() < KADEMLIATOTALSTOREKEY)
{
if( (!m_lastProcessPublishKadKeywordList || (::GetTickCount() - m_lastProcessPublishKadKeywordList) > KADEMLIAPUBLISHTIME) )
{
if (tNow >= m_keywords->GetNextPublishTime())
{
// fail-safe: reset the next publish time of the keyword list; the "worst case" is that we process
// the keyword list every KADEMLIAPUBLISHTIME seconds
m_keywords->SetNextPublishTime(0);
// search the next keyword which has to be (re)-published
UINT tMinNextPublishTime = (UINT)-1;
int iCheckedKeywords = 0;
CPublishKeyword* pPubKw = m_keywords->GetNextKeyword();
while (pPubKw)
{
iCheckedKeywords++;
UINT tNextKwPublishTime = pPubKw->GetNextPublishTime();
ASSERT( pPubKw->GetRefCount() != 0 );
if (tNextKwPublishTime == 0 || tNextKwPublishTime <= tNow)
{
DEBUG_ONLY( Debug(_T("pkwlst: %-18ls Refs=%3u Published=%2u NextPublish=%s Publishing\n"), pPubKw->GetKeyword(), pPubKw->GetRefCount(), pPubKw->GetPublishedCount(), CastSecondsToHM(tNextKwPublishTime - tNow)) );
Kademlia::CSearch* pSearch = Kademlia::CSearchManager::prepareFindFile(Kademlia::CSearch::STOREKEYWORD, false, pPubKw->GetKadID());
if (pSearch)
{
// add all file IDs which relate to the current keyword to be published
const CSimpleKnownFileArray& aFiles = pPubKw->GetReferences();
uint32 count = 0;
for (int f = 0; f < aFiles.GetSize(); f++)
{
ASSERT_VALID( aFiles[f] );
//Only publish keywords for complete files; for incomplete ones, someone else who has the full file should publish them.
//As a side effect, this may help reduce people finding incomplete files in the network.
if( !aFiles[f]->IsPartFile() )
{
count++;
Kademlia::CUInt128 kadFileID(aFiles[f]->GetFileHash());
pSearch->addFileID(kadFileID);
if( count > 150 )
{
pPubKw->RotateReferences(f);
break;
}
}
}
if( count )
{
pSearch->PreparePacket();
pPubKw->SetNextPublishTime(tNow + (KADEMLIAREPUBLISHTIMEK));
pPubKw->IncPublishedCount();
Kademlia::CSearchManager::startSearch(pSearch);
}
else
{
pPubKw->SetNextPublishTime(tNow + (KADEMLIAREPUBLISHTIMEK/2));
pPubKw->IncPublishedCount();
delete pSearch;
}
}
break;
}
//DEBUG_ONLY( Debug("pkwlst: %-18s Refs=%3u Published=%2u NextPublish=%s\n", pPubKw->GetKeyword(), pPubKw->GetRefCount(), pPubKw->GetPublishedCount(), CastSecondsToHM(tNextKwPublishTime - tNow)) );
if (tNextKwPublishTime < tMinNextPublishTime)
tMinNextPublishTime = tNextKwPublishTime;
if (iCheckedKeywords >= m_keywords->GetCount()){
DEBUG_ONLY( Debug(_T("pkwlst: EOL\n")) );
// we processed the entire list of keywords to be published, set the next list processing
// time to the min. time the next keyword has to be published.
m_keywords->SetNextPublishTime(tMinNextPublishTime);
break;
}
pPubKw = m_keywords->GetNextKeyword();
}
}
else{
//DEBUG_ONLY( Debug("Next processing of publish keyword list in %s\n", CastSecondsToHM(m_keywords->GetNextPublishTime() - tNow)) );
}
// even if we did not publish a keyword, reset the timer so that this list is processed
// only every KADEMLIAPUBLISHTIME seconds.
m_lastProcessPublishKadKeywordList = GetTickCount();
}
}
if( Kademlia::CKademlia::getTotalStoreSrc() < KADEMLIATOTALSTORESRC)
{
if( (!m_lastPublishKadSrc || (::GetTickCount() - m_lastPublishKadSrc) > KADEMLIAPUBLISHTIME) )
{
if(m_currFileSrc > GetCount())
m_currFileSrc = 0;
CKnownFile* pCurKnownFile = GetFileByIndex(m_currFileSrc);
if(pCurKnownFile)
{
//Only publish a source if one of two conditions holds:
//1) We are not firewalled.
//2) We are firewalled, but it is a complete source.
//
//HighID users will find incomplete sources passively.
//If the overhead of LowID is not too high, maybe we can start publishing all LowID sources.
if (pCurKnownFile->PublishSrc() && (!theApp.IsFirewalled() || (theApp.IsFirewalled() && !pCurKnownFile->IsPartFile())))
{
Kademlia::CUInt128 kadFileID;
kadFileID.setValue(pCurKnownFile->GetFileHash());
Kademlia::CSearchManager::prepareFindFile(Kademlia::CSearch::STOREFILE, true, kadFileID );
}
}
m_currFileSrc++;
// even if we did not publish a source, reset the timer so that this list is processed
// only every KADEMLIAPUBLISHTIME seconds.
m_lastPublishKadSrc = ::GetTickCount();
}
}
}
}
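// Illustrative sketch (not part of the build): the keyword publishing above is a cap-and-rotate
// scheme, so keywords referenced by many files publish a different slice of complete files on
// each pass. Names below are hypothetical; the cap of ~150 file IDs mirrors the loop above.
#if 0
static int AddFileIDsCappedSketch(Kademlia::CSearch* pSearch, CPublishKeyword* pPubKw, int nMaxIDs)
{
	const CSimpleKnownFileArray& aFiles = pPubKw->GetReferences();
	int nAdded = 0;
	for (int f = 0; f < aFiles.GetSize(); f++)
	{
		if (aFiles[f]->IsPartFile())
			continue; // keywords are only published for complete files
		Kademlia::CUInt128 kadFileID(aFiles[f]->GetFileHash());
		pSearch->addFileID(kadFileID);
		if (++nAdded >= nMaxIDs)
		{
			pPubKw->RotateReferences(f); // remember where we stopped; the next pass starts with the rest
			break;
		}
	}
	return nAdded;
}
#endif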
void CSharedFileList::AddKeywords(CKnownFile* pFile)
{
m_keywords->AddKeywords(pFile);
}
void CSharedFileList::RemoveKeywords(CKnownFile* pFile)
{
m_keywords->RemoveKeywords(pFile);
}
void CSharedFileList::DeletePartFileInstances() const
{
// this is only allowed during shut down
ASSERT( theApp.m_app_state == APP_STATE_SHUTINGDOWN );
ASSERT( theApp.knownfiles );
POSITION pos = m_Files_map.GetStartPosition();
while (pos)
{
CCKey key;
CKnownFile* cur_file;
m_Files_map.GetNextAssoc(pos, key, cur_file);
if (cur_file->IsKindOf(RUNTIME_CLASS(CPartFile)))
{
if (!theApp.downloadqueue->IsPartFile(cur_file) && !theApp.knownfiles->IsFilePtrInList(cur_file))
delete cur_file; // this is only allowed during shut down
}
}
}
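// m_UnsharedFiles_map records the hashes of files the user explicitly removed from sharing, so
// that a later rescan of the shared directories does not silently re-share them.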
bool CSharedFileList::IsUnsharedFile(const uchar* auFileHash) const {
bool bFound;
if (auFileHash){
CSKey key(auFileHash);
if (m_UnsharedFiles_map.Lookup(key, bFound))
return true;
}
return false;
}