// 7zUpdate.cpp
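// Tail of SplitFilesToGroups(): items that carry new data are split
// between a general group and an "exe" group, so that recognized
// executables can be compressed through a branch-converter (BCJ/BCJ2)
// filter chain.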
  generalGroup.Method = method;
  int i;
  for (i = 0; i < updateItems.Size(); i++)
  {
    const CUpdateItem &ui = updateItems[i];
    if (!ui.NewData)
      continue;
    if (!ui.HasStream())
      continue;
    if (useFilters)
    {
      const UString name = ui.Name;
      int dotPos = name.ReverseFind(L'.');
      if (dotPos >= 0)
      {
        UString ext = name.Mid(dotPos + 1);
        if (IsExeFile(ext))
        {
          exeGroup.Indices.Add(i);
          continue;
        }
      }
    }
    generalGroup.Indices.Add(i);
  }
  if (exeGroup.Indices.Size() > 0)
    if (!MakeExeMethod(method, maxFilter, exeGroup.Method))
      exeGroup.Method = method;
  for (i = 0; i < groups.Size();)
    if (groups[i].Indices.Size() == 0)
      groups.Delete(i);
    else
      i++;
}
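// Copies name, attributes, timestamps and the anti/size/stream flags from
// an update item into the (CFileItem, CFileItem2) pair stored in the
// archive database.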
static void FromUpdateItemToFileItem(const CUpdateItem &ui,
    CFileItem &file, CFileItem2 &file2)
{
  file.Name = NItemName::MakeLegalName(ui.Name);
  if (ui.AttribDefined)
    file.SetAttrib(ui.Attrib);

  file2.CTime = ui.CTime;  file2.CTimeDefined = ui.CTimeDefined;
  file2.ATime = ui.ATime;  file2.ATimeDefined = ui.ATimeDefined;
  file2.MTime = ui.MTime;  file2.MTimeDefined = ui.MTimeDefined;
  file2.IsAnti = ui.IsAnti;
  file2.StartPosDefined = false;

  file.Size = ui.Size;
  file.IsDir = ui.IsDir;
  file.HasStream = ui.HasStream();
}
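// Update2 does the real work of an update in three phases: copy the
// unchanged solid folders verbatim, compress the new files into new
// folders, then record the empty files and directories.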
static HRESULT Update2(
    DECL_EXTERNAL_CODECS_LOC_VARS
    IInStream *inStream,
    const CArchiveDatabaseEx *db,
    const CObjectVector<CUpdateItem> &updateItems,
    COutArchive &archive,
    CArchiveDatabase &newDatabase,
    ISequentialOutStream *seqOutStream,
    IArchiveUpdateCallback *updateCallback,
    const CUpdateOptions &options)
{
  UInt64 numSolidFiles = options.NumSolidFiles;
  if (numSolidFiles == 0)
    numSolidFiles = 1;

  /*
  CMyComPtr<IOutStream> outStream;
  RINOK(seqOutStream->QueryInterface(IID_IOutStream, (void **)&outStream));
  if (!outStream)
    return E_NOTIMPL;
  */
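  // If the archive has data before the 7z signature (typically an SFX
  // stub), copy it to the output unless the caller asked to remove it.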
  UInt64 startBlockSize = db != 0 ? db->ArchiveInfo.StartPosition : 0;
  if (startBlockSize > 0 && !options.RemoveSfxBlock)
  {
    RINOK(WriteRange(inStream, seqOutStream, 0, startBlockSize, NULL));
  }
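  // Build a map from file index in the existing archive to the index of
  // the update item that refers to it (-1 if no update item does).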
  CRecordVector<int> fileIndexToUpdateIndexMap;
  if (db != 0)
  {
    fileIndexToUpdateIndexMap.Reserve(db->Files.Size());
    for (int i = 0; i < db->Files.Size(); i++)
      fileIndexToUpdateIndexMap.Add(-1);
  }

  int i;
  for (i = 0; i < updateItems.Size(); i++)
  {
    int index = updateItems[i].IndexInArchive;
    if (index != -1)
      fileIndexToUpdateIndexMap[index] = i;
  }
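  // Collect the folders (solid blocks) whose streams are all kept
  // unchanged; those can be copied to the new archive without repacking.
  // A folder where only some streams survive would need repacking, which
  // is not implemented here.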
  CRecordVector<int> folderRefs;
  if (db != 0)
  {
    for (i = 0; i < db->Folders.Size(); i++)
    {
      CNum indexInFolder = 0;
      CNum numCopyItems = 0;
      CNum numUnpackStreams = db->NumUnpackStreamsVector[i];
      for (CNum fileIndex = db->FolderStartFileIndex[i];
          indexInFolder < numUnpackStreams; fileIndex++)
      {
        if (db->Files[fileIndex].HasStream)
        {
          indexInFolder++;
          int updateIndex = fileIndexToUpdateIndexMap[fileIndex];
          if (updateIndex >= 0)
            if (!updateItems[updateIndex].NewData)
              numCopyItems++;
        }
      }
      if (numCopyItems != numUnpackStreams && numCopyItems != 0)
        return E_NOTIMPL; // It needs repacking !!!
      if (numCopyItems > 0)
        folderRefs.Add(i);
    }
    folderRefs.Sort(CompareFolderRefs, (void *)db);
  }
  ////////////////////////////

  RINOK(archive.Create(seqOutStream, false));
  RINOK(archive.SkeepPrefixArchiveHeader());
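  // Total progress = packed size of all copied folders + uncompressed size
  // of all new data. inSizeForReduce tracks the largest single file (or the
  // sum in solid mode) so the encoder can shrink its dictionary when the
  // input is small.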
  UInt64 complexity = 0;
  for (i = 0; i < folderRefs.Size(); i++)
    complexity += db->GetFolderFullPackSize(folderRefs[i]);
  UInt64 inSizeForReduce = 0;
  for (i = 0; i < updateItems.Size(); i++)
  {
    const CUpdateItem &ui = updateItems[i];
    if (ui.NewData)
    {
      complexity += ui.Size;
      if (numSolidFiles == 1)
      {
        if (ui.Size > inSizeForReduce)
          inSizeForReduce = ui.Size;
      }
      else
        inSizeForReduce += ui.Size;
    }
  }
  RINOK(updateCallback->SetTotal(complexity));

  complexity = 0;
  RINOK(updateCallback->SetCompleted(&complexity));

  CLocalProgress *lps = new CLocalProgress;
  CMyComPtr<ICompressProgressInfo> progress = lps;
  lps->Init(updateCallback, true);
  /////////////////////////////////////////
  // Write Copy Items
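  // Each kept folder's packed streams are copied byte-for-byte and its
  // folder/file metadata is cloned into the new database. If the caller
  // supplied new properties for a file, they replace the old ones while
  // the stream-related fields (size, CRC) are preserved.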
  for (i = 0; i < folderRefs.Size(); i++)
  {
    int folderIndex = folderRefs[i];

    lps->ProgressOffset = complexity;

    UInt64 packSize = db->GetFolderFullPackSize(folderIndex);
    RINOK(WriteRange(inStream, archive.SeqStream,
        db->GetFolderStreamPos(folderIndex, 0), packSize, progress));
    complexity += packSize;

    const CFolder &folder = db->Folders[folderIndex];
    CNum startIndex = db->FolderStartPackStreamIndex[folderIndex];
    for (int j = 0; j < folder.PackStreams.Size(); j++)
    {
      newDatabase.PackSizes.Add(db->PackSizes[startIndex + j]);
      // newDatabase.PackCRCsDefined.Add(db.PackCRCsDefined[startIndex + j]);
      // newDatabase.PackCRCs.Add(db.PackCRCs[startIndex + j]);
    }
    newDatabase.Folders.Add(folder);

    CNum numUnpackStreams = db->NumUnpackStreamsVector[folderIndex];
    newDatabase.NumUnpackStreamsVector.Add(numUnpackStreams);

    CNum indexInFolder = 0;
    for (CNum fi = db->FolderStartFileIndex[folderIndex];
        indexInFolder < numUnpackStreams; fi++)
    {
      CFileItem file;
      CFileItem2 file2;
      db->GetFile(fi, file, file2);
      if (file.HasStream)
      {
        indexInFolder++;
        int updateIndex = fileIndexToUpdateIndexMap[fi];
        if (updateIndex >= 0)
        {
          const CUpdateItem &ui = updateItems[updateIndex];
          if (ui.NewProperties)
          {
            CFileItem uf;
            FromUpdateItemToFileItem(ui, uf, file2);
            uf.Size = file.Size;
            uf.Crc = file.Crc;
            uf.CrcDefined = file.CrcDefined;
            uf.HasStream = file.HasStream;
            file = uf;
          }
        }
        newDatabase.AddFile(file, file2);
      }
    }
  }

  folderRefs.ClearAndFree();
  fileIndexToUpdateIndexMap.ClearAndFree();
  /////////////////////////////////////////
  // Compress New Files
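  // New files are split into solid groups (general vs. executables, see
  // SplitFilesToGroups above); within each group they are sorted and then
  // packed into solid blocks.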
  CObjectVector<CSolidGroup> groups;
  SplitFilesToGroups(*options.Method, options.UseFilters, options.MaxFilter,
      updateItems, groups);
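  // Never report an input size below 64 KB to the encoder, so that
  // dictionary-size reduction keeps a sane minimum.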
  const UInt32 kMinReduceSize = (1 << 16);
  if (inSizeForReduce < kMinReduceSize)
    inSizeForReduce = kMinReduceSize;

  for (int groupIndex = 0; groupIndex < groups.Size(); groupIndex++)
  {
    const CSolidGroup &group = groups[groupIndex];
    int numFiles = group.Indices.Size();
    if (numFiles == 0)
      continue;
    CRecordVector<CRefItem> refItems;
    refItems.Reserve(numFiles);
    bool sortByType = (numSolidFiles > 1);
    for (i = 0; i < numFiles; i++)
      refItems.Add(CRefItem(group.Indices[i], updateItems[group.Indices[i]], sortByType));
    refItems.Sort(CompareUpdateItems, (void *)&sortByType);

    CRecordVector<UInt32> indices;
    indices.Reserve(numFiles);
    for (i = 0; i < numFiles; i++)
    {
      UInt32 index = refItems[i].Index;
      indices.Add(index);
      /*
      const CUpdateItem &ui = updateItems[index];
      CFileItem file;
      if (ui.NewProperties)
        FromUpdateItemToFileItem(ui, file);
      else
        file = db.Files[ui.IndexInArchive];
      if (file.IsAnti || file.IsDir)
        return E_FAIL;
      newDatabase.Files.Add(file);
      */
    }

    CEncoder encoder(group.Method);
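    // Grow each solid block file by file until it would exceed the solid
    // byte limit, the solid file-count limit, or (in per-extension solid
    // mode) the next file's extension differs from the block's.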
    for (i = 0; i < numFiles;)
    {
      UInt64 totalSize = 0;
      int numSubFiles;
      UString prevExtension;
      for (numSubFiles = 0; i + numSubFiles < numFiles &&
          numSubFiles < numSolidFiles; numSubFiles++)
      {
        const CUpdateItem &ui = updateItems[indices[i + numSubFiles]];
        totalSize += ui.Size;
        if (totalSize > options.NumSolidBytes)
          break;
        if (options.SolidExtension)
        {
          UString ext = ui.GetExtension();
          if (numSubFiles == 0)
            prevExtension = ext;
          else if (ext.CompareNoCase(prevExtension) != 0)
            break;
        }
      }
      if (numSubFiles < 1)
        numSubFiles = 1;
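      // CFolderInStream (defined elsewhere) concatenates the selected
      // files, pulled from the update callback, into one sequential
      // stream that is encoded as a single solid folder.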
      CFolderInStream *inStreamSpec = new CFolderInStream;
      CMyComPtr<ISequentialInStream> solidInStream(inStreamSpec);
      inStreamSpec->Init(updateCallback, &indices[i], numSubFiles);

      CFolder folderItem;

      int startPackIndex = newDatabase.PackSizes.Size();
      RINOK(encoder.Encode(
          EXTERNAL_CODECS_LOC_VARS
          solidInStream, NULL, &inSizeForReduce, folderItem,
          archive.SeqStream, newDatabase.PackSizes, progress));

      for (; startPackIndex < newDatabase.PackSizes.Size(); startPackIndex++)
        lps->OutSize += newDatabase.PackSizes[startPackIndex];

      lps->InSize += folderItem.GetUnpackSize();
      // for()
      // newDatabase.PackCRCsDefined.Add(false);
      // newDatabase.PackCRCs.Add(0);

      newDatabase.Folders.Add(folderItem);

      CNum numUnpackStreams = 0;
      for (int subIndex = 0; subIndex < numSubFiles; subIndex++)
      {
        const CUpdateItem &ui = updateItems[indices[i + subIndex]];
        CFileItem file;
        CFileItem2 file2;
        if (ui.NewProperties)
          FromUpdateItemToFileItem(ui, file, file2);
        else
          db->GetFile(ui.IndexInArchive, file, file2);
        if (file2.IsAnti || file.IsDir)
          return E_FAIL;

        /*
        CFileItem &file = newDatabase.Files[
            startFileIndexInDatabase + i + subIndex];
        */
        if (!inStreamSpec->Processed[subIndex])
        {
          continue;
          // file.Name += L".locked";
        }

        file.Crc = inStreamSpec->CRCs[subIndex];
        file.Size = inStreamSpec->Sizes[subIndex];
        if (file.Size != 0)
        {
          file.CrcDefined = true;
          file.HasStream = true;
          numUnpackStreams++;
        }
        else
        {
          file.CrcDefined = false;
          file.HasStream = false;
        }
        newDatabase.AddFile(file, file2);
      }
      // numUnpackStreams = 0 is very bad case for locked files
      // v3.13 doesn't understand it.
      newDatabase.NumUnpackStreamsVector.Add(numUnpackStreams);
      i += numSubFiles;
    }
  }

  groups.ClearAndFree();
  {
    /////////////////////////////////////////
    // Write Empty Files & Folders

    CRecordVector<int> emptyRefs;
    for (i = 0; i < updateItems.Size(); i++)
    {
      const CUpdateItem &ui = updateItems[i];
      if (ui.NewData)
      {
        if (ui.HasStream())
          continue;
      }
      else if (ui.IndexInArchive != -1)
        if (db->Files[ui.IndexInArchive].HasStream)
          continue;
      emptyRefs.Add(i);
    }
    emptyRefs.Sort(CompareEmptyItems, (void *)&updateItems);
    for (i = 0; i < emptyRefs.Size(); i++)
    {
      const CUpdateItem &ui = updateItems[emptyRefs[i]];
      CFileItem file;
      CFileItem2 file2;
      if (ui.NewProperties)
        FromUpdateItemToFileItem(ui, file, file2);
      else
        db->GetFile(ui.IndexInArchive, file, file2);
      newDatabase.AddFile(file, file2);
    }
  }

  newDatabase.ReserveDown();
  return S_OK;
}
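// Update is the public entry point; it just forwards to Update2.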
HRESULT Update(
    DECL_EXTERNAL_CODECS_LOC_VARS
    IInStream *inStream,
    const CArchiveDatabaseEx *db,
    const CObjectVector<CUpdateItem> &updateItems,
    COutArchive &archive,
    CArchiveDatabase &newDatabase,
    ISequentialOutStream *seqOutStream,
    IArchiveUpdateCallback *updateCallback,
    const CUpdateOptions &options)
{
  return Update2(
      EXTERNAL_CODECS_LOC_VARS
      inStream, db, updateItems,
      archive, newDatabase, seqOutStream, updateCallback, options);
}

}}