// 7zupdate.cpp
    bind.InCoder = 1;
    bind.OutStream = 0;
    exeMethod.Binds.Add(bind);
  }
  return true;
}
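
// SplitFilesToGroups partitions the new items into two solid groups: a
// general group compressed with the caller's method, and an "exe" group
// (recognized by file extension) that gets the executable filter chain
// built by MakeExeMethod above. Filtering is skipped entirely if the base
// method is already a multi-coder chain.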
static void SplitFilesToGroups(
    const CCompressionMethodMode &method,
    bool useFilters, bool maxFilter,
    const CObjectVector<CUpdateItem> &updateItems,
    CObjectVector<CSolidGroup> &groups)
{
  if (method.Methods.Size() != 1 || method.Binds.Size() != 0)
    useFilters = false;
  groups.Clear();
  groups.Add(CSolidGroup());
  groups.Add(CSolidGroup());
  CSolidGroup &generalGroup = groups[0];
  CSolidGroup &exeGroup = groups[1];
  generalGroup.Method = method;
  int i;
  for (i = 0; i < updateItems.Size(); i++)
  {
    const CUpdateItem &updateItem = updateItems[i];
    if (!updateItem.NewData)
      continue;
    if (!updateItem.HasStream())
      continue;
    if (useFilters)
    {
      const UString name = updateItem.Name;
      int dotPos = name.ReverseFind(L'.');
      if (dotPos >= 0)
      {
        UString ext = name.Mid(dotPos + 1);
        if (IsExeFile(ext))
        {
          exeGroup.Indices.Add(i);
          continue;
        }
      }
    }
    generalGroup.Indices.Add(i);
  }
  if (exeGroup.Indices.Size() > 0)
    if (!MakeExeMethod(method, maxFilter, exeGroup.Method))
      exeGroup.Method = method;
  for (i = 0; i < groups.Size();)
    if (groups[i].Indices.Size() == 0)
      groups.Delete(i);
    else
      i++;
}
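
// Copies the updatable properties (name, attributes, timestamps, size,
// directory/anti/stream flags) from an update request into a database
// CFileItem.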
static void FromUpdateItemToFileItem(const CUpdateItem &updateItem,
    CFileItem &file)
{
  file.Name = NItemName::MakeLegalName(updateItem.Name);
  if (updateItem.AttributesAreDefined)
    file.SetAttributes(updateItem.Attributes);
  // if (updateItem.CreationTimeIsDefined)
  //   file.SetCreationTime(updateItem.ItemInfo.CreationTime);
  if (updateItem.LastWriteTimeIsDefined)
    file.SetLastWriteTime(updateItem.LastWriteTime);
  file.UnPackSize = updateItem.Size;
  file.IsDirectory = updateItem.IsDirectory;
  file.IsAnti = updateItem.IsAnti;
  file.HasStream = updateItem.HasStream();
}
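
// Update rewrites the archive in five passes: copy the SFX prefix block,
// record which existing folders can be copied verbatim, emit empty
// files/directories, copy the unchanged folders, then compress the new
// data into fresh solid blocks and write the final database.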
HRESULT Update(const NArchive::N7z::CArchiveDatabaseEx &database,
    CObjectVector<CUpdateItem> &updateItems,
    IOutStream *outStream,
    IInStream *inStream,
    NArchive::N7z::CInArchiveInfo *inArchiveInfo,
    const CCompressionMethodMode &method,
    const CCompressionMethodMode *headerMethod,
    bool useFilters,
    bool maxFilter,
    bool useAdditionalHeaderStreams,
    bool compressMainHeader,
    IArchiveUpdateCallback *updateCallback,
    UINT64 numSolidFiles, UINT64 numSolidBytes, bool solidExtension,
    bool removeSfxBlock)
{
  if (numSolidFiles == 0)
    numSolidFiles = 1;
  UINT64 startBlockSize = inArchiveInfo != 0 ?
      inArchiveInfo->StartPosition : 0;
  if (startBlockSize > 0 && !removeSfxBlock)
  {
    CLimitedSequentialInStream *streamSpec = new CLimitedSequentialInStream;
    CMyComPtr<ISequentialInStream> limitedStream(streamSpec);
    RINOK(inStream->Seek(0, STREAM_SEEK_SET, NULL));
    streamSpec->Init(inStream, startBlockSize);
    RINOK(CopyBlock(limitedStream, outStream, NULL));
  }
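
  // Build a reverse map from database file index to update-item index;
  // -1 marks files that have no corresponding update request.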
  CRecordVector<int> fileIndexToUpdateIndexMap;
  fileIndexToUpdateIndexMap.Reserve(database.Files.Size());
  int i;
  for (i = 0; i < database.Files.Size(); i++)
    fileIndexToUpdateIndexMap.Add(-1);
  for (i = 0; i < updateItems.Size(); i++)
  {
    int index = updateItems[i].IndexInArchive;
    if (index != -1)
      fileIndexToUpdateIndexMap[index] = i;
  }
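
  // Decide, per existing folder, whether it can be copied as-is. A folder
  // qualifies only if all of its unpack streams are kept unchanged; a
  // partially updated folder would need repacking, which this code does
  // not implement (hence the E_NOTIMPL below).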
  CRecordVector<CFolderRef> folderRefs;
  for (i = 0; i < database.Folders.Size(); i++)
  {
    UINT64 indexInFolder = 0;
    UINT64 numCopyItems = 0;
    UINT64 numUnPackStreams = database.NumUnPackStreamsVector[i];
    for (int fileIndex = database.FolderStartFileIndex[i];
        indexInFolder < numUnPackStreams; fileIndex++)
    {
      if (database.Files[fileIndex].HasStream)
      {
        indexInFolder++;
        int updateIndex = fileIndexToUpdateIndexMap[fileIndex];
        if (updateIndex >= 0)
          if (!updateItems[updateIndex].NewData)
            numCopyItems++;
      }
    }
    if (numCopyItems != numUnPackStreams && numCopyItems != 0)
      return E_NOTIMPL; // It needs repacking !!!
    if (numCopyItems > 0)
    {
      CFolderRef folderRef;
      folderRef.Database = &database;
      folderRef.FolderIndex = i;
      folderRefs.Add(folderRef);
    }
  }
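
  // Sort the copyable folders. CompareFolderRefs is defined elsewhere in
  // this file; presumably it orders them by position in the input archive
  // so the copy pass below reads sequentially.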
  qsort(&folderRefs.Front(), folderRefs.Size(), sizeof(folderRefs[0]),
      CompareFolderRefs);
  CArchiveDatabase newDatabase;

  /////////////////////////////////////////
  // Write Empty Files & Folders

  CRecordVector<const CUpdateItem *> emptyRefs;
  for (i = 0; i < updateItems.Size(); i++)
  {
    const CUpdateItem &updateItem = updateItems[i];
    if (updateItem.NewData)
    {
      if (updateItem.HasStream())
        continue;
    }
    else
      if (updateItem.IndexInArchive != -1)
        if (database.Files[updateItem.IndexInArchive].HasStream)
          continue;
    emptyRefs.Add(&updateItem);
  }
  qsort(&emptyRefs.Front(), emptyRefs.Size(), sizeof(emptyRefs[0]),
      CompareEmptyItems);
  for (i = 0; i < emptyRefs.Size(); i++)
  {
    const CUpdateItem &updateItem = *emptyRefs[i];
    CFileItem file;
    if (updateItem.NewProperties)
      FromUpdateItemToFileItem(updateItem, file);
    else
      file = database.Files[updateItem.IndexInArchive];
    newDatabase.Files.Add(file);
  }
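
  // Create the output archive and skip past the space reserved for the
  // start header ("Skeep" is the original identifier's spelling). Then
  // precompute the total work for progress reporting: packed size of
  // copied folders plus unpacked size of new data.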
  ////////////////////////////

  COutArchive archive;
  archive.Create(outStream);
  RINOK(archive.SkeepPrefixArchiveHeader());
  UINT64 complexity = 0;
  for (i = 0; i < folderRefs.Size(); i++)
    complexity += database.GetFolderFullPackSize(folderRefs[i].FolderIndex);
  for (i = 0; i < updateItems.Size(); i++)
  {
    const CUpdateItem &updateItem = updateItems[i];
    if (updateItem.NewData)
      complexity += updateItem.Size;
  }
  RINOK(updateCallback->SetTotal(complexity));
  complexity = 0;
  RINOK(updateCallback->SetCompleted(&complexity));
  /////////////////////////////////////////
  // Write Copy Items

  for (i = 0; i < folderRefs.Size(); i++)
  {
    int folderIndex = folderRefs[i].FolderIndex;
    RINOK(WriteRange(inStream, archive.Stream,
        CUpdateRange(database.GetFolderStreamPos(folderIndex, 0),
        database.GetFolderFullPackSize(folderIndex)),
        updateCallback, complexity));
    const CFolder &folder = database.Folders[folderIndex];
    UINT32 startIndex = database.FolderStartPackStreamIndex[folderIndex];
    int j;
    for (j = 0; j < folder.PackStreams.Size(); j++)
    {
      newDatabase.PackSizes.Add(database.PackSizes[startIndex + j]);
      // newDatabase.PackCRCsDefined.Add(database.PackCRCsDefined[startIndex + j]);
      // newDatabase.PackCRCs.Add(database.PackCRCs[startIndex + j]);
    }
    newDatabase.Folders.Add(folder);
    UINT64 numUnPackStreams = database.NumUnPackStreamsVector[folderIndex];
    newDatabase.NumUnPackStreamsVector.Add(numUnPackStreams);
    UINT64 indexInFolder = 0;
    for (j = database.FolderStartFileIndex[folderIndex];
        indexInFolder < numUnPackStreams; j++)
    {
      CFileItem file = database.Files[j];
      if (file.HasStream)
      {
        indexInFolder++;
        int updateIndex = fileIndexToUpdateIndexMap[j];
        if (updateIndex >= 0)
        {
          const CUpdateItem &updateItem = updateItems[updateIndex];
          if (updateItem.NewProperties)
          {
            CFileItem file2;
            FromUpdateItemToFileItem(updateItem, file2);
            file2.UnPackSize = file.UnPackSize;
            file2.FileCRC = file.FileCRC;
            file2.FileCRCIsDefined = file.FileCRCIsDefined;
            file2.HasStream = file.HasStream;
            file = file2;
          }
        }
        newDatabase.Files.Add(file);
      }
    }
  }
  /////////////////////////////////////////
  // Compress New Files
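
  // Each solid group is compressed independently: its items are sorted
  // (CompareUpdateItems), registered in newDatabase, and then encoded in
  // one or more solid blocks below.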
  CObjectVector<CSolidGroup> groups;
  SplitFilesToGroups(method, useFilters, maxFilter, updateItems, groups);
  for (int groupIndex = 0; groupIndex < groups.Size(); groupIndex++)
  {
    const CSolidGroup &group = groups[groupIndex];
    int numFiles = group.Indices.Size();
    if (numFiles == 0)
      continue;
    CRecordVector<CRefItem> refItems;
    refItems.Reserve(numFiles);
    for (i = 0; i < numFiles; i++)
      refItems.Add(CRefItem(group.Indices[i],
          updateItems[group.Indices[i]], numSolidFiles > 1));
    qsort(&refItems.Front(), refItems.Size(), sizeof(refItems[0]), CompareUpdateItems);
    CRecordVector<UINT32> indices;
    indices.Reserve(numFiles);
    int startFileIndexInDatabase = newDatabase.Files.Size();
    for (i = 0; i < numFiles; i++)
    {
      UINT32 index = refItems[i].Index;
      indices.Add(index);
      const CUpdateItem &updateItem = updateItems[index];
      CFileItem file;
      if (updateItem.NewProperties)
        FromUpdateItemToFileItem(updateItem, file);
      else
        file = database.Files[updateItem.IndexInArchive];
      if (file.IsAnti || file.IsDirectory)
        return E_FAIL;
      newDatabase.Files.Add(file);
    }
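
    // Chunk the sorted items into solid blocks: a block is closed when it
    // reaches numSolidFiles files, would exceed numSolidBytes of unpacked
    // data, or (with solidExtension) when the file extension changes.
    // At least one file always goes into a block, even an oversized one.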
    CEncoder encoder(group.Method);
    for (i = 0; i < numFiles;)
    {
      UINT64 totalSize = 0;
      int numSubFiles;
      UString prevExtension;
      for (numSubFiles = 0; i + numSubFiles < numFiles &&
          numSubFiles < numSolidFiles; numSubFiles++)
      {
        const CUpdateItem &updateItem = updateItems[indices[i + numSubFiles]];
        totalSize += updateItem.Size;
        if (totalSize > numSolidBytes)
          break;
        if (solidExtension)
        {
          UString ext = updateItem.GetExtension();
          if (numSubFiles == 0)
            prevExtension = ext;
          else
            if (ext.CollateNoCase(prevExtension) != 0)
              break;
        }
      }
      if (numSubFiles < 1)
        numSubFiles = 1;
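
      // Encode one solid block: CFolderInStream concatenates the
      // numSubFiles input streams (pulled via updateCallback) into a
      // single stream, and the encoder writes the packed data directly to
      // archive.Stream while filling in folderItem and the pack sizes.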
      CFolderInStream *inStreamSpec = new CFolderInStream;
      CMyComPtr<ISequentialInStream> solidInStream(inStreamSpec);
      inStreamSpec->Init(updateCallback, &indices[i], numSubFiles);
      CFolder folderItem;
      CLocalProgress *localProgressSpec = new CLocalProgress;
      CMyComPtr<ICompressProgressInfo> localProgress = localProgressSpec;
      localProgressSpec->Init(updateCallback, true);
      CLocalCompressProgressInfo *localCompressProgressSpec = new CLocalCompressProgressInfo;
      CMyComPtr<ICompressProgressInfo> compressProgress = localCompressProgressSpec;
      localCompressProgressSpec->Init(localProgress, &complexity, NULL);
      RINOK(encoder.Encode(solidInStream, NULL, folderItem,
          archive.Stream, newDatabase.PackSizes, compressProgress));
      // for()
      //   newDatabase.PackCRCsDefined.Add(false);
      //   newDatabase.PackCRCs.Add(0);
      newDatabase.Folders.Add(folderItem);
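
      // Patch the per-file metadata now that the block is written:
      // CFolderInStream collected each sub-file's CRC and size during
      // encoding. Zero-length files are recorded as having no stream.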
      UINT32 numUnPackStreams = 0;
      for (int subIndex = 0; subIndex < numSubFiles; subIndex++)
      {
        CFileItem &file = newDatabase.Files[
            startFileIndexInDatabase + i + subIndex];
        file.FileCRC = inStreamSpec->CRCs[subIndex];
        file.UnPackSize = inStreamSpec->Sizes[subIndex];
        if (file.UnPackSize != 0)
        {
          file.FileCRCIsDefined = true;
          file.HasStream = true;
          numUnPackStreams++;
          complexity += file.UnPackSize;
        }
        else
        {
          file.FileCRCIsDefined = false;
          file.HasStream = false;
        }
      }
      newDatabase.NumUnPackStreamsVector.Add(numUnPackStreams);
      i += numSubFiles;
    }
  }
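
  // Sanity check: every update item must have produced exactly one entry
  // in the new database before the header is written.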
  if (newDatabase.Files.Size() != updateItems.Size())
    return E_FAIL;
  return archive.WriteDatabase(newDatabase, headerMethod,
      useAdditionalHeaderStreams, compressMainHeader);
}
}} // closes the enclosing namespaces (presumably NArchive::N7z)