unzip.c
then jump back to the central directory. In the case of a large zipfile
this would lead to a whole lot of disk-grinding, especially if each mem-
ber file is small. Instead, we read from the central directory the per-
tinent information for a block of files, then go extract/test the whole
block. Thus this routine contains two small(er) loops within a very
large outer loop: the first of the small ones reads a block of files
from the central directory; the second extracts or tests each file; and
the outer one loops over blocks. There's some file-pointer positioning
stuff in between, but that's about it. Btw, it's because of this jump-
ing around that we can afford to be lenient if an error occurs in one of
the member files: we should still be able to go find the other members,
since we know the offset of each from the beginning of the zipfile.
Begin main loop over blocks of member files. We know the entire central
directory is on this disk: we would not have any of this information un-
less the end-of-central-directory record was on this disk, and we would
not have gotten to this routine unless this is also the disk on which
the central directory starts. In practice, this had better be the ONLY
disk in the archive, but maybe someday we'll add multi-disk support.
---------------------------------------------------------------------------*/
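    /* In the loop below, each central-directory entry is read in turn, its name
       is mapped to a path under attach_point, and the member is registered in
       the in-memory hash tables along with the offset of its local header. */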
    members_remaining = ecrec.total_entries_central_dir;
    //filelistcount = 0;
    //filelistmax = members_remaining + 30;
    //files = new direntry [ filelistmax ];
    while( members_remaining-- )
    {
        if( readbuf( sig, 4, arc ) <= 0 )
        {
            error_in_archive = PK_EOF;
            SAY_ERROR( RES_ERR_BAD_ARCHIVE, filename );
            break;
        }
        if( strncmp( sig, central_hdr_sig, 4 ))   /* just to make sure */
        {
            error_in_archive = PK_BADERR;
            SAY_ERROR( RES_ERR_BAD_ARCHIVE, filename );
            break;
        }
        /* process_cdir_file_hdr() sets pInfo->hostnum, pInfo->lcflag */
        if(( error = process_cdir_file_hdr( &crec, arc )) != PK_COOL ) {
            error_in_archive = error;   /* only PK_EOF defined */
            SAY_ERROR( RES_ERR_BAD_ARCHIVE, filename );
            break;
        }
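        /* Read the variable-length fields that follow the fixed central-directory
           header: the filename is kept; the extra field and file comment are skipped. */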
        if(( error = do_string( crec.filename_length, FILENAME, curfilename, arc )) != PK_COOL ) {
            if( error > error_in_archive )
                error_in_archive = error;
            if( error > PK_WARN ) {   /* fatal: no more left to do */
                UNZIP_ERROR = RES_ERR_UNKNOWN;
                SAY_ERROR( RES_ERR_BAD_ARCHIVE, filename );
                break;
            }
        }
        if(( error = do_string( crec.extra_field_length, SKIP, NULL, arc )) != PK_COOL ) {
            if( error > error_in_archive )
                error_in_archive = error;
            if( error > PK_WARN ) {   /* fatal: bail now */
                UNZIP_ERROR = RES_ERR_UNKNOWN;
                SAY_ERROR( RES_ERR_BAD_ARCHIVE, filename );
                break;
            }
        }
        if(( error = do_string( crec.file_comment_length, SKIP, NULL, arc )) != PK_COOL ) {
            if( error > error_in_archive )
                error_in_archive = error;
            if( error > PK_WARN ) {   /* fatal: bail now */
                UNZIP_ERROR = RES_ERR_UNKNOWN;
                SAY_ERROR( RES_ERR_BAD_ARCHIVE, filename );
                break;
            }
        }
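        /* Build the full target path under attach_point, convert any forward
           slashes to backslashes, and remember where the bare filename begins. */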
        fname = curfilename;
        path_idx = 0;   /* see if directory has changed */
        if( attach_point[ strlen(attach_point)-1 ] != ASCII_BACKSLASH )
            sprintf( path, "%s\\%s", attach_point, curfilename );
        else
            sprintf( path, "%s%s", attach_point, curfilename );
        len = strlen( path );
        for( i=len; i>2; i-- )
        {
            if( path[i-1] == ASCII_FORESLASH )
            {
                if( !path_idx )
                {
                    path_idx = i-1;
                    fname = &path[ path_idx+1 ];
                }
                path[i-1] = ASCII_BACKSLASH;
            }
        }
        strcpy( data.name, fname );   /* use a dummy to add entry to the hash table */
        // (wrong -->) dir_was = GLOBAL_SEARCH_INDEX - 1;
        // KBR 9/4/97 - fixed.
        // dir_was was being set based on the CWD rather than the attach point.
        i = 0; dir_was = 0;
        do {
            if( !stricmp( GLOBAL_SEARCH_PATH[i], attach_point )) {
                dir_was = i;
                break;
            }
        } while( i++ < GLOBAL_SEARCH_INDEX );
        vol_was = (char)(toupper(path[0]) - 'A');
#if( !RES_USE_FLAT_MODEL )
        /* See if there is a new directory name.  If so, we need to create a new
           hash table, add this path into the global hash table, and continue
           adding files into the new table.  This is assuming you're building the
           hierarchical model, of course. */
        path[ path_idx+1 ] = '\0';
        path[ path_idx+2 ] = '\0';
        if( path_idx && strcmp( path, path_was ))   /* new directory! */
        {
            strcpy( path_was, path );
            /* see if it already exists */
            /* RES_LOCK( GLOBAL_HASH_TABLE ); GFG */
            entry = hash_find( path, GLOBAL_HASH_TABLE );
            if( entry )
            {
                table = (HASH_TABLE *)entry -> dir;
                if( !table )
                    break;
            }
            else
            {
#if( RES_DEBUG_VERSION )
                if( GLOBAL_SEARCH_INDEX >= (MAX_DIRECTORIES-1)) {
                    assert(!"Exceeded MAX_DIRECTORIES as defined in omni.h");
                    // SAY_ERROR( RES_ERR_TOO_MANY_DIRECTORIES, "ResAddPath" );
                    return( FALSE );
                }
#endif
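                /* Create a hash table for the new directory and register the
                   directory itself as a dummy entry in the global table. */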
                table = hash_create( ARCHIVE_TABLE_SIZE, path );
                strcpy( info.name, path );   /* insert a dummy entry into the global hash table */
                info.attrib = _A_SUBDIR | (unsigned int)FORCE_BIT;
                info.time_create = 0;
                info.time_access = 0;
                info.size = 0;
                entry = hash_add( &info, GLOBAL_HASH_TABLE );
                if( !entry )
                    break;
                entry -> archive = -1;   /* the actual directory existence should not be
                                            considered as part of the archive.  All of the
                                            contents found within the directory are.  This
                                            allows a hard-disk-based file to override a
                                            zip archive */
                entry -> volume = vol_was;
                entry -> directory = dir_was;
                GLOBAL_PATH_LIST = LIST_APPEND( GLOBAL_PATH_LIST, table );
                GLOBAL_SEARCH_PATH[ GLOBAL_SEARCH_INDEX ] = MemStrDup( path );
                dir_was = GLOBAL_SEARCH_INDEX++;
                entry -> dir = table;
            }
            /* RES_UNLOCK( GLOBAL_HASH_TABLE ); GFG */
        }
#endif /* !RES_USE_FLAT_MODEL */
        if( !(*data.name))
            continue;   /* this is usually a directory entry, which we'll decipher later */
#if( RES_REJECT_EMPTY_FILES )
        if( !crec.csize )
            continue;
#endif
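        /* Fill in the dummy direntry for this member and add it to the directory's
           table, unless a same-named entry already exists and replacement is disabled. */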
        data.attrib = (unsigned int)FORCE_BIT;
        data.time_create = 0;
        data.time_access = 0;
        data.size = crec.csize;
        /* RES_LOCK( table ); GFG */
        entry = hash_find( data.name, table );   /* see if an entry already exists */
        if( !entry )
            entry = hash_add( &data, table );    /* if not, create one */
        else   /* there is already a file with the same name here! */
            if( !replace_flag )
                continue;
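        /* Record the member's catalog data: the offset of its local header, its
           compression method and sizes, and the volume/directory resolved above. */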
        entry -> file_position = crec.relative_offset_local_header;
        //entry -> file_position = crec.relative_offset_local_header + 4 + LREC_SIZE + crec.filename_length;
        //if( crec.extra_field_length )
        //    entry -> file_position += crec.extra_field_length + 4 /* ?4? */;
        entry -> method = crec.compression_method;
        entry -> size = crec.ucsize;
        entry -> csize = crec.csize;
        entry -> archive = arc -> os_handle;
        entry -> volume = vol_was;
        entry -> directory = dir_was;
        /* RES_UNLOCK( table ); GFG */
    }
    if( error > PK_WARN )   /* if error occurred, see if user ejected media during long inflation job */
        ResCheckMedia( toupper(filename[0]) - 'A' );
#ifdef USE_SH_POOLS
    MemFreePtr( arc -> tmp_in_buffer );
    MemFreePtr( arc -> tmp_slide );
#else
    MemFree( arc -> tmp_in_buffer );
    MemFree( arc -> tmp_slide );
#endif
    return( arc );
}
/* =======================================================
    FUNCTION:   archive_delete
    PURPOSE:    Destructor for archive_create.  Destroy
                an archive that was previously created
                using archive_create.
    PARAMS:     Ptr to an archive.
    RETURNS:    None.
   ======================================================= */
void archive_delete( ARCHIVE * arc )
{
    _close( arc -> os_handle );
    DESTROY_LOCK( arc -> lock );
#ifdef USE_SH_POOLS
    MemFreePtr( arc );
#else
    MemFree( arc );
#endif
}
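/* Typical pairing, as a minimal sketch (archive_create() is defined elsewhere
   in this module; its exact signature and the "resource.zip" name are assumed
   here purely for illustration):

       ARCHIVE * arc = archive_create( "resource.zip" );
       if( arc )
       {
           ...                       use the entries added to the hash tables
           archive_delete( arc );    close the handle, destroy the lock, free arc
       }
*/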
/* =======================================================
    FUNCTION:   archive_size
    PURPOSE:    For large archives, it's more efficient
                to scan the entire file, count the
                number of entries, and then do a
                single hash_resize than it is to
                call hash_resize iteratively.
    PARAMS:     Archive ptr.
    RETURNS:    Number of entries in file.
   ======================================================= */
int archive_size( ARCHIVE * arc )
{
    char sig[5];
    int error = 0,
        error_in_archive = 0;
    int count;
    ecdir_rec ecrec;              /* used in unzip.c, extract.c */
    // int filnum=(-1);
    ush members_remaining;//,
    //  num_skipped = 0,
    //  num_bad_pwd = 0;
    char curfilename[FILNAMSIZ];
    cdir_file_hdr crec;           /* used in unzip.c, extract.c, misc.c */
/*---------------------------------------------------------------------------
Start by constructing the various PK signature strings.
---------------------------------------------------------------------------*/
    local_hdr_sig[0] = '\120';    /* ASCII 'P', */
    central_hdr_sig[0] = '\120';
    end_central_sig[0] = '\120';  /* not EBCDIC */
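    /* Only the leading 'P' is stored at run time; the remaining bytes of each
       signature string are presumed to be initialized statically elsewhere. */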
    if(( error_in_archive = find_end_central_dir( MIN( (arc -> length), 66000L ), &ecrec, arc )) != 0 )
        return( -1 );
    if( UNZIP_LSEEK( ecrec.offset_start_central_directory, arc ))
        return( -1 );
    members_remaining = ecrec.total_entries_central_dir;
    count = 0;
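    /* Walk the central directory once, counting entries with a non-empty name
       (directory placeholders are skipped), so the caller can size its hash
       table in a single resize. */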
    while( members_remaining-- )
    {
        if( readbuf( sig, 4, arc ) <= 0 )
        {
            error_in_archive = PK_EOF;
            break;
        }
        if( strncmp( sig, central_hdr_sig, 4 ))   /* just to make sure */
        {
            error_in_archive = PK_BADERR;
            break;
        }
        /* process_cdir_file_hdr() sets pInfo->hostnum, pInfo->lcflag */
        if(( error = process_cdir_file_hdr( &crec, arc )) != PK_COOL )
        {
            error_in_archive = error;   /* only PK_EOF defined */
            break;
        }
        if(( error = do_string( crec.filename_length, FILENAME, curfilename, arc )) != PK_COOL )
        {
            if( error > error_in_archive )
                error_in_archive = error;
            if( error > PK_WARN )   /* fatal: no more left to do */
            {
                UNZIP_ERROR = RES_ERR_UNKNOWN;
                break;
            }
        }
        if(( error = do_string( crec.file_comment_length, SKIP, NULL, arc )) != PK_COOL )
        {
            if( error > error_in_archive )
                error_in_archive = error;
            if( error > PK_WARN )   /* fatal: bail now */
            {
                UNZIP_ERROR = RES_ERR_UNKNOWN;
                break;
            }
        }
        if( !(*curfilename))
            continue;   /* this is usually a directory entry, which we'll decipher later */
        count++;
    }