archive.php
<?php
/* vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4: */
/**
* Factory to access the most common File_Archive features.
* It uses lazy includes, so you don't have to include the files from the
* File/Archive/* directories yourself
*
* PHP versions 4 and 5
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* @category File Formats
* @package File_Archive
* @author Vincent Lascaux <vincentlascaux@php.net>
* @copyright 1997-2005 The PHP Group
* @license http://www.gnu.org/copyleft/lesser.html LGPL
* @version CVS: $Id: Archive.php,v 1.1 2006/07/15 21:30:24 cvs Exp $
* @link http://pear.php.net/package/File_Archive
*/
/**
* Provides access to PEAR::isError and PEAR::raiseError.
* We should probably use a lazy include and remove this inclusion...
*/
require_once PEAR_DIR."PEAR.php";
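/**
* Callback that tells whether a Cache_Lite file can be deleted (presumably
* used with Cache_Lite's 'callback_...' clean mode). With fileNameProtection
* disabled (see setOption below), Cache_Lite names its files
* cache_<group>_<id>; an entry may only be removed if its group starts with
* 'FileArchive' and the urldecoded <id> (the path of the file the entry was
* built from) no longer exists on disk
*/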
function File_Archive_cleanCache($file, $group)
{
$file = split('_', $file);
if (count($file) != 3) {
return false; //not a File_Archive file, keep it
}
$name = $file[2];
$name = urldecode($name);
$group = $file[1];
//clean the cache only for files in File_Archive groups
return substr($group, 0, 11) == 'FileArchive' &&
!file_exists($name); //and only if the related file no longer exists
}
/**
* Factory to access the most common File_Archive features
* It uses lazy includes, so you don't have to include the files from the
* File/Archive/* directories yourself
*/
class File_Archive
{
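/**
* Returns a reference to the entry $name in a static option container.
* Because the container is declared static, every call sees the same array,
* so setOption() can modify (through the returned reference) the value that
* getOption() will later return
*
* @access private
*/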
function& _option($name)
{
static $container = array(
'zipCompressionLevel' => 9,
'gzCompressionLevel' => 9,
'tmpDirectory' => '.',
'cache' => null,
'appendRemoveDuplicates' => false,
'blockSize' => 65536,
'cacheCondition' => false
);
return $container[$name];
}
/**
* Sets an option that will be used by default by all readers and writers.
* Option names are case sensitive
* Currently, the following options are used:
*
* "cache"
* Instance of a Cache_Lite object used to cache some compressed
* data to speed up future compressions of files
* Default: null (no cache used)
*
* "zipCompressionLevel"
* Value between 0 and 9 specifying the default compression level used
* by Zip writers (0 no compression, 9 highest compression)
* Default: 9
*
* "gzCompressionLevel"
* Value between 0 and 9 specifying the default compression level used
* by Gz writers (0 no compression, 9 highest compression)
* Default: 9
*
* "tmpDirectory"
* Directory where the temporary files generated by File_Archive will
* be created
* Default: '.'
*
* "appendRemoveDuplicates"
* If set to true, the appenders created will by default remove any file
* already present in the archive when adding a new one with the same name.
* This slows down the appending of files to archives
* Default: false
*
* "blockSize"
* To transfer data from a reader to a writer, chunks are read from the
* source and written to the writer. This parameter controls the size of the
* chunks
* Default: 64kB
*
* "cacheCondition"
* This parameter specifies when a cache should be used. When the cache is
* used, the data of the reader is saved in a temporary file for future access.
* The source will be read only once, even if you read the cached reader several times.
* This can be useful to read compressed files or downloaded files (from http or ftp)
* The possible values for this option are
* - false: never use cache
* - a regexp: a cache will be used if the specified URL matches the regexp
* (preg_match is used to test it)
* Default: false
* Example: '/^(http|ftp):\/\//' will cache all files downloaded via http or ftp
*
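* A minimal usage sketch (the Cache_Lite instance and the paths are made-up
* examples; assumes the PEAR Cache_Lite package is installed):
* <pre>
* require_once 'Cache/Lite.php';
* $cache = new Cache_Lite(array('cacheDir' => '/tmp/'));
* File_Archive::setOption('cache', $cache);
* File_Archive::setOption('tmpDirectory', '/tmp');
* File_Archive::setOption('cacheCondition', '/^(http|ftp):\/\//');
* </pre>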
*/
function setOption($name, $value)
{
$option =& File_Archive::_option($name);
$option = $value;
if ($name == 'cache' && $value !== null) {
//Disable Cache_Lite file name protection (see File_Archive_cleanCache above)
//TODO: ask Cache_Lite to allow that
$value->_fileNameProtection = false;
}
}
/**
* Retrieve the value of an option
*/
function getOption($name)
{
return File_Archive::_option($name);
}
/**
* Create a reader to read the URL $URL.
* If the URL is a directory, it will recursively read that directory.
* If $uncompressionLevel is not 0, the archives (files with extension
* tar, zip, gz or tgz) will be considered as directories (up to a depth of
* $uncompressionLevel if $uncompressionLevel > 0, without limit if it is -1).
* The reader will only read files up to a directory depth of $directoryDepth
* (-1 means no limit). The reader will replace the given URL ($URL) with
* $symbolic in the public filenames.
* The default symbolic name is the last filename in the URL (or '' for
* directories)
*
* Examples:
* Consider the following file system
* <pre>
* a.txt
* b.tar (archive that contains the following files)
*     c.txt
*     d.tgz (archive that contains the following files)
*         e.txt
*         dir1/
*             f.txt
* dir2/
*     g.txt
*     dir3/
*         h.tar (archive that contains the following files)
*             i.txt
* </pre>
*
* read('.') will return a reader that gives access to the following
* files (recursively reading the current dir):
* <pre>
* a.txt
* b.tar
* dir2/g.txt
* dir2/dir3/h.tar
* </pre>
*
* read('.', 'myBaseDir') will return the following reader:
* <pre>
* myBaseDir/a.txt
* myBaseDir/b.tar
* myBaseDir/dir2/g.txt
* myBaseDir/dir2/dir3/h.tar
* </pre>
*
* read('.', '', -1) will return the following reader (uncompress
* everything)
* <pre>
* a.txt
* b.tar/c.txt
* b.tar/d.tgz/e.txt
* b.tar/d.tgz/dir1/f.txt
* dir2/g.txt
* dir2/dir3/h.tar/i.txt
* </pre>
*
* read('.', '', 1) will uncompress only one level (so d.tgz will
* not be uncompressed):
* <pre>
* a.txt
* b.tar/c.txt
* b.tar/d.tgz
* dir2/g.txt
* dir2/dir3/h.tar/i.txt
* </pre>
*
* read('.', '', 0, 0) will not recurse into subdirectories
* <pre>
* a.txt
* b.tar
* </pre>
*
* read('.', '', 0, 1) will recurse only one level in
* subdirectories
* <pre>
* a.txt
* b.tar
* dir2/g.txt
* </pre>
*
* read('.', '', -1, 2) will uncompress everything and recurse in
* only 2 levels in subdirectories or archives
* <pre>
* a.txt
* b.tar/c.txt
* b.tar/d.tgz/e.txt
* dir2/g.txt
* </pre>
*
* The recursion level is determined by the real path, not the symbolic one.
* So read('.', 'myBaseDir', -1, 2) will result in the same files:
* <pre>
* myBaseDir/a.txt
* myBaseDir/b.tar/c.txt
* myBaseDir/b.tar/d.tgz/e.txt (accepted because the real depth is 2)
* myBaseDir/dir2/g.txt
* </pre>
*
* Use readSource to do the same thing, reading from a specified reader instead of
* reading from the file system
*
* To read a single file, you can do read('a.txt', 'public_name.txt').
* If no public name is provided, the default one is the name of the file:
* read('dir2/g.txt') contains the single file named 'g.txt'
* read('b.tar/c.txt') contains the single file named 'c.txt'
*
* Note: This function decides whether to uncompress a file based on its
* extension: the compressed files must have a tar, zip, gz or tgz extension.
* Since is_dir and is_file cannot be used on some URLs, this function may
* not work with URLs containing folders whose name ends with such an
* extension
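*
* A minimal usage sketch: filter() and predEreg() are the factory methods
* used later in this file, and next()/getFilename() are assumed from the
* File_Archive_Reader class, which is not shown here
* <pre>
* require_once PEAR_DIR.'File/Archive.php';
* //List the .txt files under the current directory, looking inside
* //archives as well (-1 means uncompress everything)
* $reader = File_Archive::filter(
*     File_Archive::predEreg('\.txt$'),
*     File_Archive::read('.', '', -1)
* );
* while ($reader->next()) {
*     echo $reader->getFilename() . "\n";
* }
* </pre>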
*/
function readSource(&$source, $URL, $symbolic = null,
$uncompression = 0, $directoryDepth = -1)
{
return File_Archive::_readSource($source, $URL, $reachable, $baseDir,
$symbolic, $uncompression, $directoryDepth);
}
/**
* This function behaves exactly like readSource, but with two additional parameters
* ($reachable and $baseDir) that will be set so that $reachable."/".$baseDir == $URL
* and $reachable can be reached (in case of error)
*
* @access private
*/
function _readSource(&$toConvert, $URL, &$reachable, &$baseDir, $symbolic = null,
$uncompression = 0, $directoryDepth = -1)
{
$source =& File_Archive::_convertToReader($toConvert);
if (PEAR::isError($source)) {
return $source;
}
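//An array of URLs (or of readers) is converted entry by entry and combined
//into a single reader using readMulti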
if (is_array($URL)) {
$converted = array();
foreach($URL as $key => $foo) {
$converted[] =& File_Archive::_convertToReader($URL[$key]);
}
return File_Archive::readMulti($converted);
}
//No need to uncompress more than $directoryDepth
//That's not perfect, and some archives will still be uncompressed just
//to be filtered out :(
if ($directoryDepth >= 0) {
$uncompressionLevel = min($uncompression, $directoryDepth);
} else {
$uncompressionLevel = $uncompression;
}
require_once PEAR_DIR.'File/Archive/Reader.php';
$std = File_Archive_Reader::getStandardURL($URL);
//Modify the symbolic name if necessary
$slashPos = strrpos($std, '/');
if ($symbolic === null) {
if ($slashPos === false) {
$realSymbolic = $std;
} else {
$realSymbolic = substr($std, $slashPos+1);
}
} else {
$realSymbolic = $symbolic;
}
if ($slashPos !== false) {
$baseFile = substr($std, 0, $slashPos+1);
$lastFile = substr($std, $slashPos+1);
} else {
$baseFile = '';
$lastFile = $std;
}
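//Example: for an $URL of 'dir/file.tar', $baseFile is 'dir/', $lastFile is
//'file.tar' and, unless $symbolic was given, $realSymbolic is 'file.tar'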
if (strpos($lastFile, '*')!==false ||
strpos($lastFile, '?')!==false) {
//We have to build a regexp here
$regexp = str_replace(
array('\*', '\?'),
array('[^/]*', '[^/]'),
preg_quote($lastFile)
);
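//e.g. a $lastFile of 'data*.txt' is first quoted to 'data\*\.txt' and then
//rewritten to 'data[^/]*\.txt', anchored below with ^ and $ for predEreg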
$result = File_Archive::_readSource($source, $baseFile,
$reachable, $baseDir, null, 0, -1);
return File_Archive::filter(
File_Archive::predEreg('^'.$regexp.'$'),
$result
);
}
//If the URL can be interpreted as a directory, and we are reading from the file system
if ((empty($URL) || is_dir($URL)) && $source === null) {
require_once PEAR_DIR."File/Archive/Reader/Directory.php";
require_once PEAR_DIR."File/Archive/Reader/ChangeName.php";
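//When uncompression is requested, wrap the directory reader in an
//Uncompress reader so that the archives met while walking the directory
//are exposed as subdirectories (up to $uncompressionLevel levels)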
if ($uncompressionLevel != 0) {
require_once PEAR_DIR."File/Archive/Reader/Uncompress.php";
$result = new File_Archive_Reader_Uncompress(
new File_Archive_Reader_Directory($std, '', $directoryDepth),
$uncompressionLevel
);
} else {
$result = new File_Archive_Reader_Directory($std, '', $directoryDepth);
}
if ($directoryDepth >= 0) {
require_once PEAR_DIR.'File/Archive/Reader/Filter.php';
require_once PEAR_DIR.'File/Archive/Predicate/MaxDepth.php';