
📄 viewcvs.py

📁 Source-display and RCS log parsing code excerpted from ViewCVS (viewcvs.py), a CVS repository browser written in Python
💻 PY
📖 Page 1 of 5
    'file_url' : urllib.quote(filename),
    'rev' : rev,
    'qquery' : request.qmark_query,
    }

def copy_stream(fp):
  while 1:
    chunk = fp.read(CHUNK_SIZE)
    if not chunk:
      break
    sys.stdout.write(chunk)

def markup_stream_default(fp):
  print '<pre>'
  while 1:
    ### technically, the htmlify() could fail if something falls across
    ### the chunk boundary. TFB.
    chunk = fp.read(CHUNK_SIZE)
    if not chunk:
      break
    sys.stdout.write(htmlify(chunk))
  print '</pre>'

def markup_stream_python(fp):
  try:
    # see if Marc-Andre Lemburg's py2html stuff is around
    # http://starship.python.net/crew/lemburg/SoftwareDescriptions.html#py2html.py
    ### maybe restrict the import to *only* this directory?
    sys.path.insert(0, cfg.options.py2html_path)
    import py2html
    import PyFontify
  except ImportError:
    # fall back to the default streamer
    markup_stream_default(fp)
  else:
    ### it doesn't escape stuff quite right, nor does it munge URLs and
    ### mailtos as well as we do.
    html = cgi.escape(fp.read())
    pp = py2html.PrettyPrint(PyFontify.fontify, "rawhtml", "color")
    html = pp.fontify(html)
    html = re.sub(_re_rewrite_url, r'<a href="\1">\1</a>', html)
    html = re.sub(_re_rewrite_email, r'<a href="mailto:\1">\1</a>', html)
    sys.stdout.write(html)

def markup_stream_enscript(lang, fp):
  sys.stdout.flush()
  enscript = popen.pipe_cmds([(os.path.normpath(os.path.join(cfg.options.enscript_path,'enscript')),
                               '--color', '-W', 'html', '-E' + lang, '-o',
                               '-', '-'),
                              ('sed', '-n', '/^<PRE>$/,/<\\/PRE>$/p')])

  try:
    while 1:
      chunk = fp.read(CHUNK_SIZE)
      if not chunk:
        if fp.eof() is None:
          time.sleep(1)
          continue
        break
      enscript.write(chunk)
  except IOError, v:
    print "<h3>Failure during use of an external program:</h3>"
    print "<pre>"
    print os.path.normpath(os.path.join(cfg.options.enscript_path,'enscript')) + " --color -W html -E"+lang+" -o - -"
    print "</pre>"
    raise

  enscript.close()

markup_streamers = {
#  '.py' : markup_stream_python,
  }

### this sucks... we have to duplicate the extensions defined by enscript
enscript_extensions = {
  '.C' : 'cpp',
  '.EPS' : 'postscript',
  '.DEF' : 'modula_2',  # requires a patch for enscript 1.6.2, see INSTALL
  '.F' : 'fortran',
  '.for': 'fortran',
  '.H' : 'cpp',
  '.MOD' : 'modula_2',  # requires a patch for enscript 1.6.2, see INSTALL
  '.PS' : 'postscript',
  '.S' : 'asm',
  '.ada' : 'ada',
  '.adb' : 'ada',
  '.ads' : 'ada',
  '.awk' : 'awk',
  '.c' : 'c',
  '.c++' : 'cpp',
  '.cc' : 'cpp',
  '.cpp' : 'cpp',
  '.cxx' : 'cpp',
  '.dpr' : 'delphi',
  '.el' : 'elisp',
  '.eps' : 'postscript',
  '.f' : 'fortran',
  '.gs' : 'haskell',
  '.h' : 'c',
  '.hs' : 'haskell',
  '.htm' : 'html',
  '.html' : 'html',
  '.idl' : 'idl',
  '.java' : 'java',
  '.js' : 'javascript',
  '.lgs' : 'haskell',
  '.lhs' : 'haskell',
  '.m' : 'objc',
  '.p' : 'pascal',
  # classic setting:
  # '.pas' : 'pascal',
  # most people using pascal today are using the Delphi system originally
  # brought to us as Turbo-Pascal during the eighties of the last century:
  '.pas' : 'delphi',
  # ---
  '.pl' : 'perl',
  '.pm' : 'perl',
  '.pp' : 'pascal',
  '.ps' : 'postscript',
  '.s' : 'asm',
  '.scheme' : 'scheme',
  '.scm' : 'scheme',
  '.scr' : 'synopsys',
  '.sh' : 'sh',
  '.shtml' : 'html',
  '.st' : 'states',
  '.syn' : 'synopsys',
  '.synth' : 'synopsys',
  '.tcl' : 'tcl',
  '.v' : 'verilog',
  '.vba' : 'vba',
  '.vh' : 'verilog',
  '.vhd' : 'vhdl',
  '.vhdl' : 'vhdl',

  ### use enscript or py2html?
  '.py' : 'python',
  }

enscript_filenames = {
  '.emacs' : 'elisp',
  'Makefile' : 'makefile',
  'makefile' : 'makefile',
  }

def markup_stream(request, fp, revision, mime_type):
  full_name = request.full_name
  where = request.where
  query_dict = request.query_dict

  pathname, filename = os.path.split(where)
  if pathname[-6:] == '/Attic':
    pathname = pathname[:-6]
  file_url = urllib.quote(filename)

  data = nav_header_data(request, pathname, filename, revision)
  data.update({
    'request' : request,
    'cfg' : cfg,
    'vsn' : __version__,
    'kv' : request.kv,
    'nav_file' : clickable_path(request, where, 1, 1, 0),
    'href' : download_url(request, file_url, revision, None),
    'text_href' : download_url(request, file_url, revision, 'text/plain'),
    'mime_type' : request.mime_type,
    'log' : None,
    })

  if cfg.options.show_log_in_markup:
    show_revs, rev_map, rev_order, taginfo, rev2tag, \
               cur_branch, branch_points, branch_names = read_log(full_name)
    entry = rev_map[revision]

    idx = string.rfind(revision, '.')
    branch = revision[:idx]

    data.update({
      'utc_date' : time.asctime(time.gmtime(entry.date)),
      'ago' : html_time(request, entry.date, 1),
      'author' : entry.author,
      'branches' : None,
      'tags' : None,
      'branch_points' : None,
      'changed' : entry.changed,
      'log' : htmlify(entry.log),
      'state' : entry.state,
      'vendor_branch' : ezt.boolean(_re_is_vendor_branch.match(revision)),
      })

    if rev2tag.has_key(branch):
      data['branches'] = string.join(rev2tag[branch], ', ')
    if rev2tag.has_key(revision):
      data['tags'] = string.join(rev2tag[revision], ', ')
    if branch_points.has_key(revision):
      data['branch_points'] = string.join(branch_points[revision], ', ')

    prev_rev = string.split(revision, '.')
    while 1:
      if prev_rev[-1] == '0':     # .0 can be caused by 'commit -r X.Y.Z.0'
        prev_rev = prev_rev[:-2]  # X.Y.Z.0 becomes X.Y.Z
      else:
        prev_rev[-1] = str(int(prev_rev[-1]) - 1)
      prev = string.join(prev_rev, '.')
      if rev_map.has_key(prev) or prev == '':
        break
    data['prev'] = prev
  else:
    data['tag'] = query_dict.get('only_with_tag')

  http_header()
  generate_page(request, cfg.templates.markup, data)

  if mime_type[:6] == 'image/':
    url = download_url(request, file_url, revision, mime_type)
    print '<img src="%s"><br>' % url
    while fp.read(8192):
      pass
  else:
    basename, ext = os.path.splitext(filename)
    streamer = markup_streamers.get(ext)
    if streamer:
      streamer(fp)
    elif not cfg.options.use_enscript:
      markup_stream_default(fp)
    else:
      lang = enscript_extensions.get(ext)
      if not lang:
        lang = enscript_filenames.get(basename)
      if lang and lang not in cfg.options.disable_enscript_lang:
        markup_stream_enscript(lang, fp)
      else:
        markup_stream_default(fp)

  status = fp.close()
  if status:
    raise 'pipe error status: %d' % status

  html_footer(request)

def get_file_data(full_name):
  """Return a sequence of tuples containing various data about the files.

  data[0] = (relative) filename
  data[1] = full pathname
  data[2] = is_directory (0/1)

  Only RCS files (*,v) and subdirs are returned.
  """
  files = os.listdir(full_name)
  return get_file_tests(full_name, files)

def get_file_tests(full_name, files):
  data = [ ]

  uid = os.getuid()
  gid = os.getgid()

  for file in files:
    pathname = full_name + '/' + file
    try:
      info = os.stat(pathname)
    except os.error:
      data.append((file, _UNREADABLE_MARKER, None))
      continue
    mode = info[stat.ST_MODE]
    isdir = stat.S_ISDIR(mode)
    isreg = stat.S_ISREG(mode)
    if (isreg and file[-2:] == ',v') or isdir:
      #
      # Quick version of access() where we use existing stat() data.
      #
      # This might not be perfect -- the OS may return slightly different
      # results for some bizarre reason. However, we make a good show of
      # "can I read this file/dir?" by checking the various perm bits.
      #
      # NOTE: if the UID matches, then we must match the user bits -- we
      # cannot defer to group or other bits. Similarly, if the GID matches,
      # then we must have read access in the group bits.
      #
      # If the UID or GID don't match, we need to check the
      # results of an os.access() call, in case the web server process
      # is in the group that owns the directory.
      #
      if isdir:
        mask = stat.S_IROTH | stat.S_IXOTH
      else:
        mask = stat.S_IROTH

      valid = 1

      if info[stat.ST_UID] == uid:
        if ((mode >> 6) & mask) != mask:
          valid = 0
      elif info[stat.ST_GID] == gid:
        if ((mode >> 3) & mask) != mask:
          valid = 0
      # If the process running the web server is a member of the group
      # stat.ST_GID, access may still be granted, so fall back to
      # os.access() to figure that out. Note: os.access() returns a
      # boolean, never -1, so test its result directly.
      elif ((mode & mask) != mask) and not os.access(pathname, os.R_OK):
        valid = 0

      if valid:
        data.append((file, pathname, isdir))
      else:
        data.append((file, _UNREADABLE_MARKER, isdir))

  return data

def get_last_modified(file_data):
  """Return mapping of subdir to info about the most recently modified subfile.

  key     = subdir
  data[0] = "subdir/subfile" of the most recently modified subfile
  data[1] = the mod time of that file (time_t)
  """
  lastmod = { }
  for file, pathname, isdir in file_data:
    if not isdir or pathname == _UNREADABLE_MARKER:
      continue
    if file == 'Attic':
      continue
    subfiles = os.listdir(pathname)
    latest = ('', 0)
    for subfile in subfiles:
      ### filter CVS locks? stale NFS handles?
      if subfile[-2:] != ',v':
        continue
      subpath = pathname + '/' + subfile
      info = os.stat(subpath)
      if not stat.S_ISREG(info[stat.ST_MODE]):
        continue
      if info[stat.ST_MTIME] > latest[1]:
        latest = (file + '/' + subfile, info[stat.ST_MTIME])
    if latest[0]:
      lastmod[file] = latest

  return lastmod

def parse_log_header(fp):
  """Parse an RCS/CVS log header.

  fp is a file (pipe) opened for reading the log information.

  On entry, fp should point to the start of a log entry.
  On exit, fp will have consumed the separator line between the header and
  the first revision log.

  If there is no revision information (e.g. the "-h" switch was passed to
  rlog), then fp will have consumed the file separator line on exit.
  """
  filename = head = branch = None
  taginfo = { }         # tag name => revision

  parsing_tags = 0
  eof = None

  while 1:
    line = fp.readline()
    if not line:
      # the true end-of-file
      eof = _EOF_LOG
      break

    if parsing_tags:
      if line[0] == '\t':
        [ tag, rev ] = map(string.strip, string.split(line, ':'))
        taginfo[tag] = rev
      else:
        # oops. this line isn't tag info. stop parsing tags.
        parsing_tags = 0

    if not parsing_tags:
      if line[:9] == 'RCS file:':
        # remove the trailing ,v
        filename = line[10:-3]
      elif line[:5] == 'head:':
        head = line[6:-1]
      elif line[:7] == 'branch:':
        branch = line[8:-1]
      elif line[:14] == 'symbolic names':
        # start parsing the tag information
        parsing_tags = 1
      elif line == ENTRY_END_MARKER:
        # end of the headers
        break
      elif line == LOG_END_MARKER:
        # end of this file's log information
        eof = _EOF_FILE
        break
      elif line[:6] == 'rlog: ':
        # rlog: filename/goes/here,v: error message
        idx = string.find(line, ':', 6)
        if idx != -1:
          if line[idx:idx+32] == ': warning: Unknown phrases like ':
            # don't worry about this warning. it can happen with some RCS
            # files that have unknown fields in them (e.g. "permissions 644;")
            continue

          # looks like a filename
          filename = line[6:idx]
          if filename[-2:] == ',v':
            filename = filename[:-2]
          return LogHeader(filename), _EOF_ERROR
        # dunno what this is

  return LogHeader(filename, head, branch, taginfo), eof

_re_date_author = re.compile(r'^date:\s+([^;]+);\s+author:\s+([^;]+);.*')
_re_log_info = re.compile(r'^date:\s+([^;]+);'
                          r'\s+author:\s+([^;]+);'
                          r'\s+state:\s+([^;]+);'
                          r'(\s+lines:\s+([0-9\s+-]+))?\n$')

### _re_rev should be updated to extract the "locked" flag
_re_rev = re.compile(r'^revision\s+([0-9.]+).*')

def parse_log_entry(fp):
  """Parse a single log entry.

  On entry, fp should point to the first line of the entry (the "revision"
  line).  On exit, fp will have consumed the log separator line (dashes) or
  the end-of-file marker (equals).

  Returns: revision, date (time_t secs), author, state, lines changed,
  the log text, and eof flag (see _EOF_*)
  """
  rev = None

  line = fp.readline()
  if not line:
    return None, _EOF_LOG
  if line[:8] == 'revision':
    match = _re_rev.match(line)
    if not match:
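
The excerpt continues on the following pages. For readers skimming page 1, the renderer selection inside markup_stream() above boils down to: use a Python-side streamer registered for the file's extension if one exists, otherwise hand the file to GNU enscript when it maps the extension (or filename) to a language that is not disabled, and fall back to a plain <pre> dump in every other case. The sketch below is illustrative only: the helper name pick_renderer and the trimmed module-level tables are stand-ins for markup_streamers, enscript_extensions, enscript_filenames and the cfg.options flags, and are not part of viewcvs.py.

# Illustrative, self-contained sketch of the streamer dispatch; names here
# (pick_renderer, use_enscript, disable_enscript_lang) are stand-ins, not
# identifiers from the original file.
import os

markup_streamers = { }                       # e.g. {'.py': markup_stream_python}
enscript_extensions = { '.c': 'c', '.py': 'python', '.html': 'html' }
enscript_filenames = { 'Makefile': 'makefile', '.emacs': 'elisp' }
use_enscript = 1                             # mirrors cfg.options.use_enscript
disable_enscript_lang = []                   # mirrors cfg.options.disable_enscript_lang

def pick_renderer(filename):
  basename, ext = os.path.splitext(filename)
  streamer = markup_streamers.get(ext)
  if streamer:
    return streamer                          # Python-side colorizer, if registered
  if not use_enscript:
    return 'default'                         # plain <pre> output
  lang = enscript_extensions.get(ext)
  if not lang:
    lang = enscript_filenames.get(basename)  # extensionless files like Makefile
  if lang and lang not in disable_enscript_lang:
    return 'enscript:' + lang                # pipe the file through enscript
  return 'default'

print(pick_renderer('viewcvs.py'))           # -> enscript:python
print(pick_renderer('Makefile'))             # -> enscript:makefile
print(pick_renderer('README'))               # -> default

Keeping these tables in viewcvs.py duplicates enscript's own extension list (which the "### this sucks" comment laments), but it is what lets cfg.options.disable_enscript_lang veto individual languages without querying enscript itself.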
