📄 viewcvs.py
字号:
    return None, _EOF_LOG
  # NOTE(review): the statement above is the tail of parse_log_entry(); the
  # function's opening lines fall outside this chunk, so the nesting of this
  # fragment is a best-effort reconstruction -- confirm against the full file.
  rev = match.group(1)
  line = fp.readline()
  if not line:
    return None, _EOF_LOG
  match = _re_log_info.match(line)

  # accumulate the log message until a terminator line is seen
  eof = None
  log = ''
  while 1:
    line = fp.readline()
    if not line:
      # true end-of-file
      eof = _EOF_LOG
      break
    if line[:9] == 'branches:':
      continue
    if line == ENTRY_END_MARKER:
      break
    if line == LOG_END_MARKER:
      # end of this file's log information
      eof = _EOF_FILE
      break
    log = log + line

  if not rev or not match:
    # there was a parsing error
    return None, eof

  # parse out a time tuple for the local time
  tm = compat.cvs_strptime(match.group(1))
  try:
    date = int(time.mktime(tm)) - time.timezone
  except OverflowError:
    # it is possible that CVS recorded an "illegal" time, such as those
    # which occur during a Daylight Savings Time switchover (there is a
    # gap in the time continuum). Let's advance one hour and try again.
    # While the time isn't necessarily "correct", recall that the gap means
    # that times *should* be an hour forward. This is certainly close enough
    # for our needs.
    #
    # Note: a true overflow will simply raise an error again, which we won't
    # try to catch a second time.
    tm = tm[:3] + (tm[3] + 1,) + tm[4:]
    date = int(time.mktime(tm)) - time.timezone

  return LogEntry(rev, date,
                  # author, state, lines changed
                  match.group(2), match.group(3), match.group(5),
                  log), eof

def skip_file(fp):
  "Skip the rest of a file's log information."
  # Consume lines until this file's terminator (or raw end-of-stream).
  while 1:
    line = fp.readline()
    if not line:
      break
    if line == LOG_END_MARKER:
      break

def process_rlog_output(rlog, full_name, view_tag, fileinfo, alltags):
  "Fill in fileinfo and alltags with info from the rlog output."
# consume each file found in the resulting log while 1: revwanted = None branch = None branchpoint = None header, eof = parse_log_header(rlog) filename = header.filename head = header.head branch = header.branch symrev = header.taginfo # the rlog output is done if eof == _EOF_LOG: break if filename: # convert from absolute to relative if filename[:len(full_name)] == full_name: filename = filename[len(full_name)+1:] # for a subdir (not Attic files!), use the subdir for a key idx = string.find(filename, '/') if idx != -1 and filename[:6] != 'Attic/': info_key = filename[:idx] else: info_key = filename # an error was found regarding this file if eof == _EOF_ERROR: fileinfo[info_key] = _FILE_HAD_ERROR continue # if we hit the end of the log information (already!), then there is # nothing we can do with this file if eof: continue if not filename or not head: # parsing error. skip the rest of this file. skip_file(rlog) continue if not branch: idx = string.rfind(head, '.') branch = head[:idx] idx = string.rfind(branch, '.') if idx == -1: branch = '0.' + branch else: branch = branch[:idx] + '.0' + branch[idx:] symrev['MAIN'] = symrev['HEAD'] = branch if symrev.has_key(view_tag): revwanted = symrev[view_tag] if revwanted[:2] == '0.': ### possible? branch = revwanted[2:] else: idx = string.find(revwanted, '.0.') if idx == -1: branch = revwanted else: branch = revwanted[:idx] + revwanted[idx+2:] if revwanted != branch: revwanted = None idx = string.rfind(branch, '.') if idx == -1: branchpoint = '' else: branchpoint = branch[:idx] elif view_tag: # the tag wasn't found, so skip this file skip_file(rlog) continue # we don't care about the values -- just the keys. 
    # this is the fastest way to merge the set of keys
    alltags.update(symrev)

    # read all of the log entries until we find the revision we want
    while 1:

      # fetch one of the log entries
      entry, eof = parse_log_entry(rlog)

      if not entry:
        # parsing error
        if not eof:
          skip_file(rlog)
        break

      rev = entry.rev

      idx = string.rfind(rev, '.')
      revbranch = rev[:idx]

      if not view_tag or (not revwanted and branch == revbranch):
        # no tag requested (take the first entry, i.e. the newest), or this
        # is the first entry seen on the desired branch
        revwanted = rev

      if rev == revwanted or rev == branchpoint:
        fileinfo[info_key] = (rev, entry.date, entry.log, entry.author,
                              filename, entry.state)

        if rev == revwanted:
          # done with this file now
          if not eof:
            skip_file(rlog)
          break

      # if we hit the true EOF, or just this file's end-of-info, then we are
      # done collecting log entries.
      if eof:
        break

def get_logs(full_name, files, view_tag):
  "Run rlog over the given files, returning (fileinfo, alltags) mappings."
  if len(files) == 0:
    return { }, { }

  fileinfo = { }
  alltags = {           # all the tags seen in the files of this dir
    'MAIN' : '1',
    'HEAD' : '1',
    }

  # process the files in batches to keep the rlog command line bounded;
  # NOTE: this loop consumes (empties) the caller's 'files' list
  chunk_size = 100
  while files:
    chunk = files[:chunk_size]
    del files[:chunk_size]

    # prepend the full pathname for each file
    for i in range(len(chunk)):
      chunk[i] = full_name + '/' + chunk[i]

    if not view_tag:
      # NOTE: can't pass tag on command line since a tag may contain "-"
      #       we'll search the output for the appropriate revision

      # fetch the latest revision on the default branch
      chunk = ('-r',) + tuple(chunk)

    rlog = popen.popen(os.path.normpath(os.path.join(cfg.general.rcs_path,
                                                     'rlog')),
                       chunk, 'r')

    process_rlog_output(rlog, full_name, view_tag, fileinfo, alltags)

    ### it would be nice to verify that we got SOMETHING from rlog about
    ### each file. if we didn't, then it could be that the chunk is still
    ### too large, so we want to cut the chunk_size in half and try again.
    ###
    ### BUT: if we didn't get feedback for some *other* reason, then halving
    ### the chunk size could merely send us into a needless retry loop.
    ###
    ### more work for later...
    status = rlog.close()
    if status:
      # NOTE(review): string exception -- legal only in pre-2.6 Python
      raise 'error during rlog: '+hex(status)

  return fileinfo, alltags

def revcmp(rev1, rev2):
  "Compare two dotted revision strings numerically, component by component."
  rev1 = map(int, string.split(rev1, '.'))
  rev2 = map(int, string.split(rev2, '.'))
  return cmp(rev1, rev2)

def view_directory(request):
  "Generate the directory-listing view for the requested repository dir."
  full_name = request.full_name
  where = request.where
  query_dict = request.query_dict

  view_tag = query_dict.get('only_with_tag')
  hideattic = int(query_dict.get('hideattic'))  ### watch for errors in int()?
  sortby = query_dict.get('sortby', 'file')
  search_re = query_dict.get('search')

  # Search current directory
  if search_re and cfg.options.use_re_search:
    file_data = search_files(request, search_re)
  else:
    file_data = get_file_data(full_name)

  if cfg.options.show_subdir_lastmod:
    lastmod = get_last_modified(file_data)
  else:
    lastmod = { }

  if cfg.options.show_logs:
    subfiles = map(lambda (subfile, mtime): subfile, lastmod.values())
  else:
    subfiles = [ ]

  attic_files = [ ]
  if not hideattic or view_tag:
    # if we are not hiding the contents of the Attic dir, or we have a
    # specific tag, then the Attic may contain files/revs to display.
    # grab the info for those files, too.
    try:
      attic_files = os.listdir(full_name + '/Attic')
    except os.error:
      # no Attic subdir (or unreadable): nothing extra to show
      pass
    else:
      ### filter for just RCS files?
attic_files = map(lambda file: 'Attic/' + file, attic_files) # get all the required info rcs_files = subfiles + attic_files for file, pathname, isdir in file_data: if not isdir and pathname != _UNREADABLE_MARKER: rcs_files.append(file) fileinfo, alltags = get_logs(full_name, rcs_files, view_tag) # append the Attic files into the file_data now # NOTE: we only insert the filename and isdir==0 for file in attic_files: file_data.append((file, None, 0)) # prepare the data that will be passed to the template data = { 'where' : where, 'request' : request, 'cfg' : cfg, 'kv' : request.kv, 'current_root' : request.cvsrep, 'view_tag' : view_tag, 'sortby' : sortby, 'no_match' : None, 'unreadable' : None, 'tarball_href' : None, 'address' : cfg.general.address, 'vsn' : __version__, 'search_re' : None, 'have_logs' : None, 'sortby_file_href' : toggle_query(query_dict, 'sortby', 'file'), 'sortby_rev_href' : toggle_query(query_dict, 'sortby', 'rev'), 'sortby_date_href' : toggle_query(query_dict, 'sortby', 'date'), 'sortby_author_href' : toggle_query(query_dict, 'sortby', 'author'), 'sortby_log_href' : toggle_query(query_dict, 'sortby', 'log'), 'show_attic_href' : toggle_query(query_dict, 'hideattic', 0), 'hide_attic_href' : toggle_query(query_dict, 'hideattic', 1), 'has_tags' : ezt.boolean(alltags or view_tag), ### one day, if EZT has "or" capability, we can lose this 'selection_form' : ezt.boolean(alltags or view_tag or cfg.options.use_re_search), } # add in the CVS roots for the selection if len(cfg.general.cvs_roots) < 2: roots = [ ] else: roots = cfg.general.cvs_roots.keys() roots.sort(lambda n1, n2: cmp(string.lower(n1), string.lower(n2))) data['roots'] = roots if where: ### in the future, it might be nice to break this path up into ### a list of elements, allowing the template to display it in ### a variety of schemes. data['nav_path'] = clickable_path(request, where, 0, 0, 0) # fileinfo will be len==0 if we only have dirs and !show_subdir_lastmod. 
# in that case, we don't need the extra columns if len(fileinfo): data['have_logs'] = 'yes' if search_re: data['search_re'] = htmlify(search_re) def file_sort_cmp(data1, data2, sortby=sortby, fileinfo=fileinfo): if data1[2]: # is_directory if data2[2]: # both are directories. sort on name. return cmp(data1[0], data2[0]) # data1 is a directory, it sorts first. return -1 if data2[2]: # data2 is a directory, it sorts first. return 1 # the two files should be RCS files. drop the ",v" from the end. file1 = data1[0][:-2] file2 = data2[0][:-2] # we should have data on these. if not, then it is because we requested # a specific tag and that tag is not present on the file. info1 = fileinfo.get(file1) if info1 == _FILE_HAD_ERROR: info1 = None info2 = fileinfo.get(file2) if info2 == _FILE_HAD_ERROR: info2 = None if info1 and info2: if sortby == 'rev': result = revcmp(info1[0], info2[0]) elif sortby == 'date': result = cmp(info2[1], info1[1]) # latest date is first elif sortby == 'log': result = cmp(info1[2], info2[2]) elif sortby == 'author': result = cmp(info1[3], info2[3]) else: # sortby == 'file' ... fall thru result = 0 # return for unequal values; or fall thru for secondary-sort on name if result: return result # sort by file name if file1[:6] == 'Attic/': file1 = file1[6:] if file2[:6] == 'Attic/': file2 = file2[6:] return cmp(file1, file2) # sort with directories first, and using the "sortby" criteria file_data.sort(file_sort_cmp) num_files = 0 num_displayed = 0 unreadable = 0 ### display a row for ".." ? rows = data['rows'] = [ ] for file, pathname, isdir in file_data: row = _item(href=None, graph_href=None, author=None, log=None, log_file=None, log_rev=None, show_log=None, state=None) if pathname == _UNREADABLE_MARKER: if isdir is None: # We couldn't even stat() the file to figure out what it is. 
        slash = ''
      elif isdir:
        slash = '/'
      else:
        slash = ''
        file = file[:-2]        # strip the ,v
        num_displayed = num_displayed + 1

      # emit a row flagged as unreadable and move on to the next item
      row.anchor = file
      row.name = file + slash
      row.type = 'unreadable'
      rows.append(row)

      unreadable = 1
      continue

    if isdir:
      if not hideattic and file == 'Attic':
        continue
      if where == '' and ((file == 'CVSROOT' and cfg.options.hide_cvsroot)
                          or cfg.is_forbidden(file)):
        continue
      if file == 'CVS': # CVS directory in a repository is used for fileattr.
        continue

      # NOTE(review): view_directory() continues beyond this chunk
      url = urllib.quote(file) + '/' + request.qmark_query
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -