cvs2cl.pl
    open (LOG_OUT, ">$tmpfile") or die "Unable to open \"$tmpfile\"";
  }
  else {
    open (LOG_OUT, ">-") or die "Unable to open stdout for writing";
  }

  print LOG_OUT $ChangeLog_Header;

  if ($XML_Output) {
    my $encoding    = length $XML_Encoding ? qq'encoding="$XML_Encoding"' : '';
    my $version     = 'version="1.0"';
    my $declaration = sprintf '<?xml %s?>', join ' ', grep length, $version, $encoding;
    my $root        = '<changelog xmlns="http://www.red-bean.com/xmlns/cvs2cl/">';
    print LOG_OUT "$declaration\n\n$root\n\n";
  }

  my @key_list = ();
  if ($Chronological_Order) {
    @key_list = sort {$main::a <=> $main::b} (keys %changelog);
  }
  else {
    @key_list = sort {$main::b <=> $main::a} (keys %changelog);
  }

  foreach my $time (@key_list)
  {
    next if ($Delta_Mode &&
             (($time <= $Delta_StartTime) ||
              ($time > $Delta_EndTime && $Delta_EndTime)));

    # Set up the date/author line.
    # kff todo: do some more XML munging here, on the header
    # part of the entry:
    my ($ignore,$min,$hour,$mday,$mon,$year,$wday)
        = $UTC_Times ? gmtime($time) : localtime($time);

    # XML output includes everything else, we might as well make
    # it always include Day Of Week too, for consistency.
    if ($Show_Day_Of_Week or $XML_Output) {
      $wday = ("Sunday", "Monday", "Tuesday", "Wednesday",
               "Thursday", "Friday", "Saturday")[$wday];
      $wday = ($XML_Output) ? "<weekday>${wday}</weekday>\n" : " $wday";
    }
    else {
      $wday = "";
    }

    my $authorhash = $changelog{$time};
    if ($Show_Tag_Dates) {
      my %tags;
      while (my ($author,$mesghash) = each %$authorhash) {
        while (my ($msg,$qunk) = each %$mesghash) {
          foreach my $qunkref2 (@$qunk) {
            if (defined ($$qunkref2{'tags'})) {
              foreach my $tag (@{$$qunkref2{'tags'}}) {
                $tags{$tag} = 1;
              }
            }
          }
        }
      }
      foreach my $tag (keys %tags) {
        if (!defined $tag_date_printed{$tag}) {
          $tag_date_printed{$tag} = $time;
          if ($XML_Output) {
            # NOT YET DONE
          }
          else {
            if ($Show_Times) {
              printf LOG_OUT ("%4u-%02u-%02u${wday} %02u:%02u tag %s\n\n",
                              $year+1900, $mon+1, $mday, $hour, $min, $tag);
            }
            else {
              printf LOG_OUT ("%4u-%02u-%02u${wday} tag %s\n\n",
                              $year+1900, $mon+1, $mday, $tag);
            }
          }
        }
      }
    }

    while (my ($author,$mesghash) = each %$authorhash)
    {
      # If XML, escape in outer loop to avoid compound quoting:
      if ($XML_Output) {
        $author = &xml_escape ($author);
      }

      FOOBIE:
      # We sort here to enable predictable ordering for the testing porpoises
      for my $msg (sort keys %$mesghash)
      {
        my $qunklist = $mesghash->{$msg};

        ## MJP: 19.xii.01 : Exclude @ignore_tags
        for my $ignore_tag (keys %ignore_tags) {
          next FOOBIE
            if grep($_ eq $ignore_tag,
                    map(@{$_->{tags}}, grep(defined $_->{tags}, @$qunklist)));
        }
        ## MJP: 19.xii.01 : End exclude @ignore_tags

        # show only files with tag --show-tag $show_tag
        if ( keys %show_tags ) {
          next FOOBIE
            if !grep(exists $show_tags{$_},
                     map(@{$_->{tags}}, grep(defined $_->{tags}, @$qunklist)));
        }

        my $files = &pretty_file_list ($qunklist);
        my $header_line;   # date and author
        my $body;          # see below
        my $wholething;    # $header_line + $body

        if ($XML_Output) {
          $header_line =
              sprintf ("<date>%4u-%02u-%02u</date>\n"
                       . "${wday}"
                       . "<time>%02u:%02u</time>\n"
                       . "<author>%s</author>\n",
                       $year+1900, $mon+1, $mday, $hour, $min, $author);
        }
        else {
          if ($Show_Times) {
            $header_line =
                sprintf ("%4u-%02u-%02u${wday} %02u:%02u %s\n\n",
                         $year+1900, $mon+1, $mday, $hour, $min, $author);
          }
          else {
            $header_line =
                sprintf ("%4u-%02u-%02u${wday} %s\n\n",
                         $year+1900, $mon+1, $mday, $author);
          }
        }

        $Text::Wrap::huge = 'overflow'
            if $Text::Wrap::VERSION >= 2001.0130;

        # Reshape the body according to user preferences.
        if ($XML_Output)
        {
          $msg = &preprocess_msg_text ($msg);
          $body = $files . $msg;
        }
        elsif ($No_Wrap && !$Summary)
        {
          $msg = &preprocess_msg_text ($msg);
          $files = wrap ("\t", " ", "$files");
          $msg =~ s/\n(.*)/\n\t$1/g;
          unless ($After_Header eq " ") {
            $msg =~ s/^(.*)/\t$1/g;
          }
          $body = $files . $After_Header . $msg;
        }
        elsif ($Summary)
        {
          my( $filelist, $qunk );
          my( @DeletedQunks, @AddedQunks, @ChangedQunks );

          $msg = &preprocess_msg_text ($msg);

          #
          # Sort the files (qunks) according to the operation that was
          # performed.  Files which were added have no line change
          # indicator, whereas deleted files have state dead.
          #
          foreach $qunk ( @$qunklist )
          {
            if ( "dead" eq $qunk->{'state'})
            {
              push( @DeletedQunks, $qunk );
            }
            elsif ( !exists( $qunk->{'lines'}))
            {
              push( @AddedQunks, $qunk );
            }
            else
            {
              push( @ChangedQunks, $qunk );
            }
          }

          #
          # The qunks list was originally in tree search order.  Let's
          # get that back.  The lists, if they exist, will be reversed
          # upon processing.
          #

          #
          # Now write the three sections onto $filelist
          #
          if ( @DeletedQunks )
          {
            $filelist .= "\tDeleted:\n";
            foreach $qunk ( @DeletedQunks )
            {
              $filelist .= "\t\t" . $qunk->{'filename'};
              $filelist .= " (" . $qunk->{'revision'} . ")";
              $filelist .= "\n";
            }
            undef( @DeletedQunks );
          }

          if ( @AddedQunks )
          {
            $filelist .= "\tAdded:\n";
            foreach $qunk ( @AddedQunks )
            {
              $filelist .= "\t\t" . $qunk->{'filename'};
              $filelist .= " (" . $qunk->{'revision'} . ")";
              $filelist .= "\n";
            }
            undef( @AddedQunks );
          }

          if ( @ChangedQunks )
          {
            $filelist .= "\tChanged:\n";
            foreach $qunk ( @ChangedQunks )
            {
              $filelist .= "\t\t" . $qunk->{'filename'};
              $filelist .= " (" . $qunk->{'revision'} . ")";
              $filelist .= ", \"" . $qunk->{'state'} . "\"";
              $filelist .= ", lines: " . $qunk->{'lines'};
              $filelist .= "\n";
            }
            undef( @ChangedQunks );
          }

          chomp( $filelist );

          $msg =~ s/\n(.*)/\n\t$1/g;
          unless ($After_Header eq " ") {
            $msg =~ s/^(.*)/\t$1/g;
          }
          $body = $filelist . $After_Header . $msg;
        }
        else  # do wrapping, either FSF-style or regular
        {
          if ($FSF_Style)
          {
            $files = wrap ("\t", " ", "$files");

            my $files_last_line_len = 0;
            if ($After_Header eq " ")
            {
              $files_last_line_len = &last_line_len ($files);
              $files_last_line_len += 1;  # for $After_Header
            }

            $msg = &wrap_log_entry ($msg, "\t", 69 - $files_last_line_len, 69);
            $body = $files . $After_Header . $msg;
          }
          else  # not FSF-style
          {
            $msg = &preprocess_msg_text ($msg);
            $body = $files . $After_Header . $msg;
            $body = wrap ("\t", " ", "$body");
          }
        }

        $wholething = $header_line . $body;

        if ($XML_Output) {
          $wholething = "<entry>\n${wholething}</entry>\n";
        }

        # One last check: make sure it passes the regexp test, if the
        # user asked for that.  We have to do it here, so that the
        # test can match against information in the header as well
        # as in the text of the log message.

        # How annoying to duplicate so much code just because I
        # can't figure out a way to evaluate scalars on the trailing
        # operator portion of a regular expression.  Grrr.
        if ($Case_Insensitive) {
          unless ($Regexp_Gate && ($wholething !~ /$Regexp_Gate/oi)) {
            print LOG_OUT "${wholething}\n";
          }
        }
        else {
          unless ($Regexp_Gate && ($wholething !~ /$Regexp_Gate/o)) {
            print LOG_OUT "${wholething}\n";
          }
        }
      }
    }
  }

  if ($XML_Output) {
    print LOG_OUT "</changelog>\n";
  }

  close (LOG_OUT);

  if (! $Output_To_Stdout)
  {
    # If accumulating, append old data to new before renaming.  But
    # don't append the most recent entry, since it's already in the
    # new log due to CVS's idiosyncratic interpretation of "log -d".
    if ($Cumulative && -f $logfile_here)
    {
      open (NEW_LOG, ">>$tmpfile")
          or die "trouble appending to $tmpfile ($!)";

      open (OLD_LOG, "<$logfile_here")
          or die "trouble reading from $logfile_here ($!)";

      my $started_first_entry = 0;
      my $passed_first_entry = 0;

      while (<OLD_LOG>)
      {
        if (! $passed_first_entry)
        {
          if ((! $started_first_entry)
              && /^(\d\d\d\d-\d\d-\d\d\s+\d\d:\d\d)/) {
            $started_first_entry = 1;
          }
          elsif (/^(\d\d\d\d-\d\d-\d\d\s+\d\d:\d\d)/) {
            $passed_first_entry = 1;
            print NEW_LOG $_;
          }
        }
        else {
          print NEW_LOG $_;
        }
      }

      close (NEW_LOG);
      close (OLD_LOG);
    }

    if (-f $logfile_here) {
      rename ($logfile_here, $logfile_bak);
    }

    rename ($tmpfile, $logfile_here);
  }
}
}

sub parse_date_author_and_state ()
{
  # Parses the date/time and author out of a line like:
  #
  # date: 1999/02/19 23:29:05; author: apharris; state: Exp;

  my $line = shift;

  my ($year, $mon, $mday, $hours, $min, $secs, $author, $state, $rest) =
      $line =~
          m#(\d+)/(\d+)/(\d+)\s+(\d+):(\d+):(\d+);\s+author:\s+([^;]+);\s+state:\s+([^;]+);(.*)#
              or die "Couldn't parse date ``$line''";

  die "Bad date or Y2K issues"
      unless ($year > 1969 and $year < 2258);  # Kinda arbitrary, but useful as a sanity check

  my $time = timegm($secs,$min,$hours,$mday,$mon-1,$year-1900);

  my $lines;
  if ( $rest =~ m#\s+lines:\s+(.*)# )
  {
    $lines = $1;
  }

  return ($time, $author, $state, $lines);
}

# Here we take a bunch of qunks and convert them into printed
# summary that will include all the information the user asked for.
sub pretty_file_list ()
{
  if ($Hide_Filenames and (! $XML_Output)) {
    return "";
  }

  my $qunksref = shift;

  my @qunkrefs =
    grep +((! exists $_->{'tags'} or
            ! grep exists $ignore_tags{$_}, @{$_->{'tags'}})
           and
           (! keys %show_tags or
            (exists $_->{'tags'} and
             grep exists $show_tags{$_}, @{$_->{'tags'}}))
          ),
    @$qunksref;

  my @filenames;
  my $beauty = "";          # The accumulating header string for this entry.
  my %non_unanimous_tags;   # Tags found in a proper subset of qunks
  my %unanimous_tags;       # Tags found in all qunks
  my %all_branches;         # Branches found in any qunk
  my $common_dir = undef;   # Dir prefix common to all files ("" if none)
  my $fbegun = 0;           # Did we begin printing filenames yet?

  # First, loop over the qunks gathering all the tag/branch names.
  # We'll put them all in non_unanimous_tags, and take out the
  # unanimous ones later.
  QUNKREF:
  foreach my $qunkref (@qunkrefs)
  {
    # Keep track of whether all the files in this commit were in the
    # same directory, and memorize it if so.  We can make the output a
    # little more compact by mentioning the directory only once.
    if ($Common_Dir && (scalar (@qunkrefs)) > 1)
    {
      if (! (defined ($common_dir)))
      {
        my ($base, $dir);
        ($base, $dir, undef) = fileparse ($$qunkref{'filename'});

        if ((! (defined ($dir)))  # this first case is sheer paranoia
            or ($dir eq "")
            or ($dir eq "./")
            or ($dir eq ".\\"))
        {
          $common_dir = "";
        }
        else
        {
          $common_dir = $dir;
        }
      }
      elsif ($common_dir ne "")
      {
        # Already have a common dir prefix, so how much of it can we preserve?
        $common_dir = &common_path_prefix ($$qunkref{'filename'}, $common_dir);
      }
    }
    else  # only one file in this entry anyway, so common dir not an issue
    {
      $common_dir = "";
    }

    if (defined ($$qunkref{'branch'})) {
      $all_branches{$$qunkref{'branch'}} = 1;
    }
    if (defined ($$qunkref{'tags'})) {
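The regular expression in parse_date_author_and_state() above does most of the work of turning a raw "rlog" header line into structured fields. The following standalone snippet is a minimal sketch of that same match plus the timegm() conversion, run against the sample line quoted in the sub's own comment; it is illustrative only and not part of cvs2cl.pl, and it assumes only the core Time::Local module that the script itself uses.

#!/usr/bin/perl
# Standalone sketch of cvs2cl.pl's date/author/state parsing (not part of the script).
use strict;
use warnings;
use Time::Local;

# Sample input, taken verbatim from the comment in parse_date_author_and_state().
my $line = "date: 1999/02/19 23:29:05; author: apharris; state: Exp;";

# Same regex as the script: capture date, time, author, state, and the remainder.
my ($year, $mon, $mday, $hours, $min, $secs, $author, $state, $rest) =
    $line =~ m#(\d+)/(\d+)/(\d+)\s+(\d+):(\d+):(\d+);\s+author:\s+([^;]+);\s+state:\s+([^;]+);(.*)#
    or die "could not parse line: $line\n";

# Same conversion the script performs: month is 0-based, year is offset from 1900,
# and the timestamp is interpreted as UTC.
my $time = timegm($secs, $min, $hours, $mday, $mon - 1, $year - 1900);

print "author: $author, state: $state, epoch: $time\n";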