# boost_wide_report.py
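# Part of the Boost regression reporting tools: merges the per-runner test
# results for a given tag and renders the HTML report pages via XSLT.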

import codecs, glob, os, shutil, string, sys, time
import xml.sax, xml.sax.saxutils

import utils  # local helper module shipped with the Boost regression tools

# xsl_path, map_path, ftp_task, unzip_archives_task, merge_xmls_task,
# make_links_task, report_types and root_paths come from the earlier,
# unshown part of this script.

def merge_processed_test_runs( test_runs_dir, tag, writer ):
    # Signature inferred from the call in execute_tasks(): every processed
    # test-run XML file is streamed through one SAX XMLGenerator so that the
    # output is a single well-formed <all-test-runs> document.
    all_runs_xml = xml.sax.saxutils.XMLGenerator( writer )
    all_runs_xml.startDocument()
    all_runs_xml.startElement( 'all-test-runs', {} )
    
    files = glob.glob( os.path.join( test_runs_dir, '*.xml' ) )
    for test_run in files:
        try:
            utils.log( '    Writing "%s" into the resulting XML...' % test_run )
            xml.sax.parse( test_run, all_runs_xml  )
        except Exception, msg:
            utils.log( '    Skipping "%s" due to errors (%s)' % ( test_run, msg ) )

    all_runs_xml.endElement( 'all-test-runs' )
    all_runs_xml.endDocument()

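# One full report pass for a tag: collect the runners' logs, unpack and merge
# them, write the combined extended_test_results.xml, then render the report
# pages.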
def execute_tasks(
          tag
        , user
        , run_date
        , comment_file
        , results_dir
        , output_dir
        , reports
        , warnings
        , extended_test_results
        , dont_collect_logs
        , expected_results_file
        , failures_markup_file
        ):

    
    # results_xml_path = os.path.join( results_dir, results_xml )
    # utils.log( 'Merging test runs into "%s"...' % results_xml_path )


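    # Working layout under results_dir: incoming/<tag> holds the collected
    # archives, with processed/ and processed/merged/ nested beneath it.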
    incoming_dir = os.path.join( results_dir, 'incoming', tag )
    processed_dir = os.path.join( incoming_dir, 'processed' )
    merged_dir = os.path.join( processed_dir, 'merged' )
    if not os.path.exists( incoming_dir ):
        os.makedirs( incoming_dir )
    if not os.path.exists( processed_dir ):
        os.makedirs( processed_dir )
    if not os.path.exists( merged_dir ):
        os.makedirs( merged_dir )
    
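    # Unless log collection is disabled, fetch the runners' uploaded archives
    # from the central FTP drop.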
    if not dont_collect_logs:
        ftp_site = 'fx.meta-comm.com'
        site_path = '/boost-regression/%s' % tag

        ftp_task( ftp_site, site_path, incoming_dir )

    unzip_archives_task( incoming_dir, processed_dir, utils.unzip )
    merge_xmls_task( incoming_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file )
    make_links_task( merged_dir
                     , output_dir
                     , tag
                     , run_date
                     , comment_file
                     , extended_test_results
                     , failures_markup_file )


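    # Merge every processed run into one UTF-8 extended_test_results.xml;
    # this single document drives all of the XSLT transforms below.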
    results_xml_path = os.path.join( output_dir, 'extended_test_results.xml' )
    writer = codecs.open( results_xml_path, 'w', 'utf-8' )
    merge_processed_test_runs( merged_dir, tag, writer )
    writer.close()

    
    make_result_pages(
          extended_test_results
        , expected_results_file
        , failures_markup_file
        , tag
        , run_date
        , comment_file
        , output_dir
        , reports
        , warnings
        )

        
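# Renders the pages requested via the short codes in `reports` (see usage()):
# 'i' issues, per-mode detailed/summary pages and their release variants,
# 'e' expected results, 'n' runner comment files.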
def make_result_pages(
          extended_test_results
        , expected_results_file
        , failures_markup_file
        , tag
        , run_date
        , comment_file
        , output_dir
        , reports
        , warnings
        ):

    utils.log( 'Producing the reports...' )
    __log__ = 1

    warnings_text = '+'.join( warnings )
    
    if comment_file != '':
        comment_file = os.path.abspath( comment_file )
        
    links = os.path.join( output_dir, 'links.html' )
    
    utils.makedirs( os.path.join( output_dir, 'output' ) )
    for mode in ( 'developer', 'user' ):
        utils.makedirs( os.path.join( output_dir, mode, 'output' ) )
        
    issues = os.path.join( output_dir, 'developer', 'issues.html' )
    if 'i' in reports:
        utils.log( '    Making issues list...' )
        utils.libxslt( 
              utils.log
            , extended_test_results
            , xsl_path( 'issues_page.xsl' )
            , issues
            , {
                  'source':                 tag
                , 'run_date':               run_date
                , 'warnings':               warnings_text
                , 'comment_file':           comment_file
                , 'expected_results_file':  expected_results_file
                , 'explicit_markup_file':   failures_markup_file
                , 'release':                'yes'
                }
            )

    for mode in ( 'developer', 'user' ):
        if mode[0] + 'd' in reports:
            utils.log( '    Making detailed %s report...' % mode )
            utils.libxslt( 
                  utils.log
                , extended_test_results
                , xsl_path( 'result_page.xsl' )
                , os.path.join( output_dir, mode, 'index.html' )
                , { 
                      'links_file':             'links.html'
                    , 'mode':                   mode
                    , 'source':                 tag
                    , 'run_date':               run_date
                    , 'warnings':               warnings_text
                    , 'comment_file':           comment_file
                    , 'expected_results_file':  expected_results_file
                    , 'explicit_markup_file' :  failures_markup_file
                    }
                )
    
    for mode in ( 'developer', 'user' ):
        if mode[0] + 's' in reports:
            utils.log( '    Making summary %s report...' % mode )
            utils.libxslt(
                  utils.log
                , extended_test_results
                , xsl_path( 'summary_page.xsl' )
                , os.path.join( output_dir, mode, 'summary.html' )
                , { 
                      'mode' :                  mode 
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'warnings':               warnings_text
                    , 'comment_file':           comment_file
                    , 'explicit_markup_file' :  failures_markup_file
                    }
                )

    for mode in ( 'developer', 'user' ):
        if mode[0] + 'dr' in reports:
            utils.log( '    Making detailed %s release report...' % mode )
            utils.libxslt( 
                  utils.log
                , extended_test_results
                , xsl_path( 'result_page.xsl' )
                , os.path.join( output_dir, mode, 'index_release.html' )
                , { 
                      'links_file':             'links.html'
                    , 'mode':                   mode
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'warnings':               warnings_text
                    , 'comment_file':           comment_file
                    , 'expected_results_file':  expected_results_file
                    , 'explicit_markup_file' :  failures_markup_file
                    , 'release':                'yes'
                    }
                )

    for mode in ( 'developer', 'user' ):
        if mode[0] + 'sr' in reports:
            utils.log( '    Making summary %s release report...' % mode )
            utils.libxslt(
                  utils.log
                , extended_test_results
                , xsl_path( 'summary_page.xsl' )
                , os.path.join( output_dir, mode, 'summary_release.html' )
                , { 
                      'mode' :                  mode
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'warnings':               warnings_text
                    , 'comment_file':           comment_file
                    , 'explicit_markup_file' :  failures_markup_file
                    , 'release':                'yes'
                    }
                )
        
    if 'e' in reports:
        utils.log( '    Generating expected_results...' )
        utils.libxslt(
              utils.log
            , extended_test_results
            , xsl_path( 'produce_expected_results.xsl' )
            , os.path.join( output_dir, 'expected_results.xml' )
            )

    if 'n' in reports:
        utils.log( '    Making runner comment files...' )
        utils.libxslt(
              utils.log
            , extended_test_results
            , xsl_path( 'runners.xsl' )
            , os.path.join( output_dir, 'runners.html' )
            )

    shutil.copyfile(
          xsl_path( 'html/master.css' )
        , os.path.join( output_dir, 'master.css' )
        )


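# Script entry point (invoked from main()): prepares the output directory,
# substitutes an empty expected-results file when none is given, runs the
# pipeline and, on request, tars and uploads the results to SourceForge.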
def build_xsl_reports( 
          locate_root_dir
        , tag
        , expected_results_file
        , failures_markup_file
        , comment_file
        , results_dir
        , result_file_prefix
        , dont_collect_logs = 0
        , reports = report_types
        , warnings = []
        , user = None
        , upload = False
        ):

    run_date = time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime() )

    root_paths.append( locate_root_dir )
    root_paths.append( results_dir )
    
    bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )
    
    output_dir = os.path.join( results_dir, result_file_prefix )
    utils.makedirs( output_dir )
    
    if expected_results_file != '':
        expected_results_file = os.path.abspath( expected_results_file )
    else:
        expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )


    extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )
        
    execute_tasks(
          tag
        , user
        , run_date
        , comment_file
        , results_dir
        , output_dir
        , reports
        , warnings
        , extended_test_results
        , dont_collect_logs
        , expected_results_file
        , failures_markup_file
        )

    if upload:
        upload_dir = 'regression-logs/'
        utils.log( 'Uploading results into "%s" [connecting as %s]...' % ( upload_dir, user ) )
        
        archive_name = '%s.tar.gz' % result_file_prefix
        utils.tar( 
              os.path.join( results_dir, result_file_prefix )
            , archive_name
            )
        
        utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
        utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )


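# Parses the command line (see usage() below) and returns the arguments in
# the order build_xsl_reports() expects.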
def accept_args( args ):
    args_spec = [ 
          'locate-root='
        , 'tag='
        , 'expected-results='
        , 'failures-markup='
        , 'comment='
        , 'results-dir='
        , 'results-prefix='
        , 'dont-collect-logs'
        , 'reports='
        , 'user='
        , 'upload'
        , 'help'
        ]
        
    options = { 
          '--comment': ''
        , '--expected-results': ''
        , '--failures-markup': ''
        , '--reports': string.join( report_types, ',' )
        , '--tag': None
        , '--user': None
        # --upload and --dont-collect-logs are presence flags; they are read
        # below with has_key() and need no defaults here.
        }
    
    utils.accept_args( args_spec, args, options, usage )
    if not options.has_key( '--results-dir' ):
        options[ '--results-dir' ] = options[ '--locate-root' ]

    if not options.has_key( '--results-prefix' ):
        options[ '--results-prefix' ] = 'all'
    
    return ( 
          options[ '--locate-root' ]
        , options[ '--tag' ]
        , options[ '--expected-results' ]
        , options[ '--failures-markup' ]
        , options[ '--comment' ]
        , options[ '--results-dir' ]
        , options[ '--results-prefix' ]
        , options.has_key( '--dont-collect-logs' )
        , options[ '--reports' ].split( ',' )
        , options[ '--user' ]
        , options.has_key( '--upload' )
        )


def usage():
    print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
    print '''
\t--locate-root         the same as --locate-root in compiler_status
\t--tag                 the tag for the results (i.e. 'CVS-HEAD')
\t--expected-results    the file with the results to be compared with
\t                      the current run
\t--failures-markup     the file with the failures markup
\t--comment             an html comment file (will be inserted in the reports)
\t--results-dir         the directory containing -links.html, -fail.html
\t                      files produced by compiler_status (by default the
\t                      same as specified in --locate-root)
\t--results-prefix      the prefix of -links.html, -fail.html
\t                      files produced by compiler_status
\t--user                SourceForge user name for a shell account
\t--upload              upload reports to SourceForge 

The following options are useful in debugging:

\t--dont-collect-logs   don't collect the test logs
\t--reports             produce only the specified reports
\t                        us - user summary
\t                        ds - developer summary
\t                        ud - user detailed
\t                        dd - developer detailed
\t                        l  - links
\t                        p  - patches
\t                        x  - extended results file
\t                        e  - expected results file
\t                        i  - issues
\t                        n  - runner comment files
'''

def main():
    build_xsl_reports( *accept_args( sys.argv[ 1 : ] ) )

if __name__ == '__main__':
    main()
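
# Example invocation (paths and tag are illustrative; see usage() above):
#
#   python boost_wide_report.py --locate-root=/path/to/boost-root \
#       --tag=CVS-HEAD --results-dir=/path/to/results --results-prefix=all \
#       --expected-results=expected_results.xml \
#       --failures-markup=explicit-failures-markup.xml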
