
testing.jam

Boost provides free peer-reviewed portable C++ source libraries. We emphasize libraries that work well with the C++ Standard Library.
Language: JAM
# Register generators. Depending on target type, either 'expect-success' or
# 'expect-failure' rule will be used.
generators.register-standard testing.expect-success : OBJ        : COMPILE      ;
generators.register-standard testing.expect-failure : OBJ        : COMPILE_FAIL ;
generators.register-standard testing.expect-success : RUN_OUTPUT : RUN          ;
generators.register-standard testing.expect-failure : RUN_OUTPUT : RUN_FAIL     ;
generators.register-standard testing.expect-failure : EXE        : LINK_FAIL    ;
generators.register-standard testing.expect-success : EXE        : LINK         ;

# Generator which runs an EXE and captures output.
generators.register-standard testing.capture-output : EXE : RUN_OUTPUT ;

# Generator which creates a target if sources run successfully. Differs from RUN
# in that run output is not captured. The reason why it exists is that the 'run'
# rule is much better for automated testing, but is not user-friendly (see
# http://article.gmane.org/gmane.comp.lib.boost.build/6353).
generators.register-standard testing.unit-test : EXE : UNIT_TEST ;

# The action rules called by generators.

# Causes the 'target' to exist after bjam invocation if and only if all the
# dependencies were successfully built.
#
rule expect-success ( target : dependency + : requirements * )
{
    **passed** $(target) : $(sources) ;
}

# Causes the 'target' to exist after bjam invocation if and only if some of the
# dependencies were not successfully built.
#
rule expect-failure ( target : dependency + : properties * )
{
    local grist = [ MATCH ^<(.*)> : $(dependency:G) ] ;
    local marker = $(dependency:G=$(grist)*fail) ;
    (failed-as-expected) $(marker) ;
    FAIL_EXPECTED $(dependency) ;
    LOCATE on $(marker) = [ on $(dependency) return $(LOCATE) ] ;
    RMOLD $(marker) ;
    DEPENDS $(marker) : $(dependency) ;
    DEPENDS $(target) : $(marker) ;
    **passed** $(target) : $(marker) ;
}

# The rule/action combination used to report successful passing of a test.
#
rule **passed**
{
    # Dump all the tests, if needed. We do it here, since dump should happen
    # only after all Jamfiles have been read, and there is no such place
    # currently defined (but there should be).
    if ! $(.dumped-tests) && ( --dump-tests in [ modules.peek : ARGV ] )
    {
        .dumped-tests = true ;
        dump-tests ;
    }
    # Force deletion of the target, in case any dependencies failed to build.
    RMOLD $(<) ;
}

# Used to create test files signifying passed tests.
#
actions **passed**
{
    echo passed > "$(<)"
}

# Used to create replacement object files that do not get created during tests
# that are expected to fail.
#
actions (failed-as-expected)
{
    echo failed as expected > "$(<)"
}

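# Illustrative note (not part of the original module): the generators
# registered above are what connect user-level test declarations to the
# 'expect-success' and 'expect-failure' rules. A hypothetical project Jamfile
# might request them like this:
#
#     import testing ;
#
#     compile      config_check.cpp ;   # OBJ via COMPILE      -> expect-success
#     compile-fail ill_formed.cpp ;     # OBJ via COMPILE_FAIL -> expect-failure
#     link-fail    missing_main.cpp ;   # EXE via LINK_FAIL    -> expect-failure
#     run          smoke.cpp ;          # RUN_OUTPUT via RUN   -> expect-success
#
# The source file names above are made up; on success each test leaves behind
# a small marker file written by the '**passed**' action.
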
rule run-path-setup ( target : source : properties * )
{
    # For testing, we need to make sure that all dynamic libraries needed by the
    # test are found. So, we collect all paths from dependency libraries (via
    # the xdll-path property) and add whatever explicit dll-path the user has
    # specified. The resulting paths are added to the environment on each test
    # invocation.
    local dll-paths = [ feature.get-values <dll-path> : $(properties) ] ;
    dll-paths += [ feature.get-values <xdll-path> : $(properties) ] ;
    dll-paths += [ on $(source) return $(RUN_PATH) ] ;
    dll-paths = [ sequence.unique $(dll-paths) ] ;
    if $(dll-paths)
    {
        dll-paths = [ sequence.transform path.native : $(dll-paths) ] ;
        PATH_SETUP on $(target) = [ common.prepend-path-variable-command
            [ os.shared-library-path-variable ] : $(dll-paths) ] ;
    }
}

local argv = [ modules.peek : ARGV ] ;

if --preserve-test-targets in $(argv)
{
    preserve-test-targets = true ;
}

toolset.flags testing.capture-output ARGS <testing.arg> ;
toolset.flags testing.capture-output INPUT_FILES <testing.input-file> ;
toolset.flags testing.capture-output LAUNCHER <testing.launcher> ;

# Runs executable 'source' and stores stdout in file 'target'. Unless the
# --preserve-test-targets command line option has been specified, removes the
# executable. The 'targets-to-remove' parameter controls what should be removed:
#   - if 'none', does not remove anything, ever
#   - if empty, removes 'source'
#   - if non-empty and not 'none', contains a list of sources to remove.
#
rule capture-output ( target : source : properties * : targets-to-remove * )
{
    output-file on $(target) = $(target:S=.output) ;
    LOCATE on $(target:S=.output) = [ on $(target) return $(LOCATE) ] ;

    # The INCLUDES kills a warning about an independent target...
    INCLUDES $(target) : $(target:S=.output) ;
    # but it also puts .output into the dependency graph, so we must tell jam it
    # is OK if it cannot find the target or an updating rule.
    NOCARE $(target:S=.output) ;

    # This has a two-fold effect. First, it adds input files to the dependency
    # graph, preventing a warning. Second, it causes input files to be bound
    # before the target is created. Therefore, they are bound using the SEARCH
    # setting on them and not the LOCATE setting of $(target), as in the other
    # case (due to a jam bug).
    DEPENDS $(target) : [ on $(target) return $(INPUT_FILES) ] ;

    if $(targets-to-remove) = none
    {
        targets-to-remove = ;
    }
    else if ! $(targets-to-remove)
    {
        targets-to-remove = $(source) ;
    }

    run-path-setup $(target) : $(source) : $(properties) ;

    if ! $(preserve-test-targets)
    {
        TEMPORARY $(targets-to-remove) ;
        # Set a second action on the target that will be executed after the
        # capture output action. The 'RmTemps' rule has the 'ignore' modifier so
        # it is always considered to have succeeded. This is needed for the
        # 'run-fail' test. For that test the target will be marked with
        # FAIL_EXPECTED, and without 'ignore' successful execution would be
        # negated and reported as a failure. With 'ignore' we do not detect the
        # case where removing the files fails, but that is not likely to happen.
        RmTemps $(target) : $(targets-to-remove) ;
    }
}

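# Illustrative note (not part of the original module): the toolset.flags
# declarations above feed the <testing.arg>, <testing.input-file> and
# <testing.launcher> properties into the ARGS, INPUT_FILES and LAUNCHER
# variables used by the 'capture-output' action below. A hypothetical test
# declared as
#
#     run parser.cpp : "--trace" : sample.txt : <testing.launcher>valgrind ;
#
# would therefore be executed roughly as "valgrind parser --trace sample.txt",
# with stdout and stderr redirected into the test's .output file.
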
if [ os.name ] = NT
{
    .STATUS        = %status% ;
    .SET_STATUS    = "set status=%ERRORLEVEL%" ;
    .RUN_OUTPUT_NL = "echo." ;
    .STATUS_0      = "%status% EQU 0 (" ;
    .STATUS_NOT_0  = "%status% NEQ 0 (" ;
    .VERBOSE       = "%verbose% EQU 1 (" ;
    .ENDIF         = ")" ;
    .SHELL_SET     = "set " ;
    .CATENATE      = type ;
    .CP            = copy ;
}
else
{
    .STATUS        = "$status" ;
    .SET_STATUS    = "status=$?" ;
    .RUN_OUTPUT_NL = "echo" ;
    .STATUS_0      = "test $status -eq 0 ; then" ;
    .STATUS_NOT_0  = "test $status -ne 0 ; then" ;
    .VERBOSE       = "test $verbose -eq 1 ; then" ;
    .ENDIF         = "fi" ;
    .SHELL_SET     = "" ;
    .CATENATE      = cat ;
    .CP            = cp ;
}

.VERBOSE_TEST = 0 ;
if --verbose-test in [ modules.peek : ARGV ]
{
    .VERBOSE_TEST = 1 ;
}

.RM = [ common.rm-command ] ;

actions capture-output bind INPUT_FILES output-file
{
    $(PATH_SETUP)
    $(LAUNCHER) "$(>)" $(ARGS) "$(INPUT_FILES)" > "$(output-file)" 2>&1
    $(.SET_STATUS)
    $(.RUN_OUTPUT_NL) >> "$(output-file)"
    echo EXIT STATUS: $(.STATUS) >> "$(output-file)"
    if $(.STATUS_0)
        $(.CP) "$(output-file)" "$(<)"
    $(.ENDIF)
    $(.SHELL_SET)verbose=$(.VERBOSE_TEST)
    if $(.STATUS_NOT_0)
        $(.SHELL_SET)verbose=1
    $(.ENDIF)
    if $(.VERBOSE)
        echo ====== BEGIN OUTPUT ======
        $(.CATENATE) "$(output-file)"
        echo ====== END OUTPUT ======
    $(.ENDIF)
    exit $(.STATUS)
}

actions quietly updated ignore piecemeal together RmTemps
{
    $(.RM) "$(>)"
}

.MAKE_FILE = [ common.file-creation-command ] ;

toolset.flags testing.unit-test LAUNCHER <testing.launcher> ;
toolset.flags testing.unit-test ARGS <testing.arg> ;

rule unit-test ( target : source : properties * )
{
    run-path-setup $(target) : $(source) : $(properties) ;
}

actions unit-test
{
    $(PATH_SETUP)
    $(LAUNCHER) $(>) $(ARGS) && $(.MAKE_FILE) $(<)
}

IMPORT $(__name__) : compile compile-fail run run-fail link link-fail
    : : compile compile-fail run run-fail link link-fail ;

type.register TIME : time ;
generators.register-standard testing.time : : TIME ;

rule record-time ( target : source : start end user system )
{
    local src-string = [$(source:G=:J=",")"] " ;
    USER_TIME on $(target) += $(src-string)$(user) ;
    SYSTEM_TIME on $(target) += $(src-string)$(system) ;
}

IMPORT testing : record-time : : testing.record-time ;

# Calling this rule requests that Boost Build time how long it takes to build
# the 'source' target and display the results both on the standard output and
# in the 'target' file.
#
rule time ( target : source : properties * )
{
    # Set up a rule for recording timing information.
    __TIMING_RULE__ on $(source) = testing.record-time $(target) ;

    # Make sure that the source is rebuilt any time we need to retrieve that
    # information.
    REBUILDS $(target) : $(source) ;
}

actions time
{
    echo user: $(USER_TIME)
    echo system: $(SYSTEM_TIME)

    echo user: $(USER_TIME)" seconds" > "$(<)"
    echo system: $(SYSTEM_TIME)" seconds" >> "$(<)"
}
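
The module above supplies only the build-system plumbing; tests themselves are declared in an ordinary Jamfile through the rules exported by the IMPORT statements. A minimal sketch, with hypothetical file and target names, might look like this:

    # Jamfile (illustrative sketch; all names are hypothetical)
    import testing ;

    run      calc_test.cpp : "--iterations=10" ;  # build, execute, capture output
    run-fail overflow_test.cpp ;                  # passes only if execution fails
    link     api_link_check.cpp ;                 # passes if the sources link into an EXE

Behaviour can then be adjusted on the bjam command line using the options handled in the code above: --preserve-test-targets keeps the built test executables instead of marking them TEMPORARY, --verbose-test echoes the captured output even for passing runs, and --dump-tests prints a description of every declared test once all Jamfiles have been read.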
