📄 tm_basic.cxx
字号:
// NOTE(review): this chunk was recovered from a web page that collapsed all
// line breaks; the code below is re-indented without changing any token.
// It opens INSIDE a test function whose header lies before this view (it
// measures alarm -> thread resume latency); the tail of that function follows.

    // Set my priority higher than any I plan to create
    cyg_thread_set_priority(cyg_thread_self(), 2);
    cyg_thread_create(10,                       // Priority - just a number
                      alarm_test2,              // entry
                      i,                        // index -- NOTE(review): the name below
                                                // uses 0; confirm 'i' is intended here
                      thread_name("thread", 0), // Name
                      &stacks[0][0],            // Stack
                      STACK_SIZE,               // Size
                      &threads[0],              // Handle
                      &test_threads[0]          // Thread data structure
        );
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    // Drive the alarm off the real-time clock's counter.
    cyg_clock_to_counter(cyg_real_time_clock(), &rtc_handle);
    cyg_alarm_create(rtc_handle, alarm_cb3, threads[0], &alarms[0], &test_alarms[0]);
    init_val = 5;
    step_val = 5;
    alarm_cnt = 0;
    cyg_alarm_initialize(alarms[0], init_val, step_val);
    cyg_semaphore_init(&synchro, 0);
    cyg_alarm_enable(alarms[0]);
    // Block until the alarm-driven thread posts the semaphore.
    cyg_semaphore_wait(&synchro);
    cyg_alarm_disable(alarms[0]);
    cyg_alarm_delete(alarms[0]);
    show_times(sched_ft, nscheds, "Alarm -> thread resume latency");
    cyg_thread_suspend(threads[0]);
    cyg_thread_delete(threads[0]);
    end_of_test_group();
}

// Measure scheduler lock and unlock cost under several thread populations:
// no extra threads, one suspended thread, many suspended threads, and many
// runnable low-priority threads.  Each sample brackets the call of interest
// with HAL_CLOCK_READ; show_times() factors out the read overhead.
void
run_sched_tests(void)
{
    int i;

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    // Cost of taking the scheduler lock.
    for (i = 0; i < nscheds; i++) {
        HAL_CLOCK_READ(&sched_ft[i].start);
        cyg_scheduler_lock();
        HAL_CLOCK_READ(&sched_ft[i].end);
        cyg_scheduler_unlock();
    }
    show_times(sched_ft, nscheds, "Scheduler lock");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    // Cost of releasing the scheduler lock with no other threads present.
    for (i = 0; i < nscheds; i++) {
        cyg_scheduler_lock();
        HAL_CLOCK_READ(&sched_ft[i].start);
        cyg_scheduler_unlock();
        HAL_CLOCK_READ(&sched_ft[i].end);
    }
    show_times(sched_ft, nscheds, "Scheduler unlock [0 threads]");

    // Set my priority higher than any I plan to create
    cyg_thread_set_priority(cyg_thread_self(), 2);
    // One thread, created but never resumed (stays suspended).
    for (i = 0; i < 1; i++) {
        cyg_thread_create(10,                       // Priority - just a number
                          test0,                    // entry
                          i,                        // index
                          thread_name("thread", i), // Name
                          &stacks[i][0],            // Stack
                          STACK_SIZE,               // Size
                          &threads[i],              // Handle
                          &test_threads[i]          // Thread data structure
            );
    }
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nscheds; i++) {
        cyg_scheduler_lock();
        HAL_CLOCK_READ(&sched_ft[i].start);
        cyg_scheduler_unlock();
        HAL_CLOCK_READ(&sched_ft[i].end);
    }
    show_times(sched_ft, nscheds, "Scheduler unlock [1 suspended]");
    for (i = 0; i < 1; i++) {
        cyg_thread_delete(threads[i]);
    }

    // Set my priority higher than any I plan to create
    cyg_thread_set_priority(cyg_thread_self(), 2);
    // Many threads, all left suspended.
    for (i = 0; i < ntest_threads; i++) {
        cyg_thread_create(10,                       // Priority - just a number
                          test0,                    // entry
                          i,                        // index
                          thread_name("thread", i), // Name
                          &stacks[i][0],            // Stack
                          STACK_SIZE,               // Size
                          &threads[i],              // Handle
                          &test_threads[i]          // Thread data structure
            );
    }
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nscheds; i++) {
        cyg_scheduler_lock();
        HAL_CLOCK_READ(&sched_ft[i].start);
        cyg_scheduler_unlock();
        HAL_CLOCK_READ(&sched_ft[i].end);
    }
    show_times(sched_ft, nscheds, "Scheduler unlock [many suspended]");
    for (i = 0; i < ntest_threads; i++) {
        cyg_thread_delete(threads[i]);
    }

    // Set my priority higher than any I plan to create
    cyg_thread_set_priority(cyg_thread_self(), 2);
    // Many runnable threads, all at lower priority than this one so they
    // never actually preempt the measurement loop.
    for (i = 0; i < ntest_threads; i++) {
        cyg_thread_create(10,                       // Priority - just a number
                          test0,                    // entry
                          i,                        // index
                          thread_name("thread", i), // Name
                          &stacks[i][0],            // Stack
                          STACK_SIZE,               // Size
                          &threads[i],              // Handle
                          &test_threads[i]          // Thread data structure
            );
        cyg_thread_resume(threads[i]);
    }
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nscheds; i++) {
        cyg_scheduler_lock();
        HAL_CLOCK_READ(&sched_ft[i].start);
        cyg_scheduler_unlock();
        HAL_CLOCK_READ(&sched_ft[i].end);
    }
    show_times(sched_ft, nscheds, "Scheduler unlock [many low prio]");
    for (i = 0; i < ntest_threads; i++) {
        cyg_thread_delete(threads[i]);
    }
    end_of_test_group();
}

// Top-level test driver (runs as its own thread; 'id' is the thread entry
// argument and is unused).  Calibrates the hardware clock-read overhead,
// estimates clock-interrupt cost, runs every timing group, then reports
// optional latency statistics, stack usage and total elapsed time.
void
run_all_tests(CYG_ADDRESS id)
{
    int i, j;
    cyg_uint32 tv[nsamples], tv0, tv1;
    cyg_uint32 min_stack, max_stack, total_stack, actual_stack;
    cyg_tick_count_t ticks, tick0, tick1;
#ifdef CYG_SCHEDULER_LOCK_TIMINGS
    cyg_uint32 lock_ave, lock_max;
#endif
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
    cyg_int32 clock_ave;
#endif

    disable_clock_latency_measurement();
    cyg_test_dump_thread_stack_stats( "Startup, main stack", thread[0] );
    cyg_test_dump_interrupt_stack_stats( "Startup" );
    cyg_test_dump_idlethread_stack_stats( "Startup" );
    cyg_test_clear_interrupt_stack();

    diag_printf("\neCos Kernel Timings\n");
    diag_printf("Notes: all times are in microseconds (.000001) unless otherwise stated\n");
#ifdef STATS_WITHOUT_FIRST_SAMPLE
    diag_printf(" second line of results have first sample removed\n");
#endif

    cyg_thread_delay(2); // Make sure the clock is actually running
    ns_per_system_clock = 1000000/rtc_resolution[1];

    // Calibrate: average cost, in raw hardware ticks, of one HAL_CLOCK_READ.
    // This 'overhead' is subtracted from every later measurement.
    for (i = 0; i < nsamples; i++) {
        HAL_CLOCK_READ(&tv[i]);
    }
    tv0 = 0;
    for (i = 1; i < nsamples; i++) {
        tv0 += tv[i] - tv[i-1];
    }
    end_of_test_group();
    overhead = tv0 / (nsamples-1);
    diag_printf("Reading the hardware clock takes %d 'ticks' overhead\n", overhead);
    diag_printf("... this value will be factored out of all other measurements\n");

    // Try and measure how long the clock interrupt handling takes
    for (i = 0; i < nsamples; i++) {
        // Spin until the system tick advances, then immediately sample the
        // hardware counter: the residual value approximates the time spent
        // handling the tick interrupt.
        tick0 = cyg_current_time();
        while (true) {
            tick1 = cyg_current_time();
            if (tick0 != tick1) break;
        }
        HAL_CLOCK_READ(&tv[i]);
    }
    tv1 = 0;
    for (i = 0; i < nsamples; i++) {
        tv1 += tv[i] * 1000; // scale raw ticks so fractions survive the average
    }
    tv1 = tv1 / nsamples;
    tv1 -= overhead; // Adjust out the cost of getting the timer value
    diag_printf("Clock interrupt took");
    show_ticks_in_us(tv1);
    diag_printf(" microseconds (%d raw clock ticks)\n", tv1/1000);

    enable_clock_latency_measurement();
    ticks = cyg_current_time();
    show_test_parameters();
    show_times_hdr();
    reset_clock_latency_measurement();

    // The individual timing groups.
    run_thread_tests();
    run_sched_tests();
    run_mutex_tests();
    run_mbox_tests();
    run_semaphore_tests();
    run_counter_tests();
    run_alarm_tests();

#ifdef CYG_SCHEDULER_LOCK_TIMINGS
    Cyg_Scheduler::get_lock_times(&lock_ave, &lock_max);
    diag_printf("\nMax lock:");
    show_ticks_in_us(lock_max);
    diag_printf(", Ave lock:");
    show_ticks_in_us(lock_ave);
    diag_printf("\n");
#endif

#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
    // Display latency figures in same format as all other numbers
    disable_clock_latency_measurement();
    clock_ave = (total_clock_latency*1000) / total_clock_interrupts;
    show_ticks_in_us(clock_ave);
    show_ticks_in_us(min_clock_latency*1000);
    show_ticks_in_us(max_clock_latency*1000);
    show_ticks_in_us(0);
    diag_printf(" Clock/interrupt latency\n\n");
    enable_clock_latency_measurement();
#endif

#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
    disable_clock_latency_measurement();
    clock_ave = (total_clock_dsr_latency*1000) / total_clock_dsr_calls;
    show_ticks_in_us(clock_ave);
    show_ticks_in_us(min_clock_dsr_latency*1000);
    show_ticks_in_us(max_clock_dsr_latency*1000);
    show_ticks_in_us(0);
    diag_printf(" Clock DSR latency\n\n");
    enable_clock_latency_measurement();
#endif

    disable_clock_latency_measurement();
    // Stack usage: scan each test thread's stack from the base for the first
    // non-zero byte; bytes above it are counted as used.  This presumes the
    // stacks start zero-filled and grow downward -- TODO(review): confirm
    // against the stack declarations/initialization elsewhere in this file.
    min_stack = STACK_SIZE;
    max_stack = 0;
    total_stack = 0;
    for (i = 0; i < (int)NTEST_THREADS; i++) {
        for (j = 0; j < STACK_SIZE; j++) {
            if (stacks[i][j]) break;
        }
        actual_stack = STACK_SIZE-j;
        if (actual_stack < min_stack) min_stack = actual_stack;
        if (actual_stack > max_stack) max_stack = actual_stack;
        total_stack += actual_stack;
    }
    // Same scan for the main thread's stack (stack/STACKSIZE, declared
    // elsewhere, vs the test threads' stacks/STACK_SIZE).
    for (j = 0; j < STACKSIZE; j++) {
        if (((char *)stack[0])[j]) break;
    }
    diag_printf("%5d %5d %5d (main stack: %5d) Thread stack used (%d total)\n",
                total_stack/NTEST_THREADS, min_stack, max_stack,
                STACKSIZE - j, STACK_SIZE);
    cyg_test_dump_thread_stack_stats( "All done, main stack", thread[0] );
    cyg_test_dump_interrupt_stack_stats( "All done" );
    cyg_test_dump_idlethread_stack_stats( "All done" );
    enable_clock_latency_measurement();

    ticks = cyg_current_time();
    diag_printf("\nTiming complete - %d ms total\n\n",
                (int)((ticks*ns_per_system_clock)/1000));
    CYG_TEST_PASS_FINISH("Basic timing OK");
}

// Test entry proper: chooses sample/object counts (reduced when running
// under a simulator), applies the sanity clamps below, spawns
// run_all_tests() as a thread and starts the scheduler (never returns).
void
tm_basic_main( void )
{
    CYG_TEST_INIT();
    if (cyg_test_is_simulator) {
        nsamples = NSAMPLES_SIM;
        ntest_threads = NTEST_THREADS_SIM;
        nthread_switches = NTHREAD_SWITCHES_SIM;
        nmutexes = NMUTEXES_SIM;
        nmboxes = NMBOXES_SIM;
        nsemaphores = NSEMAPHORES_SIM;
        nscheds = NSCHEDS_SIM;
        ncounters = NCOUNTERS_SIM;
        nalarms = NALARMS_SIM;
    } else {
        nsamples = NSAMPLES;
        ntest_threads = NTEST_THREADS;
        nthread_switches = NTHREAD_SWITCHES;
        nmutexes = NMUTEXES;
        nmboxes = NMBOXES;
        nsemaphores = NSEMAPHORES;
        nscheds = NSCHEDS;
        ncounters = NCOUNTERS;
        nalarms = NALARMS;
    }

    // Sanity
    // NOTE(review): max() RAISES each count to at least the given floor; if
    // the intent was to cap counts at the size of the backing arrays, min()
    // would be expected instead -- verify against the array declarations.
#ifdef WORKHORSE_TEST
    ntest_threads = max(512, ntest_threads);
    nmutexes = max(1024, nmutexes);
    nsemaphores = max(1024, nsemaphores);
    nmboxes = max(1024, nmboxes);
    ncounters = max(1024, ncounters);
    nalarms = max(1024, nalarms);
#else
    ntest_threads = max(64, ntest_threads);
    nmutexes = max(32, nmutexes);
    nsemaphores = max(32, nsemaphores);
    nmboxes = max(32, nmboxes);
    ncounters = max(32, ncounters);
    nalarms = max(32, nalarms);
#endif

    new_thread(run_all_tests, 0);
    Cyg_Scheduler::scheduler.start();
}

// eCos startup hook: hand straight off to the test driver.
externC void
cyg_start( void )
{
    tm_basic_main();
}

#else // CYGFUN_KERNEL_API_C

// Configuration does not meet the test's requirements: report N/A.
externC void
cyg_start( void )
{
    CYG_TEST_INIT();
    CYG_TEST_NA("Timing tests require:\n"
                "CYGFUN_KERNEL_API_C && \n"
                "CYGSEM_KERNEL_SCHED_MLQUEUE &&\n"
                "CYGVAR_KERNEL_COUNTERS_CLOCK &&\n"
                "!CYGPKG_HAL_I386_LINUX &&\n"
                "!CYGDBG_INFRA_DIAG_USE_DEVICE &&\n"
                "(CYGNUM_KERNEL_SCHED_PRIORITIES > 12)\n");
}

#endif // CYGFUN_KERNEL_API_C, etc.

// EOF tm_basic.cxx
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -