op_model_cell.c
来自「linux 内核源代码」· C语言 代码 · 共 1,212 行 · 第 1/3 页
C
1,212 行
}}static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl){ pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE; cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);}/* * Oprofile is expected to collect data on all CPUs simultaneously. * However, there is one set of performance counters per node. There are * two hardware threads or virtual CPUs on each node. Hence, OProfile must * multiplex in time the performance counter collection on the two virtual * CPUs. The multiplexing of the performance counters is done by this * virtual counter routine. * * The pmc_values used below is defined as 'per-cpu' but its use is * more akin to 'per-node'. We need to store two sets of counter * values per node -- one for the previous run and one for the next. * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even * pair of per-cpu arrays is used for storing the previous and next * pmc values for a given node. * NOTE: We use the per-cpu variable to improve cache performance. * * This routine will alternate loading the virtual counters for * virtual CPUs */static void cell_virtual_cntr(unsigned long data){ int i, prev_hdw_thread, next_hdw_thread; u32 cpu; unsigned long flags; /* * Make sure that the interrupt_hander and the virt counter are * not both playing with the counters on the same node. */ spin_lock_irqsave(&virt_cntr_lock, flags); prev_hdw_thread = hdw_thread; /* switch the cpu handling the interrupts */ hdw_thread = 1 ^ hdw_thread; next_hdw_thread = hdw_thread; /* * There are some per thread events. Must do the * set event, for the thread that is being started */ for (i = 0; i < num_counters; i++) set_pm_event(i, pmc_cntrl[next_hdw_thread][i].evnts, pmc_cntrl[next_hdw_thread][i].masks); /* * The following is done only once per each node, but * we need cpu #, not node #, to pass to the cbe_xxx functions. 
*/ for_each_online_cpu(cpu) { if (cbe_get_hw_thread_id(cpu)) continue; /* * stop counters, save counter values, restore counts * for previous thread */ cbe_disable_pm(cpu); cbe_disable_pm_interrupts(cpu); for (i = 0; i < num_counters; i++) { per_cpu(pmc_values, cpu + prev_hdw_thread)[i] = cbe_read_ctr(cpu, i); if (per_cpu(pmc_values, cpu + next_hdw_thread)[i] == 0xFFFFFFFF) /* If the cntr value is 0xffffffff, we must * reset that to 0xfffffff0 when the current * thread is restarted. This will generate a * new interrupt and make sure that we never * restore the counters to the max value. If * the counters were restored to the max value, * they do not increment and no interrupts are * generated. Hence no more samples will be * collected on that cpu. */ cbe_write_ctr(cpu, i, 0xFFFFFFF0); else cbe_write_ctr(cpu, i, per_cpu(pmc_values, cpu + next_hdw_thread)[i]); } /* * Switch to the other thread. Change the interrupt * and control regs to be scheduled on the CPU * corresponding to the thread to execute. */ for (i = 0; i < num_counters; i++) { if (pmc_cntrl[next_hdw_thread][i].enabled) { /* * There are some per thread events. * Must do the set event, enable_cntr * for each cpu. 
*/ enable_ctr(cpu, i, pm_regs.pm07_cntrl); } else { cbe_write_pm07_control(cpu, i, 0); } } /* Enable interrupts on the CPU thread that is starting */ cbe_enable_pm_interrupts(cpu, next_hdw_thread, virt_cntr_inter_mask); cbe_enable_pm(cpu); } spin_unlock_irqrestore(&virt_cntr_lock, flags); mod_timer(&timer_virt_cntr, jiffies + HZ / 10);}static void start_virt_cntrs(void){ init_timer(&timer_virt_cntr); timer_virt_cntr.function = cell_virtual_cntr; timer_virt_cntr.data = 0UL; timer_virt_cntr.expires = jiffies + HZ / 10; add_timer(&timer_virt_cntr);}/* This function is called once for all cpus combined */static int cell_reg_setup(struct op_counter_config *ctr, struct op_system_config *sys, int num_ctrs){ int i, j, cpu; spu_cycle_reset = 0; if (ctr[0].event == SPU_CYCLES_EVENT_NUM) { spu_cycle_reset = ctr[0].count; /* * Each node will need to make the rtas call to start * and stop SPU profiling. Get the token once and store it. */ spu_rtas_token = rtas_token("ibm,cbe-spu-perftools"); if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) { printk(KERN_ERR "%s: rtas token ibm,cbe-spu-perftools unknown\n", __FUNCTION__); return -EIO; } } pm_rtas_token = rtas_token("ibm,cbe-perftools"); /* * For all events excetp PPU CYCLEs, each node will need to make * the rtas cbe-perftools call to setup and reset the debug bus. * Make the token lookup call once and store it in the global * variable pm_rtas_token. 
*/ if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { printk(KERN_ERR "%s: rtas token ibm,cbe-perftools unknown\n", __FUNCTION__); return -EIO; } num_counters = num_ctrs; pm_regs.group_control = 0; pm_regs.debug_bus_control = 0; /* setup the pm_control register */ memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl)); pm_regs.pm_cntrl.stop_at_max = 1; pm_regs.pm_cntrl.trace_mode = 0; pm_regs.pm_cntrl.freeze = 1; set_count_mode(sys->enable_kernel, sys->enable_user); /* Setup the thread 0 events */ for (i = 0; i < num_ctrs; ++i) { pmc_cntrl[0][i].evnts = ctr[i].event; pmc_cntrl[0][i].masks = ctr[i].unit_mask; pmc_cntrl[0][i].enabled = ctr[i].enabled; pmc_cntrl[0][i].vcntr = i; for_each_possible_cpu(j) per_cpu(pmc_values, j)[i] = 0; } /* * Setup the thread 1 events, map the thread 0 event to the * equivalent thread 1 event. */ for (i = 0; i < num_ctrs; ++i) { if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111)) pmc_cntrl[1][i].evnts = ctr[i].event + 19; else if (ctr[i].event == 2203) pmc_cntrl[1][i].evnts = ctr[i].event; else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215)) pmc_cntrl[1][i].evnts = ctr[i].event + 16; else pmc_cntrl[1][i].evnts = ctr[i].event; pmc_cntrl[1][i].masks = ctr[i].unit_mask; pmc_cntrl[1][i].enabled = ctr[i].enabled; pmc_cntrl[1][i].vcntr = i; } for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) trace_bus[i] = 0xff; for (i = 0; i < NUM_INPUT_BUS_WORDS; i++) input_bus[i] = 0xff; /* * Our counters count up, and "count" refers to * how much before the next interrupt, and we interrupt * on overflow. So we calculate the starting value * which will give us "count" until overflow. * Then we set the events on the enabled counters. 
*/ for (i = 0; i < num_counters; ++i) { /* start with virtual counter set 0 */ if (pmc_cntrl[0][i].enabled) { /* Using 32bit counters, reset max - count */ reset_value[i] = 0xFFFFFFFF - ctr[i].count; set_pm_event(i, pmc_cntrl[0][i].evnts, pmc_cntrl[0][i].masks); /* global, used by cell_cpu_setup */ ctr_enabled |= (1 << i); } } /* initialize the previous counts for the virtual cntrs */ for_each_online_cpu(cpu) for (i = 0; i < num_counters; ++i) { per_cpu(pmc_values, cpu)[i] = reset_value[i]; } return 0;}/* This function is called once for each cpu */static int cell_cpu_setup(struct op_counter_config *cntr){ u32 cpu = smp_processor_id(); u32 num_enabled = 0; int i; if (spu_cycle_reset) return 0; /* There is one performance monitor per processor chip (i.e. node), * so we only need to perform this function once per node. */ if (cbe_get_hw_thread_id(cpu)) return 0; /* Stop all counters */ cbe_disable_pm(cpu); cbe_disable_pm_interrupts(cpu); cbe_write_pm(cpu, pm_interval, 0); cbe_write_pm(cpu, pm_start_stop, 0); cbe_write_pm(cpu, group_control, pm_regs.group_control); cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); write_pm_cntrl(cpu); for (i = 0; i < num_counters; ++i) { if (ctr_enabled & (1 << i)) { pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu); num_enabled++; } } /* * The pm_rtas_activate_signals will return -EIO if the FW * call failed. 
	 */
	return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
}

#define ENTRIES  303
#define MAXLFSR  0xFFFFFF

/* precomputed table of 24 bit LFSR values */
static int initial_lfsr[] = {
 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 1988305, 8354440,
 8216060, 16342977, 13112639, 3976679, 5913576, 8816697, 6879995, 14043764,
 3339515, 9364420, 15808858, 12261651, 2141560, 5636398, 10345425, 10414756,
 781725, 6155650, 4746914, 5078683, 7469001, 6799140, 10156444, 9667150,
 10116470, 4133858, 2121972, 1124204, 1003577, 1611214, 14304602, 16221850,
 13878465, 13577744, 3629235, 8772583, 10881308, 2410386, 7300044, 5378855,
 9301235, 12755149, 4977682, 8083074, 10327581, 6395087, 9155434, 15501696,
 7514362, 14520507, 15808945, 3244584, 4741962, 9658130, 14336147, 8654727,
 7969093, 15759799, 14029445, 5038459, 9894848, 8659300, 13699287, 8834306,
 10712885, 14753895, 10410465, 3373251, 309501, 9561475, 5526688, 14647426,
 14209836, 5339224, 207299, 14069911, 8722990, 2290950, 3258216, 12505185,
 6007317, 9218111, 14661019, 10537428, 11731949, 9027003, 6641507, 9490160,
 200241, 9720425, 16277895, 10816638, 1554761, 10431375, 7467528, 6790302,
 3429078, 14633753, 14428997, 11463204, 3576212, 2003426, 6123687, 820520,
 9992513, 15784513, 5778891, 6428165, 8388607
};

/*
 * The hardware uses an LFSR counting sequence to determine when to capture
 * the SPU PCs.  An LFSR sequence is like a pseudo random number sequence
 * where each number occurs once in the sequence but the sequence is not in
 * numerical order.  The SPU PC capture is done when the LFSR sequence reaches
 * the last value in the sequence.  Hence the user specified value N
 * corresponds to the LFSR number that is N from the end of the sequence.
 *
 * To avoid the time to compute the LFSR, a lookup table is used.  The 24 bit
 * LFSR sequence is broken into four ranges.  The spacing of the precomputed
 * values is adjusted in each range so the error between the user specified
 * number (N) of events between samples and the actual number of events based
 * on the precomputed value will be less than about 6.2%.  Note, if the user
 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
 * This is to prevent the loss of samples because the trace buffer is full.
 *
 * User specified N                  Step between          Index in
 *                                   precomputed values    precomputed
 *                                   table
 * 0               to 2^16-1             ----                  0
 * 2^16            to 2^16+2^19-1        2^12              1 to 128
 * 2^16+2^19       to 2^16+2^19+2^22-1   2^15            129 to 256
 * 2^16+2^19+2^22  to 2^24-1             2^18            257 to 302
 *
 *
 * For example, the LFSR values in the second range are computed for 2^16,
 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
 * 1, 2,..., 127, 128.
 *
 * The 24 bit LFSR value for the nth number in the sequence can be
 * calculated using the following code:
 *
 * #define size 24
 * int calculate_lfsr(int n)
 * {
 *	int i;
 *	unsigned int newlfsr0;
 *	unsigned int lfsr = 0xFFFFFF;
 *	unsigned int howmany = n;
 *
 *	for (i = 2; i < howmany + 2; i++) {
 *		newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
 *			    ((lfsr >> (size - 1 - 1)) & 1) ^
 *			    (((lfsr >> (size - 1 - 6)) & 1) ^
 *			     ((lfsr >> (size - 1 - 23)) & 1)));
 *
 *		lfsr >>= 1;
 *		lfsr = lfsr | (newlfsr0 << (size - 1));
 *	}
 *	return lfsr;
 * }
 */

/* range boundaries for the lookup-table index calculation below */
#define V2_16 (0x1 << 16)
#define V2_19 (0x1 << 19)
#define V2_22 (0x1 << 22)

static int calculate_lfsr(int n)
{
	/*
	 * The ranges and steps are in powers of 2 so the calculations
	 * can be done using shifts rather than divide.
	 */
	int index;
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?