📄 mpcache.c
    /* caches will be allocated for active processors only on first ref */

#ifdef DEBUG
    debug ("L1_cache_line_size = %d", L1_cache_line_size);
    debug ("L2_cache_line_size = %d", L2_cache_line_size);
    debug ("L1_cache_size = %d", L1_cache_size);
    debug ("L2_cache_size = %d", L2_cache_size);
    debug ("L1_num_lines = %d", L1_num_lines);
    debug ("L2_num_lines = %d", L2_num_lines);
    debug ("L1_line_shift = %d", L1_line_shift);
    debug ("L2_line_shift = %d", L2_line_shift);
    debug ("L1_line_mask = %d", L1_line_mask);
    debug ("L2_line_mask = %d", L2_line_mask);
    debug ("L1_tag_shift = %d", L1_tag_shift);
    debug ("L2_tag_shift = %d", L2_tag_shift);
    debug ("L2_to_L1_line_size_ratio = %d", L2_to_L1_line_size_ratio);
#endif /* DEBUG */

#ifdef GRAPHICS
    /* create the window and draw the initial background */
    if (graphics_flag)
    {
        CreateWindow (WIN_WIDTH, WIN_HEIGHT);
        DrawBackground ();
    }
#endif /* GRAPHICS */
}

/*------------------------------------------------------------------*/

#ifdef GRAPHICS
static void RefreshEntireDisplay ()
{
    int pid, line;

    if (graphics_flag)
    {
        /* first draw the background */
        DrawBackground ();

        for (pid = 0; pid < NUM_DISPLAYED_PROCESSORS; pid++)
        {
            /* redraw bar graphs using last values */
            SetColor (BLUE);
            DrawFilledRectangle (P_X + Q_OFFSET (pid, BAR_WIDTH, 1),
                                 100 - L1_last_miss_ratio[pid],
                                 BAR_WIDTH, L1_last_miss_ratio[pid]);
            SetColor (RED);
            DrawFilledRectangle (P_X + Q_OFFSET (pid, BAR_WIDTH, 2),
                                 100 - L2_last_miss_ratio[pid],
                                 BAR_WIDTH, L2_last_miss_ratio[pid]);

            /* check if this processor's caches have been allocated */
            if (L1_tags[pid] == NULL)
                continue;               /* skip cache line state update */

            /* L1 cache states */
            for (line = 0; line < L1_num_lines; line++)
            {
                int y = line / C_L1_WIDTH;
                int x = line % C_L1_WIDTH;
                int color;

                switch (L1_states[pid][line])
                {
                    case UNTOUCHED:      color = WHITE;  break;
                    case INVALID:        color = BLACK;  break;
                    case SHARED:         color = GREEN;  break;
                    case EXCLUSIVE:      color = YELLOW; break;
                    case MODIFIED:       color = RED;    break;
                    case MODIFIED_ABOVE: color = CYAN;   break;
                }
                SetColor (color);
                SetPoint (C_X + OFFSET (pid, C_L1_WIDTH) + x, C_Y + y);
            }

            /* L2 cache states */
            for (line = 0; line < L2_num_lines; line++)
            {
                int y = line / C_L2_WIDTH;
                int x = line % C_L2_WIDTH;
                int color;

                switch (L2_states[pid][line])
                {
                    case UNTOUCHED:      color = WHITE;  break;
                    case INVALID:        color = BLACK;  break;
                    case SHARED:         color = GREEN;  break;
                    case EXCLUSIVE:      color = YELLOW; break;
                    case MODIFIED:       color = RED;    break;
                    case MODIFIED_ABOVE: color = CYAN;   break;
                }
                SetColor (color);
                SetPoint (C_X + OFFSET (pid, C_L2_WIDTH) + x,
                          C_Y + C_L1_HEIGHT + y);
            }
        }
        FlushWindow ();
    }
}
#endif /* GRAPHICS */

/*------------------------------------------------------------------*/

static void CreateCaches (int pid)
{
    /* create L1 and L2 tag and state arrays, initialized to zero
       (for 'untouched' state) */
    L1_tags[pid]   = calloc (L1_num_lines, sizeof (unsigned int));
    L1_states[pid] = calloc (L1_num_lines, sizeof (unsigned int));
    L2_tags[pid]   = calloc (L2_num_lines, sizeof (unsigned int));
    L2_states[pid] = calloc (L2_num_lines, sizeof (unsigned int));
}

/*------------------------------------------------------------------*/

static void UpdateL1States (int current_pid, unsigned int L2_tag,
                            unsigned int L2_line, int new_state)
{
    int i;
    unsigned long base_addr = (L2_tag << L2_tag_shift) | (L2_line << L2_line_shift);
    unsigned int L1_tag, base_L1_line;

    L1_tag = (base_addr >> L1_tag_shift);
    base_L1_line = (base_addr >> L1_line_shift) & L1_line_mask;

    for (i = 0; i < L2_to_L1_line_size_ratio; i++)
    {
        /* if tag matches and valid, update state */
        if (L1_tags[current_pid][base_L1_line + i] == L1_tag &&
            L1_states[current_pid][base_L1_line + i] != INVALID)
        {
            SET_L1_STATE (current_pid, base_L1_line + i, new_state);
        }
    }
}

/*------------------------------------------------------------------*/

static void UpdateL1StatesExceptOne (int current_pid, unsigned int L2_tag,
                                     unsigned int L2_line, int new_state,
                                     unsigned int except_L1_line)
{
    int i;
    unsigned long base_addr = (L2_tag << L2_tag_shift) | (L2_line << L2_line_shift);
    unsigned int L1_tag, base_L1_line;

    L1_tag = (base_addr >> L1_tag_shift);
    base_L1_line = (base_addr >> L1_line_shift) & L1_line_mask;

    for (i = 0; i < L2_to_L1_line_size_ratio; i++)
    {
        /* skip the line we do not want to change */
        if (base_L1_line + i == except_L1_line)
            continue;

        /* if tag matches and valid, update state */
        if (L1_tags[current_pid][base_L1_line + i] == L1_tag &&
            L1_states[current_pid][base_L1_line + i] != INVALID)
        {
            SET_L1_STATE (current_pid, base_L1_line + i, new_state);
        }
    }
}

/*------------------------------------------------------------------*/

static int CheckL1States (int current_pid, unsigned int L2_tag,
                          unsigned int L2_line, int state)
{
    int i, count;
    unsigned long base_addr = (L2_tag << L2_tag_shift) | (L2_line << L2_line_shift);
    unsigned int L1_tag, base_L1_line;

    L1_tag = (base_addr >> L1_tag_shift);
    base_L1_line = (base_addr >> L1_line_shift) & L1_line_mask;

    count = 0;
    for (i = 0; i < L2_to_L1_line_size_ratio; i++)
    {
        /* if tag and state match, increase count */
        if (L1_tags[current_pid][base_L1_line + i] == L1_tag &&
            L1_states[current_pid][base_L1_line + i] == state)
        {
            ++count;
        }
    }
    return count;       /* return count of matching lines */
}

/*------------------------------------------------------------------*/

static int CheckAndUpdateL2Copies (int requesting_pid, unsigned int L2_tag,
                                   unsigned int L2_line, int bus_req_type)
{
    int i, count;

    count = 0;  /* count of snoop hits _for this request only_ */

    for (i = 0; i < num_created_processes; i++)
    {
        if (i == requesting_pid)
            continue;   /* skip processor making the request */

        /* increment count of external bus requests seen by this processor */
        mpcachestats[i].external_bus_requests++;

        /* special check if processor was just created and has not yet
           executed its first memory instruction that will force creation
           of cache data structures; if necessary, create caches here */
        if (L2_tags[i] == NULL)
            CreateCaches (i);

        /* >>> FILTER: do nothing further if there is no valid tag match */
        if (L2_tags[i][L2_line] != L2_tag || L2_states[i][L2_line] == INVALID)
            continue;

        /* valid tag match, so increment count of external snoop hits */
        mpcachestats[i].snoop_hits++;   /* for current processor */
        count++;                        /* for this request */

        if (bus_req_type == READ)
        {
            switch (L2_states[i][L2_line])
            {
                case EXCLUSIVE:
                    /* increment count of exclusive to shared transitions */
                    mpcachestats[i].exclusive_to_shared_changes++;
                    break;

                case SHARED:
                    /* no state change or action */
                    break;

                case MODIFIED:
                case MODIFIED_ABOVE:
                    /* owner of modified data provides shared data response */
                    mpcachestats[i].shared_data_responses++;
                    break;
            }

            /* now more than one read-only copy */
            SET_L2_STATE (i, L2_line, SHARED);

            /* change _all_ L1 line states for L2 line */
            UpdateL1States (i, L2_tag, L2_line, SHARED);
        }
        else    /* read_ex request */
        {
            switch (L2_states[i][L2_line])
            {
                case EXCLUSIVE:
                case SHARED:
                    /* for read_ex, any read-only copies are just invalidated,
                       so increment count of external invalidations */
                    mpcachestats[i].external_invalidations++;
                    break;

                case MODIFIED:
                case MODIFIED_ABOVE:
                    /* owner of modified data provides excl data resp */
                    mpcachestats[i].exclusive_data_responses++;
                    break;
            }

            /* regardless of whatever _valid_ state (invalid state already
               filtered out above), current processor loses its copy */
            SET_L2_STATE (i, L2_line, INVALID);

            /* change _all_ L1 line states */
            UpdateL1States (i, L2_tag, L2_line, INVALID);
        }
    }

    /* for read_ex requests, update invalidation statistics after all
       processors are dealt with */
    if (bus_req_type == READ_EX)
    {
        /* increment sum and count of invalidation set size */
        invalidation_set_size_sum += count;
        ++invalidation_set_size_samples;
        ++invalidation_set_size_histogram[count];
    }

    return count;       /* return snoop hit count in other processors */
}

/*------------------------------------------------------------------*/

static void InvalidateOtherCaches (int requesting_pid, unsigned int L2_tag,
                                   unsigned int L2_line)
{
    int i, invalidation_set_size;

    invalidation_set_size = 0;

    for (i = 0; i < num_created_processes; i++)
    {
        if (i == requesting_pid)
            continue;   /* skip the processor that is requesting invalidation */

        /* increment count of external bus requests for this processor */
        mpcachestats[i].external_bus_requests++;

        /* special check if processor was just created and has not yet
           executed its first memory instruction that will force creation
           of cache data structures; if necessary, create caches here */
        if (L2_tags[i] == NULL)
            CreateCaches (i);

        /* check if data is in L2 cache */
        if (L2_tags[i][L2_line] == L2_tag && L2_states[i][L2_line] != INVALID)
        {
            /* increment count of external snoop hits for this processor */
            mpcachestats[i].snoop_hits++;

            /* increment count of external invalidations for this processor */
            mpcachestats[i].external_invalidations++;

            /* increment local count of invalidation hits */
            ++invalidation_set_size;

            /* invalidate in L2 */
            SET_L2_STATE (i, L2_line, INVALID);

            /* change _all_ L1 line states */
            UpdateL1States (i,                  /* which L1 cache to update */
                            L2_tag, L2_line,    /* use to build base L1 addr */
                            INVALID);
        }
    }

    /* increment sum and count of invalidation set size */
    invalidation_set_size_sum += invalidation_set_size;
    ++invalidation_set_size_samples;
    ++invalidation_set_size_histogram[invalidation_set_size];
}

/*------------------------------------------------------------------*/

void MPCacheMemRef (int pid, unsigned long data_addr, int is_a_store)
{
    unsigned int L1_tag  = (data_addr >> L1_tag_shift);
    unsigned int L2_tag  = (data_addr >> L2_tag_shift);
    unsigned int L1_line = (data_addr >> L1_line_shift) & L1_line_mask;
    unsigned int L2_line = (data_addr >> L2_line_shift) & L2_line_mask;
    int i;
    static int window_event_interval_count;

#ifdef GRAPHICS
    if (pid >= NUM_DISPLAYED_PROCESSORS)
        fatal ("Too many processors for graphical display; limit is %d.",
               NUM_DISPLAYED_PROCESSORS);
#endif /* GRAPHICS */

    /* caches for each pid are allocated on first reference */
    if (L1_tags[pid] == NULL)
    {
        CreateCaches (pid);
    }

    mpcachestats[pid].L1_accesses++;
    if (is_a_store)
        mpcachestats[pid].L1_stores++;

    /* probe L1 cache */
    if (L1_tags[pid][L1_line] == L1_tag && L1_states[pid][L1_line] != INVALID)
    {
        /* tag match with _valid_ state, so now handle specific states */
        switch (L1_states[pid][L1_line])
        {
            case MODIFIED:
                /* hit in L1 for both loads and stores */
                mpcachestats[pid].L1_hits++;
                break;

            case EXCLUSIVE:
                /* hit in L1 for loads and stores */
                mpcachestats[pid].L1_hits++;

                /* but for stores, change L1 and L2 states to modified */
                if (is_a_store)
                {
                    mpcachestats[pid].exclusive_to_modified_changes++;
                    SET_L1_STATE (pid, L1_line, MODIFIED);
                    if (L2_states[pid][L2_line] == MODIFIED ||
                        L2_states[pid][L2_line] == EXCLUSIVE)
                        SET_L2_STATE (pid, L2_line, MODIFIED_ABOVE);
                }
                break;

            case SHARED:
                if (! is_a_store)
                    mpcachestats[pid].L1_hits++;    /* simple hit for reads */
                else
                {
                    /* a write requires an upgrade request */
                    mpcachestats[pid].L1_upgrades++;
                    InvalidateOtherCaches (pid,     /* who is invalidating */
                                           L2_tag, L2_line);

                    /* change L1 and L2 states to modified */
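
Note: the geometry parameters that the debug block prints (L1_line_shift, L1_line_mask, L1_tag_shift, L2_to_L1_line_size_ratio, ...) are computed before this excerpt begins, so their derivation is not shown. Below is a minimal, standalone sketch of how such values can be derived for the direct-mapped, power-of-two organization that the single-tag probe in MPCacheMemRef implies; the example sizes, the log2i helper, and the main driver are illustrative assumptions, not code from mpcache.c.

#include <stdio.h>

/* illustrative stand-ins for the globals used in mpcache.c (sizes assumed) */
static int L1_cache_line_size = 32,  L1_cache_size = 16 * 1024;
static int L2_cache_line_size = 64,  L2_cache_size = 256 * 1024;
static int L1_num_lines, L2_num_lines;
static int L1_line_shift, L2_line_shift;
static unsigned int L1_line_mask, L2_line_mask;
static int L1_tag_shift, L2_tag_shift;
static int L2_to_L1_line_size_ratio;

/* integer log2 of a power of two (hypothetical helper) */
static int log2i (unsigned int x)
{
    int n = 0;
    while (x > 1) { x >>= 1; n++; }
    return n;
}

static void DeriveGeometry (void)
{
    L1_num_lines = L1_cache_size / L1_cache_line_size;
    L2_num_lines = L2_cache_size / L2_cache_line_size;

    /* the byte offset within a line is discarded by the line shift */
    L1_line_shift = log2i (L1_cache_line_size);
    L2_line_shift = log2i (L2_cache_line_size);

    /* the line index occupies log2(num_lines) bits above the offset */
    L1_line_mask = L1_num_lines - 1;
    L2_line_mask = L2_num_lines - 1;

    /* the tag is everything above the offset and index bits */
    L1_tag_shift = L1_line_shift + log2i (L1_num_lines);
    L2_tag_shift = L2_line_shift + log2i (L2_num_lines);

    /* one L2 line spans this many consecutive L1 lines,
       which is the span walked by UpdateL1States and CheckL1States */
    L2_to_L1_line_size_ratio = L2_cache_line_size / L1_cache_line_size;
}

int main (void)
{
    unsigned long data_addr = 0x12345678UL;

    DeriveGeometry ();

    /* same address decomposition as MPCacheMemRef */
    printf ("L1 tag %lx line %lu, L2 tag %lx line %lu\n",
            data_addr >> L1_tag_shift,
            (data_addr >> L1_line_shift) & L1_line_mask,
            data_addr >> L2_tag_shift,
            (data_addr >> L2_line_shift) & L2_line_mask);
    return 0;
}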
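The excerpt also relies on declarations that live in the unshown part of the file: the per-processor tag and state arrays, the line-state constants, and the SET_L1_STATE / SET_L2_STATE macros. The following is one plausible, stripped-down reading that is consistent with how they are used here; only UNTOUCHED = 0 is confirmed by the calloc comment in CreateCaches, and the real macros may do more than a bare assignment (for example, drive the per-line display).

/* hypothetical declarations, not from mpcache.c */
#define MAX_PROCESSORS 16           /* assumed limit */

enum line_state {                   /* state names taken from the source */
    UNTOUCHED = 0,                  /* calloc'd arrays start in this state */
    INVALID,
    SHARED,
    EXCLUSIVE,
    MODIFIED,
    MODIFIED_ABOVE                  /* L2 copy whose newest data sits in the L1 above */
};

static unsigned int *L1_tags[MAX_PROCESSORS],   *L2_tags[MAX_PROCESSORS];
static unsigned int *L1_states[MAX_PROCESSORS], *L2_states[MAX_PROCESSORS];

/* reduced to the bare state update; the real macros may also refresh the display */
#define SET_L1_STATE(pid, line, state)  (L1_states[pid][line] = (state))
#define SET_L2_STATE(pid, line, state)  (L2_states[pid][line] = (state))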