rf_script.c
      }
      seq_prob = script[numAction].probability;
      RF_CallocAndAdd(last_access, local_num_procs, sizeof(RF_LastAccessTable_t *),
                      (RF_LastAccessTable_t **), cleanupList);
    } else {
      pcheck += script[numAction].probability;
      script[numAction].trace_fd = -1;
      numAction++;
    }
  }
  if (pcheck != 100) {
    printf("Error: Psum '%d' != 100.\n", pcheck);
    exit(1);
  }

  /* if the scriptFile specified a sequential-probability line anywhere, install it as
   * the last entry in the script
   */
  if (last_access) {
    script[numAction].probability = seq_prob;
    script[numAction].action = 's';
    numAction++;
  }

  fclose(fp);
  return script;
}

RF_Script_t *SelectAction(script)
  RF_Script_t  *script;
{
  long  selP;
  long  sumP;
  long  pid;
  RF_LastAccessTable_t *la;

#ifndef SIMULATE
  rf_get_threadid(pid);
#else /* !SIMULATE */
  pid = rf_GetCurrentOwner();
#endif /* !SIMULATE */

  if (script[0].trace_fd < 0) {
    /* check to see if this access should be sequential */
    la = pid_hash_install(pid);
    if (la && la->size > 0 && (la->address+la->size < local_disk_size) &&
        ( (rand_int()%100) < script[numAction-1].probability )) {
      script = &(script[numAction-1]);
    } else {
      selP = rand_int()%100;
      sumP = script->probability;
      while (sumP <= selP) {
        script++;
        sumP += script->probability;
      }
    }
  } else {
    /* when using a trace, the call is a NO-OP, except for the first call, which
     * causes the process pid to be installed in the table.
     */
    long hashval = pid%proc_table_size;
    RF_ProcTable_t *p;

    RF_LOCK_MUTEX(trace_mutex);
    for (p = proc_table[hashval]; p && p->raidSim_pid != pid; p=p->next);
    if (!p) {
      RF_Calloc(p, 1, sizeof(RF_ProcTable_t), (RF_ProcTable_t *)); /* want it zeroed */
      p->next = proc_table[hashval];
      proc_table[hashval] = p;
      RF_ASSERT(trace_pid < local_num_procs);
      p->raidSim_pid = pid;
      p->trace_pid = trace_pid;
      p->file_location = trace_file_locations[trace_pid];
      p->num_traces = num_traces[trace_pid];
      p->traces_used = 0;
      p->traces_in_buffer = 0;
      p->tracebuf_size = RF_MIN(p->num_traces, MAX_TB_SIZE);
      p->completed = 0;
      /*p->tfd = open(traceFileName, O_RDONLY, 0);*/
      p->tfd = script[0].trace_fd;
      RF_ASSERT(p->tfd >= 0);
      trace_pid++;
      RF_Calloc(p->trace_buffer, p->tracebuf_size, sizeof(RF_ScriptTraceEntryList_t),
                (RF_ScriptTraceEntryList_t *));
      p->trace_pointer = p->trace_buffer;
    }
    RF_UNLOCK_MUTEX(trace_mutex);
  }
  return script;
}

/* Currently supported:  access size can be (1) constant
 *                                          (2) exponentially distributed
 *                       alignment  can be  (1) constant
 *                       seek distr can be  (1) uniform
 *                                          (2) local to a region of the array with prob p
 *                                              outside that region with prob (1-p)
 *                                              uniform within whichever region is selected
 *                                          (3) sequential
 * The code does not allow for accesses smaller than 1KB, and so we have to use
 * 1+Exp(mean-1) instead of just Exp(mean).  This is unfortunate because truncating
 * the exponential access size to integer kilobytes skews the distribution when the
 * mean is small.  For example, it causes the case where mean=1 to become
 * deterministic, since Exp(0)=0.
 *
 * note that the user can totally mess things up by asking for very large
 * accesses that are local to a small region at the end of the array.  It
 * then becomes impossible to find an access that meets his/her specs.  To
 * avoid this, we just detect this case and punt on it.  I should probably
 * punt the _first_ time this happens instead of only when it's persistent,
 * since if it ever happens it skews the distributions.
 *
 * now also supports traces
 */
int ConvertActionToAccess(raidPtr, action, diskSize, op, reqSize, seekLoc, delay, async_flag)
  RF_Raid_t         *raidPtr;
  RF_Script_t       *action;
  RF_SectorCount_t   diskSize;    /* IN  */
  char              *op;          /* OUT */
  long              *seekLoc;     /* in sectors */ /* OUT */
  int               *reqSize;     /* in bytes   */ /* OUT */
  double            *delay;       /* OUT */
  int               *async_flag;  /* OUT */
{
  long loc, offset, count=0, r;
  RF_LastAccessTable_t *la;
  int tid;

#ifndef SIMULATE
  rf_get_threadid(tid);
#else /* !SIMULATE */
  tid = rf_GetCurrentOwner();
#endif /* !SIMULATE */

  if (action->trace_fd < 0) {
    la = pid_hash_lookup(tid);
    if (action->action == 's') {
      /* sequential access */
      RF_ASSERT(la);
      *op = la->op;
      *reqSize = la->size * 1024;
      *seekLoc = (la->address * (1024/ (1<<raidPtr->logBytesPerSector)));
      la->address += la->size;
    } else {
      if (action->distribution == 'e')
        *reqSize = (1 + (long) Exp((double) (action->reqSize-1)))*1024;
      else
        *reqSize = action->reqSize * 1024;
      if (*reqSize < 1024)
        *reqSize = 1024;
      do {
        loc = ((rand_int() % 100) < action->local_prob) ? 1 : 0;
        r = (long) rand_int();
        offset = (loc) ? action->local_offset + r%action->local_size
                       : r % (diskSize - (*reqSize/1024));
        *seekLoc = ((offset/action->alignment) * action->alignment)*(1024/(1<<raidPtr->logBytesPerSector));
      } while ((count++ < 25) &&
               *seekLoc >= (diskSize* (1024/(1<<raidPtr->logBytesPerSector))) - (*reqSize/(1<<raidPtr->logBytesPerSector)));
      if (count >= 25) {
        printf("Error: Unable to find a legal access offset after 25 tries.  Exiting.\n");
        exit(1);
      }
      *op = action->action;
      if (la) {
        /* update last access in case next is sequential */
        la->op = *op;
        la->size = *reqSize/1024;
        la->address = *seekLoc/(1024/(1<<raidPtr->logBytesPerSector)) + *reqSize/1024;
      }
    }
    *delay = 0.0;
#ifdef SIMULATE
    *async_flag=RF_FALSE;
#endif /* SIMULATE */
  } else {
    /* using a trace instead of a script file */
    int tid, hashval;
    RF_ProcTable_t *p;
    RF_ScriptTraceEntry_t *tp;

#ifndef SIMULATE
    rf_get_threadid(tid);
#else /* !SIMULATE */
    tid = rf_GetCurrentOwner();
#endif /* !SIMULATE */
    hashval = tid%proc_table_size;
    RF_LOCK_MUTEX(trace_mutex);
    for (p = proc_table[hashval]; p && p->raidSim_pid != tid; p=p->next);
    RF_ASSERT(p);
    RF_UNLOCK_MUTEX(trace_mutex);

#ifndef SIMULATE
    /*
     * User
     */
    while (1) {
      if (p->traces_in_buffer == 0) {
        if (!read_traces(p->tfd, p))
          return(0);
      }
      RF_ASSERT(p->traces_in_buffer > 0);
      tp = &p->trace_pointer->entry;
      p->trace_pointer++;
      p->traces_in_buffer--;
      p->traces_used++;
      if (!rf_disableAsyncAccs && tp->async_flag) {
        DoAsyncIO(raidPtr, tp, diskSize);
      } else {
        *reqSize = tp->size;    /* bytes -> bytes */
        *seekLoc = tp->blkno;   /* sectors -> bytes */
        *delay = tp->delay;
        *op = tp->op;
        break;
      }
    }
#else /* !SIMULATE */
    /*
     * Simulator
     */
    if (p->traces_in_buffer == 0) {
      if (!read_traces(p->tfd, p))
        return(0);
    }
    RF_ASSERT(p->traces_in_buffer > 0);
    tp = &p->trace_pointer->entry;
    p->trace_pointer++;
    p->traces_in_buffer--;
    p->traces_used++;
    if (!rf_disableAsyncAccs && tp->async_flag) {
      *async_flag=RF_TRUE;
    } else {
      *async_flag=RF_FALSE;
    }
    RF_ASSERT(tp->size);
    *reqSize = tp->size;    /* bytes -> bytes */
    *seekLoc = tp->blkno;   /* sectors -> sectors */
    *delay = tp->delay;
    *op = tp->op;
#endif /* !SIMULATE */
  }
  return(1);
}

static RF_LastAccessTable_t *pid_hash(pid, mustBeThere)
  long  pid, mustBeThere;
{
  long hashval = pid % local_num_procs;
  RF_LastAccessTable_t *p;

  if (!last_access)
    return(NULL);

  RF_LOCK_MUTEX(trace_mutex);
  p = last_access[hashval];
  while (p && p->pid != pid)
    p=p->next;
  if (mustBeThere)
    RF_ASSERT(p);
  if (p) {
    RF_UNLOCK_MUTEX(trace_mutex);  /* drop the lock before the early return */
    return(p);
  }
  RF_Calloc(p, 1, sizeof(RF_LastAccessTable_t), (RF_LastAccessTable_t *)); /* need it zeroed */
  p->pid = pid;
  p->next = last_access[hashval];
  last_access[hashval] = p;
  RF_UNLOCK_MUTEX(trace_mutex);
  return(p);
}

static int read_traces(fd, p)
  int  fd;
  RF_ProcTable_t  *p;
{
  int numwanted, i;
  char *buf;
  int retval = 1, tid;

#ifndef SIMULATE
  rf_get_threadid(tid);
#else /* !SIMULATE */
  tid = rf_GetCurrentOwner();
#endif /* !SIMULATE */

  RF_ASSERT(tid == p->raidSim_pid);
  /* printf("[%d] read traces\n",tid); */
  RF_LOCK_MUTEX(trace_mutex);
  if (p->traces_used == p->num_traces) {
    /* mark this trace as completed-at-least-once and rewind it */
    if (!p->completed)
      completionCount++;   /* this is the first time we've completed this trace */
    p->completed = 1;
    /* printf("[%d] completed\n",tid); */
    if ( (rf_testcode_degr_mode_type != RF_IO_TYPE_READ) && completionCount == local_num_procs) {
      /* printf("[%d] last proc to complete\n",tid); */
      retval = 0;
      goto out;
    }
    p->file_location = trace_file_locations[p->trace_pid];
    p->traces_used = 0;
    p->traces_in_buffer = 0;
  }
  if (lseek(fd, (off_t) p->file_location, SEEK_SET) < 0) {
    fprintf(stderr,"Unable to seek in trace file\n");
    exit(1);
  }
  numwanted = RF_MIN(MAX_TB_SIZE, p->num_traces-p->traces_used);
  buf = (char *) p->trace_buffer;
#ifdef __alpha
  for (i=0; i<numwanted; i++) {
    if ( read( fd, buf, sizeof(RF_ScriptTraceEntry_t)) != sizeof(RF_ScriptTraceEntry_t) ) {
      fprintf(stderr,"Unable to read %d traces\n",numwanted);
      exit(1);
    }
    buf += sizeof(RF_ScriptTraceEntryList_t);
  }
#else /* __alpha */
  /*
   * The "format" is a bunch of structures with alpha padding
   * and byte-alignment.  Shyeah, how cool is _that_?
   */
  for (i=0; i<numwanted; i++) {
#define RF_STE_LEN 24
    RF_ScriptTraceEntryList_t *ent_list;
    RF_ScriptTraceEntry_t *ent;
    char lbuf[RF_STE_LEN];

    if (read( fd, lbuf, RF_STE_LEN) != RF_STE_LEN) {
      fprintf(stderr,"Unable to read %d traces\n",numwanted);
      exit(1);
    }
    {
      /*
       * Avoid bitchin' from overly-clever compilers.
       */
      void *tmp;
      tmp = (void *)buf;
      ent_list = (RF_ScriptTraceEntryList_t *)tmp; /* gcc doesn't like this either.  screw it */
    }
    ent = &ent_list->entry;
    bcopy(&lbuf[0],  &ent->blkno, 4);
    bcopy(&lbuf[4],  &ent->size, 4);
    bcopy(&lbuf[8],  &ent->delay, 8);
    bcopy(&lbuf[16], &ent->pid, 2);
    bcopy(&lbuf[18], &ent->op, 1);
    bcopy(&lbuf[19], &ent->async_flag, 1);
#if RF_IS_BIG_ENDIAN > 0
    RF_REV32(ent->blkno);
    RF_REV32(ent->size);
    RF_REV16(ent->pid);
#endif /* RF_IS_BIG_ENDIAN > 0 */
    if (rf_traceDebug > 1) {
      printf("blkno %d (%x)\n", ent->blkno, ent->blkno);
      printf("size %d (%x)\n", ent->size, ent->size);
      printf("delay %lf\n", ent->delay);
      printf("pid %hd\n", ent->pid);
      printf("op %02x\n", ent->op);
      printf("async_flag %02x\n", ent->async_flag);
      printf("\n");
    }
    RF_ASSERT(ent->size);
    buf += sizeof(RF_ScriptTraceEntryList_t);
  }
#endif /* __alpha */
  p->file_location += numwanted * sizeof(RF_ScriptTraceEntry_t);
  p->traces_in_buffer = numwanted;
  p->trace_pointer = p->trace_buffer;
out:
  RF_UNLOCK_MUTEX(trace_mutex);
  return(retval);
}
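/*
 * The block below is a minimal standalone sketch, not part of the original
 * RAIDframe sources: it restates the 24-byte on-disk trace record that the
 * non-alpha branch of read_traces() above decodes with bcopy().  The struct
 * and function names are hypothetical stand-ins for RF_ScriptTraceEntry_t;
 * the field offsets and sizes come directly from the bcopy() calls and
 * RF_STE_LEN, and bytes 20..23 are the alpha's alignment padding after the
 * one-byte async_flag.
 */
#include <string.h>   /* memcpy(); would normally sit at the top of the file */

struct sketch_trace_record {
  int    blkno;        /* bytes  0..3  : starting block number                */
  int    size;         /* bytes  4..7  : request size in bytes                */
  double delay;        /* bytes  8..15 : delay in seconds (cf. DoAsyncIO())   */
  short  pid;          /* bytes 16..17 : issuing process id                   */
  char   op;           /* byte  18     : I/O type                             */
  char   async_flag;   /* byte  19     : non-zero => asynchronous access      */
};

static void sketch_decode_record(const char rec[24], struct sketch_trace_record *e)
{
  memcpy(&e->blkno,      &rec[0],  4);
  memcpy(&e->size,       &rec[4],  4);
  memcpy(&e->delay,      &rec[8],  8);
  memcpy(&e->pid,        &rec[16], 2);
  memcpy(&e->op,         &rec[18], 1);
  memcpy(&e->async_flag, &rec[19], 1);
  /* On a big-endian host the multi-byte integer fields would still need the
   * RF_REV32()/RF_REV16() swaps that read_traces() applies. */
}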
static void AsyncIOCallbackFunc(arg)
  void *arg;
{
#ifdef KERNEL
  RF_PANIC();
#endif /* KERNEL */
  RF_Free(arg, -1);  /* -1 means don't cause an error if the size does not match */
}

/* function for doing an I/O that is async to the main line of the trace */
static void DoAsyncIO(raidPtr, tp, diskSize)
  RF_Raid_t              *raidPtr;
  RF_ScriptTraceEntry_t  *tp;
  long                    diskSize;
{
  char *buf;
  int delay_secs, delay_ms, numSect;
  long startSect;

  delay_secs = (int) tp->delay;
  delay_ms   = (int) ((tp->delay - (double) delay_secs) * 1000);
  RF_ASSERT(delay_secs >= 0 && delay_secs < 120 && delay_ms >= 0 && delay_ms < 1000); /* sanity check */

  /* no good way to handle trace delays for async I/Os without forking a thread.
   * plus, delays are so short for async I/Os that DELAY_THREAD won't be nearly
   * accurate enough even if we were to fork one.  Ergo, bag out on trace
   * delays for async I/Os
   */
  if (!rf_suppressTraceDelays && (delay_secs || delay_ms))
    RF_DELAY_THREAD(delay_secs, delay_ms);

  RF_Malloc(buf, tp->size, (char *));
  numSect = tp->size / (1<<raidPtr->logBytesPerSector);
  startSect = (tp->blkno / (1024 / (1<<raidPtr->logBytesPerSector))) % (diskSize - numSect);

  if (rf_DoAccess(raidPtr, (RF_IoType_t)tp->op, NULL, startSect, numSect, buf,
                  NULL, NULL, NULL, RF_DAG_NONBLOCKING_IO, NULL,
                  AsyncIOCallbackFunc, (void *) buf)) {
    RF_ERRORMSG("Async access failed!\n");
  }
}

/****************************************************************************************
 *
 * thread-safe code to generate random numbers
 *
 ***************************************************************************************/

RF_DECLARE_STATIC_RANDOM;

static void ConfigureRandNum()
{
  RF_INIT_STATIC_RANDOM(1);
}

static RF_int32 rand_int()
{
  RF_int32 r;
  r = RF_STATIC_RANDOM()&0x0fffffff;
  return(r);
}

#define HALF_RAND_MAX 0x07ffffff

/* returns an exponentially distributed random variable */
static double Exp(mean)
  double mean;
{
  return( - log( 1 - ((double) rand_int()/HALF_RAND_MAX/2)) * mean);
}
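/*
 * A minimal standalone sketch, not part of the original rf_script.c: it
 * illustrates the "1 + Exp(mean-1)" request sizing discussed in the comment
 * above ConvertActionToAccess().  The standard rand()/RAND_MAX generator
 * stands in for RF_STATIC_RANDOM()/rand_int(), so the values it produces
 * differ from the real generator; only the shape of the draw matters here.
 */
#include <math.h>
#include <stdlib.h>

/* Inverse-transform draw of an exponential variate with the given mean,
 * the same technique Exp() above uses. */
static double sketch_exp(double mean)
{
  double u = (double) rand() / ((double) RAND_MAX + 1.0);   /* u in [0,1) */
  return -log(1.0 - u) * mean;
}

/* Request size in whole KB with a 1KB floor.  Truncating to integer KB after
 * the shift skews the distribution for small means; mean_kb == 1 becomes
 * deterministic because Exp(0) == 0, exactly as the comment above warns. */
static long sketch_request_kb(double mean_kb)
{
  return 1 + (long) sketch_exp(mean_kb - 1.0);
}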