📄 boolfktenv.c
字号:
/*
 * (XCS-C 1.2)
 * ------------------------------------
 * Learning Classifier System based on accuracy
 *
 * by Martin V. Butz
 * Illinois Genetic Algorithms Laboratory (IlliGAL)
 * University of Illinois at Urbana/Champaign
 * butz@illigal.ge.uiuc.edu
 *
 * Last modified: 09-30-2003
 *
 * Simulates Boolean functions: constant, random, hidden parity, (layered)
 * multiplexer, biased multiplexer, concatenated multiplexer, (layered)
 * count ones problem.
 */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>

#include "boolFktEnv.h"
#include "xcsMacros.h"

/**
 * The current properties of this boolean function environment.
 * NOTE(review): struct booleanEnv is declared in boolFktEnv.h (not visible
 * here); all functions below read their parameters from this global.
 */
struct booleanEnv *boolFuncProps = 0;

/**
 * Returns that this is a single step environment (always 0).
 */
int isMultiStep()
{
  return 0;
}

/**
 * Returns the length of a problem instance (number of condition bits).
 */
int getConditionLength()
{
  return boolFuncProps->conditionLength;
}

/**
 * Returns the payment range of this problem (i.e. the maximum payoff).
 */
int getPaymentRange()
{
  return boolFuncProps->paymentRange;
}

/**
 * Returns the number of possible actions.
 * For the concatenated multiplexer there is one action bit per multiplexer
 * segment (a segment = multiplexerBits address bits + 2^multiplexerBits data
 * bits), hence 2^(number of segments) actions; every other function is a
 * binary classification with 2 actions.
 */
int getNumberOfActions()
{
  if(boolFuncProps->concatenatedMultiplexer==1)
    return (int)(1<<(int)((boolFuncProps->conditionLength)/(boolFuncProps->multiplexerBits + (1<<boolFuncProps->multiplexerBits))));
  return 2;
}

/**
 * Resets the state to a new problem instance.
 */
void resetState(char *state)
{
  int i, redo=1;
  int action=0, correct=0;

  /* generate a random state; possibly resample according to samplingBias */
  while(redo) {
    for(i=0; i < boolFuncProps->conditionLength; i++) {
      /* each position becomes '0' or '1' with equal probability */
      state[i]=(char)(((double) rand() / (RAND_MAX+1.0))*2)+'0';
    }
    if(boolFuncProps->samplingBias == 0.5) {
      /* unbiased sampling: accept the first random state */
      redo = 0;
    }else{
      /* classify with action 0 to learn the correct outcome for this state */
      doAction(state, action, &correct);
      /* accept a string with result '0' with probability samplingBias */
      redo=0;
      if(correct==1) {
        if(urand() < boolFuncProps->samplingBias)
          redo = 1;
      }else{
        if(urand() > boolFuncProps->samplingBias)
          redo = 1;
      }
    }
  }
}

/**
 * Executes one function classification: returns the reward and sets
 * 'correct' to 1 iff 'act' was the correct classification of 'state'.
 * Dispatches to the Boolean function selected in boolFuncProps; optionally
 * perturbs the returned reward according to addNoiseToAction.
 */
double doAction(char *state, int act,int *correct)
{
  double value;

  /* return the perceived reward of the selected Boolean function */
  if(boolFuncProps->constantFunction) {
    *correct = 1;
    value = (double)boolFuncProps->paymentRange;
  }else if(boolFuncProps->randomFunction)
    value = doRandomAction(state,act,correct);
  else if(boolFuncProps->parityFunction)
    value = doParityAction(state,act,correct);
  else if(boolFuncProps->multiplexerFunction)
    value = doMPAction(state,act,correct);
  else if(boolFuncProps->concatenatedMultiplexer)
    value = doConcatenatedMultiplexerAction(state, act, correct);
  else if(boolFuncProps->biasedMultiplexer)
    value = doBiasedMultiplexerAction(state,act,correct);
  else if(boolFuncProps->countOnesFunction)
    value = doCountOnesAction(state, act, correct);
  else{
    value = 0;
    printf("Some function should have been chosen in env.h or set in the input file!\n");
  }

  if(boolFuncProps->addNoiseToAction>0) {
    if(boolFuncProps->addNoiseToAction<4) {
      /* Add Gaussian noise to the outcome value:
       * mode 1 = always, mode 2 = only for action 0, mode 3 = only for action 1 */
      if( boolFuncProps->addNoiseToAction==1 || (boolFuncProps->addNoiseToAction==2 && act==0) || (boolFuncProps->addNoiseToAction==3 && act==1))
        value += (boolFuncProps->actionNoiseMu) + nrand() * (boolFuncProps->actionNoiseSigma);
    }else{
      /* Alternate outcome with a certain probability, possibly action
       * dependent: <=5 always, <=6 only action 0, <=7 only action 1 */
      if( boolFuncProps->addNoiseToAction<=5 || (boolFuncProps->addNoiseToAction<=6 && act==0) ||
          (boolFuncProps->addNoiseToAction>6 && boolFuncProps->addNoiseToAction<=7 && act==1)) {
        /* flip with probability = fractional part of addNoiseToAction */
        if(urand() < boolFuncProps->addNoiseToAction-(double)(int)(boolFuncProps->addNoiseToAction)) {
          if(value == boolFuncProps->paymentRange)
            value = 0;
          else
            value = boolFuncProps->paymentRange;
        }
      }
    }
  }
  return value;
}

/**
 * Random Function: the reward and the 'correct' flag are each drawn
 * uniformly at random.
 * NOTE(review): 'correct' and the returned reward come from two independent
 * rand() calls, so the flag need not agree with the reward — confirm this
 * is the intended semantics of the random function.
 */
double doRandomAction(char *state, int act, int *correct)
{
  *correct = ((double) rand() / (RAND_MAX+1.0)) *2;
  return (double)((int)(((double) rand() / (RAND_MAX+1.0)) *2) * (boolFuncProps->paymentRange));
}

/**
 * Count Ones Function: the outcome depends on the number of '1's among the
 * first countOnesSize bits of the state.
 * countOnesType 0: majority vote decides the single correct action.
 * countOnesType 1: the payoff level is proportional to the count.
 */
double doCountOnesAction(char *state, int act, int *correct)
{
  int i,j;

  for(i=0, j=0; i<boolFuncProps->countOnesSize; i++){
    if(state[i]=='1')/* count the number of ones */
      j++;
  }
  switch(boolFuncProps->countOnesType){
  case 0:
    /* the number of ones determines the outcome */
    if(j*2 >= boolFuncProps->countOnesSize){
      /* at least half ones: the correct action is one */
      if(act==1){
        *correct = 1;
        return boolFuncProps->paymentRange;
      }else{
        *correct = 0;
        return 0;
      }
    }else {
      /* fewer than half ones: the action should be zero!
       * NOTE(review): the original comment said "equal or less", but the '>='
       * above maps the 'equal' case to action one — confirm intent.
       */
      if(act==0){
        *correct = 1;
        return boolFuncProps->paymentRange;
      }else{
        *correct = 0;
        return 0;
      }
    }
    break;
  case 1:
    /* The number of ones determines the payoff level returned */
    if(j*2 > boolFuncProps->countOnesSize){
      /* more ones than zeros: action one is correct, payoff grows with j */
      if(act==1) {
        *correct = 1;
        return (boolFuncProps->paymentRange) * j / (boolFuncProps->countOnesSize);
      }else{
        *correct = 0;
        return (boolFuncProps->paymentRange) * ((boolFuncProps->countOnesSize)-j) / (boolFuncProps->countOnesSize);
      }
    }else if(j*2 == boolFuncProps->countOnesSize) {
      /* 50/50 here - with this it is not possible to distinguish between
       * actions - so either action is correct! */
      *correct = 1;
      return (boolFuncProps->paymentRange)/2;
    }else{
      /* more zeros than ones: action zero is correct */
      if(act==0){
        *correct = 1;
        return (boolFuncProps->paymentRange)*((boolFuncProps->countOnesSize)-j) / (boolFuncProps->countOnesSize);
      }else{
        *correct = 0;
        return (boolFuncProps->paymentRange) * j / (boolFuncProps->countOnesSize);
      }
    }
    break;
  default:
    printf("Incorrect Count Ones Type\n");
    break;
  }
  return 0;
}

/**
 * Hidden Parity Function.
*/double doParityAction(char *state, int act, int *correct){ int i, sum=0; for(i=0; i<boolFuncProps->paritySize; i++){ if(state[i]=='1') sum++; } if(sum%2==0) if(act==0){ *correct =1; return boolFuncProps->paymentRange; }else{ *correct =0; return 0.; } else if(act==0){ *correct = 0; return 0.; }else{ *correct = 1; return boolFuncProps->paymentRange; }}/** * Multiplexer Function */double doMPAction(char *state, int act, int *correct){ int place=boolFuncProps->multiplexerBits; int i; double reward; /* get the place of the by the first index bits referenced spot */ for(i=0,place=boolFuncProps->multiplexerBits; i<boolFuncProps->multiplexerBits; i++) { if(state[i]=='1') place += (int)(1<<((boolFuncProps->multiplexerBits)-1-i)); } /* determine the corresponding reward and set 'correct' */ if((act==1 && state[place]=='1') || (act==0 && state[place]=='0')) { /* the correct classification was chosen */ *correct=1; if(boolFuncProps->payoffLandscape) reward= 300.+(double)(((place-(boolFuncProps->multiplexerBits))*200)+100*(int)(state[place]-'0')); else reward = boolFuncProps->paymentRange; }else{ /* the incorrect classification was chosen */ *correct=0; if(boolFuncProps->payoffLandscape) reward= 0.+(double)(((place - (boolFuncProps->multiplexerBits))*200)+100*(int)(state[place]-'0')); else reward = 0; } return reward;}/** * This is the Biased Multiplexer Function! */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -