
evolutionary_game_gtft.m

GTFT evolutionary game on a scale-free network
%function [Ave_P_list,std_P_list,Ave_Payoff_list]=evolutionary_game_GTFT(Nodes,b,step_num,times,fname)
%Evolutionary game on a network based on the prisoner's dilemma
%game payoff:[(R,R),(S,T);(T,S),(P,P)]:T=b,R=1,P=S=0
%See Ref.[Scale-free networks provide a unifying framework for the 
%emergence of cooperation]
%
%Input: Nodes -- N*N adjacency matrix
%       b -- the advantage of defectors in the PD, b in (1,2]; a larger
%            b means a stronger temptation to defect
%       step_num -- number of generations
%Algorithm complexity: O(Round*M)
%Written by rock on 06/01/04
%Revised by rock on 06/07/03: changed the interface and added Vertex_payoff_list
%Fixed a bug in temp_Vertex_status_list=Vertex_status_list on 06/09/06
%Modified on 08/08/14 for PGG
%Modified on 08/12/28 for GTFT
%Research question:
%Given a PD payoff (R,S,T,P), individual i plays a reactive strategy Prob=(p,q):
%the probability of cooperating after the opponent's C or D, respectively.
%As the population evolves, which Prob is optimal? (Expected answer: GTFT)
%AllC: Prob=(1,1); AllD: Prob=(0,0); TFT: Prob=(1,0); GTFT: Prob=(1,1/3)
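%
%Illustrative sketch: the named strategies above written as (p,q) rows of a
%matrix (the variable name Named_P is hypothetical, used only for illustration):
%  Named_P=[1 1;     %AllC
%           0 0;     %AllD
%           1 0;     %TFT
%           1 1/3];  %GTFT: always reciprocate C, forgive a D with prob 1/3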
%
%1. Initialization: each individual picks its Prob=(p,q) uniformly at random from (0,1)
%   Game: pairwise payoffs follow Nowak, <<Evolutionary Dynamics>>, Eq. (5.17)
%
%2. Each individual plays all of its neighbors; payoffs accumulate as Payoff_i=sum_j(payoff_ij)
%3. Strategy update (Fermi rule): i adopts Prob_j with probability
%   w=1/(1+exp((Payoff_i-Payoff_j)/K))
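%
%Illustrative sketch of one Fermi imitation step with made-up payoff values
%(variable names here are hypothetical; K=0.1 is the noise level set below):
%  Payoff_i=2.1; Payoff_j=3.4; K=0.1;
%  w=1/(1+exp((Payoff_i-Payoff_j)/K));   %prob. that i copies j's strategy
%  if rand<w, Prob_i=Prob_j; end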
%
%Pseudocode:
%
%Input: game parameters (R,S,T,P), network structure, noise K, Gen_num
%Initialization: individual i randomly picks P1_i,P2_i,P3_i,P4_i
%
%for i=2:Gen_num
%	(1) Game: compute Payoff_i
%	(2) Strategy update: P1_list,P2_list,P3_list,P4_list
%	(3) Compute Ave_P_list, delta_P_list and Total_Payoff_list
%end
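%
%Worked example (illustrative only) of the Eq.(5.17) stationary probabilities
%used in the game loop below, assuming Axelrod payoffs R=3,T=5,P=1,S=0 and a
%GTFT player (p1,q1)=(1,1/3) against a nearly-AllD player (p2,q2)=(0.01,0.01):
%  p1=1; q1=1/3; p2=0.01; q2=0.01;
%  r1=p1-q1; r2=p2-q2;
%  s1=(q2*r1+q1)/(1-r1*r2);   %long-run prob. that player 1 cooperates
%  s2=(q1*r2+q2)/(1-r1*r2);   %long-run prob. that player 2 cooperates
%  payoff1=3*s1*s2+0*s1*(1-s2)+5*(1-s1)*s2+1*(1-s1)*(1-s2);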

clear

TEST=1;
if TEST==1
    fname='Grid100'    

    %Nodes=Nearest_Neighbor_Growing2(1000,5);
    %Grid_Nodes=Grid_4_with_period_Growing(100,100);
    %Write_Matrix(Grid_Nodes,'Grid10000')
    %HoWS_Nodes=get_HoWS_Network2(Nodes,1);
    %Write_Matrix(HoWS_Nodes,'HoER10000')

    Nodes=spconvert(load([fname,'.adj']));

    b=2.5; %only used to label the output file in save(); the Axelrod payoffs (TEST_Game=5) do not use b
    step_num=10; 
    TEST_Game=5;    
    times=1;
end


TEST_Game=5;
    
N=length(Nodes);
Degree=zeros(N,1);
Degree=full(sum(Nodes(1:end,:))');%degree of each node (column sums of the adjacency matrix)
ave_k=nnz(Nodes)/N;%average degree

[Row,Col,Weight]=find(Nodes);%find() returns entries column by column, so Row lists the neighbors of node 1, then node 2, ...

%Similarity_Matrix=get_vertex_similarity(Nodes,1);
%payoff

if TEST_Game==1 %SG
    b=(b+1)/(2*b);
    T=b;
    R=b-0.5;
    P=0;
    S=b-1;
    D=T-P;
elseif TEST_Game==2 %PD
    T=b;
    R=1;
    P=0;
    S=0;
    D=T-S;
elseif TEST_Game==3 %Wang's SG
    T=1+b;
    R=1;
    P=0;
    S=1-b;
    D=T-S;
elseif TEST_Game==4 %PGG
    R=b;
    S=1.0;
elseif TEST_Game==5 %Axelrod's payoff matrix
    R=3;
    T=5;
    P=1;
    S=0;
end
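
%Illustrative note: the termwise payoff expression used in the game loop below
%is the bilinear form around the 2x2 payoff matrix; a minimal sketch, assuming
%row-player cooperation probability s1 and column-player probability s2:
%  M=[R S; T P];                     %rows: my move (C/D), columns: opponent's
%  payoff=[s1 1-s1]*M*[s2; 1-s2];    %= R*s1*s2+S*s1*(1-s2)+T*(1-s1)*s2+P*(1-s1)*(1-s2)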

K=0.1;%noise

%P_list=rand(N,2);%reactive strategy [p,q]; the 4-entry form [p1,p2,p3,p4] reduces to 2 since p1=p3, p2=p4
P_list=ceil(rand(N,2)*100)/100;%random strategies discretized to a 0.01 grid (values in {0.01,...,1.00})
%P_list(end-5:end,1)=1;
%P_list(end-5:end,2)=0.33;
Init_P_list=P_list;
Ave_P_list=zeros(step_num,2);
std_P_list=zeros(step_num,2);
Ave_Payoff_list=zeros(step_num,1);

Vertex_payoff_list=zeros(N,1);
temp_P_list=zeros(N,2);%snapshot of the pre-update strategies (added 06/09/06)

for i=1:step_num
    i %display the current generation
    Vertex_payoff_list(1:end)=0;

    base_neighbor=1;
    for j=1:N%play GTFT game
        Neighbor_j_num=Degree(j);
        Neighbor_j=zeros(Neighbor_j_num,1);
        Neighbor_j(1:Neighbor_j_num)=Row(base_neighbor:base_neighbor+Neighbor_j_num-1);    
        base_neighbor=base_neighbor+Neighbor_j_num;
        
       
        for h=1:Neighbor_j_num            
            temp_p1=P_list(j,1);
            temp_q1=P_list(j,2);
            temp_p2=P_list(Neighbor_j(h),1);
            temp_q2=P_list(Neighbor_j(h),2);
            temp_r1=temp_p1-temp_q1;
            temp_r2=temp_p2-temp_q2;
            temp_s1=(temp_q2*temp_r1+temp_q1)/(1-temp_r1*temp_r2);
            temp_s2=(temp_q1*temp_r2+temp_q2)/(1-temp_r1*temp_r2);
            Vertex_payoff_list(j)=Vertex_payoff_list(j)+R*temp_s1*temp_s2+S*temp_s1*(1-temp_s2)+T*(1-temp_s1)*temp_s2+P*(1-temp_s1)*(1-temp_s2);            
        end
    end
    
   
    %update strategies according to payoff_list
    base_neighbor=1;
    temp_P_list=P_list;%add on 06.09.06
    for j=1:N
        Neighbor_j_num=Degree(j);
        Neighbor_j=zeros(Neighbor_j_num,1);
        Neighbor_j(1:Neighbor_j_num)=Row(base_neighbor:base_neighbor+Neighbor_j_num-1);
        base_neighbor=base_neighbor+Neighbor_j_num;
        
        select_ID=ceil(rand(1)*Neighbor_j_num);
        y=Neighbor_j(select_ID);
        
        %w=1/(1+exp((Vertex_payoff_list(j)-Vertex_payoff_list(y))/K));
        w=1/(1+exp((Vertex_payoff_list(j)/Degree(j)-Vertex_payoff_list(y)/Degree(y))/K));%Fermi rule on degree-normalized payoffs
        if w>rand(1)
            P_list(j,:)=temp_P_list(y,:);
        end        
    end
   
    Ave_P_list(i,:)=sum(P_list)/N;
    std_P_list(i,:)=std(P_list);
    Ave_Payoff_list(i)=sum(Vertex_payoff_list)/N/ave_k;%average payoff per node, normalized by the mean degree
    
    Ave_P_list(i,:)
    std_P_list(i,:)
    Ave_Payoff_list(i)
end

save([fname,'b',num2str(b*100),'_replicator',num2str(times)])

if TEST==1
    Color_Line=['r-';'b-';'g-';'m-'];
    figure
    for i=1:2
        plot([1:step_num],Ave_P_list(1:end,i),Color_Line(i,:));
        hold on
        xlabel('gen num')
        ylabel('p')
    end
    
    figure
    for i=1:2
        plot([1:step_num],std_P_list(1:end,i),Color_Line(i,:));
        hold on
        xlabel('gen num')
        ylabel('std p')
    end
    
    figure
    plot([1:step_num],Ave_Payoff_list(1:end),Color_Line(1,:));
    xlabel('gen num')
    ylabel('ave payoff')

    figure%initial (blue dots) vs final (red stars) strategies in the (p,q) plane
    plot(Init_P_list(:,1),Init_P_list(:,2),'b.')
    hold on
    plot(P_list(:,1),P_list(:,2),'r*')
    
    figure%final strategies only
    plot(P_list(:,1),P_list(:,2),'r*')
end


return
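
%Optional refactor sketch (hypothetical): the pairwise Eq.(5.17) payoff used in
%the game loop could be wrapped in a local function like the one below; local
%functions in script files require MATLAB R2016b or newer.
function payoff=pair_payoff(p1,q1,p2,q2,R,S,T,P)
    %stationary cooperation probabilities of two reactive strategies
    r1=p1-q1; r2=p2-q2;
    s1=(q2*r1+q1)/(1-r1*r2);
    s2=(q1*r2+q2)/(1-r1*r2);
    %expected per-round payoff of player 1
    payoff=R*s1*s2+S*s1*(1-s2)+T*(1-s1)*s2+P*(1-s1)*(1-s2);
end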
