
📄 behavextend.cc

📁 Gambit is a software package for game theory.
💻 C++ (.cc)
📖 Page 1 of 2
//
// $Source: /home/gambit/CVS/gambit/sources/nash/behavextend.cc,v $
// $Date: 2002/08/27 18:29:37 $
// $Revision: 1.2 $
//
// DESCRIPTION:
// Algorithms for extending behavior profiles to Nash equilibria
//
// This file is part of Gambit
// Copyright (c) 2002, The Gambit Project
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
//

#include "behavextend.h"
#include "poly/ineqsolv.h"

//
// Design choice: the auxiliary functions here are made static
// rather than members to help hide the gPoly-related details of
// the implementation.  Some of these functions might be more
// generally useful, in which case they should be made visible
// somehow.  Also, a namespace would be preferable to using
// static, but static is used for portability.  -- TLT, 5/2001.
//

//=========================================================================
//                      class algExtendsToNash
//=========================================================================

static void DeviationInfosets(gList<Infoset *> &answer,
                              const EFSupport &big_supp,
                              const EFPlayer *pl,
                              const Node *node,
                              const Action *act)
{
  Node *child = node->GetChild(act);

  if (child->IsNonterminal()) {
    Infoset *iset = child->GetInfoset();
    if (iset->GetPlayer() == pl) {
      int insert = 0;
      bool done = false;
      while (!done) {
        insert++;
        if (insert > answer.Length() ||
            iset->Precedes(answer[insert]->GetMember(1)))
          done = true;
      }
      answer.Insert(iset, insert);
    }

    gList<Action *> action_list = iset->ListOfActions();
    for (int j = 1; j <= action_list.Length(); j++) {
      DeviationInfosets(answer, big_supp, pl, child, action_list[j]);
    }
  }
}

static gList<Infoset *> DeviationInfosets(const EFSupport &big_supp,
                                          const EFPlayer *pl,
                                          const Infoset *iset,
                                          const Action *act)
{
  gList<Infoset *> answer;

  gList<const Node *> node_list = iset->ListOfMembers();
  for (int i = 1; i <= node_list.Length(); i++) {
    DeviationInfosets(answer, big_supp, pl, node_list[i], act);
  }

  return answer;
}

static gPolyList<gDouble>
ActionProbsSumToOneIneqs(const BehavSolution &p_solution,
                         const gSpace &BehavStratSpace,
                         const term_order &Lex,
                         const EFSupport &big_supp,
                         const gList<gList<int> > &var_index)
{
  gPolyList<gDouble> answer(&BehavStratSpace, &Lex);

  for (int pl = 1; pl <= p_solution.GetGame().NumPlayers(); pl++)
    for (int i = 1; i <= p_solution.GetGame().Players()[pl]->NumInfosets(); i++) {
      Infoset *current_infoset = p_solution.GetGame().Players()[pl]->Infosets()[i];
      if (!big_supp.HasActiveActionAt(current_infoset)) {
        int index_base = var_index[pl][i];
        gPoly<gDouble> factor(&BehavStratSpace, (gDouble)1.0, &Lex);
        for (int k = 1; k < current_infoset->NumActions(); k++)
          factor -= gPoly<gDouble>(&BehavStratSpace, index_base + k, 1, &Lex);
        answer += factor;
      }
    }
  return answer;
}

static gList<EFSupport> DeviationSupports(const EFSupport &big_supp,
                                          const gList<Infoset *> &isetlist,
                                          const EFPlayer * /*pl*/,
                                          const Infoset * /*iset*/,
                                          const Action * /*act*/)
{
  gList<EFSupport> answer;

  gArray<int> active_act_no(isetlist.Length());

  for (int k = 1; k <= active_act_no.Length(); k++)
    active_act_no[k] = 0;

  EFSupport new_supp(big_supp);

  for (int i = 1; i <= isetlist.Length(); i++) {
    for (int j = 1; j < isetlist[i]->NumActions(); j++)
      new_supp.RemoveAction(isetlist[i]->GetAction(j));
    new_supp.AddAction(isetlist[i]->GetAction(1));

    active_act_no[i] = 1;
    for (int k = 1; k < i; k++)
      if (isetlist[k]->Precedes(isetlist[i]->GetMember(1)))
        if (isetlist[k]->GetAction(1)->Precedes(isetlist[i]->GetMember(1))) {
          new_supp.RemoveAction(isetlist[i]->GetAction(1));
          active_act_no[i] = 0;
        }
  }
  answer += new_supp;

  int iset_cursor = isetlist.Length();
  while (iset_cursor > 0) {
    if (active_act_no[iset_cursor] == 0 ||
        active_act_no[iset_cursor] == isetlist[iset_cursor]->NumActions())
      iset_cursor--;
    else {
      new_supp.RemoveAction(isetlist[iset_cursor]->
                            GetAction(active_act_no[iset_cursor]));
      active_act_no[iset_cursor]++;
      new_supp.AddAction(isetlist[iset_cursor]->
                         GetAction(active_act_no[iset_cursor]));
      for (int k = iset_cursor + 1; k <= isetlist.Length(); k++) {
        if (active_act_no[k] > 0)
          new_supp.RemoveAction(isetlist[k]->GetAction(1));
        int h = 1;
        bool active = true;
        while (active && h < k) {
          if (isetlist[h]->Precedes(isetlist[k]->GetMember(1)))
            if (active_act_no[h] == 0 ||
                !isetlist[h]->GetAction(active_act_no[h])->
                      Precedes(isetlist[k]->GetMember(1))) {
              active = false;
              if (active_act_no[k] > 0) {
                new_supp.RemoveAction(isetlist[k]->
                                      GetAction(active_act_no[k]));
                active_act_no[k] = 0;
              }
            }
          h++;
        }
        if (active) {
          new_supp.AddAction(isetlist[k]->GetAction(1));
          active_act_no[k] = 1;
        }
      }
      answer += new_supp;
    }
  }
  return answer;
}

static bool NashNodeProbabilityPoly(const BehavSolution &p_solution,
                                    gPoly<gDouble> &node_prob,
                                    const gSpace &BehavStratSpace,
                                    const term_order &Lex,
                                    const EFSupport &dsupp,
                                    const gList<gList<int> > &var_index,
                                    const Node *tempnode,
                                    const EFPlayer * /*pl*/,
                                    const Infoset *iset,
                                    const Action *act)
{
  while (tempnode != p_solution.GetGame().RootNode()) {

    const Action *last_action = tempnode->GetAction();
    Infoset *last_infoset = last_action->BelongsTo();

    if (last_infoset->IsChanceInfoset())
      node_prob *= (gDouble) p_solution.GetGame().GetChanceProb(last_action);
    else
      if (dsupp.HasActiveActionAt(last_infoset)) {
        if (last_infoset == iset) {
          if (act != last_action) {
            return false;
          }
        }
        else
          if (dsupp.ActionIsActive((Action *)last_action)) {
            if (last_action->BelongsTo()->GetPlayer() !=
                    act->BelongsTo()->GetPlayer()     ||
                !act->Precedes(tempnode))
              node_prob *= (gDouble) p_solution.ActionProb(last_action);
          }
          else {
            return false;
          }
      }
      else {
        int initial_var_no =
          var_index[last_infoset->GetPlayer()->GetNumber()][last_infoset->GetNumber()];
        if (last_action->GetNumber() < last_infoset->NumActions()) {
          int varno = initial_var_no + last_action->GetNumber();
          node_prob *= gPoly<gDouble>(&BehavStratSpace, varno, 1, &Lex);
        }
        else {
          gPoly<gDouble> factor(&BehavStratSpace, (gDouble)1.0, &Lex);
          int k;
          for (k = 1; k < last_infoset->NumActions(); k++)
            factor -= gPoly<gDouble>(&BehavStratSpace,
                                     initial_var_no + k, 1, &Lex);
          node_prob *= factor;
        }
      }

    tempnode = tempnode->GetParent();
  }
  return true;
}

static gPolyList<gDouble>
NashExpectedPayoffDiffPolys(const BehavSolution &p_solution,
                            const gSpace &BehavStratSpace,
                            const term_order &Lex,
                            const EFSupport &little_supp,
                            const EFSupport &big_supp,
                            const gList<gList<int> > &var_index)
{
  gPolyList<gDouble> answer(&BehavStratSpace, &Lex);

  gList<Node *> terminal_nodes = p_solution.GetGame().TerminalNodes();

  const gArray<EFPlayer *> players = p_solution.GetGame().Players();
  for (int pl = 1; pl <= players.Length(); pl++) {
    const gArray<Infoset *> isets_for_pl = players[pl]->Infosets();
    for (int i = 1; i <= isets_for_pl.Length(); i++) {
      if (little_supp.MayReach(isets_for_pl[i])) {
        const gArray<Action *> acts_for_iset = isets_for_pl[i]->Actions();
        for (int j = 1; j <= acts_for_iset.Length(); j++)
          if (!little_supp.ActionIsActive(acts_for_iset[j])) {
            gList<Infoset *> isetlist = DeviationInfosets(big_supp,
                                                          players[pl],
                                                          isets_for_pl[i],
                                                          acts_for_iset[j]);
            gList<EFSupport> dsupps = DeviationSupports(big_supp,
                                                        isetlist,
                                                        players[pl],
                                                        isets_for_pl[i],
                                                        acts_for_iset[j]);
            for (int k = 1; k <= dsupps.Length(); k++) {

              // This will be the utility difference between the
              // payoff resulting from the profile and deviation to
              // the strategy for pl specified by dsupp[k]
              gPoly<gDouble> next_poly(&BehavStratSpace, &Lex);

              for (int n = 1; n <= terminal_nodes.Length(); n++) {
                gPoly<gDouble> node_prob(&BehavStratSpace, (gDouble)1.0, &Lex);
                if (NashNodeProbabilityPoly(p_solution, node_prob,
                                            BehavStratSpace,
                                            Lex,
                                            dsupps[k],
                                            var_index,
                                            terminal_nodes[n],
                                            players[pl],
                                            isets_for_pl[i],
                                            acts_for_iset[j])) {
                  node_prob *=
                    (gDouble) p_solution.GetGame().Payoff(terminal_nodes[n],
                                                          p_solution.GetGame().Players()[pl]);
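The listing breaks off mid-statement here; the remainder of NashExpectedPayoffDiffPolys and the algExtendsToNash members continue on page 2.

The bookkeeping shared by ActionProbsSumToOneIneqs and NashNodeProbabilityPoly is the variable-indexing convention: at an information set with m actions that the given support leaves unconstrained, only the first m-1 action probabilities become polynomial variables (indices index_base + 1 through index_base + m - 1), and the last probability is the remainder 1 minus their sum, which presumably must stay nonnegative for the inequality solver pulled in via poly/ineqsolv.h. The following is a minimal standalone sketch of that convention using plain affine expressions; LinExpr and LastActionProb are hypothetical, illustration-only names rather than Gambit API, and the real code builds gPoly<gDouble> objects over a gSpace instead.

#include <cmath>
#include <iostream>
#include <map>
#include <string>

// Toy affine expression: a constant plus coefficients on variables x1, x2, ...
struct LinExpr {
  double constant = 0.0;
  std::map<int, double> coeffs;  // variable index -> coefficient
};

// Probability of the *last* action at an information set with numActions
// actions whose free variables start right after index_base: it is not a
// variable of its own, but the remainder
//   1 - x_{index_base+1} - ... - x_{index_base+numActions-1}.
LinExpr LastActionProb(int index_base, int numActions) {
  LinExpr e;
  e.constant = 1.0;
  for (int k = 1; k < numActions; k++) {
    e.coeffs[index_base + k] = -1.0;
  }
  return e;
}

// Render the expression for inspection, e.g. "1.0 - 1.0*x5 - 1.0*x6".
std::string ToString(const LinExpr &e) {
  std::string s = std::to_string(e.constant);
  for (const auto &term : e.coeffs) {
    s += (term.second < 0 ? " - " : " + ");
    s += std::to_string(std::fabs(term.second)) + "*x" + std::to_string(term.first);
  }
  return s;
}

int main() {
  // Hypothetical information set with 3 actions and index_base = 4:
  // the constraint handed to the solver has the shape 1 - x5 - x6 >= 0.
  std::cout << ToString(LastActionProb(4, 3)) << " >= 0" << std::endl;
  return 0;
}

This prints a constraint of the same shape that ActionProbsSumToOneIneqs appends (as a gPoly<gDouble>) for each information set at which the support has no active action, and the same remainder expression is what NashNodeProbabilityPoly multiplies into node_prob when the action on the path to a terminal node is the last one at such an information set.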
