// behavextend.cc -- algorithms for testing whether a behavior profile
// extends to a Nash (or agent-normal-form Nash) equilibrium.
// NOTE(review): this file was recovered from a code-viewer page with all
// formatting collapsed onto a few lines.  Code tokens below are unchanged;
// only whitespace and comments have been added.
//
// The first statements are the tail of a function whose head lies above
// this excerpt (it accumulates the expected-payoff-difference polynomials
// consumed by ExtendsToNashIneqs below -- presumably
// NashExpectedPayoffDiffPolys; confirm against the full file).
          next_poly += node_prob;
        }
      }
      answer += -next_poly + (gDouble) p_solution.Payoff(pl);
    } } } } }
  return answer;
}

// Assemble the system of polynomials whose solvability (as inequalities,
// tested later by IneqSolv) is equivalent to p_solution extending to a
// Nash equilibrium:
//   (1) the probability constraints at each undetermined infoset
//       (ActionProbsSumToOneIneqs -- defined elsewhere in this file), and
//   (2) the no-profitable-deviation conditions
//       (NashExpectedPayoffDiffPolys -- likewise defined elsewhere).
static gPolyList<gDouble> ExtendsToNashIneqs(const BehavSolution &p_solution,
                                             const gSpace &BehavStratSpace,
                                             const term_order &Lex,
                                             const EFSupport &little_supp,
                                             const EFSupport &big_supp,
                                             const gList<gList<int> > &var_index)
{
  gPolyList<gDouble> answer(&BehavStratSpace, &Lex);
  answer += ActionProbsSumToOneIneqs(p_solution, BehavStratSpace, Lex,
                                     big_supp, var_index);
  answer += NashExpectedPayoffDiffPolys(p_solution, BehavStratSpace, Lex,
                                        little_supp, big_supp, var_index);
  return answer;
}

// Decide whether p_solution can be extended to a Nash equilibrium of the
// full game.  The question is reduced to feasibility of a polynomial
// inequality system over the unit cube, tested numerically by IneqSolv.
bool algExtendsToNash::ExtendsToNash(const BehavSolution &p_solution,
                                     const EFSupport &little_supp,
                                     const EFSupport &big_supp,
                                     gStatus &m_status)
{
  // This asks whether there is a Nash extension of the BehavSolution to
  // all information sets at which the behavioral probabilities are not
  // specified.  The assumption is that the support has active actions
  // at infosets at which the behavioral probabilities are defined, and
  // no others.  Also, the BehavSol is assumed to be already a Nash
  // equilibrium for the truncated game obtained by eliminating stuff
  // outside little_supp.

  // First we compute the number of variables, and indexing information.
  // Each infoset with no action active in big_supp contributes
  // (NumActions - 1) unknowns -- the last action's probability is
  // represented as one minus the sum of the others (see
  // ANFNodeProbabilityPoly below).  var_index[pl][i] records the 0-based
  // offset of infoset i's unknowns within the full variable vector.
  int num_vars(0);
  gList<gList<int> > var_index;
  int pl;
  for (pl = 1; pl <= p_solution.GetGame().NumPlayers(); pl++) {
    gList<int> list_for_pl;
    for (int i = 1; i <= p_solution.GetGame().Players()[pl]->NumInfosets(); i++) {
      list_for_pl += num_vars;
      if ( !big_supp.HasActiveActionAt(p_solution.GetGame().Players()[pl]->Infosets()[i]) ) {
        num_vars += p_solution.GetGame().Players()[pl]->Infosets()[i]->NumActions() - 1;
      }
    }
    var_index += list_for_pl;
  }

  // We establish the space of polynomials and a (lex) term order over it.
  gSpace BehavStratSpace(num_vars);
  ORD_PTR ptr = &lex;
  term_order Lex(&BehavStratSpace, ptr);
  num_vars = BehavStratSpace.Dmnsn();

  gPolyList<gDouble> inequalities = ExtendsToNashIneqs(p_solution,
                                                       BehavStratSpace, Lex,
                                                       little_supp, big_supp,
                                                       var_index);

  // set up the rectangle of search: each unknown probability lies in [0,1]
  gVector<gDouble> bottoms(num_vars), tops(num_vars);
  bottoms = (gDouble)0;
  tops = (gDouble)1;
  gRectangle<gDouble> Cube(bottoms, tops);

  // Set up the test and do it
  IneqSolv<gDouble> extension_tester(inequalities,m_status);
  gVector<gDouble> sample(num_vars);
  bool answer = extension_tester.ASolutionExists(Cube,sample);

  //  assert (answer == m_profile->ExtendsToNash(little_supp, big_supp, m_status));

  return answer;
}

//=========================================================================
// class algExtendsToAgentNash
//=========================================================================

// Build, in node_prob, the polynomial (in the undetermined behavior
// variables) for the probability that play reaches *tempnode*, given that
// player pl deviates to action j at his i'th infoset.  Walking from the
// node up to the root:
//   - chance moves multiply in their fixed probabilities;
//   - at infosets with an action active in big_supp, the profile's own
//     action probabilities are used -- except at infoset (pl,i), where
//     the path must take action j (otherwise the node is unreachable
//     under the deviation and we return false); a path through an
//     inactive action likewise returns false;
//   - at infosets with no active action, the action probability is an
//     unknown: actions 1..NumActions-1 map to variables starting at
//     var_index[player][infoset], and the last action is represented as
//     one minus the sum of the other actions' variables.
// Returns true iff the node's reach probability is a (possibly) nonzero
// polynomial under the deviation.
static bool ANFNodeProbabilityPoly(const BehavSolution &p_solution,
                                   gPoly<gDouble> & node_prob,
                                   const gSpace &BehavStratSpace,
                                   const term_order &Lex,
                                   const EFSupport &big_supp,
                                   const gList<gList<int> > &var_index,
                                   const Node *tempnode,
                                   const int &pl,
                                   const int &i,
                                   const int &j)
{
  while (tempnode != p_solution.GetGame().RootNode()) {
    const Action *last_action = tempnode->GetAction();
    Infoset *last_infoset = last_action->BelongsTo();

    if (last_infoset->IsChanceInfoset())
      node_prob *= (gDouble)p_solution.GetGame().GetChanceProb(last_action);
    else if (big_supp.HasActiveActionAt(last_infoset)) {
      if (last_infoset == p_solution.GetGame().Players()[pl]->Infosets()[i]) {
        // The deviator's own infoset: only paths through action j count.
        if (j != last_action->GetNumber())
          return false;
      }
      else if (big_supp.ActionIsActive((Action *)last_action))
        node_prob *= (gDouble) p_solution.ActionProb(last_action);
      else
        return false;
    }
    else {
      // Undetermined infoset: probabilities are polynomial unknowns.
      int initial_var_no = var_index[last_infoset->GetPlayer()->GetNumber()][last_infoset->GetNumber()];
      if (last_action->GetNumber() < last_infoset->NumActions()){
        int varno = initial_var_no + last_action->GetNumber();
        node_prob *= gPoly<gDouble>(&BehavStratSpace, varno, 1, &Lex);
      }
      else {
        // Last action: 1 - (sum of the other actions' variables).
        gPoly<gDouble> factor(&BehavStratSpace, (gDouble)1.0, &Lex);
        int k;
        for (k = 1; k < last_infoset->NumActions(); k++)
          factor -= gPoly<gDouble>(&BehavStratSpace, initial_var_no + k, 1, &Lex);
        node_prob *= factor;
      }
    }
    tempnode = tempnode->GetParent();
  }
  return true;
}

// For each action j that little_supp leaves inactive (at an infoset the
// support may reach), append the polynomial
//     payoff(profile) - expected payoff of deviating to j,
// the expectation being a sum over terminal nodes of reach-probability
// polynomials times payoffs.  These express the agent-normal-form (ANF)
// no-profitable-deviation conditions; IneqSolv later treats the list as
// inequality constraints (presumably >= 0 -- confirm in IneqSolv).
static gPolyList<gDouble> ANFExpectedPayoffDiffPolys(const BehavSolution &p_solution,
                                                     const gSpace &BehavStratSpace,
                                                     const term_order &Lex,
                                                     const EFSupport &little_supp,
                                                     const EFSupport &big_supp,
                                                     const gList<gList<int> > &var_index)
{
  gPolyList<gDouble> answer(&BehavStratSpace, &Lex);

  gList<Node *> terminal_nodes = p_solution.GetGame().TerminalNodes();

  for (int pl = 1; pl <= p_solution.GetGame().NumPlayers(); pl++)
    for (int i = 1; i <= p_solution.GetGame().Players()[pl]->NumInfosets(); i++) {
      Infoset *infoset = p_solution.GetGame().Players()[pl]->Infosets()[i];
      if (little_supp.MayReach(infoset))
        for (int j = 1; j <= infoset->NumActions(); j++)
          if (!little_supp.ActionIsActive(pl,i,j)) {
            // This will be the utility difference between the
            // payoff resulting from the profile and deviation to
            // action j
            gPoly<gDouble> next_poly(&BehavStratSpace, &Lex);

            for (int n = 1; n <= terminal_nodes.Length(); n++) {
              gPoly<gDouble> node_prob(&BehavStratSpace, (gDouble)1.0, &Lex);
              if (ANFNodeProbabilityPoly(p_solution, node_prob,
                                         BehavStratSpace, Lex,
                                         big_supp, var_index,
                                         terminal_nodes[n], pl,i,j)) {
                node_prob *= (gDouble)p_solution.GetGame().Payoff(terminal_nodes[n],
                                                                  p_solution.GetGame().Players()[pl]);
                next_poly += node_prob;
              }
            }
            answer += -next_poly + (gDouble) p_solution.Payoff(pl);
          }
    }
  return answer;
}

// Assemble the inequality system for extension to an ANF Nash equilibrium:
// probability constraints plus the ANF deviation conditions above.
// Parallels ExtendsToNashIneqs, differing only in the payoff polynomials.
static gPolyList<gDouble> ExtendsToANFNashIneqs(const BehavSolution &p_solution,
                                                const gSpace &BehavStratSpace,
                                                const term_order &Lex,
                                                const EFSupport &little_supp,
                                                const EFSupport &big_supp,
                                                const gList<gList<int> > &var_index)
{
  gPolyList<gDouble> answer(&BehavStratSpace, &Lex);
  answer += ActionProbsSumToOneIneqs(p_solution, BehavStratSpace, Lex,
                                     big_supp, var_index);
  answer += ANFExpectedPayoffDiffPolys(p_solution, BehavStratSpace, Lex,
                                       little_supp, big_supp, var_index);
  return answer;
}

// Decide whether p_solution extends to an agent-normal-form Nash
// equilibrium.  Same reduction to polynomial-inequality feasibility as
// algExtendsToNash::ExtendsToNash, but using the ANF deviation system.
bool algExtendsToAgentNash::ExtendsToAgentNash(const BehavSolution &p_solution,
                                               const EFSupport &little_supp,
                                               const EFSupport &big_supp,
                                               gStatus &p_status)
{
  // This asks whether there is an ANF Nash extension of the BehavSolution to
  // all information sets at which the behavioral probabilities are not
  // specified.  The assumption is that the support has active actions
  // at infosets at which the behavioral probabilities are defined, and
  // no others.

  // First we compute the number of variables, and indexing information.
  // (Identical setup to ExtendsToNash above; see the comments there.)
  int num_vars(0);
  gList<gList<int> > var_index;
  int pl;
  for (pl = 1; pl <= p_solution.GetGame().NumPlayers(); pl++) {
    gList<int> list_for_pl;
    for (int i = 1; i <= p_solution.GetGame().Players()[pl]->NumInfosets(); i++) {
      list_for_pl += num_vars;
      if ( !big_supp.HasActiveActionAt(p_solution.GetGame().Players()[pl]->Infosets()[i]) ) {
        num_vars += p_solution.GetGame().Players()[pl]->Infosets()[i]->NumActions() - 1;
      }
    }
    var_index += list_for_pl;
  }

  // We establish the space
  gSpace BehavStratSpace(num_vars);
  ORD_PTR ptr = &lex;
  term_order Lex(&BehavStratSpace, ptr);
  num_vars = BehavStratSpace.Dmnsn();

  gPolyList<gDouble> inequalities = ExtendsToANFNashIneqs(p_solution,
                                                          BehavStratSpace, Lex,
                                                          little_supp, big_supp,
                                                          var_index);

  // set up the rectangle of search
  gVector<gDouble> bottoms(num_vars), tops(num_vars);
  bottoms = (gDouble)0;
  tops = (gDouble)1;
  gRectangle<gDouble> Cube(bottoms, tops);

  // Set up the test and do it
  IneqSolv<gDouble> extension_tester(inequalities, p_status);
  gVector<gDouble> sample(num_vars);

  // Temporarily, we check the old set up vs. the new
  bool ANFanswer = extension_tester.ASolutionExists(Cube,sample);
  //  assert (ANFanswer == m_profile->ExtendsToANFNash(little_supp,
  //                                                   big_supp,
  //                                                   m_status));

  /*
  bool NASHanswer = m_profile->ExtendsToNash(Support(),Support(),m_status);

  //DEBUG
  if (ANFanswer && !NASHanswer)
    gout <<
      "The following should be extendable to an ANF Nash, but not to a Nash:\n"
         << *m_profile << "\n\n";
  if (NASHanswer && !ANFanswer)
    gout <<
      "ERROR: said to be extendable to a Nash, but not to an ANF Nash:\n"
         << *m_profile << "\n\n";
  */
  return ANFanswer;
}
// (Removed: code-viewer UI chrome -- keyboard-shortcut help text that was
// accidentally captured along with the source and is not part of this file.)