
Commit c05e846

Update games.py to third edition.
1 parent c3a6fbc commit c05e846


1 file changed: +48, -59 lines changed


games.py

Lines changed: 48 additions & 59 deletions
@@ -1,8 +1,5 @@
-"""Games, or Adversarial Search. (Chapter 6)
-
+"""Games, or Adversarial Search. (Chapter 5)
 """
-# (Written for the second edition of AIMA; expect some discrepanciecs
-# from the third edition until this gets reviewed.)
 
 from utils import *
 import random
@@ -12,46 +9,44 @@
 
 def minimax_decision(state, game):
     """Given a state in a game, calculate the best move by searching
-    forward all the way to the terminal states. [Fig. 6.4]"""
+    forward all the way to the terminal states. [Fig. 5.3]"""
 
     player = game.to_move(state)
 
     def max_value(state):
         if game.terminal_test(state):
             return game.utility(state, player)
         v = -infinity
-        for (a, s) in game.successors(state):
-            v = max(v, min_value(s))
+        for a in game.actions(state):
+            v = max(v, min_value(game.result(state, a)))
         return v
 
     def min_value(state):
         if game.terminal_test(state):
             return game.utility(state, player)
         v = infinity
-        for (a, s) in game.successors(state):
-            v = min(v, max_value(s))
+        for a in game.actions(state):
+            v = min(v, max_value(game.result(state, a)))
         return v
 
-    # Body of minimax_decision starts here:
-    action, state = argmax(game.successors(state),
-                           lambda ((a, s)): min_value(s))
-    return action
-
+    # Body of minimax_decision:
+    return argmax(game.actions(state),
+                  lambda a: min_value(game.result(state, a)))
 
 #______________________________________________________________________________
 
 def alphabeta_full_search(state, game):
     """Search game to determine best action; use alpha-beta pruning.
-    As in [Fig. 6.7], this version searches all the way to the leaves."""
+    As in [Fig. 5.7], this version searches all the way to the leaves."""
 
     player = game.to_move(state)
 
     def max_value(state, alpha, beta):
         if game.terminal_test(state):
             return game.utility(state, player)
         v = -infinity
-        for (a, s) in game.successors(state):
-            v = max(v, min_value(s, alpha, beta))
+        for a in game.actions(state):
+            v = max(v, min_value(game.result(state, a), alpha, beta))
             if v >= beta:
                 return v
             alpha = max(alpha, v)
@@ -61,17 +56,17 @@ def min_value(state, alpha, beta):
         if game.terminal_test(state):
             return game.utility(state, player)
         v = infinity
-        for (a, s) in game.successors(state):
-            v = min(v, max_value(s, alpha, beta))
+        for a in game.actions(state):
+            v = min(v, max_value(game.result(state, a), alpha, beta))
             if v <= alpha:
                 return v
             beta = min(beta, v)
         return v
 
-    # Body of alphabeta_search starts here:
-    action, state = argmax(game.successors(state),
-                           lambda ((a, s)): min_value(s, -infinity, infinity))
-    return action
+    # Body of alphabeta_search:
+    return argmax(game.actions(state),
+                  lambda a: min_value(game.result(state, a),
+                                      -infinity, infinity))
 
 def alphabeta_search(state, game, d=4, cutoff_test=None, eval_fn=None):
     """Search game to determine best action; use alpha-beta pruning.
@@ -83,8 +78,9 @@ def max_value(state, alpha, beta, depth):
         if cutoff_test(state, depth):
             return eval_fn(state)
         v = -infinity
-        for (a, s) in game.successors(state):
-            v = max(v, min_value(s, alpha, beta, depth+1))
+        for a in game.actions(state):
+            v = max(v, min_value(game.result(state, a),
+                                 alpha, beta, depth+1))
             if v >= beta:
                 return v
             alpha = max(alpha, v)
@@ -94,8 +90,9 @@ def min_value(state, alpha, beta, depth):
         if cutoff_test(state, depth):
             return eval_fn(state)
         v = infinity
-        for (a, s) in game.successors(state):
-            v = min(v, max_value(s, alpha, beta, depth+1))
+        for a in game.actions(state):
+            v = min(v, max_value(game.result(state, a),
+                                 alpha, beta, depth+1))
             if v <= alpha:
                 return v
             beta = min(beta, v)
@@ -106,9 +103,9 @@ def min_value(state, alpha, beta, depth):
     cutoff_test = (cutoff_test or
                    (lambda state,depth: depth>d or game.terminal_test(state)))
     eval_fn = eval_fn or (lambda state: game.utility(state, player))
-    action, state = argmax(game.successors(state),
-                           lambda (a, s): min_value(s, -infinity, infinity, 0))
-    return action
+    return argmax(game.actions(state),
+                  lambda a: min_value(game.result(state, a),
+                                      -infinity, infinity, 0))
 
 #______________________________________________________________________________
 # Players for Games
@@ -120,21 +117,21 @@ def query_player(game, state):
 
 def random_player(game, state):
     "A player that chooses a legal move at random."
-    return random.choice(game.legal_moves(state))
+    return random.choice(game.actions(state))
 
 def alphabeta_player(game, state):
     return alphabeta_search(state, game)
 
 def play_game(game, *players):
     """Play an n-person, move-alternating game.
-    >>> play_game(Fig62Game(), alphabeta_player, alphabeta_player)
+    >>> play_game(Fig52Game(), alphabeta_player, alphabeta_player)
     3
     """
     state = game.initial
     while True:
         for player in players:
             move = player(game, state)
-            state = game.make_move(move, state)
+            state = game.result(state, move)
             if game.terminal_test(state):
                 return game.utility(state, game.to_move(game.initial))
 
@@ -144,17 +141,17 @@ def play_game(game, *players):
 class Game:
     """A game is similar to a problem, but it has a utility for each
     state and a terminal test instead of a path cost and a goal
-    test. To create a game, subclass this class and implement
-    legal_moves, make_move, utility, and terminal_test. You may
-    override display and successors or you can inherit their default
-    methods. You will also need to set the .initial attribute to the
-    initial state; this can be done in the constructor."""
+    test. To create a game, subclass this class and implement actions,
+    result, utility, and terminal_test. You may override display and
+    successors or you can inherit their default methods. You will also
+    need to set the .initial attribute to the initial state; this can
+    be done in the constructor."""
 
-    def legal_moves(self, state):
+    def actions(self, state):
         "Return a list of the allowable moves at this point."
         abstract
 
-    def make_move(self, move, state):
+    def result(self, state, move):
         "Return the state that results from making a move from a state."
         abstract
 
@@ -164,7 +161,7 @@ def utility(self, state, player):
 
     def terminal_test(self, state):
         "Return True if this is a final state for the game."
-        return not self.legal_moves(state)
+        return not self.actions(state)
 
     def to_move(self, state):
         "Return the player whose move it is in this state."
@@ -174,17 +171,12 @@ def display(self, state):
         "Print or otherwise display the state."
         print state
 
-    def successors(self, state):
-        "Return a list of legal (move, state) pairs."
-        return [(move, self.make_move(move, state))
-                for move in self.legal_moves(state)]
-
     def __repr__(self):
         return '<%s>' % self.__class__.__name__
 
-class Fig62Game(Game):
-    """The game represented in [Fig. 6.2]. Serves as a simple test case.
-    >>> g = Fig62Game()
+class Fig52Game(Game):
+    """The game represented in [Fig. 5.2]. Serves as a simple test case.
+    >>> g = Fig52Game()
     >>> minimax_decision('A', g)
     'a1'
     >>> alphabeta_full_search('A', g)
@@ -199,15 +191,12 @@ class Fig62Game(Game):
     utils = Dict(B1=3, B2=12, B3=8, C1=2, C2=4, C3=6, D1=14, D2=5, D3=2)
     initial = 'A'
 
-    def legal_moves(self, state):
-        return [move for (move, next) in self.successors(state)]
+    def actions(self, state):
+        return self.succs.get(state, {}).keys()
 
-    def make_move(self, move, state):
+    def result(self, state, move):
         return self.succs[state][move]
 
-    def successors(self, state):
-        return self.succs.get(state, {}).items()
-
     def utility(self, state, player):
         if player == 'MAX':
             return self.utils[state]
@@ -231,11 +220,11 @@ def __init__(self, h=3, v=3, k=3):
                  for y in range(1, v+1)]
         self.initial = Struct(to_move='X', utility=0, board={}, moves=moves)
 
-    def legal_moves(self, state):
+    def actions(self, state):
         "Legal moves are any square not yet taken."
         return state.moves
 
-    def make_move(self, move, state):
+    def result(self, state, move):
         if move not in state.moves:
             return state # Illegal move has no effect
         board = state.board.copy(); board[move] = state.to_move
@@ -291,12 +280,12 @@ class ConnectFour(TicTacToe):
     def __init__(self, h=7, v=6, k=4):
         TicTacToe.__init__(self, h, v, k)
 
-    def legal_moves(self, state):
+    def actions(self, state):
         return [(x, y) for (x, y) in state.moves
                 if y == 0 or (x, y-1) in state.board]
 
 __doc__ += random_tests("""
->>> play_game(Fig62Game(), random_player, random_player)
+>>> play_game(Fig52Game(), random_player, random_player)
 6
 >>> play_game(TicTacToe(), random_player, random_player)
 0
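
The interface change in this commit is easiest to see from the caller's side: code that used to unpack (move, state) pairs from game.successors(state) now iterates game.actions(state) and expands each move with game.result(state, a). The standalone sketch below illustrates that third-edition calling pattern on a tiny made-up game tree; the names succs, utils, and minimax_value are illustrative and are not part of the repository.

# Standalone sketch of the actions/result calling pattern this commit adopts
# (hypothetical example, not repository code).
succs = {'A': {'a1': 'B', 'a2': 'C'},
         'B': {'b1': 'B1', 'b2': 'B2'},
         'C': {'c1': 'C1', 'c2': 'C2'}}
utils = {'B1': 3, 'B2': 12, 'C1': 2, 'C2': 4}

def actions(state):
    "Legal moves from a state (empty for terminal states)."
    return list(succs.get(state, {}).keys())

def result(state, move):
    "State reached by making a move in a state."
    return succs[state][move]

def minimax_value(state, maximizing=True):
    "Back up terminal utilities by iterating actions() and result()."
    if not actions(state):                     # terminal test
        return utils[state]
    vals = [minimax_value(result(state, a), not maximizing)
            for a in actions(state)]
    return max(vals) if maximizing else min(vals)

assert minimax_value('A') == 3   # MAX prefers a1: min(3, 12) beats min(2, 4)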

0 commit comments
