import numpy as np

def state_value_iteration(env, theta=0.0001, discount_factor=0.8):
    def one_step_action_choice(state, V):
        # Expected return of each action from `state` under the current V
        A = np.zeros(env.nA)
        for a in range(env.nA):
            for prob, next_state, reward, done in env.P[state][a]:
                A[a] += prob * (reward + discount_factor * V[next_state])
        return A

    V = np.zeros(env.nS)
    while True:
        delta = 0
        for s in range(env.nS):
            # Find the value of the best action
            A = one_step_action_choice(s, V)
            best_action_value = np.max(A)
            # Track the largest update for the termination test
            delta = max(delta, np.abs(best_action_value - V[s]))
            # Update the value function
            V[s] = best_action_value
        # Stop once the value function has converged
        if delta < theta:
            break

    # Extract the deterministic greedy policy from the converged V
    policy = np.zeros([env.nS, env.nA])
    for s in range(env.nS):
        A = one_step_action_choice(s, V)
        best_action = np.argmax(A)
        policy[s, best_action] = 1.0
    return policy, V
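As a quick check, the function can be run on any object that exposes nS, nA, and a transition table P[state][action] = [(prob, next_state, reward, done), ...], the same interface as the classic Gym tabular environments. The two-state MDP below is a hypothetical stand-in for illustration, not part of the original example:

from types import SimpleNamespace

# Hypothetical two-state MDP: from state 0, action 0 stays put with
# reward 0 and action 1 moves to the absorbing state 1 with reward 1
toy_env = SimpleNamespace(
    nS=2,
    nA=2,
    P={
        0: {0: [(1.0, 0, 0.0, False)], 1: [(1.0, 1, 1.0, True)]},
        1: {0: [(1.0, 1, 0.0, True)], 1: [(1.0, 1, 0.0, True)]},
    },
)

policy, V = state_value_iteration(toy_env)
print(policy)  # one-hot greedy policy per state
print(V)       # converged state values

The next listing switches from model-based value iteration to model-free Q-learning, using the update Q(s, a) = R(s, a) + gamma * max_a' Q(s', a') on a six-state example in which each action moves directly to the state of the same index and reaching state 5 earns a reward of 100.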
import numpy as np
import random

def random_action(reward_row):
    # Pick a uniformly random valid action: entries of -1 in the
    # reward row mark impossible moves, so only indices >= 0 qualify
    valid_actions = [a for a, r in enumerate(reward_row) if r >= 0]
    return random.choice(valid_actions)
def reward_setting(state_num, action_num):
    # -1 marks an impossible transition, 0 a valid move,
    # and 100 the reward for reaching the goal state 5
    R = -1 * np.ones((state_num, action_num))
    R[0, 4] = 0
    R[1, 3] = 0
    R[1, 5] = 100
    R[2, 3] = 0
    R[3, 1] = 0
    R[3, 2] = 0
    R[3, 4] = 0
    R[4, 0] = 0
    R[4, 3] = 0
    R[4, 5] = 100
    R[5, 1] = 0
    R[5, 4] = 0
    R[5, 5] = 100
    return R
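For reference, the reward matrix this function produces (rows are states, columns are actions, and the action index doubles as the next-state index):

R =
[[ -1  -1  -1  -1   0  -1]
 [ -1  -1  -1   0  -1 100]
 [ -1  -1  -1   0  -1  -1]
 [ -1   0   0  -1   0  -1]
 [  0  -1  -1   0  -1 100]
 [ -1   0  -1  -1   0 100]]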
if __name__ == '__main__':
    action_num = 6
    state_num = 6
    gamma = 0.8
    epoch_number = 200
    condition_stop = 5  # state 5 is the goal state
    Q = np.zeros((state_num, action_num))
    R = reward_setting(state_num, action_num)
    for epoch in range(epoch_number):
        # Run one episode starting from every state
        for s in range(state_num):
            loop = True
            while loop:
                # Choose a random valid action a; in this example the
                # action index is also the index of the next state
                a = random_action(R[s, :])
                # Maximum Q value attainable from the next state
                q_max = np.max(Q[a, :])
                # Bellman optimality update
                Q[s, a] = R[s, a] + gamma * q_max
                s = a
                # Episode ends when the goal state is reached
                if s == condition_stop:
                    loop = False
    # Normalize: the largest Q value is 100 / (1 - gamma) = 500,
    # so dividing by 5 rescales the table to a 0-100 range
    Q = (Q / 5).astype(int)
    print(Q)
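Once the table has converged, the learned behavior can be read off by following the greedy action from any state; the start state 2 below is an arbitrary choice for illustration:

# Follow the greedy policy until the goal state is reached
state = 2
path = [state]
while state != condition_stop:
    state = int(np.argmax(Q[state, :]))
    path.append(state)
print(path)  # e.g. [2, 3, 1, 5] with the converged table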