forked from HAFSGo/HAFSGo
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathagent_network.py
More file actions
83 lines (64 loc) · 2.47 KB
/
agent_network.py
File metadata and controls
83 lines (64 loc) · 2.47 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import tensorflow as tf
import numpy as np
from cocob_optimizer import COCOB
slim = tf.contrib.slim
class Agent:
    """AlphaGo Zero-style policy/value network for 19x19 Go.

    The network is: one conv stem -> a tower of residual blocks -> two heads:
    a policy head over 19*19 board moves plus one "pass" move (362 logits),
    and a scalar value head in [-1, 1] (tanh).

    NOTE: built with tf.contrib.slim (TF 1.x graph mode); `predict` returns
    symbolic tensors, not evaluated arrays.
    """

    def __init__(self):
        # 19*19 board intersections + 1 "pass" action.
        self.action_size = 19 * 19 + 1
        # NOTE(review): is_training is fixed at construction and baked into
        # batch_norm_params; the per-call is_training arguments below are
        # currently ignored — confirm intended training/eval switching.
        self.is_training = True
        self.batch_norm_params = {'decay': 0.9, 'epsilon': 0.001, 'is_training': self.is_training, 'scope': 'batch_norm'}

    def predict(self, x, is_training=True):
        """Build the forward graph.

        Args:
            x: input tensor, assumed [batch, 19, 19, 17] — TODO confirm
               feature-plane layout against the caller.
            is_training: currently unused (see __init__ note).

        Returns:
            Tensor [batch, 363]: 362 policy logits concatenated with the
            scalar value (last column).
        """
        # conv stem: [batch, 19, 19, 256]
        conv = self.conv_layer(x)
        # residual tower: [batch, 19, 19, 256]
        resnet = self.resnet_layer(x=conv, output_channel=256, n_layer=40)
        # policy: [batch, 19*19+1] == [batch, 362]  (original comment said 392 — off)
        policy = self.policy_head(resnet)
        # value: [batch, 1]
        value = self.value_head(resnet)
        # concat along feature axis: [batch, 363]
        return tf.concat([policy, value], axis=1)

    def act(self, x, is_training=True):
        """Choose an action: sample from the policy when training, else argmax.

        BUG FIX: predict() returns [batch, action_size + 1]; the original
        sliced `[:-1]`, which drops the last *batch row*, not the value
        column. `[:, :-1]` strips the value and keeps all policy logits.
        """
        policy = self.predict(x)[:, :-1]
        # NOTE(review): `policy` is a graph tensor here; np.random.choice /
        # np.argmax need an evaluated 1-D probability vector (e.g. after
        # sess.run and softmax). Verify the caller evaluates before sampling.
        if is_training:
            act = np.random.choice(range(self.action_size), 1, p=policy)
        else:
            act = np.argmax(policy)
        return act

    def get_policy(self, x):
        """Return only the policy part, [batch, 362] (value column removed)."""
        # Same slicing fix as in act(): strip the value column, not a batch row.
        return self.predict(x)[:, :-1]

    def conv_layer(self, x, output_channel=256, kernel_size=[3, 3], is_training=True):
        """Single conv + batch norm.

        BUG FIX: the original ignored `kernel_size` and hardcoded [3, 3],
        so the 1x1 convs requested by policy_head/value_head were silently
        3x3. The parameter is now passed through.
        """
        conv = slim.conv2d(x, output_channel, kernel_size=kernel_size,
                           normalizer_fn=slim.batch_norm,
                           normalizer_params=self.batch_norm_params)
        return conv

    def residual_block(self, x, output_channel=256, is_training=True, is_end=False):
        """Standard 2-conv residual block: conv-bn-relu, conv-bn, skip add.

        The final block of the tower (is_end=True) omits the post-addition
        ReLU, matching the original implementation.
        """
        with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm,
                            normalizer_params=self.batch_norm_params, kernel_size=[3, 3]):
            h1 = slim.conv2d(x, output_channel)
            # No activation here: ReLU is applied after the skip connection.
            h2 = slim.conv2d(h1, output_channel, activation_fn=None)
        if is_end:
            return h2 + x
        return tf.nn.relu(h2 + x)

    def resnet_layer(self, x, output_channel=256, n_layer=40):
        """Stack n_layer residual blocks, each in its own variable scope."""
        net = x
        for i in range(n_layer):
            with tf.variable_scope('res' + str(i)):
                net = self.residual_block(net, output_channel, is_end=(i == n_layer - 1))
        return net

    def policy_head(self, x):
        """1x1 conv to 2 channels, flatten, FC to 362 raw logits (no softmax)."""
        conv = self.conv_layer(x, output_channel=2, kernel_size=[1, 1])
        conv_flatten = slim.flatten(conv)
        policy = slim.fully_connected(conv_flatten, 19 * 19 + 1, activation_fn=None)
        return policy

    def value_head(self, x):
        """1x1 conv to 1 channel, flatten, FC-256, FC-1 with tanh -> [-1, 1]."""
        conv = self.conv_layer(x, output_channel=1, kernel_size=[1, 1])
        conv_flatten = slim.flatten(conv)
        fc = slim.fully_connected(conv_flatten, 256)
        value = slim.fully_connected(fc, 1, activation_fn=tf.nn.tanh)
        return value

    def loss(self, x, mcts):
        # TODO: not yet implemented (AlphaGo Zero loss: policy cross-entropy
        # against MCTS visit counts + value MSE + L2 regularization).
        pass