-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
119 lines (103 loc) · 3.66 KB
/
utils.py
File metadata and controls
119 lines (103 loc) · 3.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import tensorflow as tf
def relu(x):
    """Element-wise rectified linear unit: max(0, x)."""
    activated = tf.nn.relu(x)
    return activated
def leaky_relu(x, alpha = 0.2):
    """Leaky ReLU: x for positive inputs, alpha * x for negative ones.

    Implemented as max(alpha * x, x), which is equivalent for 0 < alpha < 1.
    """
    scaled = alpha * x
    return tf.maximum(scaled, x)
def parametric_relu(x, trainable = True):
    """Parametric ReLU with a learnable per-channel slope for negative inputs.

    Creates a variable 'alpha' (one scalar per channel, i.e. per last-dim
    entry of x), initialized to 0 so the layer starts as a plain ReLU.

    Fixes vs. original: `x.het_shape()` was a typo for `x.get_shape()`, and
    the function never returned the activation at all.
    """
    alpha = tf.get_variable(
        name = 'alpha',
        shape = x.get_shape()[-1],
        dtype = tf.float32,
        initializer = tf.constant_initializer(0.0),
        trainable = trainable
    )
    # PReLU: identity on the positive part, alpha-scaled on the negative part.
    return tf.maximum(0.0, x) + alpha * tf.minimum(0.0, x)
def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    squashed = tf.nn.sigmoid(x)
    return squashed
def tanh(x):
    """Element-wise hyperbolic tangent."""
    squashed = tf.nn.tanh(x)
    return squashed
def convolution(x, out_channels, kernel, stride, padding = 'SAME', trainable = True):
    """2-D convolution followed by a bias add.

    x            : NHWC input tensor; channel count taken from its last dim.
    out_channels : number of output feature maps.
    kernel       : (height, width) of the filter.
    stride       : (vertical, horizontal) stride.
    padding      : 'SAME' or 'VALID', passed straight to tf.nn.conv2d.
    trainable    : whether the created variables are trainable.

    Creates variables named 'weights' and 'biases' in the current scope.
    """
    in_channels = x.get_shape()[-1]
    filter_shape = [kernel[0], kernel[1], in_channels, out_channels]
    weights = tf.get_variable(
        name = 'weights',
        shape = filter_shape,
        initializer = tf.contrib.layers.xavier_initializer(),
        trainable = trainable
    )
    biases = tf.get_variable(
        name = 'biases',
        shape = [out_channels],
        initializer = tf.constant_initializer(0.0),
        trainable = trainable
    )
    conv = tf.nn.conv2d(x, weights, [1, stride[0], stride[1], 1], padding = padding)
    return tf.nn.bias_add(conv, biases)
def deconvolution(x, out_channels, kernel, stride, padding, trainable):
    """Transposed (fractionally-strided) 2-D convolution via tf.layers.

    Thin wrapper around tf.layers.conv2d_transpose with Xavier-initialized
    kernels; returns the upsampled feature map.
    """
    return tf.layers.conv2d_transpose(
        x,
        out_channels,
        kernel_size = kernel,
        strides = stride,
        padding = padding,
        kernel_initializer = tf.contrib.layers.xavier_initializer(),
        trainable = trainable
    )
def pooling(x, kernel, stride, padding = 'SAME', mode = 'MAX'):
    """Spatial pooling over an NHWC tensor.

    mode : 'MAX', 'AVG', or 'MIN'. kernel/stride are passed straight to the
    underlying tf.nn pooling ops (full 4-element NHWC window/stride specs).

    Fixes vs. original: tf.nn.min_pool does not exist, so mode='MIN' raised
    AttributeError; min-pooling is implemented as -max_pool(-x). An unknown
    mode previously returned the input unpooled — now it raises ValueError.
    """
    if mode == 'MAX':
        x = tf.nn.max_pool(x, kernel, strides = stride, padding = padding)
    elif mode == 'AVG':
        x = tf.nn.avg_pool(x, kernel, strides = stride, padding = padding)
    elif mode == 'MIN':
        # TensorFlow has no min_pool; min over a window == -max over -x.
        x = -tf.nn.max_pool(-x, kernel, strides = stride, padding = padding)
    else:
        raise ValueError("mode must be 'MAX', 'AVG' or 'MIN', got %r" % (mode,))
    return x
def flatten(x):
    """Collapse an (N, H, W, C) tensor to (N, H*W*C), keeping the batch dim.

    H, W and C must be statically known; the batch dimension may be dynamic.
    """
    _, height, width, channels = x.get_shape()
    flat_size = height.value * width.value * channels.value
    return tf.reshape(x, [-1, flat_size])
def dense(x, out_nodes, trainable = True):
    """Fully connected layer: x @ W + b.

    x         : 2-D tensor (batch, features); feature count must be static.
    out_nodes : output width.
    trainable : whether the created variables are trainable.

    Creates variables named 'weights' and 'biases' in the current scope.
    Fix vs. original: `trainable` was accepted but never forwarded to
    tf.get_variable (unlike `convolution`), so the flag was silently ignored.
    """
    shape = x.get_shape()
    size = shape[-1].value
    w = tf.get_variable(
        name = 'weights',
        shape = [size, out_nodes],
        initializer = tf.contrib.layers.xavier_initializer(),
        trainable = trainable
    )
    b = tf.get_variable(
        name = 'biases',
        shape = [out_nodes],
        initializer = tf.constant_initializer(0.0),
        trainable = trainable
    )
    x = tf.nn.bias_add(tf.matmul(x, w), b)
    return x
def dropout(x, rate = 0.5, trainable = False):
    """Dropout layer: zeroes a `rate` fraction of units when active.

    NOTE(review): `trainable` is really the `training` flag of
    tf.layers.dropout (apply dropout or pass through), not a variable
    trainability switch — the name is kept for interface compatibility.
    """
    dropped = tf.layers.dropout(x, rate = rate, training = trainable)
    return dropped
def batch_normalization(x, epsilon = 1e-5, decay = 0.9, trainable = True):
    """Batch normalization via tf.contrib.layers.batch_norm.

    `trainable` is forwarded as `is_training` (batch statistics vs. moving
    averages); updates_collections=None forces in-place moving-average updates.
    """
    return tf.contrib.layers.batch_norm(
        x,
        is_training = trainable,
        epsilon = epsilon,
        decay = decay,
        updates_collections = None
    )
def pixel_shuffle(x, r, color = True):
    """Sub-pixel (pixel-shuffle) upsampling by factor r.

    x     : NHWC tensor whose channel dim holds r*r sub-pixel values per
            output channel (3*r*r channels when color=True, r*r otherwise).
            Batch size must be statically known (get_shape().as_list()).
    r     : integer upscale factor.
    color : if True, shuffle each of the 3 RGB channel groups independently
            and re-concatenate them.

    Fixes vs. original: the body referenced an undefined name `X` instead of
    the argument `x`; a dead no-op statement (`bsize, a*r, b*r`) is removed;
    the inner comprehension no longer shadows `x`; tf.split/tf.concat now use
    the TF 1.x argument order (value first), consistent with the rest of the
    file (tf.layers / tf.contrib usage).
    """
    def _phase_shift(I, r):
        # Rearrange (N, a, b, r*r) -> (N, a*r, b*r, 1).
        bsize, a, b, c = I.get_shape().as_list()
        X = tf.reshape(I, (bsize, a, b, r, r))
        X = tf.transpose(X, (0, 1, 2, 4, 3))
        X = tf.split(X, a, 1)
        X = tf.concat([tf.squeeze(part) for part in X], 2)
        X = tf.split(X, b, 1)
        X = tf.concat([tf.squeeze(part) for part in X], 2)
        return tf.reshape(X, (bsize, a * r, b * r, 1))
    if color:
        channels = tf.split(x, 3, 3)
        return tf.concat([_phase_shift(ch, r) for ch in channels], 3)
    return _phase_shift(x, r)
def cross_entropy_logit(logits, labels):
    """Mean softmax cross-entropy between raw logits and one-hot labels."""
    per_example = tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = labels)
    return tf.reduce_mean(per_example)
def accuracy_logit(logits, labels):
    """Classification accuracy (percentage) from logits and one-hot labels.

    Compares the argmax class of each logit row against the argmax of the
    label row and averages the hit rate, scaled to [0, 100].

    Fix vs. original: tf.arg_max is the deprecated alias of tf.argmax in
    TF 1.x; behavior is identical.
    """
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    correct = tf.cast(correct, tf.float32)
    accuracy = tf.reduce_mean(correct) * 100.0
    return accuracy