The placeholder `input_label = tf.placeholder(tf.float32, [None, 2], "input_label")` has shape [None, 2]: the first dimension is left undetermined, and TensorFlow infers it from the data we actually feed, so the batch size can change at run time. For example, if each sample is 2-dimensional and we pass 10 samples at once, the resulting tensor has shape [10, 2]. Can several dimensions be set to None at once? TensorFlow does accept this syntactically, but it should be reserved for dimensions that genuinely vary, since the graph then knows less about the tensor's static shape.
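
To make this concrete, here is a minimal sketch showing the same placeholder accepting two different batch sizes (the doubling op is just a stand-in so there is something to run):

```python
import numpy as np
import tensorflow as tf

# The batch dimension is None, so any batch size is accepted at run time.
input_label = tf.placeholder(tf.float32, [None, 2], "input_label")
doubled = input_label * 2  # arbitrary op, purely for demonstration

with tf.Session() as sess:
    print(sess.run(doubled, feed_dict={input_label: np.ones((10, 2))}).shape)  # (10, 2)
    print(sess.run(doubled, feed_dict={input_label: np.ones((32, 2))}).shape)  # (32, 2)
```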
## 12.1.6 How to Build VGG16 with TensorFlow
Having covered the TensorFlow basics, it is time to put them to work on a real network. There are plenty of tutorials online, but this one sticks to standard low-level TensorFlow code and syntax (rather than higher-level APIs, which hide the original flavor) to show how to build the standard VGG16 architecture. Without further ado, the code:
```python
import numpy as np
import tensorflow as tf


def get_weight_variable(shape):
    # weights drawn from a truncated normal; named by the enclosing variable_scope
    return tf.get_variable('weight', shape=shape,
                           initializer=tf.truncated_normal_initializer(stddev=0.1))


def get_bias_variable(shape):
    return tf.get_variable('bias', shape=shape, initializer=tf.constant_initializer(0))


def conv2d(x, w, padding='SAME', s=1):
    return tf.nn.conv2d(x, w, strides=[1, s, s, 1], padding=padding)


def maxPoolLayer(x):
    # 2x2 max pooling with stride 2 halves the spatial resolution
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')


def conv2d_layer(x, in_chs, out_chs, ksize, layer_name):
    # convolution + bias + ReLU, wrapped in its own variable scope
    with tf.variable_scope(layer_name):
        w = get_weight_variable([ksize, ksize, in_chs, out_chs])
        b = get_bias_variable([out_chs])
        return tf.nn.relu(tf.nn.bias_add(conv2d(x, w, padding='SAME', s=1), b))


def fc_layer(x, in_kernels, out_kernels, layer_name, relu=True):
    with tf.variable_scope(layer_name):
        w = get_weight_variable([in_kernels, out_kernels])
        b = get_bias_variable([out_kernels])
        y = tf.nn.bias_add(tf.matmul(x, w), b)
        # the final (logits) layer is usually left linear, hence the relu switch
        return tf.nn.relu(y) if relu else y


def VGG16(x):
    # block 1: two 3x3 convs with 64 channels, then 2x2 max pooling
    conv1_1 = conv2d_layer(x, x.get_shape().as_list()[-1], 64, 3, 'conv1_1')
    conv1_2 = conv2d_layer(conv1_1, 64, 64, 3, 'conv1_2')
    pool1 = maxPoolLayer(conv1_2)

    # block 2: 128 channels
    conv2_1 = conv2d_layer(pool1, 64, 128, 3, 'conv2_1')
    conv2_2 = conv2d_layer(conv2_1, 128, 128, 3, 'conv2_2')
    pool2 = maxPoolLayer(conv2_2)

    # block 3: 256 channels
    conv3_1 = conv2d_layer(pool2, 128, 256, 3, 'conv3_1')
    conv3_2 = conv2d_layer(conv3_1, 256, 256, 3, 'conv3_2')
    conv3_3 = conv2d_layer(conv3_2, 256, 256, 3, 'conv3_3')
    pool3 = maxPoolLayer(conv3_3)

    # block 4: 512 channels
    conv4_1 = conv2d_layer(pool3, 256, 512, 3, 'conv4_1')
    conv4_2 = conv2d_layer(conv4_1, 512, 512, 3, 'conv4_2')
    conv4_3 = conv2d_layer(conv4_2, 512, 512, 3, 'conv4_3')
    pool4 = maxPoolLayer(conv4_3)

    # block 5: 512 channels
    conv5_1 = conv2d_layer(pool4, 512, 512, 3, 'conv5_1')
    conv5_2 = conv2d_layer(conv5_1, 512, 512, 3, 'conv5_2')
    conv5_3 = conv2d_layer(conv5_2, 512, 512, 3, 'conv5_3')
    pool5 = maxPoolLayer(conv5_3)

    # flatten the last feature map for the fully connected layers
    pool5_flatten_dims = int(np.prod(pool5.get_shape().as_list()[1:]))
    pool5_flatten = tf.reshape(pool5, [-1, pool5_flatten_dims])

    fc_6 = fc_layer(pool5_flatten, pool5_flatten_dims, 4096, 'fc6')
    fc_7 = fc_layer(fc_6, 4096, 4096, 'fc7')
    fc_8 = fc_layer(fc_7, 4096, 10, 'fc8', relu=False)  # logits for 10 classes

    return fc_8
```
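
A quick smoke test helps confirm the graph wires up end to end. The sketch below is illustrative only: the placeholder name `input_image`, the 224×224×3 input size, and the random batch are assumptions, not part of the listing above.

```python
# Minimal usage sketch; input name/size and the dummy batch are assumptions.
input_image = tf.placeholder(tf.float32, [None, 224, 224, 3], name='input_image')
logits = VGG16(input_image)
prob = tf.nn.softmax(logits)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(4, 224, 224, 3)  # four random "images"
    print(sess.run(prob, feed_dict={input_image: batch}).shape)  # (4, 10)
```

Because every layer lives in its own `tf.variable_scope`, the variables get readable names such as `conv1_1/weight`, which pays off later when inspecting or restoring checkpoints.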
# 12.2 PyTorch