Programming Assignment: Introduction to TensorFlow and Hand-Sign Digit Recognition

TensorFlow Basics

import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
%matplotlib inline
np.random.seed(1)
y_hat = tf.constant(36, name="y_hat")
y = tf.constant(39, name="y")
loss = tf.Variable((y - y_hat)**2, name="loss")
init = tf.global_variables_initializer()
with tf.Session() as session:
    session.run(init)
    print(session.run(loss))
9
a = tf.constant(2, name="a")
b = tf.constant(10, name="b")
c = tf.multiply(a, b)
print(c)  # this only prints the tensor; a Session must be created and run to get its value
Tensor("Mul:0", shape=(), dtype=int32)
sess = tf.Session()
print(sess.run(c))
20
x = tf.placeholder(tf.int64, name="x")
print(sess.run(2 * x, feed_dict={x: 3}))
sess.close()
6

Linear function

def linear_function():

    np.random.seed(1)

    X = tf.constant(np.random.randn(3, 1), name="X")
    W = tf.constant(np.random.randn(4, 3), name="W")
    b = tf.constant(np.random.randn(4, 1), name="b")
    Y = tf.add(tf.matmul(W, X), b)

    sess = tf.Session()
    result = sess.run(Y)

    sess.close()

    return result
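
A quick sanity check (a minimal sketch; the exact numbers depend on the seeded np.random draws):

result = linear_function()
print(result.shape)  # (4, 1), since W is (4, 3), X is (3, 1) and b is (4, 1)
print("result = " + str(result))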

Activation function

def sigmoid(z):
    x = tf.placeholder(tf.float32, name="x")

    sigmoid = tf.sigmoid(x)

    with tf.Session() as sess:
        result = sess.run(sigmoid, feed_dict={x: z})

    return result
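
A small check that the graph evaluates as expected: sigmoid(0) is exactly 0.5, and large inputs saturate towards 1.

print("sigmoid(0) = " + str(sigmoid(0)))    # 0.5
print("sigmoid(12) = " + str(sigmoid(12)))  # roughly 0.999994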

Cost function

def cost(logits, labels):
    z = tf.placeholder(tf.float32, name="z")
    y = tf.placeholder(tf.float32, name="y")

    cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)

    sess = tf.Session()

    cost = sess.run(cost, feed_dict={z: logits, y: labels})

    sess.close()

    return cost
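
For example, with some made-up logits and labels (tf.nn.sigmoid_cross_entropy_with_logits applies the sigmoid internally, so raw scores are passed in):

logits = np.array([0.2, 0.4, 0.7, 0.9])
labels = np.array([0., 0., 1., 1.])
print("cost = " + str(cost(logits, labels)))  # one loss value per example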

One-hot encoding

def one_hot_matrix(labels, C):
    C = tf.constant(C, name="C")
    one_hot_matrix = tf.one_hot(indices=labels, depth=C, axis=0)
    # With axis=0 the result is a (C, features) matrix,
    # e.g. indices = [2, 3, 1, 6] is a vector of length features = 4.
    # With the default axis=-1 the result would be (features, C) instead.

    sess = tf.Session()

    one_hot = sess.run(one_hot_matrix)

    sess.close()

    return one_hot
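
A quick example of the axis=0 layout described above, using some hypothetical labels:

labels = np.array([1, 2, 3, 0, 2, 1])
print(one_hot_matrix(labels, C=4))  # shape (4, 6): one row per class, one column per example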

Initializing tensors of zeros/ones

def ones(shape):
    ones = tf.ones(shape)

    sess = tf.Session()

    ones = sess.run(ones)

    sess.close()

    return ones
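
For example, requesting a length-3 vector of ones:

print("ones = " + str(ones([3])))  # a vector of three 1.0 entries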

Building the hand-sign digit recognizer

Loading the data

X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
index = 24
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))

X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T

X_train = X_train_flatten/255
X_test = X_test_flatten/255

Y_train = convert_to_one_hot(Y_train_orig, 6)  # there are 6 classes in total (the digits 0-5)
Y_test = convert_to_one_hot(Y_test_orig, 6)
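
A shape check confirms the flattening: each 64x64x3 image becomes a column of 12288 features, with one column per example.

print("X_train shape: " + str(X_train.shape))  # (12288, m_train)
print("Y_train shape: " + str(Y_train.shape))  # (6, m_train)
print("X_test shape: " + str(X_test.shape))    # (12288, m_test)
print("Y_test shape: " + str(Y_test.shape))    # (6, m_test)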

Creating the placeholders X and Y

def create_placeholders(n_x, n_y):
    X = tf.placeholder(tf.float32, [n_x, None], name="X")
    Y = tf.placeholder(tf.float32, [n_y, None], name="Y")

    return X, Y
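
Calling it on its own just builds two placeholder tensors whose batch dimension is left unknown (a minimal check):

X, Y = create_placeholders(12288, 6)
print("X = " + str(X))  # Tensor("X:0", shape=(12288, ?), dtype=float32)
print("Y = " + str(Y))  # Tensor("Y:0", shape=(6, ?), dtype=float32)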

Initializing the parameters

def initialize_parameters():
    tf.set_random_seed(1)

    W1 = tf.get_variable("W1", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable("b1", [25, 1], initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable("b2", [12, 1], initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable("b3", [6, 1], initializer=tf.zeros_initializer())

    parameters = {"W1": W1,
                  "W2": W2,
                  "W3": W3,
                  "b1": b1,
                  "b2": b2,
                  "b3": b3}

    return parameters
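
Because tf.get_variable refuses to create the same variable twice, reset the graph before trying it out (a minimal sketch):

ops.reset_default_graph()
with tf.Session() as sess:
    parameters = initialize_parameters()
    print("W1 = " + str(parameters["W1"]))  # a (25, 12288) float32 variable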

Forward propagation

def forward_propagation(X, parameters):
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Z1 = tf.add(tf.matmul(W1, X), b1)
    A1 = tf.nn.relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)
    A2 = tf.nn.relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)
    # We stop at Z3: in TensorFlow the output of the last linear layer is used as the
    # input of the function that computes the loss, which applies the softmax itself.

    return Z3
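
Wiring the helpers together shows that Z3 is just a (6, ?) tensor of un-normalized scores, not probabilities (a minimal sketch):

ops.reset_default_graph()
with tf.Session() as sess:
    X, Y = create_placeholders(12288, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    print("Z3 = " + str(Z3))  # a float32 tensor of shape (6, ?)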

Cost function

def compute_cost(Z3, Y):
    logits = tf.transpose(Z3)
    labels = tf.transpose(Y)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

    return cost
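
The transposes are needed because Z3 and Y have shape (number of classes, number of examples), while tf.nn.softmax_cross_entropy_with_logits expects (number of examples, number of classes). A minimal sketch of plugging it onto the graph built above:

ops.reset_default_graph()
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
print("cost = " + str(cost))  # a scalar tensor: the mean cross-entropy over the batch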

Backward propagation and parameter updates

TensorFlow handles backpropagation automatically: building the optimizer op records the gradients of the cost with respect to every trainable variable, and each run of that op performs one update step.

optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
_ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})

Building the model

def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True):
    ops.reset_default_graph()  # rerun the model without overwriting tf variables
    tf.set_random_seed(1)
    seed = 3
    (n_x, m) = X_train.shape
    n_y = Y_train.shape[0]
    costs = []

    # create the placeholders
    X, Y = create_placeholders(n_x, n_y)

    # initialize the parameters
    parameters = initialize_parameters()

    # forward propagation
    Z3 = forward_propagation(X, parameters)

    # compute the cost
    cost = compute_cost(Z3, Y)

    # backward propagation
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # initialize all the variables
    init = tf.global_variables_initializer()

    with tf.Session() as sess:

        sess.run(init)

        for epoch in range(num_epochs):
            epoch_cost = 0
            num_minibatches = int(m / minibatch_size)
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:
                (minibatch_X, minibatch_Y) = minibatch

                _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})

                epoch_cost = epoch_cost + minibatch_cost / num_minibatches

            if print_cost and epoch % 100 == 0:
                print("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost and epoch % 5 == 0:
                costs.append(epoch_cost)

        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))

        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters

Start training

parameters = model(X_train, Y_train, X_test, Y_test)
WARNING:tensorflow:From <ipython-input-64-3b18fa0bc2c0>:5: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:

Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.

See `tf.nn.softmax_cross_entropy_with_logits_v2`.

Cost after epoch 0: 1.895938
Cost after epoch 100: 1.400813
Cost after epoch 200: 1.255541
Cost after epoch 300: 1.181168
Cost after epoch 400: 1.114092
Cost after epoch 500: 1.073960
Cost after epoch 600: 1.032575
Cost after epoch 700: 0.977509
Cost after epoch 800: 0.937244
Cost after epoch 900: 0.886205
Cost after epoch 1000: 0.882042
Cost after epoch 1100: 0.824795
Cost after epoch 1200: 0.818317
Cost after epoch 1300: 0.787285
Cost after epoch 1400: 0.770850

Parameters have been trained!
Train Accuracy: 0.723148
Test Accuracy: 0.516667
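
The deprecation warning printed at the start only concerns the loss op. If desired, a drop-in replacement inside compute_cost that keeps the same behaviour (no gradient flowing into the labels) would be:

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=tf.stop_gradient(labels)))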
import scipy
from PIL import Image
import matplotlib.image as mping
import cv2

for i in range(1, 3):
    my_image = str(i) + ".jpg"

    fileName = "C:\\Users\\董润泽\\Desktop\\手势数字\\" + my_image

    image = mping.imread(fileName)

    plt.imshow(image)
    plt.show()

    image = cv2.resize(image, (64, 64))

    my_image = image.reshape((1, 64*64*3)).T
    my_image_prediction = predict(my_image, parameters)

    print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))

Your algorithm predicts: y = 3

Your algorithm predicts: y = 3

Your algorithm predicts: y = 1

The accuracy is really low!!

