import numpy as np
from functions import *
from gradient import numerical_gradient
# A two-layer (one hidden layer) fully connected neural network
class TwoLayerNet:
    # Initializer
    def __init__(self, input_size, hidden_size, output_size,
                 weight_init_std=0.01):
        # Initialize weights and biases
        self.params = {}
        # Weights start from a standard normal distribution, scaled down by a factor of 100
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        # Biases start at zero
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)
    def predict(self, x):
        # Forward propagation of the input signal
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)
        return y
    # x: input data, t: supervision (label) data
    def loss(self, x, t):
        # The loss is the cross-entropy error of the prediction
        y = self.predict(x)
        return cross_entropy_error(y, t)
    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy
    # x: input data, t: supervision (label) data
    def numerical_gradient(self, x, t):
        # Numerical gradient of the loss with respect to each parameter
        # (calls the module-level numerical_gradient imported above)
        loss_W = lambda W: self.loss(x, t)
        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads
    def gradient(self, x, t):
        # Backpropagation: analytic gradients, much faster than numerical_gradient
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}
        batch_num = x.shape[0]
        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)
        # backward
        dy = (y - t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)
        da1 = np.dot(dy, W2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['W1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)
        return grads
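

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original file): exercise the network
    # on random dummy data. It assumes functions.py / gradient.py provide the
    # helpers imported above (sigmoid, softmax, cross_entropy_error,
    # sigmoid_grad, numerical_gradient); the sizes and data here are arbitrary.
    np.random.seed(0)
    net = TwoLayerNet(input_size=4, hidden_size=5, output_size=3)

    x = np.random.rand(10, 4)                   # 10 samples, 4 features each
    t = np.eye(3)[np.random.randint(0, 3, 10)]  # one-hot labels for 3 classes

    # Gradient check: the backprop gradients should closely match the
    # numerical gradients (the average absolute difference should be tiny).
    grad_bp = net.gradient(x, t)
    grad_num = net.numerical_gradient(x, t)
    for key in ('W1', 'b1', 'W2', 'b2'):
        diff = np.average(np.abs(grad_bp[key] - grad_num[key]))
        print(key, diff)

    # A few steps of plain SGD using the backprop gradients.
    lr = 0.1
    for step in range(100):
        grads = net.gradient(x, t)
        for key in ('W1', 'b1', 'W2', 'b2'):
            net.params[key] -= lr * grads[key]
    print('loss:', net.loss(x, t), 'accuracy:', net.accuracy(x, t))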

