NN
A neural network mimics the way neurons in the human brain work. It is built from an input layer, one or more hidden layers, and an output layer: the input is combined with the corresponding layer's parameters, passed through an activation function, and handed to the next layer, repeating layer by layer until the final layer is reached. This is what we call forward propagation.
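Written out explicitly (a minimal sketch of this forward pass: g is the sigmoid activation, superscripts index the layers, and a bias unit of 1 is prepended to a^{(1)} and a^{(2)} before multiplying by the next weight matrix):

$$a^{(1)} = x,\qquad z^{(2)} = \Theta^{(1)}a^{(1)},\qquad a^{(2)} = g(z^{(2)}),\qquad z^{(3)} = \Theta^{(2)}a^{(2)},\qquad h_\Theta(x) = a^{(3)} = g(z^{(3)}),\qquad g(z) = \frac{1}{1+e^{-z}}$$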
BP
The network itself is easy to understand, but we still need to fit its parameters. And if we want to use it for a multi-class classification problem, the class labels first need some preprocessing.
One-hot encoding
To train the network we transform y with one-hot encoding: each sample's label becomes a vector in which exactly one position is 1 and every other position is 0. With 10 classes, for example, the label y = 3 becomes [0, 0, 1, 0, 0, 0, 0, 0, 0, 0].
Cost function
When it comes to fitting the parameters, we cannot do without a cost function.
This cost function is again a generalization of the one used in logistic regression, and in a neural network we usually add a regularization term to it.
Taking a three-layer network as an example, its cost function with the regularization term is as follows:
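Reconstructed here in the standard form of the logistic regression cost generalized to K output classes (m is the number of training samples; the bias columns of Θ^{(1)} and Θ^{(2)} are excluded from the regularization sum):

$$J(\Theta) = -\frac{1}{m}\sum_{i=1}^{m}\sum_{k=1}^{K}\Big[y_k^{(i)}\log\big(h_\Theta(x^{(i)})\big)_k + \big(1-y_k^{(i)}\big)\log\big(1-(h_\Theta(x^{(i)}))_k\big)\Big] + \frac{\lambda}{2m}\sum_{l=1}^{2}\sum_{j}\sum_{k}\big(\Theta^{(l)}_{j,k}\big)^2$$

This is exactly what the reg_cost function in the example below computes.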
Gradient descent (BP)
With the cost function in hand, we naturally want to run gradient descent; in a neural network, however, the gradients are computed with backpropagation (BP).
Random initialization
Initialize the θ weight matrices with values drawn at random from a small interval such as [-0.1, 0.1].
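A minimal sketch of such an initialization (the helper name random_init and the default interval are illustrative, not from the original code; the example below simply draws the whole flattened parameter vector with np.random.uniform):

import numpy as np

def random_init(size, eps=0.1):
    # draw every weight uniformly from [-eps, eps]; a non-zero random start
    # breaks the symmetry between hidden units
    return np.random.uniform(-eps, eps, size)

# one flat vector covering both weight matrices of a 400-25-10 network
init_theta = random_init(25 * 401 + 10 * 26)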
After initialization, we run forward propagation with the initial θ and keep the activations of every layer. Then, mirroring forward propagation, we propagate the error backwards layer by layer to compute the gradient vector, and finally hand the cost and gradient over to an optimization routine to solve for θ.
Here is the derivation for a three-layer network; note that during BP the bias terms do not take part when the error is propagated back.
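In this notation (⊙ is element-wise multiplication, m the number of training samples, and the bias column of Θ^{(2)} is dropped when the error is sent back), the derivation gives:

$$\delta^{(3)} = a^{(3)} - y$$

$$\delta^{(2)} = \big(\Theta^{(2)}_{:,1:}\big)^{T}\delta^{(3)}\odot g'(z^{(2)}),\qquad g'(z) = g(z)\big(1-g(z)\big)$$

$$D^{(2)} = \frac{1}{m}\sum_{i=1}^{m}\delta^{(3),(i)}\big(a^{(2),(i)}\big)^{T},\qquad D^{(1)} = \frac{1}{m}\sum_{i=1}^{m}\delta^{(2),(i)}\big(a^{(1),(i)}\big)^{T}$$

When regularization is used, λ/m · Θ^{(l)} is added to every non-bias column of D^{(l)}. This is what the gradient and reg_gradient functions in the example implement.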
Example
The example is again the multi-class problem of classifying the digits 0-9, and the network built here has only three layers: 400 input units, 25 hidden units and 10 output units.
eg:
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from scipy.optimize import minimize
def one_hot_encoder(raw_y):
    # turn each label 1..10 into a 10-dimensional 0/1 vector
    result = []
    for i in raw_y:
        y_temp = np.zeros(10)
        y_temp[i - 1] = 1  # label k sets position k-1 to 1
        result.append(y_temp)
    return np.array(result)

def serialize(a, b):
    # flatten both weight matrices into one vector for the optimizer
    return np.append(a.flatten(), b.flatten())

def deserialize(theta_serialize):
    # recover theta1 (25, 401) and theta2 (10, 26) from the flat vector
    theta1 = theta_serialize[:25 * 401].reshape(25, 401)
    theta2 = theta_serialize[25 * 401:].reshape(10, 26)
    return theta1, theta2

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def feed_forward(theta_serialize, x):
    # forward propagation, keeping every intermediate result for BP
    theta1, theta2 = deserialize(theta_serialize)
    a1 = x
    z2 = a1 @ theta1.T
    a2 = sigmoid(z2)
    a2 = np.insert(a2, 0, values=1, axis=1)  # add bias unit
    z3 = a2 @ theta2.T
    h = sigmoid(z3)
    return a1, z2, a2, z3, h

def cost(theta_serialize, x, y):
    # unregularized cross-entropy cost
    a1, z2, a2, z3, h = feed_forward(theta_serialize, x)
    j = -np.sum(y * np.log(h) + (1 - y) * np.log(1 - h)) / len(x)
    return j

def reg_cost(theta_serialize, x, y, lamda):
    # regularization penalizes all weights except the bias columns
    theta1, theta2 = deserialize(theta_serialize)
    sum1 = np.sum(np.power(theta1[:, 1:], 2))
    sum2 = np.sum(np.power(theta2[:, 1:], 2))
    reg = (sum1 + sum2) * lamda / (2 * len(x))
    return reg + cost(theta_serialize, x, y)

def sigmoid_gradient(z):
    return sigmoid(z) * (1 - sigmoid(z))

def gradient(theta_serialize, x, y):
    # backpropagation: output-layer error d3, hidden-layer error d2, then the gradients
    theta1, theta2 = deserialize(theta_serialize)
    a1, z2, a2, z3, h = feed_forward(theta_serialize, x)
    d3 = h - y
    d2 = d3 @ theta2[:, 1:] * sigmoid_gradient(z2)  # bias column excluded
    D2 = (d3.T @ a2) / len(x)
    D1 = (d2.T @ a1) / len(x)
    return serialize(D1, D2)

def reg_gradient(theta_serialize, x, y, lamda):
    # add the regularization term to every non-bias column of the gradient
    D = gradient(theta_serialize, x, y)
    D1, D2 = deserialize(D)
    theta1, theta2 = deserialize(theta_serialize)
    D1[:, 1:] = D1[:, 1:] + theta1[:, 1:] * lamda / len(x)
    D2[:, 1:] = D2[:, 1:] + theta2[:, 1:] * lamda / len(x)
    return serialize(D1, D2)

def nn_training(x, y, lamda):
    # 10285 = 25*401 + 10*26: both weight matrices flattened into one vector
    init_theta = np.random.uniform(-0.5, 0.5, 10285)
    res = minimize(fun=reg_cost,
                   x0=init_theta,
                   args=(x, y, lamda),
                   method='TNC',
                   jac=reg_gradient,
                   options={'maxiter': 300})
    return res

def plot_hidden_layer(theta):
    # visualize each of the 25 hidden units as the 20x20 image it responds to
    theta1, _ = deserialize(theta)
    hidden_layer = theta1[:, 1:]  # (25, 400), bias column dropped
    fig, ax = plt.subplots(ncols=5, nrows=5, figsize=(8, 8), sharex=True, sharey=True)
    for r in range(5):
        for c in range(5):
            ax[r, c].imshow(hidden_layer[5 * r + c].reshape(20, 20).T, cmap='gray_r')
    plt.xticks([])
    plt.yticks([])
    plt.savefig("hidden.png")
    plt.show()

def main():
    data = sio.loadmat('C:\\Users\\CSC\\Desktop\\hash\\ML_NG\\04-neural network(bp)\\ex4data1.mat')
    raw_x = data['X']
    raw_y = data['y']
    x = np.insert(raw_x, 0, values=1, axis=1)  # add the bias column to the inputs
    y = one_hot_encoder(raw_y)
    lamda = 3
    res = nn_training(x, y, lamda)
    # evaluate: the predicted class is the output unit with the largest activation
    raw_y = data['y'].reshape(5000, )
    _, _, _, _, h = feed_forward(res.x, x)
    y_pred = np.argmax(h, axis=1) + 1
    accuracy = np.mean(y_pred == raw_y)
    print(accuracy)
    plot_hidden_layer(res.x)

if __name__ == '__main__':
    main()
Reposted from: https://blog.csdn.net/weixin_45720246/article/details/116610620