Three-layer neural network
# python notebook for Make Your Own Neural Network
# code for 3-layer neural network, and code for learning the MNIST dataset
# 20190603
import numpy as np
import matplotlib.pyplot as plt
# scipy.special for the sigmoid function expit()
import scipy.special as special
# ensure the plots are inside this notebook, not an external window
%matplotlib inline
# neural network class definition
class neuralNetwork(object):
# initialise the neural network
def __init__(self, inputNodes, hiddenNodes, outputNodes, learningRate=0.5):
# set number of nodes in each input, hidden, output layer
self.iNodes = inputNodes
self.hNodes = hiddenNodes
self.oNodes = outputNodes
# link weight matrices, wih and who
# weights inside the arrays are w_i_j, where link is from node i to node j in
# the next layer
# w11 w21
# w12 w22 etc
# pow(x, y) returns x raised to the power y
self.wih = np.random.normal(0.0, pow(self.hNodes, -0.5), (self.hNodes, self.iNodes))
self.who = np.random.normal(0.0, pow(self.oNodes, -0.5), (self.oNodes, self.hNodes))
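# the initial weights are sampled from a normal distribution centred on 0.0,
# with standard deviation 1/sqrt(number of nodes in the layer the links feed into),
# which is what pow(nodes, -0.5) supplies above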
# learning rate
self.lr = learningRate
# activation function is the sigmoid function
# the lambda takes x and returns special.expit(x)
self.activation_function = lambda x: special.expit(x)
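# expit(x) = 1 / (1 + exp(-x)), so every node output is squashed into the range (0, 1)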
pass
# train the neural network
def train(self, inputs_list, targets_list):
# convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
# calculate signals into hidden layer
hidden_inputs = np.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# calculate signals into final output layer
final_inputs = np.dot(self.who, hidden_outputs)
# calculate signals emerging from final output layer
final_outputs = self.activation_function(final_inputs)
# error is the (targets - final_outputs)
output_errors = (targets - final_outputs)
# hidden layer error is the output_errors, split by weights, recombined at
# hidden nodes
hidden_errors = np.dot(self.who.T, output_errors)
# update the weights for the links between the hidden and output layers
# np.transpose(a) returns the transpose of matrix a
self.who += self.lr * np.dot(output_errors * final_outputs *
(1.0 - final_outputs), np.transpose(hidden_outputs))
# update the weights for the links between the input and hidden layers
self.wih += self.lr * np.dot(hidden_errors * hidden_outputs *
(1.0 - hidden_outputs), np.transpose(inputs))
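# both updates perform one gradient-descent step on the squared error:
#   delta_W = lr * (errors * outputs * (1 - outputs)) . outputs_of_previous_layer^T
# the outputs * (1 - outputs) factor is the derivative of the sigmoid at each node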
pass
# query the neural network
def query(self, inputs_list):
# convert inputs list to 2d array
# ndmin=2 forces the array to have at least two dimensions
# .T takes the transpose, turning the row into a column vector
inputs = np.array(inputs_list, ndmin=2).T
# calculate signals into hidden layer
hidden_inputs = np.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# calculate signals into final output layer
final_inputs = np.dot(self.who, hidden_outputs)
# calculate the signals emerging from final output layer
final_outputs = self.activation_function(final_inputs)
return final_outputs
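Before training on MNIST, the class can be smoke-tested on its own. The snippet below is not part of the original notebook; the node counts and input values are arbitrary, and it simply checks that query() returns one sigmoid output per output node.
# smoke test with arbitrary sizes: 3 input, 3 hidden and 3 output nodes
demo_net = neuralNetwork(3, 3, 3, learningRate=0.3)
# the result is a (3, 1) column of values between 0 and 1
print(demo_net.query([1.0, 0.5, -1.5]))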
# number of input, hidden and output nodes
input_nodes = 784
hidden_nodes = 100
output_nodes = 10
# learning rate is 0.1
learning_rate = 0.1
# create an instance of the neural network
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# load the mnist training data CSV file into a list
training_data_file = open("mnist_dataset/mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
# train the neural network
# go through all records in the training data set
final_training_data = []
for record in training_data_list:
# split the record by the ',' commas
all_values = record.split(',')
# scale and shift the inputs
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# create the target output values (all 0.01, except the desired label which is 0.99)
targets = np.zeros(output_nodes) + 0.01
# all_values[0] is the target label for this record
targets[int(all_values[0])] = 0.99
# store the prepared (inputs, targets) pair for training
final_training_data.append([inputs, targets])
pass
# train the model on every prepared record
for i in range(len(final_training_data)):
n.train(final_training_data[i][0], final_training_data[i][1])
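The loop above makes a single pass over the training data. As a sketch of a common refinement (not in the original notebook), training can be repeated for several epochs; running this after the loop above simply continues training the same network n.
# hypothetical refinement: repeat training for several passes (epochs) over the data
epochs = 5
for e in range(epochs):
    for inputs, targets in final_training_data:
        n.train(inputs, targets)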
# load the mnist test data CSV file into a list
test_data_file = open("mnist_dataset/mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
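matplotlib is imported at the top of the notebook but otherwise unused; as a quick sanity check, one test record can be drawn as an image (a minimal sketch, assuming the usual 28x28 MNIST layout).
# optional: visualise the first test record as a 28x28 greyscale image
sample_values = test_data_list[0].split(',')
print("Label of first test record:", sample_values[0])
image_array = np.asfarray(sample_values[1:]).reshape((28, 28))
plt.imshow(image_array, cmap='Greys')
plt.show()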
# test the neural network
# scorecard for how well the network performs, initially empty
scorecard = []
# go through all the records in the test data set
for record in test_data_list:
# split the record by the ',' commas
all_values = record.split(',')
# correct answer is the first value
correct_label = int(all_values[0])
# print("Correct label : ", correct_label)
# scale and shift the inputs
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# query the network
outputs = n.query(inputs)
# the index of the highest value corresponds to the label
label = np.argmax(outputs)
# print("NeuralNetwork's answer : ", label)
# append correct or incorrect to list
if(label == correct_label):
# network's answer matches correct answer, add 1 to scorecard
scorecard.append(1)
else:
# network's answer doesn't match correct answer, add 0 to scorecard
scorecard.append(0)
pass
pass
# calculate the performance score, the fraction of correct answers
scorecard_array = np.asarray(scorecard)
print("Performance : ", scorecard_array.sum() / scorecard_array.size)
Four-layer neural network
# python notebook for Make Your Own Neural Network
# code for 4-layer neural network, and code for learning the MNIST dataset
# 20190604
import numpy as np
import matplotlib.pyplot as plt
# scipy.special for the sigmoid function expit()
import scipy.special as special
# ensure the plots are inside this notebook, not an external window
%matplotlib inline
# neural network class definition
class neuralNetwork(object):
# initialise the neural network
def __init__(self, inputNodes, hiddenNodes, hiddenNodes_2, outputNodes, learningRate=0.5):
# set number of nodes in each input, hidden, output layer
self.iNodes = inputNodes
self.hNodes = hiddenNodes
self.hNodes_2 = hiddenNodes_2
self.oNodes = outputNodes
# link weight matrices, wih and who
# weights inside the arrays are w_i_j, where link is from node i to node j in
# the next layer
# w11 w21
# w12 w22 etc
# pow(x, y) returns x raised to the power y
self.wih = np.random.normal(0.0, pow(self.hNodes, -0.5), (self.hNodes, self.iNodes))
self.whh = np.random.normal(0.0, pow(int((self.hNodes + self.hNodes_2) / 2) , -0.5), (self.hNodes_2, self.hNodes))
self.who = np.random.normal(0.0, pow(self.oNodes, -0.5), (self.oNodes, self.hNodes_2))
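# note: whh scales its spread by the average of the two hidden-layer sizes,
# while wih and who scale by the size of the layer the links feed into, as in the 3-layer network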
# learning rate
self.lr = learningRate
# activation function is the sigmoid function
# the lambda takes x and returns special.expit(x), the logistic sigmoid
self.activation_function = lambda x: special.expit(x)
pass
# train the neural network
def train(self, inputs_list, targets_list):
# convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
# calculate signals into hidden layer
hidden_inputs = np.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# calculate signals into hidden_2 layer
hidden_2_inputs = np.dot(self.whh, hidden_outputs)
# calculate the signals emerging from hidden_2 layer
hidden_2_outputs = self.activation_function(hidden_2_inputs)
# calculate signals into final output layer
final_inputs = np.dot(self.who, hidden_2_outputs)
# calculate signals emerging from final output layer
final_outputs = self.activation_function(final_inputs)
# error is the (targets - final_outputs)
output_errors = (targets - final_outputs)
# hidden_2 layer error is the output_errors, split by the who weights, recombined at
# the hidden_2 nodes
hidden_2_errors = np.dot(self.who.T, output_errors)
# hidden layer error is the hidden_2_errors, split by the whh weights, recombined at
# the hidden nodes
hidden_errors = np.dot(self.whh.T, hidden_2_errors)
# update the weights for the links between the hidden and output layers
# np.transpose(a) returns the transpose of matrix a
self.who += self.lr * np.dot(output_errors * final_outputs *
(1.0 - final_outputs), np.transpose(hidden_2_outputs))
# update the weights for the links between the hidden and hidden_2 layers
self.whh += self.lr * np.dot(hidden_2_errors * hidden_2_outputs *
(1.0 - hidden_2_outputs), np.transpose(hidden_outputs))
# update the weights for the links between the input and hidden layers
self.wih += self.lr * np.dot(hidden_errors * hidden_outputs *
(1.0 - hidden_outputs), np.transpose(inputs))
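# the same gradient-descent update is applied to each weight matrix in turn,
# using the error assigned to the layer that matrix feeds into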
pass
# query the neural network
def query(self, inputs_list):
# convert inputs list to 2d array
# ndmin=2 forces the array to have at least two dimensions
# .T takes the transpose, turning the row into a column vector
inputs = np.array(inputs_list, ndmin=2).T
# calculate signals into hidden layer
hidden_inputs = np.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# calculate signals into hidden_2 layer
hidden_2_inputs = np.dot(self.whh, hidden_outputs)
# calculate the signals emerging from hidden_2 layer
hidden_2_outputs = self.activation_function(hidden_2_inputs)
# calculate signals into final output layer
final_inputs = np.dot(self.who, hidden_2_outputs)
# calculate the signals emerging from final output layer
final_outputs = self.activation_function(final_inputs)
return final_outputs
# number of input, hidden and output nodes
input_nodes = 784
hidden_nodes = 100
hidden_2_nodes = 100
output_nodes = 10
# learning rate is 0.1
learning_rate = 0.1
# create an instance of the neural network
n = neuralNetwork(input_nodes, hidden_nodes, hidden_2_nodes, output_nodes, learning_rate)
# load the mnist training data CSV file into a list
training_data_file = open("mnist_dataset/mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
# train the neural network
# go through all records in the training data set
final_training_data = []
for record in training_data_list:
# split the record by the ',' commas
all_values = record.split(',')
# scale and shift the inputs
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# create the target output values (all 0.01, except the desired label which is 0.99)
targets = np.zeros(output_nodes) + 0.01
# all_values[0] is the target label for this record
targets[int(all_values[0])] = 0.99
# store the prepared (inputs, targets) pair for training
final_training_data.append([inputs, targets])
pass
# train the model on every prepared record
for i in range(len(final_training_data)):
n.train(final_training_data[i][0], final_training_data[i][1])
# load the mnist test data CSV file into a list
test_data_file = open("mnist_dataset/mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# test the neural network
# scorecard for how well the network performs, initially empty
scorecard = []
# go through all the records in the test data set
for record in test_data_list:
# split the record by the ',' commas
all_values = record.split(',')
# correct answer is the first value
correct_label = int(all_values[0])
# print("Correct label : ", correct_label)
# scale and shift the inputs
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# query the network
outputs = n.query(inputs)
# the index of the highest value corresponds to the label
label = np.argmax(outputs)
# print("NeuralNetwork's answer : ", label)
# append correct or incorrect to list
if(label == correct_label):
# network's answer matches correct answer, add 1 to scorecard
scorecard.append(1)
else:
# network's answer doesn't match correct answer, add 0 to scorecard
scorecard.append(0)
pass
pass
# calculate the performance score, the fraction of correct answers
scorecard_array = np.asarray(scorecard)
print("Performance : ", scorecard_array.sum() / scorecard_array.size)