# If you want more of my pastes visit: https://randompaste.000webhostapp.com/index.html
# --------------------------------------------------------------------------------------
# view my last post at: https://bitbin.it/rQfL5wi4/
# --------------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from random import shuffle
"""
File for implementation
of neural network from scratch
"""
def create_data():
    """Generate 100 training pairs (x, y) on the line y = 4x + 109.

    Returns:
        list of (x, y) tuples, x evenly spaced in [0, 1].

    Returned as a list (not a bare ``zip``) so callers can shuffle and
    index it — on Python 3, ``zip`` is a one-shot iterator and
    ``random.shuffle`` on it would raise.
    """
    x = np.linspace(0, 1, 100)
    y = 4 * x + 109
    return list(zip(x, y))
class model:
    """
    Minimal feed-forward network trained with batched gradient descent.

    +---------+
    |         |
    +--------+           |         |           +-------+
    |  1x1   |           |         |           |Output |
    | Input  +------>    | Hidden  +------>    |1x1    |
    +--------+   W1      |         |   W2      +-------+
                 1xH     |         |   Hx1
                         +---------+

    Architecture: 1 -> num_hidden -> 1 with linear (identity) activations.
    Forward pass:  hidden = W1.T @ x + bias1 ; predicted = W2.T @ hidden + bias2

    Gradients are accumulated per sample via calc_gradients() and applied
    (then cleared) by update_params().
    """

    def __init__(self, num_hidden=10, learning_rate=0.01, batch_size=10):
        # Fix: honor num_hidden instead of the hard-coded 10 (backward
        # compatible — the default is still 10).
        self.num_hidden = num_hidden
        self.W1 = np.random.randn(1, num_hidden)
        self.bias1 = np.zeros([num_hidden, 1])
        self.W2 = np.random.randn(num_hidden, 1)
        self.bias2 = np.zeros([1, 1])
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        # Fix: start from zeroed accumulators. The original used np.empty,
        # whose uninitialized garbage was summed into the first update.
        self._reset_gradients()

    def _reset_gradients(self):
        # Zeroed, learning-rate-scaled gradient accumulators for one batch.
        self.update2 = np.zeros([self.num_hidden, 1])       # dE/dW2 * lr
        self.update1 = np.zeros([1, self.num_hidden])       # dE/dW1 * lr
        self.updatebias2 = np.zeros([1, 1])                 # dE/dbias2 * lr
        self.updatebias1 = np.zeros([self.num_hidden, 1])   # dE/dbias1 * lr

    def fwd_pass(self, data, value):
        """
        Run one forward pass and accumulate this sample's gradients.

        Parameters:
            data: int, double
                Data point (x).
            value: int, double
                Corresponding y value of the data point.

        Returns:
            (1, 1) ndarray with the network's prediction.
        """
        data = np.array(data).reshape(1, 1)
        value = np.array(value).reshape(1, 1)
        temp = np.dot(self.W1.T, data) + self.bias1      # hidden activations, (H, 1)
        predicted = np.dot(self.W2.T, temp) + self.bias2  # (1, 1)
        error = predicted - value
        self.calc_gradients(data, error, temp)
        return predicted

    def calc_gradients(self, data, error, temp):
        """
        Accumulate the (learning-rate-scaled) gradients for one sample.

        Parameters:
            data: (1, 1) ndarray — input x
            error: (1, 1) ndarray — predicted - target
            temp: (num_hidden, 1) ndarray — hidden-layer activations

        Fix: accumulate by summation into zeroed buffers instead of
        np.hstack onto np.empty garbage that was never cleared.
        """
        self.update2 += self.learning_rate * error * temp
        self.updatebias2 += self.learning_rate * error
        # dE/dW1 = error * x * W2.T  (linear activations throughout)
        self.update1 += self.learning_rate * error * np.dot(self.W2, data).T
        self.updatebias1 += self.learning_rate * error * self.W2

    def update_params(self):
        """Apply the accumulated batch updates, then clear the accumulators.

        Fix: the original never reset the buffers, so every batch re-applied
        all previous batches' gradients on top of its own.
        """
        self.W2 -= self.update2
        self.W1 -= self.update1
        self.bias2 -= self.updatebias2
        self.bias1 -= self.updatebias1
        self._reset_gradients()

    def loss(self, data, predicted_value, value):
        """
        Apply the pending parameter updates and report the batch loss.

        Parameters:
            data: ndarray of batch inputs (unused here; kept for interface
                compatibility with existing callers).
            predicted_value: (batch, 1) ndarray of predictions.
            value: (batch, 1) ndarray of targets.

        Returns:
            0.5 * squared error per sample, shape (batch,).
        """
        self.update_params()
        # print() call form replaces the Python-2-only print statement.
        print(np.sum(predicted_value - value, axis=0))
        return 0.5 * np.sum(np.power(predicted_value - value, 2), axis=1)
def main():
    """Train the model for one epoch on shuffled data, in batches of 10.

    Fixes over the original:
      - list() around create_data() so shuffle() works even when the data
        source is an iterator (Python 3 zip).
      - loop starts at index 0 (the original skipped the first sample).
      - batch boundary uses (i + 1) % batch, and the loss is given the
        SAME slice of data/targets that produced the collected predictions
        (the original passed data[i:i+10] for predictions from i-9..i).
    """
    m1 = model()
    data = list(create_data())
    # Shuffle before splitting into batches.
    shuffle(data)
    data, value = zip(*data)
    batch = m1.batch_size
    for epoch in range(1):
        predicted_list = []
        for i in range(len(data)):
            predicted_list.append(m1.fwd_pass(np.array(data[i]), np.array(value[i])))
            if (i + 1) % batch == 0:
                start = i + 1 - batch
                predicted = np.array(predicted_list).reshape(batch, 1)
                m1.loss(np.array(data[start:i + 1]),
                        predicted,
                        np.array(value[start:i + 1]).reshape(batch, 1))
                predicted_list = []


if __name__ == "__main__":
    main()