4
4
5
5
6
6
class NeuralNetwork:
    """A fully connected feed-forward neural network trained with
    stochastic gradient descent (plain backpropagation).

    All per-layer data is handled as NumPy column vectors of shape ``(n, 1)``.
    """

    def __init__(self, layer_sizes, layer_activations, learning_rate=0.1, low=-2, high=2):
        """Build the network and randomly initialise its parameters.

        Parameters
        ----------
        layer_sizes : sequence of int
            Neuron count per layer, input layer first; needs at least an
            input and an output layer.
        layer_activations : sequence of callable
            One activation per non-input layer. Each is called as
            ``f(x, derivative)`` — ``derivative=False`` applies the function,
            ``True`` applies its derivative (expressed in terms of the output).
        learning_rate : float
            Step size for the gradient updates.
        low, high : float
            Bounds of the uniform distribution used for weight/bias init.
        """
        assert len(layer_sizes) >= 2
        assert len(layer_sizes) - 1 == len(layer_activations)

        # Weight matrix between every pair of adjacent layers.
        # BUGFIX: these are kept as plain Python lists. The matrices have
        # different shapes, so wrapping them in np.array(...) builds a
        # "ragged" array, which raises ValueError on NumPy >= 1.24
        # (and was deprecated before that). Lists support all the uses
        # below (indexing, per-element +=, iteration) identically.
        self.weights = [np.random.uniform(low, high, (layer_sizes[i - 1], layer_sizes[i]))
                        for i in range(1, len(layer_sizes))]

        # One bias column vector per non-input layer (same ragged-shape issue).
        self.biases = [np.random.uniform(low, high, (layer_sizes[i], 1))
                       for i in range(1, len(layer_sizes))]

        # Activation of every neuron in every layer; filled by calculate_output.
        self.layer_outputs = [np.zeros((layer_sizes[i], 1)) for i in range(len(layer_sizes))]

        self.layer_activations = layer_activations
        self.layer_sizes = layer_sizes
        self.learning_rate = learning_rate

    def calculate_output(self, input_data):
        """Forward-pass ``input_data`` through the network.

        Stores each layer's activation in ``self.layer_outputs`` and returns
        the output layer's activation (shape ``(layer_sizes[-1], 1)``).
        """
        assert len(input_data) == self.layer_sizes[0]

        y = input_data
        self.layer_outputs[0] = y
        for i in range(len(self.weights)):
            # Pre-activation: W^T y + b, then the layer's activation function.
            y = self.layer_activations[i](np.dot(self.weights[i].T, y) + self.biases[i], False)
            self.layer_outputs[i + 1] = y
        return y

    def train(self, input_data, desired_output_data):
        """Run one forward/backward pass and update weights and biases in place.

        ``input_data`` and ``desired_output_data`` are column vectors matching
        the input and output layer sizes respectively.
        """
        assert input_data.shape[0] == self.layer_sizes[0]
        assert desired_output_data.shape[0] == self.layer_sizes[-1]

        self.calculate_output(input_data)

        # Output-layer delta (error * f'(output)) and parameter update.
        error = (desired_output_data - self.layer_outputs[-1]) * self.layer_activations[-1](self.layer_outputs[-1], True)
        self.weights[-1] += (self.learning_rate * self.layer_outputs[-2] * error.T)
        self.biases[-1] += self.learning_rate * error

        # Backpropagate the delta through the remaining layers.
        for i in range(len(self.weights) - 1)[::-1]:
            error = np.dot(self.weights[i + 1], error) * self.layer_activations[i](self.layer_outputs[i + 1], True)
            self.weights[i] += (self.learning_rate * self.layer_outputs[i] * error.T)
            self.biases[i] += self.learning_rate * error

    def calculate_SSE(self, input_data, desired_output_data):
        """Return the sum of squared errors of the network on one sample."""
        assert input_data.shape[0] == self.layer_sizes[0]
        assert desired_output_data.shape[0] == self.layer_sizes[-1]

        return np.sum(np.power(desired_output_data - self.calculate_output(input_data), 2))

    def calculate_MSE(self, input_data, output_data):
        """Return the mean per-sample SSE over a data set.

        The first axis of both arrays indexes samples.
        """
        assert input_data.shape[0] == output_data.shape[0]
        size = input_data.shape[0]
        sum_error = 0
        for i in range(size):
            sum_error += self.calculate_SSE(input_data[i], output_data[i])
        return sum_error / size

    def print_weights_and_biases(self):
        """Dump the raw weight matrices and bias vectors to stdout (debugging aid)."""
        print(self.weights)
        print(self.biases)
# ---- Experiment driver: repeated k-fold training runs with early stopping ----
data_output = data_output.reshape((len(data_input), -1, 1))

for run in range(4):
    # Fixed seed so each run is reproducible.
    random_seed = 10
    np.random.seed(random_seed)

    # Split inputs and targets with the same fold assignment.
    train_input, validate_input, test_input = h.kfold(4, data_input, random_seed)
    train_output, validate_output, test_output = h.kfold(4, data_output, random_seed)

    nn = NeuralNetwork(layer_sizes=[10, 15, 7], layer_activations=[h.sigmoid, h.sigmoid])

    previous_mse = 1
    current_mse = 0
    epochs = 0
    begin_time = time.time_ns()
    # Keep training while the validation MSE is still improving.
    while (current_mse < previous_mse):
        epochs += 1
        previous_mse = nn.calculate_MSE(validate_input, validate_output)
        for sample in range(len(train_input)):
            nn.train(train_input[sample], train_output[sample])
        current_mse = nn.calculate_MSE(validate_input, validate_output)
    end_time = time.time_ns()

    # Final quality report for this run.
    train_mse = nn.calculate_MSE(train_input, train_output)
    test_mse = nn.calculate_MSE(test_input, test_output)
    print("Seed:", random_seed, "Epochs:", epochs, "Time:", (end_time - begin_time) / 1e9, "Tr:", train_mse, "V:", current_mse, "T:", test_mse)