import numpy as np
- # import cupy as cp
+ from numba.experimental import jitclass
from numba import njit, types, typed, prange
import z_helper as h
import time

+ from numba.core.errors import NumbaTypeSafetyWarning
+ import warnings
+
+ warnings.simplefilter('ignore', category=NumbaTypeSafetyWarning)
+
+ spec = [
+     ("layer_sizes", types.ListType(types.int64)),
+     ("layer_activations", types.ListType(types.FunctionType(types.float64[:, ::1](types.float64[:, ::1], types.boolean)))),
+     ("weights", types.ListType(types.float64[:, ::1])),
+     ("biases", types.ListType(types.float64[:, ::1])),
+     ("layer_outputs", types.ListType(types.float64[:, ::1])),
+     ("learning_rate", types.float64),
+ ]
+ @jitclass(spec)
+ class NeuralNetwork:
+     def __init__(self, layer_sizes, layer_activations, weights, biases, layer_outputs, learning_rate):
+         self.layer_sizes = layer_sizes
+         self.layer_activations = layer_activations
+         self.weights = weights
+         self.biases = biases
+         self.layer_outputs = layer_outputs
+         self.learning_rate = learning_rate
+

def make_neural_network(layer_sizes, layer_activations, learning_rate=0.05, low=-2, high=2):
+     for size in layer_sizes:
+         assert size > 0

    # Initialize typed layer sizes list.
    typed_layer_sizes = typed.List()
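For context on the hunk above: the spec pins each jitclass field to a concrete numba type, so the containers handed to the constructor must be numba.typed.List instances whose element types match the spec entries rather than plain Python lists. A minimal sketch (not from the commit; layer sizes invented purely for illustration) of a container that would satisfy the ("weights", types.ListType(types.float64[:, ::1])) entry:

import numpy as np
from numba import types, typed

sizes = [3, 5, 2]  # hypothetical layer sizes, illustration only

# A typed.List of C-contiguous float64 matrices matches types.ListType(types.float64[:, ::1]).
weights = typed.List.empty_list(types.float64[:, ::1])
for a, b in zip(sizes[:-1], sizes[1:]):
    weights.append(np.random.uniform(-2.0, 2.0, (a, b)))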
@@ -14,10 +39,10 @@ def make_neural_network(layer_sizes, layer_activations, learning_rate=0.05, low=
    # print(typeof(typed_layer_sizes))

    # Initialize typed layer activation method strings list.
-     typed_layer_activations = typed.List()
+     prototype = types.FunctionType(types.float64[:, ::1](types.float64[:, ::1], types.boolean))
+     typed_layer_activations = typed.List.empty_list(prototype)
    for activation in layer_activations:
        typed_layer_activations.append(activation)
-     # print(typeof(typed_layer_activations))

    # Initialize weights between every neuron in all adjacent layers.
    typed_weights = typed.List()
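The switch to typed.List.empty_list(prototype) matters because the list's element type must be the declared FunctionType, so it lines up with the layer_activations entry in the jitclass spec; appending jit-compiled functions into such a list is also, presumably, what triggers the NumbaTypeSafetyWarning silenced at the top of the file. A standalone sketch of the same pattern, with a made-up sigmoid that follows the assumed (float64[:, ::1], boolean) -> float64[:, ::1] convention (derivative=True is applied to already-activated outputs, mirroring how train_single calls the activation):

import numpy as np
from numba import njit, types, typed

prototype = types.FunctionType(types.float64[:, ::1](types.float64[:, ::1], types.boolean))

@njit
def sigmoid(x, derivative):
    out = np.empty_like(x)  # keeps the C-contiguous float64 layout the prototype expects
    if derivative:
        out[:, :] = x * (1.0 - x)           # x assumed to hold sigmoid outputs already
    else:
        out[:, :] = 1.0 / (1.0 + np.exp(-x))
    return out

activations = typed.List.empty_list(prototype)
activations.append(sigmoid)  # stored as a first-class function value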
@@ -38,49 +63,42 @@ def make_neural_network(layer_sizes, layer_activations, learning_rate=0.05, low=
    # print(typeof(typed_layer_outputs))

    typed_learning_rate = learning_rate
-     return (typed_layer_sizes, typed_layer_activations, typed_weights, typed_biases, typed_layer_outputs, typed_learning_rate)
-
-     # typed_layer_sizes = 0
-     # typed_layer_activations = 1
-     # typed_weights = 2
-     # typed_biases = 3
-     # typed_layer_outputs = 4
-     # typed_learning_rate = 5
+     return NeuralNetwork(typed_layer_sizes, typed_layer_activations, typed_weights, typed_biases, typed_layer_outputs, typed_learning_rate)


@njit
def calculate_output(input_data, nn):
-     assert len(input_data) == nn[0][0]
+     assert len(input_data) == nn.layer_sizes[0]
    y = input_data
-     for i in prange(len(nn[2])):
-         y = h.activation(np.dot(nn[2][i].T, y) + nn[3][i], nn[1][i], False)
+     for i in prange(len(nn.weights)):
+         y = nn.layer_activations[i](np.dot(nn.weights[i].T, y) + nn.biases[i], False)
    return y


@njit
def feed_forward_layers(input_data, nn):
-     assert len(input_data) == nn[0][0]
-     nn[4][0] = input_data
-     for i in range(len(nn[2])):
-         nn[4][i + 1] = h.activation(np.dot(nn[2][i].T, nn[4][i]) + nn[3][i], nn[1][i], False)
+     assert len(input_data) == nn.layer_sizes[0]
+     nn.layer_outputs[0] = input_data
+     for i in prange(len(nn.weights)):
+         nn.layer_outputs[i + 1] = nn.layer_activations[i](np.dot(nn.weights[i].T, nn.layer_outputs[i]) + nn.biases[i], False)


@njit
def train_single(input_data, desired_output_data, nn):
-     assert len(input_data) == nn[0][0]
-     assert len(desired_output_data) == nn[0][-1]
+     assert len(input_data) == nn.layer_sizes[0]
+     assert len(desired_output_data) == nn.layer_sizes[-1]
    feed_forward_layers(input_data, nn)

-     error = (desired_output_data - nn[4][-1]) * h.activation(nn[4][-1], nn[1][-1], True)
-     nn[2][-1] += (nn[5] * nn[4][-2] * error.T)
-     nn[3][-1] += nn[5] * error
+     error = (desired_output_data - nn.layer_outputs[-1]) * nn.layer_activations[-1](nn.layer_outputs[-1], True)
+     nn.weights[-1] += nn.learning_rate * nn.layer_outputs[-2] * error.T
+     nn.biases[-1] += nn.learning_rate * error

-     length_weights = len(nn[2])
-     for i in range(1, length_weights):
+     length_weights = len(nn.weights)
+     for i in prange(1, length_weights):
        i = length_weights - i - 1
-         error = np.dot(nn[2][i + 1], error) * h.activation(nn[4][i + 1], nn[1][i], True)
-         nn[2][i] += (nn[5] * nn[4][i] * error.T)
-         nn[3][i] += nn[5] * error
+         error = np.dot(nn.weights[i + 1], error) * nn.layer_activations[i](nn.layer_outputs[i + 1], True)
+         nn.weights[i] += nn.learning_rate * nn.layer_outputs[i] * error.T
+         nn.biases[i] += nn.learning_rate * error
    return nn
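One thing worth noting about the hunk above: calculate_output, feed_forward_layers, and the backward loop in train_single all carry a dependency from one iteration to the next (each layer consumes the previous layer's output, and error propagates backwards), so replacing range with prange stays correct here only because the decorators are plain @njit; without parallel=True, prange falls back to ordinary sequential range semantics. A small illustration of the distinction:

from numba import njit, prange

@njit  # no parallel=True: prange here behaves exactly like range
def sum_sequential(xs):
    total = 0.0
    for i in prange(len(xs)):
        total += xs[i]
    return total

@njit(parallel=True)  # only now does prange actually distribute iterations
def sum_parallel(xs):
    total = 0.0
    for i in prange(len(xs)):
        total += xs[i]  # recognized by numba as a reduction
    return total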
@@ -135,5 +153,5 @@ def evaluate(input_data, desired_output_data, nn):

@njit
def print_weights_and_biases(nn):
-     print(nn[2])
-     print(nn[3])
+     print(nn.weights)
+     print(nn.biases)
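Finally, a hedged end-to-end sketch of how the new jitclass-backed API might be driven; the network shape, the sigmoid from the earlier sketch, and the data are invented for illustration, and the real activation functions would normally come from z_helper:

import numpy as np

# Hypothetical 3-5-2 network; calculate_output applies
# activation(weights[i].T @ y + biases[i]) to column vectors.
nn = make_neural_network([3, 5, 2], [sigmoid, sigmoid], learning_rate=0.05)

x = np.random.uniform(0.0, 1.0, (3, 1))  # one input as a column vector
t = np.array([[1.0], [0.0]])             # desired output column vector

train_single(x, t, nn)       # one backpropagation step; nn is updated in place
y = calculate_output(x, nn)  # forward pass with the updated weights
print(y.shape)               # expected: (2, 1)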