Spaces:
Sleeping
Sleeping
ricardo-lsantos
committed on
Commit
•
b7f5a9c
1
Parent(s):
5bda5fb
Fixed minor bugs. Fixed the UI parameters to make the model easier to train.
Browse files
app.py
CHANGED
@@ -1,46 +1,77 @@
|
|
1 |
import streamlit as st
|
|
|
2 |
import json
|
3 |
-
from
|
4 |
-
|
5 |
-
# INPUTS = [[0,0],[0,1],[1,0],[1,1]]
|
6 |
-
# OUTPUTS = [[0],[1],[1],[0]]
|
7 |
-
# EPOCHS = 1000000
|
8 |
-
# ALPHAS = 20
|
9 |
|
10 |
INPUTS = [[0,0],[0,1],[1,0],[1,1]]
|
11 |
OUTPUTS = [[0],[1],[1],[0]]
|
12 |
|
13 |
-
def
|
14 |
-
|
15 |
-
|
16 |
-
print(modelo)
|
17 |
-
# Save model to file
|
18 |
-
# save_model(modelo, "modelo.json")
|
19 |
-
st.download_button(
|
20 |
-
label="Download model",
|
21 |
-
data=json.dumps(modelo),
|
22 |
-
file_name="modelo.json",
|
23 |
-
mime="application/json",
|
24 |
-
)
|
25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
26 |
for i in range(4):
|
27 |
-
result = predict(INPUTS[i][0],INPUTS[i][1], activation=sigmoid)
|
28 |
-
|
29 |
-
|
|
|
|
|
|
|
30 |
|
|
|
|
|
|
|
31 |
|
32 |
def sidebar():
|
33 |
# Neural network controls
|
34 |
st.sidebar.header('Neural Network Controls')
|
35 |
st.sidebar.text('Number of epochs')
|
36 |
-
epochs = st.sidebar.slider('Epochs',
|
37 |
st.sidebar.text('Learning rate')
|
38 |
alphas = st.sidebar.slider('Alphas', 1, 100, 20)
|
39 |
-
|
40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
|
|
|
|
|
|
|
42 |
|
43 |
def app():
|
|
|
44 |
st.title('Simple Neural Network App')
|
45 |
st.write('This is the Neural Network image we are trying to implement!')
|
46 |
st.image('nn.png', width=500)
|
|
|
1 |
import streamlit as st
|
2 |
+
from nn import NeuralNetwork
|
3 |
import json
|
4 |
+
from utils import sigmoid, sigmoid_prime
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
INPUTS = [[0,0],[0,1],[1,0],[1,1]]
|
7 |
OUTPUTS = [[0],[1],[1],[0]]
|
8 |
|
9 |
+
def resetSession():
    """Clear the stored network and its training counter from the session."""
    st.session_state.train_count = 0
    st.session_state.nn = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
|
13 |
+
## Controller Function
|
14 |
+
def runNN():
    """Run the session's trained network on every XOR case and render a results table.

    Reads the model from ``st.session_state.nn`` and displays one row per
    sample: the input pair, expected output, raw prediction, rounded
    prediction, and whether the rounded value matches.
    """
    nn = st.session_state.nn
    df = {
        "input": [],
        "expected": [],
        "predicted": [],
        "rounded": [],
        "correct": [],
    }
    # Generalized: iterate over all samples instead of a hard-coded range(4).
    for i in range(len(INPUTS)):
        result = nn.predict(INPUTS[i][0], INPUTS[i][1], activation=sigmoid)
        rounded = round(result)  # compute once instead of three times
        df["input"].append(f"{INPUTS[i][0]} xor {INPUTS[i][1]}")
        df["expected"].append(OUTPUTS[i][0])
        df["predicted"].append(result)
        df["rounded"].append(rounded)
        df["correct"].append('correct' if rounded == OUTPUTS[i][0] else 'incorrect')

    st.dataframe(df)
|
33 |
+
|
34 |
|
35 |
def sidebar():
    """Render the sidebar: training hyperparameter sliders and model actions.

    Model-dependent actions (train / run / save) are only shown once a
    network exists in ``st.session_state.nn``.
    """
    # Neural network controls
    st.sidebar.header('Neural Network Controls')
    st.sidebar.text('Number of epochs')
    epochs = st.sidebar.slider('Epochs', 1, 10000, 500)
    st.sidebar.text('Learning rate')
    alphas = st.sidebar.slider('Alphas', 1, 100, 20)
    col1, col2 = st.sidebar.columns(2)
    if col1.button('New Model'):
        btnNewModel()

    if col2.button('Reset Model'):
        # Fix: use the btnResetModel handler (previously dead code) so the
        # user also gets the "Model reset" confirmation in the sidebar.
        btnResetModel()

    if "nn" in st.session_state and st.session_state.nn is not None:
        if st.sidebar.button('Train Model'):
            btnTrainModel(epochs, alphas)
        if st.sidebar.button('Run Neural Network'):
            btnRunModel()
        st.sidebar.download_button(label="Save Model", data=json.dumps(st.session_state.nn.getModelJson()), file_name="model.json", mime="application/json")
|
55 |
+
|
56 |
+
def btnNewModel():
    """Discard any existing model and store a freshly initialised network."""
    resetSession()
    fresh_network = NeuralNetwork()
    st.session_state.nn = fresh_network
    st.sidebar.text("New model created")
|
60 |
+
|
61 |
+
def btnTrainModel(epochs, alphas):
    """Train the session's network on XOR data and report the training count."""
    network = st.session_state.nn
    network.train(inputs=INPUTS, outputs=OUTPUTS, epochs=epochs, alpha=alphas)
    st.session_state.train_count += 1
    count = st.session_state.train_count
    st.sidebar.text(f"Model trained {count} times")
|
65 |
+
|
66 |
+
def btnRunModel():
    """Button handler: delegate to the controller that runs the network."""
    runNN()
|
68 |
|
69 |
+
def btnResetModel():
    """Button handler: clear the session model and confirm in the sidebar."""
    resetSession()
    st.sidebar.text("Model reset")
|
72 |
|
73 |
def app():
    """Render the main page: title, description and the target network diagram."""
    # initSession()
    st.title('Simple Neural Network App')
    st.write('This is the Neural Network image we are trying to implement!')
    # NOTE(review): assumes nn.png exists alongside the app — confirm in repo.
    st.image('nn.png', width=500)
|
nn.py
CHANGED
@@ -1,196 +1,144 @@
|
|
1 |
import random
|
2 |
-
import math
|
3 |
import json
|
|
|
|
|
4 |
|
5 |
-
INPUTS = [[0,0],[0,1],[1,0],[1,1]]
|
6 |
-
OUTPUTS = [[0],[1],[1],[0]]
|
7 |
EPOCHS = 1000000
|
8 |
ALPHAS = 20
|
9 |
|
10 |
-
|
11 |
WEPOCHS = EPOCHS // 100
|
12 |
-
|
13 |
-
|
14 |
VARIANCE_W = 0.5
|
15 |
-
VARIANCE_B =
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
# VARIANCE_W = 0.5
|
36 |
-
# VARIANCE_B = 1
|
37 |
-
# w11 = VARIANCE_W
|
38 |
-
# w21 = VARIANCE_W
|
39 |
-
# b1 = VARIANCE_B
|
40 |
-
|
41 |
-
# w12 = VARIANCE_W
|
42 |
-
# w22 = VARIANCE_W
|
43 |
-
# b2 = VARIANCE_B
|
44 |
-
|
45 |
-
# w13 = VARIANCE_W
|
46 |
-
# w23 = VARIANCE_W
|
47 |
-
# b3 = VARIANCE_B
|
48 |
-
|
49 |
-
# o1 = VARIANCE_W
|
50 |
-
# o2 = VARIANCE_W
|
51 |
-
# o3 = VARIANCE_W
|
52 |
-
# ob = VARIANCE_B
|
53 |
-
|
54 |
-
|
55 |
-
def sigmoid(x):
|
56 |
-
return 1.0 / (1.0 + math.exp(-x))
|
57 |
-
|
58 |
-
|
59 |
-
def sigmoid_prime(x): # x already sigmoided
|
60 |
-
return x * (1 - x)
|
61 |
-
|
62 |
-
def relu(x):
|
63 |
-
return max(0,x)
|
64 |
-
|
65 |
-
def relu_prime(x):
|
66 |
-
return 1 if x>0 else 0
|
67 |
-
|
68 |
-
def tanh(x):
|
69 |
-
return math.tanh(x)
|
70 |
-
|
71 |
-
def tanh_prime(x):
|
72 |
-
return 1 - x**2
|
73 |
-
|
74 |
-
def softmax(x):
|
75 |
-
return math.exp(x) / (math.exp(x) + 1)
|
76 |
-
|
77 |
-
def softmax_prime(x):
|
78 |
-
return x * (1 - x)
|
79 |
-
|
80 |
-
def predict(i1, i2, activation=sigmoid):
|
81 |
-
s1 = w11 * i1 + w21 * i2 + b1
|
82 |
-
# s1 = sigmoid(s1)
|
83 |
-
s1 = activation(s1)
|
84 |
-
s2 = w12 * i1 + w22 * i2 + b2
|
85 |
-
# s2 = sigmoid(s2)
|
86 |
-
s2 = activation(s2)
|
87 |
-
s3 = w13 * i1 + w23 * i2 + b3
|
88 |
-
# s3 = sigmoid(s3)
|
89 |
-
s3 = activation(s3)
|
90 |
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
96 |
|
97 |
|
98 |
-
def learn(i1,i2,target, activation, activation_prime, alpha=0.2):
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
s3 = activation(s3)
|
111 |
-
|
112 |
-
output = s1 * o1 + s2 * o2 + s3 * o3 + ob
|
113 |
-
# output = sigmoid(output)
|
114 |
-
output = activation(output)
|
115 |
-
|
116 |
-
error = target - output
|
117 |
-
# derror = error * sigmoid_prime(output)
|
118 |
-
derror = error * activation_prime(output)
|
119 |
-
|
120 |
-
# ds1 = derror * o1 * sigmoid_prime(s1)
|
121 |
-
ds1 = derror * o1 * activation_prime(s1)
|
122 |
-
# ds2 = derror * o2 * sigmoid_prime(s2)
|
123 |
-
ds2 = derror * o2 * activation_prime(s2)
|
124 |
-
# ds3 = derror * o3 * sigmoid_prime(s3)
|
125 |
-
ds3 = derror * o3 * activation_prime(s3)
|
126 |
-
|
127 |
-
o1 += alpha * s1 * derror
|
128 |
-
o2 += alpha * s2 * derror
|
129 |
-
o3 += alpha * s3 * derror
|
130 |
-
ob += alpha * derror
|
131 |
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
def train(epochs=EPOCHS, alpha=ALPHAS):
|
144 |
-
modelo = None
|
145 |
-
for epoch in range(1,epochs+1):
|
146 |
-
indexes = [0,1,2,3]
|
147 |
-
random.shuffle(indexes)
|
148 |
-
for j in indexes:
|
149 |
-
learn(INPUTS[j][0],INPUTS[j][1],OUTPUTS[j][0], activation=sigmoid, activation_prime=sigmoid_prime, alpha=alpha)
|
150 |
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
158 |
|
159 |
-
modelo = {
|
160 |
-
"w11": w11,
|
161 |
-
"w21": w21,
|
162 |
-
"b1": b1,
|
163 |
-
"w12": w12,
|
164 |
-
"w22": w22,
|
165 |
-
"b2": b2,
|
166 |
-
"w13": w13,
|
167 |
-
"w23": w23,
|
168 |
-
"b3": b3,
|
169 |
-
"o1": o1,
|
170 |
-
"o2": o2,
|
171 |
-
"o3": o3,
|
172 |
-
"ob": ob
|
173 |
-
}
|
174 |
-
return modelo
|
175 |
-
|
176 |
-
def save_model(modelo, filename):
|
177 |
-
with open(filename, 'w') as json_file:
|
178 |
-
json.dump(modelo, json_file)
|
179 |
|
180 |
## Main
|
181 |
def main():
|
|
|
|
|
182 |
# Train model
|
183 |
-
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
|
|
|
|
|
|
|
|
|
|
|
188 |
|
189 |
for i in range(4):
|
190 |
-
result = predict(INPUTS[i][0],INPUTS[i][1], activation=sigmoid)
|
191 |
print("for input", INPUTS[i], "expected", OUTPUTS[i][0], "predicted", f"{result:4.4}", "which is", "correct" if round(result)==OUTPUTS[i][0] else "incorrect")
|
192 |
-
# print("for input", INPUTS[i], "expected", OUTPUTS[i][0], "predicted", result, "which is", "correct" if round(result)==OUTPUTS[i][0] else "incorrect")
|
193 |
-
|
194 |
|
195 |
if __name__ == "__main__":
|
196 |
main()
|
|
|
1 |
import random
|
|
|
2 |
import json
|
3 |
+
from typing import Any
|
4 |
+
from utils import sigmoid, sigmoid_prime
|
5 |
|
|
|
|
|
6 |
EPOCHS = 1000000
|
7 |
ALPHAS = 20
|
8 |
|
|
|
9 |
WEPOCHS = EPOCHS // 100
|
|
|
|
|
10 |
VARIANCE_W = 0.5
|
11 |
+
VARIANCE_B = 1
|
12 |
+
|
13 |
+
class NeuralNetwork:
    """Minimal 2-3-1 feed-forward network trained with plain gradient descent.

    Two inputs, one hidden layer of three neurons, a single output neuron.
    Weights are stored as individual scalar attributes (no vectors), matching
    the hand-derived backpropagation in `learn`.
    """

    def __init__(self):
        # Hidden-layer weights start at small random values so the three
        # hidden neurons break symmetry; biases start at VARIANCE_B.
        self._w11 = random.uniform(-VARIANCE_W, VARIANCE_W)
        self._w21 = random.uniform(-VARIANCE_W, VARIANCE_W)
        self._b1 = VARIANCE_B

        self._w12 = random.uniform(-VARIANCE_W, VARIANCE_W)
        self._w22 = random.uniform(-VARIANCE_W, VARIANCE_W)
        self._b2 = VARIANCE_B

        self._w13 = random.uniform(-VARIANCE_W, VARIANCE_W)
        self._w23 = random.uniform(-VARIANCE_W, VARIANCE_W)
        self._b3 = VARIANCE_B

        # Output-layer weights and bias.
        self._o1 = random.uniform(-VARIANCE_W, VARIANCE_W)
        self._o2 = random.uniform(-VARIANCE_W, VARIANCE_W)
        self._o3 = random.uniform(-VARIANCE_W, VARIANCE_W)
        self._ob = VARIANCE_B

    def predict(self, i1, i2, activation=sigmoid):
        """Forward pass: return the network's output for inputs (i1, i2)."""
        s1 = activation(self._w11 * i1 + self._w21 * i2 + self._b1)
        s2 = activation(self._w12 * i1 + self._w22 * i2 + self._b2)
        s3 = activation(self._w13 * i1 + self._w23 * i2 + self._b3)

        output = s1 * self._o1 + s2 * self._o2 + s3 * self._o3 + self._ob
        return activation(output)

    def learn(self, i1, i2, target, activation=sigmoid, activation_prime=sigmoid_prime, alpha=0.2):
        """Take one stochastic-gradient step on a single training sample.

        `activation_prime` must accept the *activated* value (e.g.
        sigmoid_prime receives sigmoid(x), not x).
        """
        # Forward pass, kept explicit so the gradients below line up.
        s1 = activation(self._w11 * i1 + self._w21 * i2 + self._b1)
        s2 = activation(self._w12 * i1 + self._w22 * i2 + self._b2)
        s3 = activation(self._w13 * i1 + self._w23 * i2 + self._b3)
        output = activation(s1 * self._o1 + s2 * self._o2 + s3 * self._o3 + self._ob)

        # Backward pass: delta at the output, then at each hidden neuron.
        # Hidden deltas are computed BEFORE the output weights are updated,
        # so they use the weights that produced this forward pass.
        error = target - output
        derror = error * activation_prime(output)
        ds1 = derror * self._o1 * activation_prime(s1)
        ds2 = derror * self._o2 * activation_prime(s2)
        ds3 = derror * self._o3 * activation_prime(s3)

        # Output-layer update (hidden activations act as its inputs).
        self._o1 += alpha * s1 * derror
        self._o2 += alpha * s2 * derror
        self._o3 += alpha * s3 * derror
        self._ob += alpha * derror

        # Hidden-layer update.
        self._w11 += alpha * i1 * ds1
        self._w21 += alpha * i2 * ds1
        self._b1 += alpha * ds1
        self._w12 += alpha * i1 * ds2
        self._w22 += alpha * i2 * ds2
        self._b2 += alpha * ds2
        self._w13 += alpha * i1 * ds3
        self._w23 += alpha * i2 * ds3
        self._b3 += alpha * ds3

    def train(self, inputs=None, outputs=None, epochs=EPOCHS, alpha=ALPHAS):
        """Train on (inputs, outputs) sample pairs for `epochs` epochs.

        Fixes: the defaults were mutable lists (`inputs=[]`) — replaced with
        None sentinels (backward-compatible: behavior for explicit empty
        lists is the same no-op).  Also generalized from a hard-coded
        4-sample dataset to any number of samples.
        """
        inputs = inputs if inputs is not None else []
        outputs = outputs if outputs is not None else []
        if not inputs or not outputs:
            return

        n = len(inputs)
        for epoch in range(1, epochs + 1):
            # Visit samples in a fresh random order each epoch (SGD shuffle).
            indexes = list(range(n))
            random.shuffle(indexes)
            for j in indexes:
                self.learn(inputs[j][0], inputs[j][1], outputs[j][0],
                           activation=sigmoid, activation_prime=sigmoid_prime,
                           alpha=alpha)

            # Print mean squared error every WEPOCHS epochs for debugging.
            if epoch % WEPOCHS == 0:
                cost = 0
                for j in range(n):
                    o = self.predict(inputs[j][0], inputs[j][1], activation=sigmoid)
                    cost += (outputs[j][0] - o) ** 2
                cost /= n
                print("epoch", epoch, "mean squared error:", cost)

    def save_model(self, filename):
        """Serialize the current weights and biases to `filename` as JSON."""
        model = self.getModelJson()
        with open(filename, 'w') as json_file:
            json.dump(model, json_file)

    def getModelJson(self):
        """Return all weights and biases as a plain JSON-serializable dict."""
        return {
            "w11": self._w11,
            "w21": self._w21,
            "b1": self._b1,
            "w12": self._w12,
            "w22": self._w22,
            "b2": self._b2,
            "w13": self._w13,
            "w23": self._w23,
            "b3": self._b3,
            "o1": self._o1,
            "o2": self._o2,
            "o3": self._o3,
            "ob": self._ob
        }

    def __str__(self):
        # Bug fix: __str__ must return a str; returning the dict raised
        # "TypeError: __str__ returned non-string".  Serialize it instead.
        return json.dumps(self.getModelJson())
|
121 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
122 |
|
123 |
## Main
|
124 |
def main():
    """Train a fresh network on XOR from the command line, save it, and report predictions."""
    INPUTS = [[0,0],[0,1],[1,0],[1,1]]
    OUTPUTS = [[0],[1],[1],[0]]
    # Train model
    nn = NeuralNetwork()
    print("Neural Network created")
    nn.train(inputs=INPUTS, outputs=OUTPUTS, epochs=EPOCHS, alpha=ALPHAS)
    print("Model trained")
    print("Printing Model:")
    # Bug fix: the method is getModelJson(); nn._getModelJson() raised
    # AttributeError at runtime.
    print(nn.getModelJson())

    # Save model to files
    nn.save_model("model.json")
    print("Model saved to model.json")

    # Generalized: iterate all samples instead of a hard-coded range(4).
    for i in range(len(INPUTS)):
        result = nn.predict(INPUTS[i][0], INPUTS[i][1], activation=sigmoid)
        print("for input", INPUTS[i], "expected", OUTPUTS[i][0], "predicted", f"{result:4.4}", "which is", "correct" if round(result)==OUTPUTS[i][0] else "incorrect")
|
|
|
|
|
142 |
|
143 |
if __name__ == "__main__":
|
144 |
main()
|
utils.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
|
3 |
+
def sigmoid(x):
    """Logistic sigmoid: map any real x into the open interval (0, 1)."""
    denominator = 1.0 + math.exp(-x)
    return 1.0 / denominator
|
5 |
+
|
6 |
+
def sigmoid_prime(x):
    """Derivative of the sigmoid, where x is ALREADY sigmoid-activated."""
    return (1 - x) * x
|
8 |
+
|
9 |
+
def relu(x):
    """Rectified linear unit: pass positive x through, clamp the rest to 0."""
    return x if x > 0 else 0
|
11 |
+
|
12 |
+
def relu_prime(x):
    """Derivative of ReLU: 1 for positive x, otherwise 0."""
    return int(x > 0)
|
14 |
+
|
15 |
+
def tanh(x):
    """Hyperbolic tangent activation, with outputs in (-1, 1)."""
    value = math.tanh(x)
    return value
|
17 |
+
|
18 |
+
def tanh_prime(x):
    """Derivative of tanh, where x is ALREADY tanh-activated."""
    squared = x**2
    return 1 - squared
|
20 |
+
|
21 |
+
def softmax(x):
    """Scalar squashing function: exp(x) / (exp(x) + 1).

    NOTE(review): for a single scalar this is mathematically the logistic
    sigmoid, not a true (vector) softmax — name kept for compatibility.
    """
    e = math.exp(x)
    return e / (e + 1)
|
23 |
+
|
24 |
+
def softmax_prime(x):
    """Derivative of the scalar squashing function, given activated x."""
    return (1 - x) * x
|