File 00_simslope/simslope.c changed (mode: 100644) (index f97d055..062672f) |
1 |
1 |
|
|
2 |
|
/* |
|
3 |
|
** Generates simulated data of two classes. |
|
4 |
|
** Measurements come from 1 axis. |
|
5 |
|
** Class 0 has a generally downward slope, class 1 has a generally |
|
6 |
|
** upward slope. |
|
7 |
|
** The NN must learn weights that correspond to the difference |
|
8 |
|
** in slope (printing out the weights at end makes this clear). |
|
9 |
|
*/ |
|
|
2 |
|
/* |
|
3 |
|
** Generates simulated data of two classes. |
|
4 |
|
** Measurements come from 1 axis. |
|
5 |
|
** Class 0 has a generally downward slope, class 1 has a generally |
|
6 |
|
** upward slope. |
|
7 |
|
** The NN must learn weights that correspond to the difference |
|
8 |
|
** in slope (printing out the weights at end makes this clear). |
|
9 |
|
*/ |
10 |
10 |
|
|
11 |
11 |
#include <stdio.h> |
#include <stdio.h> |
12 |
12 |
#include <stdlib.h> |
#include <stdlib.h> |
|
... |
... |
static int rand_j, rand_k; /* used for normal dist */ |
19 |
19 |
|
|
20 |
20 |
unsigned urand0 (void) |
unsigned urand0 (void) |
21 |
21 |
{ |
{ |
22 |
|
if (--rand_j == 0) rand_j = 55; |
|
23 |
|
if (--rand_k == 0) rand_k = 55; |
|
24 |
|
return rand_x[rand_k] += rand_x[rand_j]; |
|
|
22 |
|
if (--rand_j == 0) rand_j = 55; |
|
23 |
|
if (--rand_k == 0) rand_k = 55; |
|
24 |
|
return rand_x[rand_k] += rand_x[rand_j]; |
25 |
25 |
} |
} |
26 |
26 |
|
|
27 |
27 |
void init_generators(unsigned seed) |
void init_generators(unsigned seed) |
28 |
28 |
{ |
{ |
29 |
|
int i; |
|
30 |
|
|
|
31 |
|
/* inits the normal generator */ |
|
32 |
|
rand_x[1] = 1; |
|
33 |
|
if(seed) |
|
34 |
|
rand_x[2] = seed; |
|
35 |
|
else |
|
36 |
|
rand_x[2] = time (NULL); |
|
37 |
|
for (i=3; i<56; ++i) rand_x[i] = rand_x[i-1] + rand_x[i-2]; |
|
38 |
|
rand_j = 24; |
|
39 |
|
rand_k = 55; |
|
40 |
|
for (i=255; i>=0; --i) |
|
41 |
|
urand0 (); //run loop for a while |
|
42 |
|
for (i=255; i>=0; --i) |
|
43 |
|
rand_y[i] = urand0 (); |
|
44 |
|
rand_z = urand0 (); |
|
45 |
|
|
|
46 |
|
/* inits the uniform generator */ |
|
47 |
|
srand( (unsigned)time( NULL ) ); |
|
|
29 |
|
int i; |
|
30 |
|
|
|
31 |
|
/* inits the normal generator */ |
|
32 |
|
rand_x[1] = 1; |
|
33 |
|
if(seed) rand_x[2] = seed; |
|
34 |
|
else rand_x[2] = time (NULL); |
|
35 |
|
for (i=3; i<56; ++i) rand_x[i] = rand_x[i-1] + rand_x[i-2]; |
|
36 |
|
rand_j = 24; |
|
37 |
|
rand_k = 55; |
|
38 |
|
for (i=255; i>=0; --i) urand0 (); //run loop for a while |
|
39 |
|
for (i=255; i>=0; --i) rand_y[i] = urand0 (); |
|
40 |
|
rand_z = urand0 (); |
|
41 |
|
|
|
42 |
|
/* inits the uniform generator */ |
|
43 |
|
srand( (unsigned)time( NULL ) ); |
|
44 |
|
return; |
48 |
45 |
} |
} |
49 |
46 |
|
|
50 |
47 |
unsigned urand (void) |
unsigned urand (void) |
51 |
48 |
{ |
{ |
52 |
|
int i; |
|
53 |
|
|
|
54 |
|
i = rand_z % 256; |
|
55 |
|
rand_z = rand_y[i]; |
|
56 |
|
if (--rand_j == 0) rand_j = 55; |
|
57 |
|
if (--rand_k == 0) rand_k = 55; |
|
58 |
|
rand_y[i] = rand_x[rand_k] += rand_x[rand_j]; |
|
59 |
|
return rand_z; |
|
|
49 |
|
int i; |
|
50 |
|
|
|
51 |
|
i = rand_z % 256; |
|
52 |
|
rand_z = rand_y[i]; |
|
53 |
|
if (--rand_j == 0) rand_j = 55; |
|
54 |
|
if (--rand_k == 0) rand_k = 55; |
|
55 |
|
rand_y[i] = rand_x[rand_k] += rand_x[rand_j]; |
|
56 |
|
return rand_z; |
60 |
57 |
} |
} |
61 |
58 |
|
|
62 |
59 |
|
|
63 |
60 |
|
|
64 |
61 |
|
|
65 |
|
/* returns a value from a normal dist with mean=0 stddev=1 */ |
|
66 |
|
|
|
|
62 |
|
/* returns a value from a normal dist with mean=0 stddev=1 */ |
67 |
63 |
/*
** Returns a standard-normal deviate (mean=0, stddev=1) using the
** Marsaglia polar method.  Each rejection loop produces two deviates;
** the spare is cached in 'z' and handed out on the next call.
*/
double normal_rand (void)
{
	static int flag = 0;   /* nonzero when 'z' holds a cached deviate */
	static double z, a = 2147483648.0;   /* 2^31: maps urand() into [0,2) */
	double v1, v2, s;

	if (flag) {
		flag = 0;
		return z;
	}
	flag = 1;
	do {
		v1 = urand()/a - 1;   /* uniform in [-1,1) */
		v2 = urand()/a - 1;
	}
	/* BUG FIX: the standard polar-method condition rejects s >= 1.0
	** AND s == 0; the original accepted s == 0, where log(0) = -inf
	** would yield NaN, and accepted s == 1.0 exactly. */
	while ((s = v1*v1 + v2*v2) >= 1.0 || s == 0.0);
	s = sqrt (-2.0 * log(s) / s);
	z = v1 * s;
	return v2 * s;
}
87 |
83 |
|
|
88 |
|
|
|
89 |
|
|
|
90 |
|
/* returns a value from -4...+4 */ |
|
91 |
|
|
|
|
84 |
|
/* returns a value from -4...+4 */ |
92 |
85 |
/* Returns a uniform deviate in [-4, +4], driven by rand(). */
double uniform_rand (void)
{
	double unit;

	unit = (double)rand() / (double)RAND_MAX;   /* [0, 1] */
	return unit * 8.0 - 4.0;
}
101 |
94 |
|
|
102 |
95 |
|
|
103 |
96 |
#define DATA 1000 |
#define DATA 1000 |
104 |
97 |
#define SAMPLES_PER_DATA 20 |
#define SAMPLES_PER_DATA 20 |
105 |
98 |
|
|
106 |
|
int main() |
|
107 |
|
|
|
|
99 |
|
int main(void) |
108 |
100 |
{ |
{ |
109 |
|
int i,j; |
|
110 |
|
int d,slope; |
|
111 |
|
double base,n,step; |
|
112 |
|
|
|
113 |
|
init_generators(0); |
|
114 |
|
|
|
115 |
|
for (j=0; j<DATA; j++) |
|
116 |
|
{ |
|
117 |
|
n=normal_rand(); |
|
118 |
|
if (n < 0.0) |
|
119 |
|
slope=-1; |
|
120 |
|
else |
|
121 |
|
slope=1; |
|
122 |
|
n=normal_rand(); |
|
123 |
|
base=n*100.0+500.0; |
|
124 |
|
printf("%d\t%d",(slope == -1 ? 0 : 1),(int)base); |
|
125 |
|
for (d=1; d<SAMPLES_PER_DATA; d++) |
|
126 |
|
{ |
|
127 |
|
n=normal_rand(); |
|
128 |
|
step=n*3.0+(double)slope*2; /* 3.0 => low noise, 10.0 => high noise */ |
|
129 |
|
base+=step; |
|
130 |
|
printf("\t%d",(int)base); |
|
131 |
|
} |
|
132 |
|
printf("\n"); |
|
133 |
|
} |
|
134 |
|
|
|
|
101 |
|
int i,j; |
|
102 |
|
int d,slope; |
|
103 |
|
double base,n,step; |
|
104 |
|
|
|
105 |
|
init_generators(0); |
|
106 |
|
|
|
107 |
|
for (j=0; j<DATA; j++)//This loop generates {$DATA} datasets |
|
108 |
|
{ |
|
109 |
|
n=normal_rand(); |
|
110 |
|
if (n < 0.0) slope=-1; |
|
111 |
|
else slope=1; |
|
112 |
|
n=normal_rand(); |
|
113 |
|
base=n*100.0+500.0; |
|
114 |
|
printf("%d\t%d",(slope == -1 ? 0 : 1),(int)base); |
|
115 |
|
for (d=1; d<SAMPLES_PER_DATA; d++)//This loop gnereates datasets of size {$SAMPLES_PER_DATA}. |
|
116 |
|
{ |
|
117 |
|
n=normal_rand(); |
|
118 |
|
step=n*3.0+(double)slope*2; /* 3.0 => low noise, 10.0 => high noise */ |
|
119 |
|
base+=step; |
|
120 |
|
printf("\t%d",(int)base); |
|
121 |
|
} |
|
122 |
|
printf("\n"); |
|
123 |
|
} |
|
124 |
|
return 0; |
135 |
125 |
} |
} |
File 00_simslope/simslope0.py changed (mode: 100644) (index ae8e351..d53b86a) |
... |
... |
for a in range(0,len(d)): |
36 |
36 |
# normalize each row of data: min-max scale each row independently into [0, 1]
normdata=[]
for row in data:
    lo=float(min(row))
    hi=float(max(row))
    normdata.append([(float(v)-lo)/(hi-lo) for v in row])
45 |
45 |
|
|
46 |
46 |
#print(normdata) |
#print(normdata) |
47 |
47 |
|
|
|
... |
... |
# Single-layer network: 20 time-series samples in, 2 class scores out.
model = keras.Sequential([
    # cannot have InputLayer() or network will not save/load (bug in TF)
    # instead, specify input_shape in first real layer
    # keras.layers.InputLayer(input_shape=(20,)),
    # NOTE(review): sigmoid outputs with sparse_categorical_crossentropy
    # work here, but softmax is the conventional pairing -- confirm intent.
    keras.layers.Dense(2, input_shape=(20,), activation='sigmoid')])
65 |
64 |
|
|
66 |
65 |
model.compile(optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

print("Training")
# 80/20 train/validation split over the loaded rows
metrics = model.fit(data, classes, epochs=100, verbose=2, validation_split=0.2)
# BUG FIX: the history key is 'acc' in TF 1.x / standalone Keras but
# 'accuracy' in TF 2.x; reading only 'acc' raises KeyError on TF 2.x.
# Use whichever key is present.
acc_history = metrics.history.get('acc', metrics.history.get('accuracy'))
print("Max value: ", max(acc_history), " at epoch", np.argmax(acc_history))

print("Testing")
File 00_simslope/simslope1.py changed (mode: 100644) (index 6e36081..61d98e6) |
... |
... |
# split the loaded matrix: column 0 is the class label,
# remaining columns are the measurement row
classes=[]
data=[]
for rec in d:
    classes.append(rec[0])
    data.append([v for v in rec[1:]])
25 |
25 |
|
|
26 |
26 |
#print(classes) |
#print(classes) |
27 |
27 |
#print(data) |
#print(data) |
|
... |
... |
for a in range(0,len(d)): |
29 |
29 |
# normalize each row of data: min-max scale each row independently into [0, 1]
normdata=[]
for row in data:
    lo=float(min(row))
    hi=float(max(row))
    normdata.append([(float(v)-lo)/(hi-lo) for v in row])
38 |
38 |
|
|
39 |
39 |
#print(normdata) |
#print(normdata) |
40 |
40 |
|
|