delay_learning_v3.py
#!/usr/bin/env python
import itertools as it
from pyNN.utility import get_simulator, init_logging, normalized_filename
from quantities import ms
from random import randint
from pyNN.utility.plotting import Figure, Panel
import matplotlib.pyplot as plt
from datetime import datetime as dt
import neo
import numpy as np
import sys
import os
import imageio
sys.path.append("/Users/kevin/Documents/SI5/Cours/T.E.R/Code/Amelie/EvData/translate_2_formats")
from events2spikes import ev2spikes
start = dt.now()
sim, options = get_simulator(("--plot-figure", "Plot the simulation results to a file", {"action": "store_true"}),
("--nb-convolution", "The number of convolution layers of the model", {"action": "store", "type": int}),
("--debug", "Print debugging information"))
if options.debug:
init_logging(None, debug=True)
if options.simulator == "nest":
from pyNN.nest import *
sim.setup(timestep=0.01)
# Constants, variables
NB_CONV_LAYERS = options.nb_convolution
if NB_CONV_LAYERS is None or NB_CONV_LAYERS < 2:
print("The number of convolution layers must be given with --nb-convolution and be at least 2")
quit()
SIZE_CONV_FILTER = 5
OUTPUT_PATH_GENERIC = "./output"
DIRECTIONS = {-1: "INDETERMINATE", 0: "SOUTH-EAST ↘︎", 1: "SOUTH-WEST ↙︎", 2: "NORTH-WEST ↖︎", 3: "NORTH-EAST ↗︎", 4: "EAST →", 5: "SOUTH ↓", 6: "WEST ←", 7: "NORTH ↑"} # KEY=DIRECTIONS ID ; VALUE=STRING REPRESENTING THE DIRECTION
NB_DIRECTIONS = min(len(DIRECTIONS)-1, NB_CONV_LAYERS) # No more than available directions, and at least 2 directions. -1 to ignore INDETERMINATE
### Generate input data
time_data = 15_000 # TODO: originally 1e6, reduced for testing
temporal_reduction = 1_000
pattern_interval = 100
pattern_duration = 5
num = time_data//pattern_interval
# The input should be at least 13*13 for a duration of 5 since we want to leave a margin of 4 neurons on the edges when generating data
x_input = 13
filter_x = 5
x_output = x_input - filter_x + 1
y_input = 13
filter_y = 5
y_output = y_input - filter_y + 1
x_margin = y_margin = 4
# Dataset Generation
input_data = {} # KEY=(BEGIN_MVT, END_MVT) ; VALUE=DIRECTION_ID
input_events = np.zeros((0,4)) # 4 because : x, y, polarity, time
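# For each pattern interval, pick a random direction and emit a trajectory of pattern_duration positive-polarity events (one pixel per step), keeping the 4-pixel margin so the whole movement fits inside a convolution window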
for t in range(int(time_data/pattern_interval)):
direction = randint(0, NB_DIRECTIONS-1) # {NB_DIRECTIONS} directions
input_data[(t*pattern_interval, t*pattern_interval+(pattern_duration-1))] = direction
if direction == 0:
start_x = randint(x_margin, x_input-pattern_duration-x_margin) # We leave a margin of 4 neurons on the edges of the input layer so that the whole movement can be seen by the convolution window
start_y = randint(y_margin, y_input-pattern_duration-y_margin)
input_events = np.concatenate((input_events, [[start_x+d, start_y+d, 1, d+t*pattern_interval] for d in range(pattern_duration)]), axis=0)
elif direction == 1:
start_x = randint(x_input-x_margin-1, x_input-pattern_duration) # We leave a margin of 4 neurons on the edges of the input layer so that the whole movement can be seen by the convolution window
start_y = randint(y_margin, y_input-pattern_duration-y_margin)
input_events = np.concatenate((input_events, [[start_x-d, start_y+d, 1, d+t*pattern_interval] for d in range(pattern_duration)]), axis=0)
elif direction == 2:
start_x = randint(x_input-x_margin-1, x_input-pattern_duration) # We leave a margin of 4 neurons on the edges of the input layer so that the whole movement can be seen by the convolution window
start_y = randint(y_input-y_margin-1, y_input-pattern_duration)
input_events = np.concatenate((input_events, [[start_x-d, start_y-d, 1, d+t*pattern_interval] for d in range(pattern_duration)]), axis=0)
elif direction == 3:
start_x = randint(x_margin, x_input-pattern_duration-x_margin) # We leave a margin of 4 neurons on the edges of the input layer so that the whole movement can be seen by the convolution window
start_y = randint(y_input-y_margin-1, y_input-pattern_duration)
input_events = np.concatenate((input_events, [[start_x+d, start_y-d, 1, d+t*pattern_interval] for d in range(pattern_duration)]), axis=0)
elif direction == 4:
start_x = randint(x_margin, x_input-pattern_duration-x_margin)
start_y = randint((y_margin + (y_input-y_margin)) // 2, (y_margin + (y_input-y_margin)) // 2)
input_events = np.concatenate((input_events, [[start_x+d, start_y, 1, d+t*pattern_interval] for d in range(pattern_duration)]), axis=0)
elif direction == 5:
start_x = randint((x_margin + (x_input-x_margin)) // 2, (x_margin + (x_input-x_margin)) // 2)
start_y = randint(y_margin, y_input-pattern_duration-y_margin)
input_events = np.concatenate((input_events, [[start_x, start_y+d, 1, d+t*pattern_interval] for d in range(pattern_duration)]), axis=0)
elif direction == 6:
start_x = randint(x_input-x_margin-1, x_input-pattern_duration)
start_y = randint((y_margin + (y_input-y_margin)) // 2, (y_margin + (y_input-y_margin)) // 2)
input_events = np.concatenate((input_events, [[start_x-d, start_y, 1, d+t*pattern_interval] for d in range(pattern_duration)]), axis=0)
elif direction == 7:
start_x = randint((x_margin + (x_input-x_margin)) // 2, (x_margin + (x_input-x_margin)) // 2)
start_y = randint(y_input-y_margin-1, y_input-pattern_duration)
input_events = np.concatenate((input_events, [[start_x, start_y-d, 1, d+t*pattern_interval] for d in range(pattern_duration)]), axis=0)
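# ev2spikes (external helper) is expected to convert the event array (x, y, polarity, t) into one spike-time list per input pixel, suitable for SpikeSourceArray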
input_spiketrain, _, _ = ev2spikes(input_events, width=x_input, height=y_input)
### Build Network
# Populations
Input = sim.Population(
x_input*y_input,
sim.SpikeSourceArray(spike_times=input_spiketrain),
label="Input"
)
Input.record("spikes")
Convolutions_parameters = {
'tau_m': 20.0, # membrane time constant (in ms)
'tau_refrac': 30.0, # duration of refractory period (in ms), originally 0.1
'v_reset': -70.0, # reset potential after a spike (in mV)
'v_rest': -70.0, # resting membrane potential (in mV)
'v_thresh': -5.0, # spike threshold (in mV), originally -5
}
# The size of a convolution layer with a filter of size x*y is (input_x-x+1) * (input_y-y+1)
ConvLayers = []
for i in range(NB_CONV_LAYERS):
Conv_i = sim.Population(
x_output*y_output,
sim.IF_cond_exp(**Convolutions_parameters),
)
Conv_i.record(('spikes','v'))
ConvLayers.append(Conv_i)
# List connector
weight_N = 0.35
delays_N = 15.0
weight_teta = 0.005
delays_teta = 0.05
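# Draw one filter_x*filter_y kernel of weights and one of delays per convolution layer from narrow normal distributions centred on weight_N and delays_N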
Weight_conv = np.random.normal(weight_N, weight_teta, size=(NB_CONV_LAYERS, filter_x, filter_y))
Delay_conv = np.random.normal(delays_N, delays_teta, size=(NB_CONV_LAYERS, filter_x, filter_y))
Input_to_output_i_conn = [[] for _ in range(NB_CONV_LAYERS)]
c = 0
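# Each connection tuple is (pre_index, post_index, weight, delay): every output position of layer c is wired to its filter_x*filter_y input window with that layer's shared kernel values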
for in2out_conn in Input_to_output_i_conn:
for X,Y in it.product(range(x_output), range(y_output)):
idx = np.ravel_multi_index( (X,Y) , (x_output, y_output) )
conn = []
for x, y in it.product(range(filter_x), range(filter_y)):
w = Weight_conv[c, x, y]
d = Delay_conv[ c, x, y]
A = np.ravel_multi_index( (X+x,Y+y) , (x_input, y_input) )
conn.append( ( A, idx, w, d ) )
in2out_conn += conn
c += 1
# Projections
Input_to_Conv_i = []
for i in range(NB_CONV_LAYERS):
Input_to_Conv_i.append(
sim.Projection(
Input, ConvLayers[i],
connector = sim.FromListConnector(Input_to_output_i_conn[i]),
synapse_type = sim.StaticSynapse(),
receptor_type = 'excitatory',
label = 'Input to Conv'+str((i+1))
)
)
# Establish Lateral Inhibition
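# One-to-one inhibitory connections (weight 50, delay 0.01 ms) between corresponding neurons of every pair of distinct layers, so a neuron firing in one layer suppresses the same location in the other layers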
for idx_a in range(NB_CONV_LAYERS):
for idx_b in range(NB_CONV_LAYERS):
if idx_a != idx_b: # Avoid creating lateral inhibition with itself
Conv_rising_to_conv_falling = sim.Projection(
ConvLayers[idx_a], ConvLayers[idx_b],
connector = sim.OneToOneConnector(),
synapse_type = sim.StaticSynapse(
weight = 50,
delay = 0.01
),
receptor_type = "inhibitory",
label = "Lateral inhibition - Conv"+str(idx_a+1)+" to Conv"+str(idx_b+1)
)
# We will use this list to know which convolution layer has reached its stop condition
full_stop_condition= [False] * NB_CONV_LAYERS
# Each filter of each convolution layer will be put in this list and updated at each stimulus
final_filters = [[] for _ in range(NB_CONV_LAYERS)]
# Sometimes, even with lateral inhibition, two neurons on the same location in different convolution
# layers will both spike (due to the minimum delay on those connections). So we keep track of
# which neurons in each layer have already spiked for this stimulus. (Everything is reset to False at the end of the stimulus)
neuron_activity_tag = [[False for _ in range((x_input-filter_x+1)*(y_input-filter_y+1))] for _ in range(NB_CONV_LAYERS)]
# When a convolution layer specializes, we record in this dict which direction it has specialized in
conv_to_direction = {} # KEY=CONV_ID ; VALUE=DIRECTION_ID
# At the end, the spikes produced by each convolution layer will be stored here
conv_output_spikes = {}
### Run simulation
# Callback classes
class LastSpikeRecorder(object):
def __init__(self, sampling_interval, pop):
self.interval = sampling_interval
self.population = pop
self.global_spikes = [[] for _ in range(self.population.size)]
self.annotations = {}
self.final_spikes = []
if type(self.population) != list:
self._spikes = np.ones(self.population.size) * (-1)
else:
self._spikes = np.ones(len(self.population)) * (-1)
def __call__(self, t):
if t > 0:
if type(self.population) != list:
population_spikes = self.population.get_data("spikes", clear=True).segments[0].spiketrains
self._spikes = map(
lambda x: x[-1].item() if len(x) > 0 else -1,
population_spikes
)
self._spikes = np.fromiter(self._spikes, dtype=float)
if t == self.interval:
for n, neuron_spikes in enumerate(population_spikes):
self.annotations[n] = neuron_spikes.annotations
else:
self._spikes = []
for subr in self.population:
sp = subr.get_data("spikes", clear=True).segments[0].spiketrains
spikes_subr = map(
lambda x: x[-1].item() if len(x) > 0 else -1,
sp
)
self._spikes.append(max(spikes_subr))
assert len(self._spikes) == len(self.global_spikes)
if len(np.unique(self._spikes)) > 1:
idx = np.where(self._spikes != -1)[0]
for n in idx:
self.global_spikes[n].append(self._spikes[n])
#return t+self.interval
def get_spikes(self):
for n, s in enumerate(self.global_spikes):
self.final_spikes.append( neo.core.spiketrain.SpikeTrain(s*ms, t_stop=time_data, **self.annotations[n]) )
return self.final_spikes
class WeightDelayRecorder(object):
def __init__(self, sampling_interval, proj):
self.interval = sampling_interval
self.projection = proj
self.weight = None
self._weights = []
self.delay = None
self._delays = []
def __call__(self, t):
attribute_names = self.projection.synapse_type.get_native_names('weight','delay')
self.weight, self.delay = self.projection._get_attributes_as_arrays(attribute_names, multiple_synapses='sum')
self._weights.append(self.weight)
self._delays.append(self.delay)
#return t+self.interval
def update_weights(self, w):
assert self._weights[-1].shape == w.shape
self._weights[-1] = w
def update_delays(self, d):
assert self._delays[-1].shape == d.shape
self._delays[-1] = d
def get_weights(self):
signal = neo.AnalogSignal(self._weights, units='nA', sampling_period=self.interval * ms, name="weight")
signal.channel_index = neo.ChannelIndex(np.arange(len(self._weights[0])))
return signal
def get_delays(self):
signal = neo.AnalogSignal(self._delays, units='ms', sampling_period=self.interval * ms, name="delay")
signal.channel_index = neo.ChannelIndex(np.arange(len(self._delays[0])))
return signal
class visualiseTime(object):
def __init__(self, sampling_interval):
self.interval = sampling_interval
self.times_called = 0
self.delay_and_weight_evolution_plot = []
self.OUTPUT_PATH_CURRENT_RUN = OUTPUT_PATH_GENERIC + '/' + dt.now().strftime("%Y%m%d-%H%M%S")
if options.plot_figure: # TODO find a better way, actually 2 conditions based on this
if not os.path.isdir(OUTPUT_PATH_GENERIC): # Output folder
os.mkdir(OUTPUT_PATH_GENERIC)
if not os.path.isdir(self.OUTPUT_PATH_CURRENT_RUN): # Folder for current run, inside Output folder
os.mkdir(self.OUTPUT_PATH_CURRENT_RUN)
def __call__(self, t):
print("step : {}".format(t))
if all(full_stop_condition):
print("!!!! FINISHED LEARNING !!!!")
sim.end()
self.print_filters(t) # TODO only print when reaching final condition
# exit() # TODO necessary ? because with this we skip the end part of the code with plots and more...
if t > 1 and int(t) % pattern_interval==0:
self.print_filters(t)
#return t + self.interval
def recognize_movement(self, delay_matrix):
"""
Return an int that indicates the direction in which the input delay matrix has specialized.
For the moment, 9 possible outputs:
- 0 = (SOUTH-EAST ↘︎)
- 1 = (SOUTH-WEST ↙︎)
- 2 = (NORTH-WEST ↖︎)
- 3 = (NORTH-EAST ↗︎)
- 4 = (EAST →)
- 5 = (SOUTH ↓)
- 6 = (WEST ←)
- 7 = (NORTH ↑)
- -1 = (INDETERMINATE)
"""
def pred_movement(delay_matrix, rangeI, rangeJ, prevI, prevJ):
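# A candidate trajectory matches if the delays are non-increasing along it (each pixel's delay is at most the previous pixel's) and every delay outside the trajectory is larger than the delay of the trajectory's first pixel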
delay_matrix = delay_matrix.copy()
ignore_idx = [(rangeI[0]+prevI, rangeJ[0]+prevJ)]
for i, j in zip(rangeI, rangeJ):
ignore_idx.append((i, j))
if delay_matrix[i+prevI][j+prevJ] < delay_matrix[i][j]:
return False
first_mvt_idx = ignore_idx[0]
first_mvt_idx_val = delay_matrix[first_mvt_idx[0]][first_mvt_idx[1]]
for idx in ignore_idx:
delay_matrix[idx[0]][idx[1]] = np.inf
if not np.amin(delay_matrix) > first_mvt_idx_val: # Make sure the minimum delay outside the trajectory is greater than the delay of the first pixel of the movement # TODO: with a gap of at least 0.2 ms
return False
return True
# Testing diagonals first
size_matrix = len(delay_matrix)
if pred_movement(delay_matrix, range(1, size_matrix), range(1, size_matrix), -1, -1):
return 0 # top-left to bottom-right (SOUTH-EAST)
elif pred_movement(delay_matrix, range(size_matrix-2, -1, -1), range(size_matrix-2, -1, -1), 1, 1):
return 2 # bottom-right to top-left (NORTH-WEST)
elif pred_movement(delay_matrix, range(1, size_matrix), range(size_matrix-2, -1, -1), -1, 1):
return 1 # top-right to bottom-left (SOUTH-WEST)
elif pred_movement(delay_matrix, range(size_matrix-2, -1, -1), range(1, size_matrix), 1, -1):
return 3 # bottom-left to top-right (NORTH-EAST)
# Then testing the 4 cardinal points
half_matrix = size_matrix // 2
if pred_movement(delay_matrix, [half_matrix] * size_matrix, range(1, size_matrix), 0, -1):
return 4 # middle-left to middle-right (EAST)
elif pred_movement(delay_matrix, range(1, size_matrix), [half_matrix] * size_matrix, -1, 0):
return 5 # middle-top to middle-bottom (SOUTH)
elif pred_movement(delay_matrix, [half_matrix] * size_matrix, range(size_matrix-2, -1, -1), 0, 1):
return 6 # middle-right to middle-left (WEST)
elif pred_movement(delay_matrix, range(size_matrix-2, -1, -1), [half_matrix] * size_matrix, 1, 0):
return 7 # middle-bottom to middle-top (NORTH)
else:
return -1
def print_filters(self, t):
"""
Create and save a plot that contains for each convolution filter its delay matrix and associated weights of the current model state
"""
SAVED_FILE_NAME = self.OUTPUT_PATH_CURRENT_RUN + '/delays_and_weights_'+str(self.times_called)+".png"
LOG_STR = ["Delays of convolution", "Weights of convolution"]
COLOR_MAP_TYPE = plt.cm.autumn # https://matplotlib.org/stable/tutorials/colors/colormaps.html
SCALING_VALUE = SIZE_CONV_FILTER + ((NB_CONV_LAYERS - 2) * 4) # Make sure the plot is big enough depending on the number of convolution layers used
FONTSIZE = 9+(1.05*NB_CONV_LAYERS)
fig, axs = plt.subplots(nrows=len(LOG_STR), ncols=NB_CONV_LAYERS, sharex=True, figsize=(SCALING_VALUE, SCALING_VALUE))
for i in range(len(LOG_STR)): # Delay and Weight
for layer_n in range(NB_CONV_LAYERS): # The number of convolution layer in the model
data = final_filters[layer_n][i]
title = LOG_STR[i] + ' ' + str(layer_n)
if i == 0: # Delay matrix part
direction_id = self.recognize_movement(data)
if direction_id != -1: # Not INDETERMINATE
conv_to_direction[layer_n] = direction_id
else:
conv_to_direction.pop(layer_n, None)
title += '\n' + DIRECTIONS[direction_id] + '(' + str(direction_id) + ')'
curr_matrix = axs[i][layer_n]
curr_matrix.set_title(title, fontsize=FONTSIZE)
im = curr_matrix.imshow(data, cmap=COLOR_MAP_TYPE)
fig.colorbar(im, ax=curr_matrix, fraction=0.046, pad=0.04) # https://stackoverflow.com/a/26720422
fig.suptitle('Delays and Weights kernel at t:'+str(t), fontsize=FONTSIZE)
plt.tight_layout()
fig.savefig(SAVED_FILE_NAME, dpi=300)
plt.close() # Avoid getting displayed at the end
self.delay_and_weight_evolution_plot.append(SAVED_FILE_NAME)
print("[", str(self.times_called) , "] : Images of delays and weights saved as", SAVED_FILE_NAME)
self.times_called += 1
def print_final_filters(self):
"""
Create a gif containing every image generated by print_filters
"""
imgs = [imageio.v2.imread(step_file) for step_file in self.delay_and_weight_evolution_plot]
imageio.mimsave(self.OUTPUT_PATH_CURRENT_RUN + '/delays_and_weights_evolution.gif', imgs, duration=1) # 1s between each frame of the gif
class NeuronReset(object):
"""
Resets neuron_activity_tag to False for all neurons in all layers.
Also injects a negative-amplitude current pulse into all neurons at the end of each stimulus
so that all membrane potentials go back to their resting values.
"""
def __init__(self, sampling_interval, pops):
self.interval = sampling_interval
self.populations = pops
def __call__(self, t):
for conv in neuron_activity_tag:
for cell in range(len(conv)):
conv[cell] = False
if t > 0:
print("!!! RESET !!!")
if type(self.populations)==list:
for pop in self.populations:
pulse = sim.DCSource(amplitude=-10.0, start=t, stop=t+10)
pulse.inject_into(pop)
else:
pulse = sim.DCSource(amplitude=-10.0, start=t, stop=t+10)
pulse.inject_into(self.populations)
self.interval = pattern_interval
#return t + self.interval
class InputClear(object):
"""
When called, simply gets the data from the input with the 'clear' parameter set to True.
By periodically clearing the data from the populations the simulation goes a lot faster.
"""
def __init__(self, sampling_interval, pops_to_clear_data):
self.interval = sampling_interval
self.pop_clear = pops_to_clear_data
def __call__(self, t):
if t > 0:
print("!!! INPUT CLEAR !!!")
try:
input_spike_train = self.pop_clear.get_data("spikes", clear=True).segments[0].spiketrains
except Exception:
pass
self.interval = pattern_interval
#return t + self.interval
class LearningMechanisms(object):
def __init__(
self,
sampling_interval,
input_spikes_recorder, output_spikes_recorder,
projection, projection_delay_weight_recorder,
B_plus, B_minus,
tau_plus, tau_minus,
A_plus, A_minus,
teta_plus, teta_minus,
filter_d, filter_w,
stop_condition,
growth_factor,
Rtarget=0.005,
lamdad=0.002, lamdaw=0.00005,
thresh_adapt=True,
label=0
):
print("PROJECT:", projection)
self.interval = sampling_interval
self.projection = projection
self.input = projection.pre
self.output = projection.post
self.input_spikes = input_spikes_recorder
self.output_spikes = output_spikes_recorder
self.DelayWeights = projection_delay_weight_recorder
# We keep the last spike time of each neuron
self.input_last_spiking_times = self.input_spikes._spikes
self.output_last_spiking_times = self.output_spikes._spikes
self.B_plus = B_plus
self.B_minus = B_minus
self.tau_plus = tau_plus
self.tau_minus = tau_minus
self.max_delay = False # If set to False, we will find the maximum delay on first call.
self.filter_d = filter_d
self.filter_w = filter_w
self.A_plus = A_plus
self.A_minus = A_minus
self.teta_plus = teta_plus
self.teta_minus = teta_minus
self.c = stop_condition
self.growth_factor = growth_factor
self.label = label
self.thresh_adapt=thresh_adapt
# For each neuron, we count their number of spikes to compute their activation rate.
self.total_spike_count_per_neuron = [0 for _ in range(len(self.output))]
# Number of times this has been called.
self.call_count = 0
self.Rtarget = Rtarget
self.lamdaw = lamdaw
self.lamdad = lamdad
def __call__(self, t):
if t == 0 :
print("No data")
return #t + pattern_interval
self.call_count += 1
final_filters[self.label] = [self.filter_d, self.filter_w]
# The sum of all homeostasis delta_d and delta_t computed for each cell
homeo_delays_total = 0
homeo_weights_total = 0
# Since we can't increase the delays past the maximum delay set at the beginning of the simulation,
# we find the maximum delay during the first call
if self.max_delay == False:
self.max_delay = 0.01
for x in self.DelayWeights.delay:
for y in x:
if not np.isnan(y) and y > self.max_delay:
self.max_delay = y
for pre_neuron in range(self.input.size):
if self.input_spikes._spikes[pre_neuron] != -1 and self.input_spikes._spikes[pre_neuron] > self.input_last_spiking_times[pre_neuron]:
# We update the last spike time for this neuron
self.input_last_spiking_times[pre_neuron] = self.input_spikes._spikes[pre_neuron]
print("PRE SPIKE {} : {}".format(pre_neuron, self.input_spikes._spikes[pre_neuron]))
#print("OOOOLLL", self.output_spikes._spikes)
#print("OOOMMM", self.output_spikes.global_spikes)
for post_neuron in range(self.output.size):
if self.output_spikes._spikes[post_neuron] != -1 and self.check_activity_tags(post_neuron):
neuron_activity_tag[self.label][post_neuron] = True
print("***** STIMULUS {} *****".format(t//pattern_interval))
self.total_spike_count_per_neuron[post_neuron] += 1
# The neuron spiked during this stimulus and its threshold should be increased.
# Since Nest won't allow neurons with a threshold > 0 to spike, we decrease v_rest instead.
current_rest = self.output.__getitem__(post_neuron).get_parameters()['v_rest']
if self.thresh_adapt:
self.output.__getitem__(post_neuron).v_rest=current_rest-(1.0-self.Rtarget)
self.output.__getitem__(post_neuron).v_reset=current_rest-(1.0-self.Rtarget)
print("=== Neuron {} from layer {} spiked ! Whith rest = {} ===".format(post_neuron, self.label, current_rest))
print("Total pikes of neuron {} from layer {} : {}".format(post_neuron, self.label, self.total_spike_count_per_neuron[post_neuron]))
if self.output_spikes._spikes[post_neuron] > self.output_last_spiking_times[post_neuron] and not self.stop_condition(post_neuron):
# We update the last spike time for this neuron
self.output_last_spiking_times[post_neuron] = self.output_spikes._spikes[post_neuron]
# We now compute a new delay for each of its connections using STDP
print("TAILLE PRE_NEURON", len(self.DelayWeights.delay))
for pre_neuron in range(len(self.DelayWeights.delay)):
# For each post synaptic neuron that has a connection with pre_neuron, we also check that both neurons
# already spiked at least once.
if not np.isnan(self.DelayWeights.delay[pre_neuron][post_neuron]) and not np.isnan(self.DelayWeights.weight[pre_neuron][post_neuron]) and self.input_last_spiking_times[pre_neuron] != -1 and self.output_last_spiking_times[post_neuron] != -1:
# Some values here have a dimension in ms
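# delta_t is the post-synaptic spike time minus the arrival time of the pre-synaptic spike at the synapse (pre spike time + current delay)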
delta_t = self.output_last_spiking_times[post_neuron] - self.input_last_spiking_times[pre_neuron] - self.DelayWeights.delay[pre_neuron][post_neuron]
delta_d = self.G(delta_t)
delta_w = self.F(delta_t)
print("STDP from layer: {} with post_neuron: {} and pre_neuron: {} deltad: {}, deltat: {}".format(self.label, post_neuron, pre_neuron, delta_d*ms, delta_t*ms))
print("TIME PRE {} : {} TIME POST 0: {} DELAY: {}".format(pre_neuron, self.input_last_spiking_times[pre_neuron], self.output_last_spiking_times[post_neuron], self.DelayWeights.delay[pre_neuron][post_neuron]))
self.actualize_filter(pre_neuron, post_neuron, delta_d, delta_w)
else:
# The neuron did not spike and its threshold should be lowered
if self.thresh_adapt:
current_rest = self.output.__getitem__(post_neuron).get_parameters()['v_rest']
self.output.__getitem__(post_neuron).v_rest=current_rest+self.Rtarget
self.output.__getitem__(post_neuron).v_reset=current_rest+self.Rtarget
# Homeostasis regulation per neuron
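# K > 0 when the neuron fires below the target rate Rtarget: its delays are then decreased (delta_d < 0) and its weights increased (delta_w > 0); the opposite happens when it fires too often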
Robserved = self.total_spike_count_per_neuron[post_neuron]/self.call_count
K = (self.Rtarget - Robserved)/self.Rtarget
delta_d = -self.lamdad*K
delta_w = self.lamdaw*K
homeo_delays_total += delta_d
homeo_weights_total += delta_w
print("Rate of neuron {} from layer {}: {}".format(post_neuron, self.label, Robserved))
print("****** CONVO {} homeo_delays_total: {}, homeo_weights_total: {}".format(self.label, homeo_delays_total, homeo_weights_total))
self.actualizeAllFilter( homeo_delays_total+self.growth_factor*self.interval, homeo_weights_total)
# At last we give the new delays and weights to our projections
self.DelayWeights.update_delays(self.DelayWeights.delay)
self.DelayWeights.update_weights(self.DelayWeights.weight)
### HERE MODIFY
#print("\n\nIDK:", self.projection, " ET ", self.DelayWeights.delay)
#print("IDK2", self.DelayWeights.delay.shape, type(self.DelayWeights.delay))
#self.projection.set(delay = np.ones((169, 81)) * 16)
self.projection.set(delay = self.DelayWeights.delay)
self.projection.set(weight = self.DelayWeights.weight)
# We update the list that tells if this layer has finished learning the delays and weights
full_stop_condition[self.label] = self.full_stop_check()
#return t + pattern_interval
# Computes the delay delta by applying the STDP
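# Causal pairs (delta_t >= 0, the post neuron fires after the delayed pre spike arrives) shrink the delay by B_minus*exp(-delta_t/teta_minus); anti-causal pairs grow it by B_plus*exp(delta_t/teta_plus)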
def G(self, delta_t):
if delta_t >= 0:
delta_d = -self.B_minus*np.exp(-delta_t/self.teta_minus)
else:
delta_d = self.B_plus*np.exp(delta_t/self.teta_plus)
return delta_d
# Computes the weight delta by applying the STDP
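# Causal pairs (delta_t >= 0) potentiate the weight by A_plus*exp(-delta_t/tau_plus); anti-causal pairs depress it by A_minus*exp(delta_t/tau_minus)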
def F(self, delta_t):
if delta_t >= 0:
delta_w = self.A_plus*np.exp(-delta_t/self.tau_plus)
else:
delta_w = -self.A_minus*np.exp(delta_t/self.tau_minus)
return delta_w
# Given a post synaptic cell, returns if that cell has reached its stop condition for learning
def stop_condition(self, post_neuron):
for pre_neuron in range(len(self.DelayWeights.delay)):
if not np.isnan(self.DelayWeights.delay[pre_neuron][post_neuron]) and self.DelayWeights.delay[pre_neuron][post_neuron] <= self.c:
return True
return False
# Checks if all cells have reached their stop condition
def full_stop_check(self):
for post_neuron in range(self.output.size):
if not self.stop_condition(post_neuron):
return False
return True
# Applies the current weights and delays of the filter to all the cells sharing those
def actualize_filter(self, pre_neuron, post_neuron, delta_d, delta_w):
# We now find the delay/weight to use by looking at the filter
convo_coords = [post_neuron%(x_input-filter_x+1), post_neuron//(x_input-filter_x+1)]
input_coords = [pre_neuron%x_input, pre_neuron//x_input]
filter_coords = [input_coords[0]-convo_coords[0], input_coords[1]-convo_coords[1]]
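# filter_coords is the offset of the pre-synaptic pixel inside the post-synaptic neuron's filter_x*filter_y receptive field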
# And we update the filter's delay/weight after the STDP
#print(pre_neuron, post_neuron)
#print(np.unravel_index(pre_neuron, (13,13)))
#print(np.unravel_index(post_neuron, (9,9)))
#print(input_coords, convo_coords, filter_coords)
#print(self.filter_d.shape, self.filter_w.shape)
self.filter_d[filter_coords[0]][filter_coords[1]] = max(0.01, min(self.filter_d[filter_coords[0]][filter_coords[1]]+delta_d, self.max_delay))
self.filter_w[filter_coords[0]][filter_coords[1]] = max(0.05, self.filter_w[filter_coords[0]][filter_coords[1]]+delta_w)
# Finally we update the weights and delays of all neurons that use the same filter
for window_x in range(0, x_input - (filter_x-1)):
for window_y in range(0, y_input - (filter_y-1)):
input_neuron_id = window_x+filter_coords[0] + (window_y+filter_coords[1])*x_input
convo_neuron_id = window_x + window_y*(x_input-filter_x+1)
if not np.isnan(self.DelayWeights.delay[input_neuron_id][convo_neuron_id]) and not np.isnan(self.DelayWeights.weight[input_neuron_id][convo_neuron_id]):
self.DelayWeights.delay[input_neuron_id][convo_neuron_id] = self.filter_d[filter_coords[0]][filter_coords[1]]
self.DelayWeights.weight[input_neuron_id][convo_neuron_id] = self.filter_w[filter_coords[0]][filter_coords[1]]
# Applies delta_d and delta_w to the whole filter
def actualizeAllFilter(self, delta_d, delta_w):
for x in range(len(self.filter_d)):
for y in range(len(self.filter_d[x])):
self.filter_d[x][y] = max(0.01, min(self.filter_d[x][y]+delta_d, self.max_delay))
self.filter_w[x][y] = max(0.05, self.filter_w[x][y]+delta_w)
# Finally we update the weights and delays of all neurons that use the same filter
for window_x in range(0, x_input - (filter_x-1)):
for window_y in range(0, y_input - (filter_y-1)):
for x in range(len(self.filter_d)):
for y in range(len(self.filter_d[x])):
input_neuron_id = window_x+x + (window_y+y)*x_input
convo_neuron_id = window_x + window_y*(x_input-filter_x+1)
if input_neuron_id < self.input.size and not np.isnan(self.DelayWeights.delay[input_neuron_id][convo_neuron_id]) and not np.isnan(self.DelayWeights.weight[input_neuron_id][convo_neuron_id]):
self.DelayWeights.delay[input_neuron_id][convo_neuron_id] = self.filter_d[x][y]
self.DelayWeights.weight[input_neuron_id][convo_neuron_id] = self.filter_w[x][y]
def get_filters(self):
return self.filter_d, self.filter_w
def check_activity_tags(self, neuron_to_check):
for conv in neuron_activity_tag:
if conv[neuron_to_check]:
return False
return True
class callbacks_(object):
def __init__(self, sampling_interval):
self.call_order = []
self.interval = sampling_interval
def add_object(self,obj):
self.call_order.append(obj)
def __call__(self,t):
for obj in self.call_order:
if t%obj.interval == 0 and t != 0:
obj.__call__(t)
return t + self.interval
def spikes_train_to_single_array(spike_train_array):
total_spikes = np.array([])
for spikes in spike_train_array:
total_spikes = np.concatenate((total_spikes, np.array(spikes)), axis=0)
return np.sort(total_spikes)
class Metrics():
def __init__(self):
pass
def compute_metrics(self):
"""
Compute the PRECISION, RECALL and F1-SCORE based on the spikes produced in each convolution layer.
For each stimulus we compare the direction of the input movement with the direction each layer has
specialized in, and count TruePositive, FalsePositive and FalseNegative accordingly:
- TruePositive (TP): a spike is produced on the layer whose direction matches the input
- FalsePositive (FP): a spike is produced on a layer whose direction does not match
- FalseNegative (FN): no spike is produced although the direction matched
Then,
- PRECISION = TP / (TP + FP)
- RECALL = TP / (TP + FN)
- F1-SCORE = 2 * ((PRECISION * RECALL) / (PRECISION + RECALL))
"""
def pickup(input_interval, output_conv_spikes):
"""
From an interval and a list of spike times, return every spike time within the interval.
A spike time must be after the beginning of the interval and cannot exceed the end of the interval by more than 40 ms.
E.g :
pickup((300, 800), [400, 300, 600, 900, 840, 839, 301, 275]) => [400, 600, 839, 301]
"""
start, end = input_interval[0], input_interval[1]
matching_spikes = []
for spike_time in output_conv_spikes:
if spike_time > start and spike_time - 40 < end:
matching_spikes.append(spike_time)
return matching_spikes
res = {} # KEY=CONV_ID ; VALUE=[PRECISION, RECALL, F1]
conv_variables = {conv_id: [0, 0, 0] for conv_id in range(NB_CONV_LAYERS)} # KEY=COND_ID ; VALUE=[Nb_TruePositive, Nb_FalsePositive, Nb_FalseNegative]
# Get TP, FP, FN for each interval, for each convolution layer
for input_interval, direction_id in input_data.items(): # For each generated input and its direction
for conv_id, output_spikes in conv_output_spikes.items(): #
TP = FP = FN = 0
direction_of_conv = conv_to_direction[conv_id]
if not pickup(input_interval, output_spikes): # No matching spikes
if direction_id == direction_of_conv: # If it was in the same direction, we should have had a spike
FN += 1 # TODO change this so we increase the number of spikes we get normally (2, 3 or more ?)
else:
if direction_id == direction_of_conv:
TP += 1
else:
FP += 1
conv_variables[conv_id] = [sum(x) for x in zip(conv_variables[conv_id], [TP, FP, FN])]
# Now compute metrics for each layer
for conv_id, tp_fp_fn in conv_variables.items():
TP, FP, FN = tp_fp_fn[0], tp_fp_fn[1], tp_fp_fn[2]
if TP == 0: # Avoid "dividing by zero" exception
res[conv_id] = [0, 0, 0]
else:
PRECISION = TP / (TP + FP)
RECALL = TP / (TP + FN)
F1 = 2 * ((PRECISION * RECALL) / (PRECISION + RECALL))
res[conv_id] = [PRECISION, RECALL, F1]
return res
### Simulation parameters
growth_factor = (0.001/pattern_interval)*pattern_duration # TODO: just multiply by the duration in the STDP. We increase each delay by this constant at each step
# Stop Condition
c = 1.0
# STDP weight
A_plus = 0.05
A_minus = 0.05
tau_plus= 1.0
tau_minus= 1.0
# STDP delay (2.5 is good too)
B_plus = 5.0
B_minus = 5.0
teta_plus = 1.0
teta_minus = 1.0
STDP_sampling = pattern_interval
### Launch simulation
visu = visualiseTime(sampling_interval=500)
wd_rec = WeightDelayRecorder(sampling_interval=1, proj=Input_to_Conv_i[0])
Input_spikes = LastSpikeRecorder(sampling_interval=STDP_sampling, pop=Input)
Conv_i_spikes = []
Input_to_conv_i_delay_weight = []
for i in range(NB_CONV_LAYERS):
Conv_i_spikes.append(LastSpikeRecorder(sampling_interval=STDP_sampling, pop=ConvLayers[i]))
Input_to_conv_i_delay_weight.append(WeightDelayRecorder(sampling_interval=STDP_sampling, proj=Input_to_Conv_i[i]))
Learn_i = []
for i in range(NB_CONV_LAYERS):
Learn_i.append(LearningMechanisms(sampling_interval=STDP_sampling,
input_spikes_recorder=Input_spikes,
output_spikes_recorder=Conv_i_spikes[i],
projection=Input_to_Conv_i[i],
projection_delay_weight_recorder=Input_to_conv_i_delay_weight[i],
B_plus=B_plus,
B_minus=B_minus,
tau_plus=tau_plus,
tau_minus=tau_minus,
filter_d=Delay_conv[i],
A_plus=A_plus,
A_minus=A_minus,
teta_plus=teta_plus,
teta_minus=teta_minus,
filter_w=Weight_conv[i] ,
stop_condition=c,
growth_factor=growth_factor,
label=i))
neuron_reset = NeuronReset(sampling_interval=pattern_interval-15, pops=ConvLayers)
# input_clear = InputClear(sampling_interval=pattern_interval+1, pops_to_clear_data=Input)
callbacks = callbacks_(sampling_interval=1)
callback_list = [visu, wd_rec, Input_spikes, *Conv_i_spikes, *Input_to_conv_i_delay_weight, *Learn_i, neuron_reset]
for obj in callback_list:
callbacks.add_object(obj)
sim.run(time_data, callbacks=[callbacks])
print("complete simulation run time:", dt.now() - start)
### Plot figure
if options.plot_figure :
extension = '_'+str(NB_DIRECTIONS)+'directions_'+str(NB_CONV_LAYERS)+'filters'
title = "Delay learning - "+ str(NB_DIRECTIONS)+ " directions - "+str(NB_CONV_LAYERS)+" filters"
Conv_i_data = [conv_i.get_data().segments[0] for conv_i in ConvLayers]
Input_spikes = Input_spikes.get_spikes()
Conv_i_spikes = [conv_i_spikes.get_spikes() for conv_i_spikes in Conv_i_spikes]
figure_filename = normalized_filename("Results", "delay_learning"+extension, "png", options.simulator)
figure_params = []
# Add reaction neurons spike times
for i in range(NB_CONV_LAYERS):
direction_id_of_conv = -1 if i not in conv_to_direction else conv_to_direction[i]
direction_str = DIRECTIONS[direction_id_of_conv]
figure_params.append(Panel(Conv_i_spikes[i], xlabel="Conv"+str(i)+" spikes - "+direction_str+"("+str(direction_id_of_conv)+")", yticks=True, markersize=0.2, xlim=(0, time_data), ylim=(0, ConvLayers[i].size)))
for i in range(NB_CONV_LAYERS):
figure_params.append(Panel(Conv_i_data[i].filter(name='v')[0], xlabel="Membrane potential (mV) - Conv"+str(i)+" layer", yticks=True, xlim=(0, time_data), linewidth=0.2, legend=False))
Figure(
# raster plot of the event inputs spike times
Panel(Input_spikes, xlabel="Input spikes", yticks=True, markersize=0.2, xlim=(0, time_data), ylim=(0, Input.size)),
*figure_params,
title=title,
annotations="Simulated with "+ options.simulator.upper()
).save(figure_filename)
visu.print_final_filters()
print("Figures correctly saved as", figure_filename)
if len(conv_to_direction) != NB_DIRECTIONS: # TODO replace by NB_CONV_LAYERS ?
print("Cannot compute metrics : the convolution layers did not converge towards a direction in a sufficiently large number", flush=True)
else:
print("Computing metrics...", flush=True)
# Fill conv_output_spikes with every spike time produced in each convolution layers
for i in range(NB_CONV_LAYERS):
conv_output_spikes[i] = spikes_train_to_single_array(Conv_i_spikes[i])
print(Metrics().compute_metrics())
#plt.show()