diff --git a/model_binary.tflite b/Demonstration Files/model_binary.tflite
similarity index 100%
rename from model_binary.tflite
rename to Demonstration Files/model_binary.tflite
diff --git a/model_fp.tflite b/Demonstration Files/model_fp.tflite
similarity index 100%
rename from model_fp.tflite
rename to Demonstration Files/model_fp.tflite
diff --git a/Archive/Model_CH_01.py b/Model Archive/Model_CH_01.py
similarity index 100%
rename from Archive/Model_CH_01.py
rename to Model Archive/Model_CH_01.py
diff --git a/Archive/Model_CH_02.py b/Model Archive/Model_CH_02.py
similarity index 100%
rename from Archive/Model_CH_02.py
rename to Model Archive/Model_CH_02.py
diff --git a/Archive/Model_CH_03.py b/Model Archive/Model_CH_03.py
similarity index 100%
rename from Archive/Model_CH_03.py
rename to Model Archive/Model_CH_03.py
diff --git a/Archive/Model_CH_04.py b/Model Archive/Model_CH_04.py
similarity index 100%
rename from Archive/Model_CH_04.py
rename to Model Archive/Model_CH_04.py
diff --git a/Archive/Model_CH_05.py b/Model Archive/Model_CH_05.py
similarity index 100%
rename from Archive/Model_CH_05.py
rename to Model Archive/Model_CH_05.py
diff --git a/Archive/Model_CH_06.py b/Model Archive/Model_CH_06.py
similarity index 100%
rename from Archive/Model_CH_06.py
rename to Model Archive/Model_CH_06.py
diff --git a/Model_CH_07.py b/Model Archive/Model_CH_07.py
similarity index 100%
rename from Model_CH_07.py
rename to Model Archive/Model_CH_07.py
diff --git a/Model_CH_08.py b/Model Archive/Model_CH_08.py
similarity index 100%
rename from Model_CH_08.py
rename to Model Archive/Model_CH_08.py
diff --git a/Model_CH_09.py b/Model Archive/Model_CH_09.py
similarity index 100%
rename from Model_CH_09.py
rename to Model Archive/Model_CH_09.py
diff --git a/Model_CH_10.py b/Model Archive/Model_CH_10.py
similarity index 100%
rename from Model_CH_10.py
rename to Model Archive/Model_CH_10.py
diff --git a/Model_CH_11.py b/Model Archive/Model_CH_11.py
similarity index 100%
rename from Model_CH_11.py
rename to Model Archive/Model_CH_11.py
diff --git a/Model_CH_12.py b/Model Archive/Model_CH_12.py
similarity index 100%
rename from Model_CH_12.py
rename to Model Archive/Model_CH_12.py
diff --git a/Model_CH_13.py b/Model Archive/Model_CH_13.py
similarity index 100%
rename from Model_CH_13.py
rename to Model Archive/Model_CH_13.py
diff --git a/Archive/Model_KS_02.py b/Model Archive/Model_KS_02.py
similarity index 100%
rename from Archive/Model_KS_02.py
rename to Model Archive/Model_KS_02.py
diff --git a/Archive/Model_KS_03.py b/Model Archive/Model_KS_03.py
similarity index 100%
rename from Archive/Model_KS_03.py
rename to Model Archive/Model_KS_03.py
diff --git a/Model_Final_Binary.py b/Model_Final_Binary.py
new file mode 100644
index 0000000000000000000000000000000000000000..a03c7f7236a4edd207238adaa423cb20c83e166c
--- /dev/null
+++ b/Model_Final_Binary.py
@@ -0,0 +1,242 @@
+###################Dependency Imports#################
+import tensorflow as tf
+import larq as lq
+import numpy as np
+import os
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plot
+from sklearn.metrics import confusion_matrix
+import time
+from pathlib import Path
+import datetime
+
+#################OUR IMPORTS##################
+import PreProcessing
+import ModelFunctions
+
+##Sharath's Imports
+import metrics
+import utils
+
+class_dict_classToNum = {
+    'brakes squeaking': 0,
+    'car': 1,
+    'children': 2,
+    'large vehicle': 3,
+    'people speaking': 4,
+    'people walking': 5
+}
+class_dict_numToClass = dict(map(reversed, class_dict_classToNum.items()))
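+# e.g. class_dict_numToClass[1] == 'car'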
+
+class model:
+    def __init__(self):
+        self.model = tf.keras.models.Sequential()
+
+    def Create_Binary_Model(self, X_train, Y_train):
+        # Binarized CNN model definition
+        cnn_nb_filt = 128  # number of CNN filters
+        cnn_pool_size = [2, 2]  # max-pooling across frequency; length = number of quantized CNN layers
+        fp_cnn_pool_size = 5  # frequency pooling after the full-precision first conv layer
+        fc_nb = [32]  # number of FC nodes; length of fc_nb = number of FC layers
+        dropout_rate = 0.2  # dropout after each FC layer
+
+        kwargs = dict(
+            input_quantizer="ste_sign",
+            kernel_quantizer="ste_sign",
+            kernel_constraint="weight_clip")
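+        # ste_sign binarizes inputs and kernels to +/-1 in the forward pass and uses the
+        # straight-through estimator for gradients; weight_clip keeps the latent weights
+        # in [-1, 1] so the sign function does not saturate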
+
+        self.model.add(tf.keras.layers.Conv2D(cnn_nb_filt, (3, 3), input_shape=(X_train.shape[1], X_train.shape[2], 1), padding="same"))
+        self.model.add(tf.keras.layers.Activation("relu"))  # change the order of activation and batch normalization according to "https://docs.larq.dev/compute-engine/end_to_end/"
+        self.model.add(tf.keras.layers.BatchNormalization())
+        self.model.add(tf.keras.layers.MaxPooling2D((1, fp_cnn_pool_size)))
+
+        for _pool in cnn_pool_size:
+            self.model.add(lq.layers.QuantConv2D(cnn_nb_filt, (3, 3), padding="same", **kwargs))
+            self.model.add(tf.keras.layers.Activation("relu"))  # activation before batch normalization, per https://docs.larq.dev/compute-engine/end_to_end/
+            self.model.add(tf.keras.layers.BatchNormalization())
+            self.model.add(tf.keras.layers.MaxPooling2D((1, _pool)))
+        self.model.add(tf.keras.layers.Reshape(
+            (X_train.shape[-2], int(cnn_nb_filt * (X_train.shape[-1] / (fp_cnn_pool_size * np.prod(cnn_pool_size)))))))
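+        # flatten filters x remaining frequency bins into one feature vector per time
+        # frame, giving (batch, time, features) for the TimeDistributed layers below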
+
+        for _f in fc_nb:
+            self.model.add(tf.keras.layers.TimeDistributed(lq.layers.QuantDense(_f, **kwargs)))
+            self.model.add(tf.keras.layers.Activation("relu"))  # added relu activation function
+            self.model.add(tf.keras.layers.BatchNormalization())
+            self.model.add(tf.keras.layers.Dropout(dropout_rate))
+
+        self.model.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(Y_train.shape[2])))
+        self.model.add(tf.keras.layers.Activation("sigmoid", name="strong_out"))
+        case_optimizer = lq.optimizers.CaseOptimizer(
+            (
+                lq.optimizers.Bop.is_binary_variable,  # predicate
+                lq.optimizers.Bop(threshold=1e-5, gamma=1e-6),  # optimizer
+            ),
+            default_optimizer=tf.keras.optimizers.Adam(learning_rate=0.001, decay=0.001, name='Adam'),
+        )  # Case Bop optimizer
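+        # Bop flips a binary kernel's sign when the exponential moving average of its
+        # gradient (adaptivity rate gamma) crosses the threshold; Adam handles the
+        # remaining real-valued variables (batch norm and the full-precision first and last layers)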
+        self.model.compile(optimizer=case_optimizer, loss='binary_crossentropy')
+
+    def Summary(self):
+        lq.models.summary(self.model)
+
+def plot_functions(_nb_epoch, _tr_loss, _val_loss, _f1, _er, extension=''):
+    plot.figure()
+
+    plot.subplot(211)
+    plot.plot(range(_nb_epoch), _tr_loss, label='train loss')
+    plot.plot(range(_nb_epoch), _val_loss, label='val loss')
+    plot.legend()
+    plot.grid(True)
+
+    plot.subplot(212)
+    plot.plot(range(_nb_epoch), _f1, label='f')
+    plot.plot(range(_nb_epoch), _er, label='er')
+    plot.legend()
+    plot.grid(True)
+
+    plot.savefig(__models_dir + __fig_name + extension)
+    plot.close()
+    print('figure name : {}'.format(__fig_name))
+
+if __name__ == '__main__':
+    #set up some classes
+    preprocess = PreProcessing.npz_preprocessing()
+    our_model = model()
+    model_filepath = ""
+
+    avg_er = list()
+    avg_f1 = list()
+    for fold in [1, 2, 3, 4]:
+        # COMMENT AND UNCOMMENT TO RUN LOCALLY:
+        # IRIDIS
+        #train_data_path = "/mainfs/cdt/TUT-sound-events-2017-modified/processed/train_fold_{}_data.npz".format(fold)
+        #test_data_path = "/mainfs/cdt/TUT-sound-events-2017-modified/processed/test_fold_{}_data.npz".format(fold)
+        #file_name = os.path.splitext(__file__)[0]
+        #__models_dir = '/mainfs/cdt/models/' + file_name + "/"
+
+        #LOCAL - For MAC
+        #train_data_path = "/Users/charles/Documents/MINDS/Year1/6003_Project/datasets_processed/TUT-2016/train_fold_{}_data.npz".format(fold)
+        #test_data_path = "/Users/charles/Documents/MINDS/Year1/6003_Project/datasets_processed/TUT-2016/test_fold_{}_data.npz".format(fold)
+        #file_name = os.path.splitext(os.path.basename(__file__))[0]
+        #__models_dir = '/Users/charles/Documents/MINDS/Year1/6003_Project/local_testing/models/' + file_name + "/"
+
+        # LOCAL - For Linux
+        train_data_path = "/home/charles/Documents/MINDS/Year1/Interdisciplinary_Project_6003/datasets/TUT-sound-events-2017-modified/processed/train_fold_{}_data.npz".format(fold)
+        test_data_path = "/home/charles/Documents/MINDS/Year1/Interdisciplinary_Project_6003/datasets/TUT-sound-events-2017-modified/processed/test_fold_{}_data.npz".format(fold)
+        file_name = os.path.splitext(os.path.basename(__file__))[0]
+        __models_dir = '/home/charles/Documents/MINDS/Year1/Interdisciplinary_Project_6003/PyProjects/localtesting/models/' + file_name + "/"
+
+        print("########FOLD: {}".format(fold))
+
+        batch_size = 128  # decrease this to run on smaller GPUs
+        seq_len = 256  # frame sequence length fed to the network
+        nb_epoch = 500  # Training epochs
+        patience = int(0.25 * nb_epoch)  # Patience for early stopping
+        Path(__models_dir).mkdir(parents=True, exist_ok=True)
+
+        X_train_orig, Y_train = preprocess.load_from_npz(train_data_path)
+        X_train_orig, Y_train = preprocess.split_into_batches(X_train_orig, Y_train, seq_len)
+
+        X_test_orig, Y_test = preprocess.load_from_npz(test_data_path)
+        X_test_orig, Y_test = preprocess.split_into_batches(X_test_orig, Y_test, seq_len)
+
+        print("Creating New Model...")
+        our_model = model()
+        our_model.Create_Binary_Model(X_train_orig, Y_train)
+        our_model.Summary()
+
+        #get the data ready for the network by adding another dimension for the feature maps
+        X_train = X_train_orig.reshape((X_train_orig.shape[0], X_train_orig.shape[1], X_train_orig.shape[2], 1))
+        X_test = X_test_orig.reshape((X_test_orig.shape[0], X_test_orig.shape[1], X_test_orig.shape[2], 1))
+
+        #------------------------------------
+        # the next bit is adapted from Sharath's SED code
+        #------------------------------------
+        # Number of frames in 1 second, required to calculate F and ER for 1 sec segments.
+        # Make sure the nfft and sr are the same as in feature.py
+        sr = 44100
+        nfft = 2048
+        frames_1_sec = int(sr / (nfft / 2.0))
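+        # hop length is nfft/2, so e.g. 44100 / 1024 = 43.07 -> 43 frames per one-second segment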
+
+        __fig_name = time.strftime("%m_%d_%H_%M_%S")
+        file_name = os.path.splitext(os.path.basename(__file__))[0]  # basename only; with the full path, os.path.join below would escape __models_dir
+
+        # TensorBoard Vars
+        log_dir = __models_dir + "TensorLogs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+        file_writer = tf.summary.create_file_writer(log_dir + "/metrics")
+        file_writer.set_as_default()
+        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
+                                                              histogram_freq=0,
+                                                              write_graph=True,
+                                                              write_images=True,
+                                                              update_freq='epoch',
+                                                              profile_batch=2,
+                                                              embeddings_freq=1)
+
+        # Training
+        best_epoch, pat_cnt, best_er, f1_for_best_er, best_conf_mat = 0, 0, 99999, None, None
+        tr_loss, val_loss, f1_overall_1sec_list, er_overall_1sec_list = [0] * nb_epoch, [0] * nb_epoch, [0] * nb_epoch, [0] * nb_epoch
+        posterior_thresh = 0.5
+        for i in range(nb_epoch):
+            print('Epoch : {} '.format(i), end='')
+            hist = our_model.model.fit(
+                X_train, Y_train,
+                batch_size=batch_size,
+                validation_data=(X_test, Y_test),
+                epochs=1,
+                verbose=2,
+                callbacks=[tensorboard_callback]
+            )
+
+            val_loss[i] = hist.history.get('val_loss')[-1]
+            tr_loss[i] = hist.history.get('loss')[-1]
+
+            # Calculate the predictions on test data, in order to calculate ER and F scores
+            pred = our_model.model.predict(X_test)
+            pred_thresh = pred > posterior_thresh
+            score_list = metrics.compute_scores(pred_thresh, Y_test, frames_in_1_sec=frames_1_sec)
+
+            f1_overall_1sec_list[i] = score_list['f1_overall_1sec']
+            er_overall_1sec_list[i] = score_list['er_overall_1sec']
+            pat_cnt = pat_cnt + 1
+
+            # Calculate confusion matrix
+            test_pred_cnt = np.sum(pred_thresh, 2)
+            Y_test_cnt = np.sum(Y_test, 2)
+            conf_mat = confusion_matrix(Y_test_cnt.reshape(-1), test_pred_cnt.reshape(-1))
+            conf_mat = conf_mat / (utils.eps + np.sum(conf_mat, 1)[:, None].astype('float'))
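+            # this "confusion matrix" compares counts of simultaneously active events per
+            # frame; row-normalising makes each true-count row sum to ~1 (eps guards empty rows)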
+
+            if er_overall_1sec_list[i] < best_er:
+                best_conf_mat = conf_mat
+                best_er = er_overall_1sec_list[i]
+                f1_for_best_er = f1_overall_1sec_list[i]
+                our_model.model.save(os.path.join(__models_dir, '{}__{}.tf'.format(file_name, __fig_name)), save_format='tf')
+                model_filepath = __models_dir + '{}__{}.tf'.format(file_name, __fig_name)
+                best_epoch = i
+                pat_cnt = 0
+
+            print('tr loss : {}, val loss : {}, F1_overall : {}, ER_overall : {} Best ER : {}, best_epoch: {}'.format(
+                tr_loss[i], val_loss[i], f1_overall_1sec_list[i], er_overall_1sec_list[i], best_er, best_epoch))
+            plot_functions(nb_epoch, tr_loss, val_loss, f1_overall_1sec_list, er_overall_1sec_list)
+            with file_writer.as_default():
+                tf.summary.scalar('f1', f1_overall_1sec_list[i], step=i)
+                tf.summary.scalar('er', er_overall_1sec_list[i], step=i)
+                tf.summary.scalar('validation_loss', val_loss[i], step=i)
+                tf.summary.scalar('train_loss', tr_loss[i], step=i)
+            if pat_cnt > patience:
+                break  # early stopping, mirroring the full-precision script: ER has not improved for `patience` epochs
+        avg_er.append(best_er)
+        avg_f1.append(f1_for_best_er)
+        print('saved model for the best_epoch: {} with best_er: {} f1_for_best_er: {}'.format(
+            best_epoch, best_er, f1_for_best_er))
+        print('best_conf_mat: {}'.format(best_conf_mat))
+        print('best_conf_mat_diag: {}'.format(np.diag(best_conf_mat)))
+
+        # let's make some graphs
+        fold_label_dir = __models_dir + "{}/".format(fold)
+        Path(fold_label_dir).mkdir(parents=True, exist_ok=True)
+        ModelFunctions.Generate_Model_Graphs(X_test, model_filepath, fold_label_dir)
+        ModelFunctions.Generate_Ground_Truth_Graphs(Y_test, fold_label_dir)
+
+    print('\n\nMETRICS FOR ALL FOUR FOLDS: avg_er: {}, avg_f1: {}'.format(avg_er, avg_f1))
+    print('MODEL AVERAGE OVER FOUR FOLDS: avg_er: {}, avg_f1: {}'.format(np.mean(avg_er), np.mean(avg_f1)))
\ No newline at end of file
diff --git a/Model_Final_FullPrecision.py b/Model_Final_FullPrecision.py
new file mode 100644
index 0000000000000000000000000000000000000000..551a123cb161d64da2c0d014cef90d749597b27a
--- /dev/null
+++ b/Model_Final_FullPrecision.py
@@ -0,0 +1,230 @@
+###################Dependency Imports#################
+import tensorflow as tf
+import larq as lq
+import numpy as np
+import os
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plot
+from sklearn.metrics import confusion_matrix
+import time
+from pathlib import Path
+import datetime
+
+#################OUR IMPORTS##################
+import PreProcessing
+import ModelFunctions
+
+##Sharath's Imports
+import metrics
+import utils
+
+class_dict_classToNum = {
+    'brakes squeaking': 0,
+    'car': 1,
+    'children': 2,
+    'large vehicle': 3,
+    'people speaking': 4,
+    'people walking': 5
+}
+class_dict_numToClass = dict(map(reversed, class_dict_classToNum.items()))
+
+class model:
+    def __init__(self):
+        self.model = tf.keras.models.Sequential()
+
+    def Create_FP_Model(self, X_train, Y_train):
+        # CNN model definition
+        cnn_nb_filt = 128  # number of CNN filters
+        cnn_pool_size = [5, 2, 2]  # max-pooling across frequency; length = number of CNN layers
+        cnn_kernel_size = [5, 3, 3]  # square kernel size per CNN layer
+        fc_nb = [32]  # number of FC nodes; length of fc_nb = number of FC layers
+        dropout_rate = 0.5  # dropout after each FC layer
+
+        for _i, _cnt in enumerate(cnn_pool_size):
+            self.model.add(tf.keras.layers.Conv2D(cnn_nb_filt, cnn_kernel_size[_i], input_shape=(X_train.shape[1], X_train.shape[2], 1), padding="same"))  # "same" padding keeps the dims compatible with the Reshape below (padding="valid" raised a shape error); Keras only honours input_shape on the first layer
+            self.model.add(tf.keras.layers.BatchNormalization())
+            self.model.add(tf.keras.layers.Activation("relu"))
+            self.model.add(tf.keras.layers.MaxPooling2D((1, cnn_pool_size[_i])))
+        self.model.add(tf.keras.layers.Reshape((X_train.shape[-2], int(cnn_nb_filt * (X_train.shape[-1] / np.prod(cnn_pool_size))))))
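+        # the (1, p) pools act on frequency only, so the time axis still lines up with the
+        # per-frame labels when the Reshape flattens filters x frequency bins into features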
+
+        for _f in fc_nb:
+            self.model.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(_f)))
+            self.model.add(tf.keras.layers.BatchNormalization())
+            self.model.add(tf.keras.layers.Activation("relu"))  #added relu activation function
+            self.model.add(tf.keras.layers.Dropout(dropout_rate))
+
+        # Custom Adam Optimiser
+        opt = tf.keras.optimizers.Adam(learning_rate=0.001, decay=0.001, name='Adam')
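+        # with the legacy Keras `decay` argument the step size shrinks as
+        # lr / (1 + decay * iterations) over the course of training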
+
+        self.model.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(Y_train.shape[2])))
+        self.model.add(tf.keras.layers.Activation("sigmoid", name="strong_out"))
+        self.model.compile(optimizer=opt, loss='binary_crossentropy')
+
+    def Summary(self):
+        lq.models.summary(self.model)
+
+def plot_functions(_nb_epoch, _tr_loss, _val_loss, _f1, _er, extension=''):
+    plot.figure()
+
+    plot.subplot(211)
+    plot.plot(range(_nb_epoch), _tr_loss, label='train loss')
+    plot.plot(range(_nb_epoch), _val_loss, label='val loss')
+    plot.legend()
+    plot.grid(True)
+
+    plot.subplot(212)
+    plot.plot(range(_nb_epoch), _f1, label='f')
+    plot.plot(range(_nb_epoch), _er, label='er')
+    plot.legend()
+    plot.grid(True)
+
+    plot.savefig(__models_dir + __fig_name + extension)
+    plot.close()
+    print('figure name : {}'.format(__fig_name))
+
+if __name__ == '__main__':
+    #set up some classes
+    preprocess = PreProcessing.npz_preprocessing()
+    our_model = model()
+    model_filepath = ""
+
+    avg_er = list()
+    avg_f1 = list()
+    for fold in [1, 2, 3, 4]:
+        # COMMENT AND UNCOMMENT TO RUN LOCALLY:
+        # IRIDIS
+        #train_data_path = "/mainfs/cdt/TUT-sound-events-2017-modified/processed/train_fold_{}_data.npz".format(fold)
+        #test_data_path = "/mainfs/cdt/TUT-sound-events-2017-modified/processed/test_fold_{}_data.npz".format(fold)
+        #file_name = os.path.splitext(__file__)[0]
+        #__models_dir = '/mainfs/cdt/models/' + file_name + "/"
+
+        #LOCAL - For MAC
+        #train_data_path = "/Users/charles/Documents/MINDS/Year1/6003_Project/datasets_processed/TUT-2016/train_fold_{}_data.npz".format(fold)
+        #test_data_path = "/Users/charles/Documents/MINDS/Year1/6003_Project/datasets_processed/TUT-2016/test_fold_{}_data.npz".format(fold)
+        #file_name = os.path.splitext(os.path.basename(__file__))[0]
+        #__models_dir = '/Users/charles/Documents/MINDS/Year1/6003_Project/local_testing/models/' + file_name + "/"
+
+        # LOCAL - For Linux
+        train_data_path = "/home/charles/Documents/MINDS/Year1/Interdisciplinary_Project_6003/datasets/TUT-sound-events-2017-modified/processed/train_fold_{}_data.npz".format(fold)
+        test_data_path = "/home/charles/Documents/MINDS/Year1/Interdisciplinary_Project_6003/datasets/TUT-sound-events-2017-modified/processed/test_fold_{}_data.npz".format(fold)
+        file_name = os.path.splitext(os.path.basename(__file__))[0]
+        __models_dir = '/home/charles/Documents/MINDS/Year1/Interdisciplinary_Project_6003/PyProjects/localtesting/models/' + file_name + "/"
+
+        print("########FOLD: {}".format(fold))
+
+        batch_size = 128  # decrease this to run on smaller GPUs
+        seq_len = 256  # frame sequence length fed to the network
+        nb_epoch = 500  # Training epochs
+        patience = int(0.25 * nb_epoch)  # Patience for early stopping
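+        # a fold is abandoned once ER has not improved for 25% of nb_epoch (125 epochs)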
+        Path(__models_dir).mkdir(parents=True, exist_ok=True)
+
+        X_train_orig, Y_train = preprocess.load_from_npz(train_data_path)
+        X_train_orig, Y_train = preprocess.split_into_batches(X_train_orig, Y_train, seq_len)
+
+        X_test_orig, Y_test = preprocess.load_from_npz(test_data_path)
+        X_test_orig, Y_test = preprocess.split_into_batches(X_test_orig, Y_test, seq_len)
+
+        print("Creating New Model...")
+        our_model = model()
+        our_model.Create_FP_Model(X_train_orig, Y_train)
+        our_model.Summary()
+
+        #get the data ready for the network by adding another dimension for the feature maps
+        X_train = X_train_orig.reshape((X_train_orig.shape[0], X_train_orig.shape[1], X_train_orig.shape[2], 1))
+        X_test = X_test_orig.reshape((X_test_orig.shape[0], X_test_orig.shape[1], X_test_orig.shape[2], 1))
+
+        #------------------------------------
+        # the next bit is adapted from Sharath's SED code
+        #------------------------------------
+        # Number of frames in 1 second, required to calculate F and ER for 1 sec segments.
+        # Make sure the nfft and sr are the same as in feature.py
+        sr = 44100
+        nfft = 2048
+        frames_1_sec = int(sr / (nfft / 2.0))
+
+        __fig_name = time.strftime("%m_%d_%H_%M_%S")
+        file_name = os.path.splitext(os.path.basename(__file__))[0]  # basename only; with the full path, os.path.join below would escape __models_dir
+
+        # TensorBoard Vars
+        log_dir = __models_dir + "TensorLogs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+        file_writer = tf.summary.create_file_writer(log_dir + "/metrics")
+        file_writer.set_as_default()
+        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
+                                                              histogram_freq=0,
+                                                              write_graph=True,
+                                                              write_images=True,
+                                                              update_freq='epoch',
+                                                              profile_batch=2,
+                                                              embeddings_freq=1)
+
+        # Training
+        best_epoch, pat_cnt, best_er, f1_for_best_er, best_conf_mat = 0, 0, 99999, None, None
+        tr_loss, val_loss, f1_overall_1sec_list, er_overall_1sec_list = [0] * nb_epoch, [0] * nb_epoch, [0] * nb_epoch, [0] * nb_epoch
+        posterior_thresh = 0.5
+        for i in range(nb_epoch):
+            print('Epoch : {} '.format(i), end='')
+            hist = our_model.model.fit(
+                X_train, Y_train,
+                batch_size=batch_size,
+                validation_data=(X_test, Y_test),
+                epochs=1,
+                verbose=2,
+                callbacks=[tensorboard_callback]
+            )
+
+            val_loss[i] = hist.history.get('val_loss')[-1]
+            tr_loss[i] = hist.history.get('loss')[-1]
+
+            # Calculate the predictions on test data, in order to calculate ER and F scores
+            pred = our_model.model.predict(X_test)
+            pred_thresh = pred > posterior_thresh
+            score_list = metrics.compute_scores(pred_thresh, Y_test, frames_in_1_sec=frames_1_sec)
+
+            f1_overall_1sec_list[i] = score_list['f1_overall_1sec']
+            er_overall_1sec_list[i] = score_list['er_overall_1sec']
+            pat_cnt = pat_cnt + 1
+
+            # Calculate confusion matrix
+            test_pred_cnt = np.sum(pred_thresh, 2)
+            Y_test_cnt = np.sum(Y_test, 2)
+            conf_mat = confusion_matrix(Y_test_cnt.reshape(-1), test_pred_cnt.reshape(-1))
+            conf_mat = conf_mat / (utils.eps + np.sum(conf_mat, 1)[:, None].astype('float'))
+
+            if er_overall_1sec_list[i] < best_er:
+                best_conf_mat = conf_mat
+                best_er = er_overall_1sec_list[i]
+                f1_for_best_er = f1_overall_1sec_list[i]
+                our_model.model.save(os.path.join(__models_dir, '{}__{}.tf'.format(file_name, __fig_name)), save_format='tf')
+                model_filepath = __models_dir + '{}__{}.tf'.format(file_name, __fig_name)
+                best_epoch = i
+                pat_cnt = 0
+
+            print('tr loss : {}, val loss : {}, F1_overall : {}, ER_overall : {} Best ER : {}, best_epoch: {}'.format(
+                tr_loss[i], val_loss[i], f1_overall_1sec_list[i], er_overall_1sec_list[i], best_er, best_epoch))
+            plot_functions(nb_epoch, tr_loss, val_loss, f1_overall_1sec_list, er_overall_1sec_list)
+            with file_writer.as_default():
+                tf.summary.scalar('f1', f1_overall_1sec_list[i], step=i)
+                tf.summary.scalar('er', er_overall_1sec_list[i], step=i)
+                tf.summary.scalar('validation_loss', val_loss[i], step=i)
+                tf.summary.scalar('train_loss', tr_loss[i], step=i)
+            if pat_cnt > patience:
+                break
+        avg_er.append(best_er)
+        avg_f1.append(f1_for_best_er)
+        print('saved model for the best_epoch: {} with best_er: {} f1_for_best_er: {}'.format(
+            best_epoch, best_er, f1_for_best_er))
+        print('best_conf_mat: {}'.format(best_conf_mat))
+        print('best_conf_mat_diag: {}'.format(np.diag(best_conf_mat)))
+
+        # let's make some graphs
+        fold_label_dir = __models_dir + "{}/".format(fold)
+        Path(fold_label_dir).mkdir(parents=True, exist_ok=True)
+        ModelFunctions.Generate_Model_Graphs(X_test, model_filepath, fold_label_dir)
+        ModelFunctions.Generate_Ground_Truth_Graphs(Y_test, fold_label_dir)
+
+    print('\n\nMETRICS FOR ALL FOUR FOLDS: avg_er: {}, avg_f1: {}'.format(avg_er, avg_f1))
+    print('MODEL AVERAGE OVER FOUR FOLDS: avg_er: {}, avg_f1: {}'.format(np.mean(avg_er), np.mean(avg_f1)))
\ No newline at end of file
diff --git a/run_on_iridis.sh b/Scripts/run_on_iridis.sh
similarity index 100%
rename from run_on_iridis.sh
rename to Scripts/run_on_iridis.sh
diff --git a/run_on_iridis_gpu.sh b/Scripts/run_on_iridis_gpu.sh
similarity index 100%
rename from run_on_iridis_gpu.sh
rename to Scripts/run_on_iridis_gpu.sh