From f7712082e3e214b1dafa30b7b8f533d770289ae2 Mon Sep 17 00:00:00 2001
From: at2n19 <at2n19@soton.ac.uk>
Date: Fri, 13 Jan 2023 12:00:35 +0000
Subject: [PATCH] Update main.py

---
 main.py | 575 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 575 insertions(+)

diff --git a/main.py b/main.py
index 8b13789..0fed648 100644
--- a/main.py
+++ b/main.py
@@ -1 +1,576 @@
+# From Python
+# It requires OpenCV installed for Python
+import sys
+import cv2
+import os
+from sys import platform
+import argparse
+import time
+import math
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+import scipy.spatial as spatial
+import serial
+from multiprocessing import Process, Manager, Queue, Event
+from multiprocessing.connection import Client
+import statistics
+import subprocess
 
+#TODO remove
+# use tf.keras throughout so this temporary model matches the `from tensorflow import keras` import above
+from tensorflow.keras.models import Model, model_from_json
+from tensorflow.keras.layers import Dense, Activation, Dropout, Input, Concatenate, Add
+from tensorflow.keras import backend as K
+
+
+############################TO BE DELETED ##########################################
+def multi():
+    input1 = Input(shape=(30,))
+
+    x = Dense(30)(input1)
+    x = Activation("tanh")(x)
+    x = Dropout(0.20)(x)
+    x = Dense(30)(x)
+    x = Activation("tanh")(x)
+    x = Dense(30)(x)
+    x = Activation("tanh")(x)
+    x = Dense(30)(x)
+    x = Activation("tanh")(x)
+    x = Dropout(0.20)(x)
+    x = Dense(30)(x)
+    x = Activation("tanh")(x)
+    x = Dense(15)(x)
+    x = Activation("tanh")(x)
+
+    x = Concatenate()([input1, x])
+    x = Dense(45)(x)
+    x = Activation("tanh")(x)
+    x = Dense(45)(x)
+    x = Activation("tanh")(x)
+    x = Dropout(0.10)(x)
+    x = Dense(30)(x)
+    x = Activation("tanh")(x)
+    x = Dense(15)(x)
+    x = Activation("tanh")(x)
+
+    x = Concatenate()([input1, x])
+    x = Dense(45)(x)
+    x = Activation("tanh")(x)
+    x = Dense(45)(x)
+    x = Activation("tanh")(x)
+    x = Dropout(0.10)(x)
+    x = Dense(30)(x)
+    x = Activation("tanh")(x)
+    x = Dense(15)(x)
+    x = Activation("tanh")(x)
+
+    x = Concatenate()([input1, x])
+    x = Dense(45)(x)
+    x = Activation("tanh")(x)
+    x = Dense(45)(x)
+    x = Activation("tanh")(x)
+    x = Dropout(0.10)(x)
+    x = Dense(30)(x)
+    x = Activation("tanh")(x)
+    x = Dense(15)(x)
+    x = Activation("tanh")(x)
+
+    x = Concatenate()([input1, x])
+    x = Dense(45)(x)
+    x = Activation("tanh")(x)
+    x = Dense(45)(x)
+    x = Activation("tanh")(x)
+    x = Dropout(0.10)(x)
+    x = Dense(30)(x)
+    x = Activation("tanh")(x)
+    x = Dense(15)(x)
+    final = Activation("tanh")(x)
+
+    model = Model(inputs=input1, outputs=final)
+    return model
+
+
+############################# GLOBAL VARIABLES ######################################
+command_port1 = 'COM6'
+data_port1 = 'COM7'
+command_port2 = None
+data_port2 = None
+command_port3 = None
+data_port3 = None
+
+##############################RADAR SESSION CLASS#####################################
+
+class RadarSession:
+
+    CONFIG_FILE = 'mmw_pplcount_demo_default.cfg'
+    TIMEOUT = 0.05
+    POINT_CLOUD_TLV = 6
+    TARGET_OBJECT_TLV = 7
+    TARGET_INDEX_TLV = 8
+    MAX_BUFFER_SIZE = 2 ** 15
+    TLV_HEADER_BYTES = 8
+    TARGET_LIST_TLV_BYTES = 68
+    POINT_CLOUD_TLV_POINT_BYTES = 16
+    MAGIC_WORD = [2, 1, 4, 3, 6, 5, 8, 7]
+    # word array to convert 4 bytes to a 32 bit number
+    WORD = [1, 2 ** 8, 2 ** 16, 2 ** 24]
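+    # illustrative example: the little-endian bytes [0x10, 0x01, 0x00, 0x00] decode to
+    # np.matmul([0x10, 0x01, 0x00, 0x00], WORD) = 16 + 256 = 272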
+
+    def __init__(self, command_port, data_port):
+        self.CLIport, self.Dataport = self.serialConfig(command_port, data_port)
+
+    # Function to configure the serial ports and send the data from
+    # the configuration file to the radar
+    def serialConfig(self, command_port, data_port):
+        # Open the serial ports for the configuration and the data ports
+        CLIport = serial.Serial(command_port, 115200)
+        Dataport = serial.Serial(data_port, 921600, timeout=self.TIMEOUT)
+        # Read the configuration file and send it to the board
+        with open(self.CONFIG_FILE) as config_file:
+            config = [line.rstrip('\r\n') for line in config_file]
+        for i in config:
+            CLIport.write((i + '\n').encode())
+            print(i)
+            time.sleep(0.01)
+        return CLIport, Dataport
+
+    # Function to read and parse the incoming data
+    def readAndParseData(self):
+        readBuffer = self.Dataport.read(
+            size=self.MAX_BUFFER_SIZE - 1)  # TODO this originally read Dataport.in_waiting bytes
+        byteVec = np.frombuffer(readBuffer, dtype='uint8')
+        byteBufferLength = len(byteVec)
+        # Check for all possible locations of the magic word
+        possibleLocs = np.where(byteVec == self.MAGIC_WORD[0])[0]
+        # Confirm that it is the beginning of the magic word and store the index in startIdxes
+        startIdxes = []
+        for loc in possibleLocs:
+            check = byteVec[loc:loc + 8]
+            if np.array_equal(check, self.MAGIC_WORD):
+                startIdxes.append(loc)
+        for startIdx in startIdxes:
+            # Read the total packet length
+            totalPacketLen = np.matmul(byteVec[(startIdx + 20):(startIdx + 20 + 4)], self.WORD)
+            # Check that all the packet has been read
+            if (startIdx + totalPacketLen) <= byteBufferLength:
+                # Initialize the pointer index
+                idX = startIdx + 48
+                numTLVs = np.matmul(byteVec[idX:idX + 2], self.WORD[0:2])
+                idX += 4
+                # If the number of TLV messages is at most 3
+                if 0 < numTLVs <= 3:
+                    # Read the TLV messages
+                    for tlvIdx in range(numTLVs):
+                        # Check the header of the TLV message
+                        tlv_type = np.matmul(byteVec[idX:idX + 4], self.WORD)
+                        idX += 4
+                        # Read the data depending on the TLV message
+                        if tlv_type == self.POINT_CLOUD_TLV:
+                            tlv_length = np.matmul(byteVec[idX:idX + 4], self.WORD)
+                            idX += (tlv_length - 4)
+                        # Read the data depending on the TLV message
+                        if tlv_type == self.TARGET_OBJECT_TLV:
+                            tlv_length = np.matmul(byteVec[idX:idX + 4], self.WORD)
+                            idX += 8  # +8 because the TID is not needed
+                            number_of_targets = int((tlv_length - self.TLV_HEADER_BYTES) / self.TARGET_LIST_TLV_BYTES)
+
+                            # Initialize array of 2D coordinates
+                            xy = np.zeros((number_of_targets, 2), dtype=np.float32)
+                            for target in range(number_of_targets):
+                                # Read the data for each target
+                                xy[target, 0] = byteVec[idX:idX + 4].view(dtype=np.float32)
+                                idX += 4
+                                xy[target, 1] = byteVec[idX:idX + 4].view(dtype=np.float32)
+                                idX += 64
+                            return xy
+        return None
+
+############################## UTILITY FUNCTIONS #####################################
+
+#This function opens a radar session on a specific command and data port, gives the radar session an id, and
+# continuously waits on an event to capture data from the radar. The output of the radar is fed into a queue.
+def radar_session(id, command_port, data_port, event, queue, *args):
+    try:
+        session = RadarSession(command_port, data_port)
+        while True:
+            event.wait()
+            xy = session.readAndParseData()
+            queue.put((id, xy))
+            event.clear()
+    except Exception:
+        print("Radar " + str(id) + " session could not be started")
+
+
+#This function takes as input an array of 2d coordinates detected by the three radars under the same reference frame and
+# returns an array of the corresponding x angle and euclidean distance
+def from_2d_to_angles_euclid(coords):
+    # coords shape = (N,2) where N=detected people, 2=(x,z)
+    angles_euclid = np.zeros((coords.shape[0], 2), dtype=np.float64)
+    # angles_euclid shape = (N,2) where N=detected people, 2=(x-angle, euclidean distance)
+    angles_euclid[:, 1] = np.sqrt(np.sum(coords ** 2, axis=1))
+    angles_euclid[:, 0] = np.arccos(coords[:, 0] / angles_euclid[:, 1]) * np.sign(coords[:, 1])
+    return angles_euclid
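+# Illustrative check of the conversion above (not part of the pipeline): a target at
+# (x, z) = (0, 2) is straight ahead, so its x-angle is arccos(0/2) * sign(2) = pi/2 and its
+# euclidean distance is sqrt(0^2 + 2^2) = 2:
+# >>> from_2d_to_angles_euclid(np.asarray([[0., 2.]]))
+# array([[1.57079633, 2.        ]])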
+
+
+#This function takes as input the 2D coordinates of the people detected by the three radars and converts them into the
+#same reference frame, which is the reference frame of the first radar
+def under_same_reference(coords1, coords2, coords3):
+    # coords1 shape = (N,2) where N=people detected, 2=(x,y)
+    # coords2 shape = (T,2) where T=people detected, 2=(x,y)
+    # coords3 shape = (S,2) where S=people detected, 2=(x,y)
+    radian2, radian3 = np.radians(120), np.radians(-120)
+    coords2 = np.dot(np.asarray([[np.cos(radian2), -np.sin(radian2)], [np.sin(radian2), np.cos(radian2)]]), coords2.T).T
+    # coords2 shape = (T,2) where T=people detected, 2=(x,y)
+    coords3 = np.dot(np.asarray([[np.cos(radian3), -np.sin(radian3)], [np.sin(radian3), np.cos(radian3)]]), coords3.T).T
+    # coords3 shape = (S,2) where S=people detected, 2=(x,y)
+    output = np.concatenate((coords1, coords2, coords3), axis=0)
+    # output shape = (N+T+S, 2)
+    return output
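+# Illustrative check (not part of the pipeline): coordinates from radar 2 are rotated by +120 degrees
+# into radar 1's frame, so a point at (1, 0) in radar 2's frame maps to
+# (cos 120, sin 120) = (-0.5, 0.866); radar 3's points are rotated by -120 degrees symmetrically.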
+
+
+def from_image_space_to_angle(pixel_keypoints, width, height):
+    # pixel_keypoints shape = (N, 15, 2) where N=people detected, 15=human body keypoints, 2=(x,y)
+    pixel_keypoints[:,:,0] = (pixel_keypoints[:,:,0] - (width/2)) * ((2. * np.pi) / width)
+    pixel_keypoints[:,:,1] = ((height / 2.) - pixel_keypoints[:,:,1]) * (np.pi / height)
+    x_angles = pixel_keypoints[:,:,0]
+    # convert camera angles into angles relative to x-axis
+    pixel_keypoints[:,:,0] = np.where(((x_angles >= -np.pi/2) & (x_angles <= np.pi)),  np.pi/2 - x_angles,
+                                      -np.pi + (-x_angles - np.pi/2))
+    return pixel_keypoints
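+# Illustrative check (not part of the pipeline): a keypoint at the image centre
+# (width/2, height/2) maps to camera angles (0, 0); since 0 lies in [-pi/2, pi], its
+# angle relative to the x-axis becomes pi/2 - 0 = pi/2, i.e. straight ahead, and the
+# y-angle of 0 means the keypoint is level with the camera.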
+
+
+# This function takes as input the x and y angles of each body keypoint relative to the camera, the euclidean distance
+# of the subject from the camera as measured by the radar in metric units and outputs real x and y coordinates + a
+# general depth z.
+def coordinates_from_angles(dataset, euclid_distance, camera_height):
+  # dataset shape = (N, 15, 2) where N=frames, 15=human body keypoints, 2=(x-angle,y-angle)
+  # euclid_distance shape = (N,1) where N=frames
+  # camera_height = double
+  x = np.cos(dataset[:,:,0]) * euclid_distance
+  # x shape = (N,15) where N=frames, 15=human body keypoints
+  y = camera_height + np.tan(dataset[:,:,1]) * euclid_distance
+  # y shape = (N,15) where N=frames, 15=human body keypoints
+  z = np.sin(dataset[:,:,0]) * euclid_distance
+  # z shape = (N,15) where N=frames, 15=human body keypoints
+  output = np.concatenate((np.expand_dims(x,2), np.expand_dims(y,2), np.expand_dims(z,2)), axis = 2)
+  # output shape = (N, 15, 3) where N=frames, 15=human body keypoints, 3=(x,y,z)
+  return output
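+# Illustrative check (not part of the pipeline): a keypoint straight ahead of the camera
+# (x-angle = pi/2, y-angle = 0) at a radar-measured distance of 2 m with a camera height of
+# 1.45 m maps to x = cos(pi/2)*2 = 0, y = 1.45 + tan(0)*2 = 1.45, z = sin(pi/2)*2 = 2.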
+
+
+# This function takes as input the x and y angles of each body keypoint relative to the camera, the angle and
+# euclidean distance of the subjects detected from the radar, which need to be converted in relation to the camera before
+# calling this function, and matches each subject detected from the camera to its euclidean distance measured by the radar
+def match_euclidean_distances_to_subjects(keypoint_angles, radar_data):
+    # keypoint_angles shape = (N, 15, 2) where N=people detected, 15=human body keypoints, 2=(x-angle,y-angle)
+    # radar_data shape = (N, 2) where N=people detected, 2=(x-angle, euclidean distance)
+    keypoint_mean_angles = np.nanmean(keypoint_angles, axis=1)
+    # keypoint_mean_angles shape = (N,2) where N=people detected, 2=(x-angle, y-angle)
+    keypoint_mean_angles = np.expand_dims(keypoint_mean_angles[:, 0], 1)
+    # keypoint_mean_angles shape = (N,1) where N=people detected
+    people_detected = keypoint_mean_angles.shape[0]
+    queries = radar_data[:, 0].tolist()
+    tree = spatial.KDTree(keypoint_mean_angles)
+    indexes = []
+    for query in queries:
+        for p in range(people_detected):
+            d, index = tree.query([query], k=p+1)
+            if isinstance(index, (int, np.integer)):  # k == 1 returns a scalar index
+                if index not in indexes:
+                    indexes.append(index)
+                    break
+            else:
+                if index[-1] not in indexes:
+                    indexes.append(index[-1])
+                    break
+    not_detected_indexes = [i for i in range(people_detected) if i not in indexes]
+    return keypoint_angles[indexes], np.expand_dims(radar_data[:, 1], axis=1), keypoint_angles[not_detected_indexes]
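+# Illustrative check (not part of the pipeline): with two camera detections whose mean x-angles
+# are [0.1] and [1.5] and two radar detections at x-angles 1.4 and 0.2, the KD-tree query
+# greedily assigns radar angle 1.4 to camera detection 1 and radar angle 0.2 to camera
+# detection 0, so each camera subject is paired with at most one radar distance.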
+
+
+# Function that normalises keypoints
+def normalize_keypoints(keypoints):
+    """
+    Calculate mean for x, y (and z if present), and standard deviation for x and y in given frame.
+    Subtract respective mean from each point, then divide by avg of std of x and y.
+    """
+    # keypoints shape = (N, 15, 3) or (N, 15, 2) where N = number of people detected, 15=human skeleton keypoints
+    means = np.mean(keypoints, axis = 1)
+    # means shape = (N,3) or (N,2) where N = number of people detected
+    denoms = np.sum(np.std(keypoints[:,:,0:2], axis=1), axis=1, keepdims=True) / 2
+    # denoms shape = (N,1) where N = number of people detected
+    norm_keypoints = (keypoints - np.expand_dims(means, axis=1)) / np.expand_dims(denoms, axis=1)
+    # norm_keypoints shape = (N, 15, 3) where N = number of people detected, 15=depth estimates for each human skeleton keypoint
+    return norm_keypoints, means, denoms
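+# Note: after this normalisation every skeleton is centred on its own mean and its x/y spread is
+# rescaled by the average of the x and y standard deviations, so the depth network sees poses that
+# are independent of the subject's position and apparent size; unnormalize_depth() below applies
+# the exact inverse to the predicted z values.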
+
+
+# Function that unnormalises depth
+def unnormalize_depth(norm_depths, average_depths, denoms):
+  # norm_depths shape = (N,15) where N = number of people detected, 15=depth estimates for each human skeleton keypoint
+  # average_depths shape = (N,1) where N = number of people detected
+  # denoms shape = (N,1) where N = number of people detected
+  return norm_depths * denoms + average_depths
+
+
+# Function that takes as input a numpy array of people's 2D keypoints and their (general) depth and outputs their 3D poses.
+# The general depth is computed with the radar distance.
+# The coordinates provided in the keypoints array are in metric units.
+def threeDposes(keypoints, model):
+  # keypoints shape = (N,15,3) where N=number of people detected, 15=human skeleton keypoints, 3=(x,y,z)
+  norm_keypoints, means, denoms = normalize_keypoints(keypoints / 0.0564) # rescale keypoints from metric units into the units used when the network was trained
+  net_input = norm_keypoints[:,:,0:2]
+  net_input = net_input.reshape(net_input.shape[0], net_input.shape[1] * net_input.shape[2])
+  norm_depths = np.asarray(model(net_input)) # calling the model directly avoids the .predict() overhead, which only pays off for large batches
+  # norm_depths shape = (N,15) where N = number of people detected, 15=depth estimates for each human skeleton keypoint
+  unnorm_depths = unnormalize_depth(norm_depths, np.expand_dims(means[:,2], axis=1), denoms) * 0.0564 # convert into metric units
+  # unnorm_depths shape = (N, 15) where N = number of people detected, 15=depth estimates for each human skeleton keypoint
+  unnorm_depths = np.expand_dims(unnorm_depths, 2)
+  # unnorm_depths shape = (N, 15, 1) where N = number of people detected, 15=depth estimates for each human skeleton keypoint
+  output = np.concatenate((keypoints[:,:,0:2], unnorm_depths), axis = 2)
+  # output shape = (N, 15, 3) where N=number of people detected, 15=human skeleton keypoints, 3=(x,y,z)
+  return output
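+# Note: the radar-derived z passed in via `keypoints` contributes only the per-person mean depth
+# used to unnormalise the network output; the network's per-keypoint depth (rescaled back to
+# metres) replaces it in the returned array, and the two z estimates are averaged later in the
+# main loop.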
+
+def skeleton_poses(skeletons):
+  import open3d as o3d  # local import: this helper is only used by the in-process gui() below, which is currently superseded by the gui.py subprocess
+  N, P, L = skeletons.shape
+  # skeletons shape = (N, 15, 3) where N=people, 15=human body keypoints, 3=(x,y,z)
+  skeletons = np.reshape(skeletons, newshape=(N*P, L))
+  # skeletons shape = (N*15, 3) where N=people, 15=human body keypoints, 3=(x,y,z)
+  lines = np.asarray([[0,1], [1,2], [2,3], [3,4], [1,5], [5,6], [6,7], [8,9], [9,10], [11,12], [12,13], [1,14], [8,14], [11,14]], dtype=int)
+  # lines shape = (14, 2)
+  lines = np.expand_dims(lines, axis=0)
+  # lines shape = (1, 14, 2)
+  lines = np.repeat(lines, repeats=N, axis=0)
+  # lines shape = (N, 14, 2)
+  summation = np.expand_dims(np.expand_dims(np.arange(0, 15*(N-1)+1, 15, dtype=int), axis=1), axis=1)
+  # summation shape = (N, 1, 1)
+  lines = lines + summation
+  # lines shape = (N, 14, 2)
+  lines = np.reshape(lines, (lines.shape[0]*lines.shape[1], lines.shape[2]))
+  # lines shape = (N*14, 2)
+  lines = o3d.utility.Vector2iVector(lines)
+  line_set = o3d.geometry.LineSet()
+  line_set.lines = lines
+  line_set.points = o3d.utility.Vector3dVector(skeletons)
+  return line_set
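+# Note on the index offsets above: every person contributes 15 points to the flattened point list,
+# so the connectivity template is shifted by 15*i for person i; e.g. with N=2 the offsets are
+# [0, 15] and person 2's bone [0, 1] becomes [15, 16].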
+
+
+def gui(queue, *args):
+    # local imports: this in-process visualiser is currently superseded by the gui.py subprocess started in the main block
+    import open3d as o3d
+    import keyboard
+    # CREATION OF THE NON-BLOCKING OPEN3D SCENE
+    vis = o3d.visualization.Visualizer()
+    vis.create_window()
+
+    line_set = o3d.geometry.LineSet()
+
+    mesh_box = o3d.geometry.TriangleMesh.create_box(width=40.0,
+                                                    height=0.1,
+                                                    depth=40.0)
+    mesh_box.paint_uniform_color([0.8, 0.8, 0.8])
+    mesh_box.translate(-mesh_box.get_center())
+    mesh_box.translate([0, -0.2, 0])
+
+    vis.add_geometry(line_set)
+    vis.add_geometry(mesh_box)
+
+    keyboard.send('alt', True, False)
+
+    ctr = vis.get_view_control()
+    parameters = o3d.io.read_pinhole_camera_parameters("ScreenCamera_2022-11-25-12-27-51.json")
+    ctr.convert_from_pinhole_camera_parameters(parameters)
+
+    axis = o3d.geometry.LineSet()
+    axis_points = o3d.utility.Vector3dVector(
+        np.asarray([[0, 0, 0], [0, 20, 0], [-20, 0, 0], [20, 0, 0], [0, 0, -20], [0, 0, 20]]))
+    axis_lines = o3d.utility.Vector2iVector(np.asarray([[0, 1], [2, 3], [4, 5]]))
+    axis.points = axis_points
+    axis.lines = axis_lines
+
+    vis.add_geometry(axis)
+
+    while True:
+        start_time = time.time()  # TESTING: start of the frame, used for the FPS printout below
+        if not queue.empty():
+            # vis.clear_geometries()
+            poses = queue.get()
+            # poses shape = (N, 15, 3) where N=people detected, 15=human body keypoints, 3=(x,y,z)
+            updated_line_set = skeleton_poses(poses)
+            line_set.lines = updated_line_set.lines
+            line_set.points = updated_line_set.points
+
+            vis.update_geometry(line_set)
+
+        if keyboard.is_pressed('q'):
+            vis.destroy_window()
+            keyboard.send('alt', False, True)
+            break
+
+        vis.poll_events()
+        vis.update_renderer()
+
+        print("FPS: ", 1.0 / (time.time() - start_time))  # TESTING
+
+
+######################################## MAIN THREAD ###################################################
+
+if __name__ == '__main__':
+
+    ############################### START GUI PROCESS #######################################
+    subprocess.Popen("C:/Users/at2n19/.conda/envs/visualiser/python.exe gui.py")
+    time.sleep(5)
+    address = ('localhost', 6000)
+    conn = Client(address, authkey=None)
+    #q_poses = Queue()
+    #gui_process = Process(target=gui, args=(q_poses,))
+    #gui_process.start()
+    ############################### START RADAR SESSIONS ####################################
+    radar_event = Event()
+    radar_queue = Queue()
+    radar_process1 = Process(target=radar_session, args=(1, command_port1, data_port1, radar_event, radar_queue,))
+    radar_process2 = Process(target=radar_session, args=(2, command_port2, data_port2, radar_event, radar_queue,))
+    radar_process3 = Process(target=radar_session, args=(3, command_port3, data_port3, radar_event, radar_queue,))
+    radar_process1.start()
+    radar_process2.start()
+    radar_process3.start()
+
+    ##################### NEURAL NETWORK MODEL INSTANTIATION ################################
+    
+    gpus = tf.config.experimental.list_physical_devices('GPU')
+    for gpu in gpus:
+        tf.config.experimental.set_memory_growth(gpu, True)
+
+    model = multi() # todo remove
+    model.load_weights("part_trained.hdf5") #TODO REMOVE
+
+    #model = keras.models.load_model("fully_trained_model.hdf5") # TO SET BACK
+
+    ################################### MAIN CODE #################################################
+    cap = None
+    try:
+        # Import Openpose (Windows/Ubuntu/OSX)
+        dir_path = os.path.dirname(os.path.realpath(__file__))
+        try:
+            # Windows Import
+            if platform == "win32":
+                # Change these variables to point to the correct folder (Release/x64 etc.)
+                sys.path.append(dir_path + '/../../python/openpose/Release')
+                os.environ['PATH']  = os.environ['PATH'] + ';' + dir_path + '/../../x64/Release;' +  dir_path + '/../../bin;'
+                import pyopenpose as op
+            else:
+                # Change these variables to point to the correct folder (Release/x64 etc.)
+                sys.path.append('../../python')
+                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
+                # sys.path.append('/usr/local/python')
+                from openpose import pyopenpose as op
+        except ImportError as e:
+            print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
+            raise e
+
+        # Flags
+        parser = argparse.ArgumentParser()
+        parser.add_argument("--image_dir", default="../../../examples/media/", help="Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).")
+        parser.add_argument("--no_display", default=False, help="Enable to disable the visual display.")
+        parser.add_argument("--camera_height", type=float, default=1.45, help="Set camera height")
+        args = parser.parse_known_args()
+
+        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
+        params = dict()
+        params["model_folder"] = "../../../models/"
+
+        # Add others in path?
+        for i in range(0, len(args[1])):
+            curr_item = args[1][i]
+            if i != len(args[1])-1: next_item = args[1][i+1]
+            else: next_item = "1"
+            if "--" in curr_item and "--" in next_item:
+                key = curr_item.replace('-','')
+                if key not in params:  params[key] = "1"
+            elif "--" in curr_item and "--" not in next_item:
+                key = curr_item.replace('-','')
+                if key not in params: params[key] = next_item
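+        # e.g. launching the script with `--net_resolution 320x176 --disable_blending` would add
+        # params["net_resolution"] = "320x176" and params["disable_blending"] = "1" to the OpenPose params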
+
+        # Construct it from system arguments
+        # op.init_argv(args[1])
+        # oppython = op.OpenposePython()
+
+        cap = cv2.VideoCapture(1)
+
+        # Starting OpenPose
+        opWrapper = op.WrapperPython()
+        opWrapper.configure(params)
+        opWrapper.start()
+
+        frameId = 0 #todo delete then
+        time.sleep(10)
+        print("NOWWWWWWW")
+        print("NOWWWWWWWW")
+
+        while True:
+            # Capture video frame from camera
+            ret, frame = cap.read()
+            coords = np.asarray([[0., 2.]]) #TODO DELETE THEN
+            radar_data = from_2d_to_angles_euclid(coords) #TODO DELETE THEN
+            height, width, _ = frame.shape #TODO DELETE THEN
+            datum = op.Datum() #TODO DELETE THEN
+            datum.cvInputData = frame #TODO DELETE THEN
+            opWrapper.emplaceAndPop(op.VectorDatum([datum])) #TODO DELETE THEN
+            cv2.imwrite("imageTrial.png", datum.cvOutputData)
+            # # Capture data from radars
+            # radar_event.set()
+            # xzs = []
+            # for i in range(3):
+            #     xzs.append(radar_queue.get())
+            # xzs.sort(key=lambda x: x[0])
+            # xzs = map(lambda x: x[1], xzs)
+            # coords = under_same_reference(xzs[0], xzs[1], xzs[2])
+            # radar_data = from_2d_to_angles_euclid(coords)
+            # height, width, _ = frame.shape
+            # datum = op.Datum()
+            # datum.cvInputData = frame
+            # opWrapper.emplaceAndPop(op.VectorDatum([datum]))
+            # cv2.imshow('frame', datum.cvOutputData)
+            # if cv2.waitKey(1) & 0xFF == ord('q'):
+            #     break
+            if datum.poseKeypoints is not None:
+                pixel_keypoints = np.round(datum.poseKeypoints[:, [0,1,5,6,7,2,3,4,12,13,14,9,10,11,8], :2]) # todo remove
+                # todo uncomment: pixel_keypoints = np.round(datum.poseKeypoints[:,[0,1,2,3,4,5,6,7,9,10,11,12,13,14,8],:2]) # convert to discrete pixel coordinates while still keeping the double type
+                # datum.poseKeypoints shape = (N, 25, 3) where N=people detected by camera, 25=human body keypoints, 3=(x,y,score)
+                # pixel_keypoints shape = (N, 15, 2) where N=people detected by camera, 15=human body keypoints, 2=(x,y)
+                # replace not detected keypoints (OpenPose reports them as (0,0)) with nan
+                undetected_keypoints = np.all(pixel_keypoints == 0., axis=2)
+                pixel_keypoints[undetected_keypoints] = np.nan
+                keypoints_angles = from_image_space_to_angle(pixel_keypoints, width, height)
+                # keypoints_angles shape = (N, 15, 2) where N=people detected by camera, 15=human body keypoints, 2=(x,y)
+                keypoints_angles, euclid_distance, not_matched_keypoints_angles = match_euclidean_distances_to_subjects(keypoints_angles, radar_data)
+                # keypoints_angles shape = (P, 15, 2) where P=people detected by camera and radar, 15=human body keypoints, 2=(x-angle,y-angle)
+                # euclid_distance shape = (P,1) where P=people detected by camera and radar
+                # not_matched_keypoints_angles shape = (L, 15, 2) where L=people detected by camera but not by radar, 15=human body keypoints, 2=(x-angle,y-angle)
+                estimated_3D_coordinates = coordinates_from_angles(keypoints_angles, euclid_distance, args[0].camera_height)
+                # estimated_3D_coordinates shape = (P, 15, 3) where P=people detected by camera and radar, 15=human body keypoints, 3=(x,y,z)
+                no_occlusion_keypoints_indexes = np.all(np.all(~np.isnan(estimated_3D_coordinates), axis=1), axis=1)
+                no_occlusion_estimated_3D_coordinates = estimated_3D_coordinates[no_occlusion_keypoints_indexes]
+                # no_occlusion_estimated_3D_coordinates shape = (T, 15, 3)  where T= not occluded people detected by camera and radar, 15=human body keypoints, 3=(x,y,z)
+                neural_net_3D_poses = threeDposes(no_occlusion_estimated_3D_coordinates, model)
+                # neural_net_3D_poses shape = (T, 15, 3)  where T= not occluded people detected by camera and radar, 15=human body keypoints, 3=(x,y,z)
+                occlusion_keypoints_indexes = ~ no_occlusion_keypoints_indexes
+                occlusion_estimated_3D_coordinates = estimated_3D_coordinates[occlusion_keypoints_indexes]
+                # average the result from the neural network and the estimated 3d poses
+                neural_net_3D_poses[:,:,2] = (neural_net_3D_poses[:,:,2] + no_occlusion_estimated_3D_coordinates[:,:,2])/2
+                # occlusion_estimated_3D_coordinates shape = (S, 15, 3)  where S = occluded people detected by camera and radar, 15=human body keypoints, 3=(x,y,z)
+                output_poses = np.concatenate((neural_net_3D_poses, occlusion_estimated_3D_coordinates), axis=0)
+                # output_poses shape = (P, 15, 3) where P=people detected by camera and radar, 15=human body keypoints, 3=(x,y,z)
+                # Add poses to the queue for the GUI
+                #np.save(("frame"+str(frameId)+".npy"), output_poses) #todo delete then
+                #cv2.imwrite(("frame"+str(frameId)+".png"), frame) #todo delete then
+                #cv2.imwrite(("frameDatum"+str(frameId)+".png"), datum.cvOutputData) #todo delete then
+                #frameId+=1 #todo delete then
+                #q_poses.put(output_poses) todo delete then
+                cv2.imshow('frame', frame)
+                if cv2.waitKey(1) & 0xFF == ord('q'):
+                    cap.release()
+                    conn.send('close')
+                    conn.close()
+                    break
+                conn.send(output_poses)
+
+    except Exception as e:
+        print(e)
+        if cap is not None:
+            cap.release()
+        conn.send('close')
+        conn.close()
+        sys.exit(-1)
-- 
GitLab