# Track a coloured ball with a webcam and let the user record / replay
# movement paths.
#
# Modes (toggled with keys while the process has keyboard focus):
#   passive   - just track and draw the ball
#   recording - toggled with 'd'; points are appended as the ball moves
#   checking  - toggled with 'p'; recorded points turn green as the ball
#               passes near them, in order
# Press 'q' in the video window to quit.

# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
import keyboard
import sys

# NOTE(review): debug override — this clobbers any real command-line
# arguments and forces a hard-coded movement file.  Remove or guard this
# before using the script for real.
sys.argv = [sys.argv[0], '--file', '/home/henry/testMovement']


def findBall(frame, hsv, colourLower, colourUpper):
    """Locate the largest blob in *hsv* whose colour lies in the given range.

    Draws the enclosing circle and centroid onto *frame* in place when a
    blob with radius > 10 px is found.

    Parameters:
        frame: BGR image to annotate (modified in place).
        hsv: the same image converted to HSV, used for colour masking.
        colourLower / colourUpper: HSV bounds of the target colour.

    Returns:
        The (x, y) centroid as a tuple of ints, or None when no usable
        blob is present.
    """
    # Mask the requested colour range, then erode/dilate to remove any
    # small blobs left in the mask.
    mask = cv2.inRange(hsv, colourLower, colourUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # Find contours in the mask.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    if not cnts:
        return None
    # Use the largest contour as the ball candidate and compute its
    # minimum enclosing circle and centroid.
    c = max(cnts, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(c)
    M = cv2.moments(c)
    # Guard against a degenerate (zero-area) contour: the original code
    # divided by M["m00"] unconditionally and could raise ZeroDivisionError.
    if M["m00"] == 0:
        return None
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    # Only draw when the blob is big enough; the centroid is returned
    # either way (matching the original behaviour).
    if radius > 10:
        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
        cv2.circle(frame, center, 5, (0, 0, 255), -1)
    return center


def _loadMovementFile(path):
    """Read "x,y" lines from *path*; return (points, colours) lists.

    Each point starts out red (0, 0, 255); checking mode recolours them.
    The file is closed deterministically (the original leaked the handle).
    """
    points = []
    colours = []
    with open(path) as movementFile:
        for line in movementFile:
            xy = line.split(",")
            points.append((int(xy[0]), int(xy[1])))
            colours.append((0, 0, 255))
    return points, colours


# State: current mode, debounce counters for the 'd' / 'p' keys, and the
# recorded path (one colour entry per point).
mode = "passive"
dpress = 0
ppress = 0
linecolours = []
pts = []

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
                help="max buffer size")
ap.add_argument("-f", "--file",
                help="path to (optional) movement file")
args = vars(ap.parse_args())

# HSV bounds of the "green" ball being tracked.
greenLower = (75, 86, 30)
greenUpper = (90, 255, 255)

# If a video path was not supplied, grab a reference to the webcam;
# otherwise open the video file.
if not args.get("video", False):
    vs = VideoStream(src=0).start()
else:
    vs = cv2.VideoCapture(args["video"])

# Optionally preload a recorded movement path; a non-empty file puts the
# script straight into checking mode.
if args.get("file", False):
    pts, linecolours = _loadMovementFile(args["file"])
    if pts:
        mode = "checking"

# allow the camera or video file to warm up
time.sleep(2.0)

# main loop
while True:
    # Crude debounce: a key must be held for 5 consecutive frames before
    # it toggles the mode, then the counter parks at 100 until the key is
    # released so one press toggles exactly once.
    if keyboard.is_pressed('d'):
        if dpress < 5:
            dpress += 1
        elif dpress != 100:
            if mode == "passive":
                # Start a fresh recording.
                pts = []
                linecolours = []
                mode = "recording"
            elif mode == "recording":
                mode = "passive"
            dpress = 100
    else:
        dpress = 0
    if keyboard.is_pressed('p'):
        if ppress < 5:
            ppress += 1
        elif ppress != 100:
            if mode == "recording":
                mode = "checking"
            elif mode == "checking":
                mode = "passive"
                pts = []
                linecolours = []
            ppress = 100
    else:
        ppress = 0

    # grab the current frame; VideoCapture yields (grabbed, frame) tuples
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    # a None frame means we reached the end of the video file
    if frame is None:
        break

    # mirror, resize, blur, and convert to HSV for colour masking
    frame = cv2.flip(frame, 1)
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    center = findBall(frame, hsv, greenLower, greenUpper)

    if mode == "recording" and center is not None:
        # Start the path, or extend it once the ball has moved more than
        # 3 px on either axis from the last recorded point.  The original
        # wrapped this in a bare ``except: pass``, which both hid errors
        # and — after a None was appended as the first point — silently
        # stopped recording anything at all.
        if not pts:
            pts.append(center)
            linecolours.append((0, 0, 255))
        elif pts[-1] is not None and (abs(pts[-1][0] - center[0]) > 3 or
                                      abs(pts[-1][1] - center[1]) > 3):
            pts.append(center)
            linecolours.append((0, 0, 255))

    if mode == "checking" and center is not None:
        # Colour a point green when the ball is within 20 px of it and the
        # previous point is already green (or it is the first segment), so
        # the path must be traced in order.
        for i in range(1, len(pts)):
            if pts[i - 1] is None or pts[i] is None:
                continue
            if (abs(pts[i][0] - center[0]) < 20 and
                    abs(pts[i][1] - center[1]) < 20 and
                    (linecolours[i - 1] == (0, 255, 0) or i < 2)):
                linecolours[i] = (0, 255, 0)

    # draw the tracked path, skipping segments with missing endpoints
    for i in range(1, len(pts)):
        if pts[i - 1] is None or pts[i] is None:
            continue
        cv2.line(frame, pts[i - 1], pts[i], linecolours[i], 5)

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# stop the camera stream or release the video file, then close windows
if not args.get("video", False):
    vs.stop()
else:
    vs.release()
cv2.destroyAllWindows()