#----------------------------------------------
# RoboDK hole-detection demo: imports, robot/camera setup and helper functions.
from robolink import *  # RoboDK API
from robodk import *  # Robot toolbox

# Install/import OpenCV and numpy (import_install fetches the pip package if missing)
import_install('cv2', 'opencv-python')
import cv2 as cv
import numpy as np

# Connect to RoboDK.
# BUG FIX: the original created RDK twice — the second time via
# `robolink.Robolink()`, which is a NameError because only
# `from robolink import *` was executed (the module name itself is unbound).
RDK = Robolink()

# BUG FIX: the original had a bare `ITEM_TYPE_ROBOT` expression (a no-op);
# it was clearly meant to be the item-type filter for the robot lookup.
robot = RDK.Item('TM12X', ITEM_TYPE_ROBOT)


def approachcircle(r, t, z):
    """Linear move offset by (r, t, z) relative to the robot's current pose."""
    move = robot.Pose() * transl(r, t, z)
    robot.MoveL(move)


def approacharea(z):
    """Linear move of z along the tool Z axis from the robot's current pose."""
    move = robot.Pose() * transl(0, 0, z)
    robot.MoveL(move)


def MountingHoles(img, thresh, r):
    """Detect circular mounting holes in a BGR image using the Hough transform.

    :param img: BGR uint8 image of shape (h, w, 3), e.g. from takeimage().
    :param thresh: grayscale threshold (currently unused — the inRange step
                   is disabled; parameter kept for caller compatibility).
    :param r: minimum circle radius in pixels.
    :return: detected circles as an array [[[x, y, radius], ...]],
             or None when nothing is detected (cv.HoughCircles contract).
    """
    min_radius = r
    canny_high_threshold = 50  # param1: upper Canny edge threshold
    min_points = 15            # param2: accumulator threshold
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    #mask = cv.inRange(gray, thresh, 255)  # disabled pre-threshold step
    circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, 2 * min_radius,
                              param1=canny_high_threshold, param2=min_points,
                              minRadius=min_radius, maxRadius=220)
    return circles


def takeimage(cam_item):
    """Grab and decode a snapshot from a RoboDK 2D camera item.

    :param cam_item: valid RoboDK camera item.
    :return: BGR uint8 image of shape (h, w, 3).
    :raises RuntimeError: when the snapshot or the decoding fails.
    """
    bytes_img = RDK.Cam2D_Snapshot("", cam_item)
    if bytes_img == b'':
        # BUG FIX: the original used a bare `raise` with no active exception,
        # which itself fails with "No active exception to re-raise".
        raise RuntimeError("Failed to take a snapshot from the camera")
    # Images from RoboDK are BGR, uchar, (h, w, 3)
    nparr = np.frombuffer(bytes_img, np.uint8)
    img = cv.imdecode(nparr, cv.IMREAD_UNCHANGED)
    if img is None or img.shape == ():
        raise RuntimeError("Failed to decode the camera snapshot")
    return img


#----------------------------------------------
# Settings
PROCESS_COUNT = -1  # How many frames to process before exiting. -1 means indefinitely.
CAM_NAME = "Camera"

DISPLAY_SETTINGS = True
WDW_NAME_PARAMS = 'RoboDK - Blob detection parameters'

DISPLAY_RESULT = True
WDW_NAME_RESULTS = 'RoboDK - Blob detections1'

# Retrieve the simulated camera and force its view to open.
cam_item = RDK.Item(CAM_NAME, ITEM_TYPE_CAMERA)
if not cam_item.Valid():
    raise Exception("Camera not found! %s" % CAM_NAME)
cam_item.setParam('Open', 1)  # Force the camera view to open

#----------------------------------------------
# Create a resizable result window
if DISPLAY_RESULT:
    cv.namedWindow(WDW_NAME_RESULTS)  #, cv.WINDOW_NORMAL)

#----------------------------------------------
# Start a camera feed.
# If you are using a connected device, use cv.VideoCapture to get the livefeed.
# https://docs.opencv.org/master/d8/dfe/classcv_1_1VideoCapture.html
#
# capture = cv.VideoCapture(0)
# retval, image = capture.read()

#----------------------------------------------
# Process camera frames
count = 0
while count < PROCESS_COUNT or PROCESS_COUNT < 0:
    print("=============================================")
    print("Processing image %i" % count)
    count += 1

    #----------------------------------------------
    # Get the image from RoboDK
    img = takeimage(cam_item)

    #----------------------------------------------
    # Detect circular mounting holes
    keypoints = MountingHoles(img, 250, 50)

    # BUG FIX: cv.HoughCircles returns None when nothing is detected; the
    # original crashed with a TypeError on `keypoints[0,:]` in that case.
    if keypoints is None:
        print("No mounting holes detected")
        continue

    #----------------------------------------------
    # Do something with your detections!
    # Here's a few examples:
    # - Reject a part if the number of keypoints (screw holes) is different than 3
    # - Calculate the world coordinate of each keypoints to move the robot

    #----------------------------------------------
    # Display the detection to the user (camera image with detection results)
    if DISPLAY_RESULT:
        # Draw detected circles and their id
        i = 0
        for keypoint in keypoints[0, :]:
            center = (int(keypoint[0]), int(keypoint[1]))
            cv.putText(img, str(i), center,
                       cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
            cv.circle(img, center, int(keypoint[2]), (0, 0, 255), 15)
            # i += 1

        # Resize the image, so that it fits your screen
        img = cv.resize(img, (int(img.shape[1] * .75), int(img.shape[0] * .75)))

        cv.imshow(WDW_NAME_RESULTS, img)
        key = cv.waitKey(500)
        if key == 27:
            break  # User pressed ESC, exit
        if cv.getWindowProperty(WDW_NAME_RESULTS, cv.WND_PROP_VISIBLE) < 1:
            break  # User killed the window, exit

    #--------------------------------------------------------------------------------------------
    # Movement: approach each detected hole.
    # Pixel offsets are measured from the image center — assumes a 640x480
    # camera and a 0.1 mm-per-pixel scale; TODO confirm the calibration.
    r = 0
    t = 0
    i = 0
    # approacharea(200)
    for keypoint in keypoints[0, :]:
        X = int(keypoint[0]) - 320  # horizontal offset from image center, pixels
        Y = int(keypoint[1]) - 240  # vertical offset from image center, pixels
        r = int(keypoint[2])        # detected hole radius, pixels
        print("id:%i coord=(%0.0f, %0.0f)" % (i, X, Y))
        if X != 0 or Y != 0:
            # NOTE(review): `r` is re-purposed here from pixel radius to an
            # offset in mm — kept as in the original, but worth renaming.
            r = X * 0.1
            t = Y * 0.1
            approachcircle(r, t, 0)
        i += 1