12-05-2019, 09:16 AM
Hi there,
I have the Python code below, and it works perfectly if I run it outside of RoboDK. If I import it into RoboDK and run it from there, it returns an error (attached image) because of the libraries that are not part of RoboDK's embedded Python. Do you know how I can solve this?
Code:
import cv2  # state of the art computer vision algorithms library
import numpy as np  # fundamental package for scientific computing
import imageio
import matplotlib.pyplot as plt  # 2D plotting library producing publication quality figures
import pyrealsense2 as rs
import win32gui
import win32api
import time
import pyautogui
from robolink import *  # API to communicate with RoboDK
from robodk import *  # basic matrix operations

# Any interaction with RoboDK must be done through a Robolink() instance.
RL = Robolink()

# --- RealSense camera setup -------------------------------------------------
points = rs.points()
pipe = rs.pipeline()
config = rs.config()
profile = pipe.start(config)
colorizer = rs.colorizer()
align = rs.align(rs.stream.color)  # align the depth stream onto the color stream

# Left button up = 0 or 1. Button down = -127 or -128
state_left = win32api.GetKeyState(0x01)

device = profile.get_device()  # type: rs.device
depth_sensor = device.first_depth_sensor()  # type: rs.depth_sensor
if depth_sensor.supports(rs.option.depth_units):
    # 0.0001 m per depth unit -> 0.1 mm depth resolution
    depth_sensor.set_option(rs.option.depth_units, 0.0001)

# --- RoboDK items -----------------------------------------------------------
robot = RL.Item('UR5e')
frame = RL.Item('Reference Frame')
Target = RL.AddTarget('Target 2', frame)

try:
    while True:
        frames = pipe.wait_for_frames()
        frames = align.process(frames)
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        # FIX: also skip iterations with a missing color frame; the original
        # only checked depth_frame but used color_frame unconditionally below.
        if not depth_frame or not color_frame:
            continue
        depth = np.asanyarray(colorizer.colorize(depth_frame).get_data())
        color = np.asanyarray(color_frame.get_data())
        images = np.hstack((color, depth))
        #cv2.namedWindow('RGB_View', cv2.WINDOW_NORMAL)
        #cv2.setWindowProperty('RGB_View', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow('RGB_View', color)
        depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
        #flags, hcursor, (x,y) = win32gui.GetCursorInfo()
        x, y = win32api.GetCursorPos()
        pixel_coord = [x, y]
        keypressed = win32api.GetKeyState(0x01)
        key = cv2.waitKey(1)
        # Press esc or 'q' to close the image window
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()
            break
        # Calculate distance on a left-button state transition only,
        # so a held click does not fire every frame.
        if keypressed != state_left:
            state_left = keypressed
            print(keypressed)
            if keypressed < 0:  # button went down
                # Only accept clicks inside the displayed image area.
                if 0 < x < 632 and 32 < y < 512:
                    dist_to_center = depth_frame.get_distance(x, y)
                    dist_to_center = round(dist_to_center, 4)
                    # Deproject the clicked pixel to a 3D camera-space point.
                    depth_point = rs.rs2_deproject_pixel_to_point(depth_intrin, pixel_coord, dist_to_center)
                    depth_point = [round(i * 100, 3) for i in depth_point]  # meters -> centimeters
                    #depth_point = ( ", ".join( repr(e) for e in depth_point))
                    # FIX: removed the redundant f.close() — the with-statement
                    # already closes the file on exit.
                    with open('Coordinates.txt', 'w') as f:
                        f.write("%s\n" % depth_point)
                    print(depth_point)
                    print('The camera is facing an object:', dist_to_center, 'meters away')
                    # Move the robot target to the measured point (orientation unchanged).
                    Target.setPose(Offset(eye(), depth_point[0], depth_point[1], depth_point[2], 0, 0, 0))
                    robot.MoveL(Target)
                    time.sleep(0.7)
finally:
    pipe.stop()
I have the Python code below, and it works perfectly if I run it outside of RoboDK. If I import it into RoboDK and run it from there, it returns an error (attached image) because of the libraries that are not part of RoboDK's embedded Python. Do you know how I can solve this?
Code:
import cv2  # state of the art computer vision algorithms library
import numpy as np  # fundamental package for scientific computing
import imageio
import matplotlib.pyplot as plt  # 2D plotting library producing publication quality figures
import pyrealsense2 as rs
import win32gui
import win32api
import time
import pyautogui
from robolink import *  # API to communicate with RoboDK
from robodk import *  # basic matrix operations

# Any interaction with RoboDK must be done through a Robolink() instance.
RL = Robolink()

# RealSense pipeline / processing blocks.
points = rs.points()
pipe = rs.pipeline()
config = rs.config()
profile = pipe.start(config)
colorizer = rs.colorizer()
align = rs.align(rs.stream.color)  # align depth onto the color stream

# Left button up = 0 or 1. Button down = -127 or -128
state_left = win32api.GetKeyState(0x01)

device = profile.get_device()  # type: rs.device
depth_sensor = device.first_depth_sensor()  # type: rs.depth_sensor
if depth_sensor.supports(rs.option.depth_units):
    # Use 0.1 mm depth units for finer range resolution.
    depth_sensor.set_option(rs.option.depth_units, 0.0001)

# RoboDK station items.
robot = RL.Item('UR5e')
frame = RL.Item('Reference Frame')
Target = RL.AddTarget('Target 2', frame)

try:
    while True:
        frames = pipe.wait_for_frames()
        frames = align.process(frames)
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        # FIX: skip the iteration unless BOTH frames arrived; the original
        # checked only depth_frame yet read color_frame below.
        if not depth_frame or not color_frame:
            continue
        depth = np.asanyarray(colorizer.colorize(depth_frame).get_data())
        color = np.asanyarray(color_frame.get_data())
        images = np.hstack((color, depth))
        #cv2.namedWindow('RGB_View', cv2.WINDOW_NORMAL)
        #cv2.setWindowProperty('RGB_View', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow('RGB_View', color)
        depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
        #flags, hcursor, (x,y) = win32gui.GetCursorInfo()
        x, y = win32api.GetCursorPos()
        pixel_coord = [x, y]
        keypressed = win32api.GetKeyState(0x01)
        key = cv2.waitKey(1)
        # Press esc or 'q' to close the image window
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()
            break
        # React only when the left-button state changes (edge trigger),
        # not on every frame while the button is held.
        if keypressed != state_left:
            state_left = keypressed
            print(keypressed)
            if keypressed < 0:  # transition to button-down
                if 0 < x < 632 and 32 < y < 512:  # click must be inside the view
                    dist_to_center = depth_frame.get_distance(x, y)
                    dist_to_center = round(dist_to_center, 4)
                    # Pixel + depth -> 3D point in camera coordinates.
                    depth_point = rs.rs2_deproject_pixel_to_point(depth_intrin, pixel_coord, dist_to_center)
                    depth_point = [i * 100 for i in depth_point]  # meters -> centimeters
                    depth_point = [round(i, 3) for i in depth_point]
                    #depth_point = ( ", ".join( repr(e) for e in depth_point))
                    # FIX: dropped the redundant f.close(); the context manager
                    # closes the file automatically.
                    with open('Coordinates.txt', 'w') as f:
                        f.write("%s\n" % depth_point)
                    print(depth_point)
                    print('The camera is facing an object:', dist_to_center, 'meters away')
                    # Offset the target by the measured XYZ and move linearly.
                    Target.setPose(Offset(eye(), depth_point[0], depth_point[1], depth_point[2], 0, 0, 0))
                    robot.MoveL(Target)
                    time.sleep(0.7)
finally:
    pipe.stop()