所以我正在研究一個機器人項目,我們必須識別牆上的圖案並相應地定位我們的機器人。我在我的筆記本電腦上開發了這個圖像處理代碼:它抓取圖像,將其轉換爲HSV,應用按位掩膜,使用Canny邊緣檢測並找到輪廓。我以爲可以把代碼直接複製粘貼到Raspberry Pi 3上;然而,由於處理能力下降,幀率不到1 fps。我一直在嘗試把代碼拆分成多個線程:一個線程捕獲圖像,一個線程把圖像轉換爲HSV並過濾,還有一個線程做輪廓擬合。爲了讓這些線程互相通信,我使用了隊列(queue)。
這是我的初始視覺代碼:
import numpy as np
import cv2
import time
import matplotlib.pyplot as plt
import sys
def onmouse(k, x, y, s, p):
    """Mouse callback: on a left-button event, print the HSV pixel under the cursor.

    Reads the module-global `hsv` image set by the main loop.
    """
    global hsv
    if k != 1:
        return
    # Event code 1 is a left click: report the HSV triple at (row=y, col=x).
    print(hsv[y, x])
def distance_to_camera(Kwidth, focalLength, pixelWidth):
    """Estimate object distance with the pinhole model: D = W * f / w.

    Kwidth      -- known physical width of the target (same unit as the result)
    focalLength -- calibrated focal length in pixels
    pixelWidth  -- apparent width of the target in the image, in pixels
    """
    numerator = Kwidth * focalLength
    return numerator / pixelWidth
def contourArea(contours):
    """Pick the dominant contour from a list of OpenCV contours.

    Returns a two-element list ``[area, index]`` for the largest contour
    when its area is at least 5x the smallest contour's area (i.e. it
    clearly dominates the frame); otherwise returns ``[0, 0]`` so that
    callers doing ``area, place = contourArea(...)`` can unpack safely
    and test ``area != 0``.

    Bug fixes vs. the original:
    - The old test ``area[-1] >= 5 * area[0]`` multiplied a *list* by 5
      (list repetition) and compared lists lexicographically instead of
      comparing the area values.
    - The old failure path returned the bare scalar ``0``, which crashed
      the caller's two-way unpacking with a TypeError.
    """
    # Pair each contour's area with its index, smallest area first.
    areas = sorted([cv2.contourArea(c), i] for i, c in enumerate(contours))
    largest = areas[-1]
    # Require the biggest blob to dominate: >= 5x the smallest area.
    if largest[0] >= 5 * areas[0][0]:
        return largest
    return [0, 0]
if __name__ == '__main__':
    # Grab frames from the default camera (device 0).
    cap = cv2.VideoCapture(0)
    # Camera configuration (resolution / FPS / exposure) left disabled
    # for the benchmark run.
    """
    cap.set(3, 1920)
    cap.set(4, 1080)
    cap.set(5, 30)
    time.sleep(2)
    cap.set(15, -8.0)
    """
    # Known physical width of the target (inches) and the pre-calibrated
    # focal length (pixels) for the pinhole distance model.
    KNOWN_WIDTH = 18
    # focalLength = focalLength = (rect[1][1] * 74)/18
    focalLength = 341.7075686984592
    distance_data = []  # one distance estimate per detected frame, for plotting
    counter1 = 0
    numFrames = 100  # number of frames to process for the FPS measurement
    samples = 1
    start_time = time.time()
    while (samples < numFrames):
        # Capture frame-by-frame
        ret, img = cap.read()
        # NOTE(review): `ret` is never checked — a failed read leaves img
        # as None and the next line raises.
        length1, width1, channels = img.shape
        img = cv2.GaussianBlur(img, (5, 5), 0)
        hsv = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2HSV)
        # Earlier HSV threshold candidates kept for reference:
        # lower_green = np.array([75, 200, 170])
        # lower_green = np.array([53,180,122])
        #lower_green = np.array([70, 120, 120])
        lower_green = np.array([70, 50, 120])
        upper_green = np.array([120, 200, 255])
        #upper_green = np.array([120, 200, 255])
        # Keep only pixels inside the green HSV band.
        mask = cv2.inRange(hsv, lower_green, upper_green)
        res = cv2.bitwise_and(hsv, hsv, mask=mask)
        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)  # NOTE(review): unused — Canny below runs on `res`
        edged = cv2.Canny(res, 35, 125)
        # Three-value return is the OpenCV 3.x signature; OpenCV 4.x
        # returns only (contours, hierarchy).
        im2, contours, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        if (len(contours) > 1):
            area,place = contourArea(contours)
            # NOTE(review): contourArea can return the scalar 0, which
            # would break this two-way unpacking — confirm its contract.
            #print(area)
            if(area != 0):
                # print("Contxours: %d" % contours.size())
                # print("Hierarchy: %d" % hierarchy.size())
                c = contours[place]
                # Draw the selected contour on both debug images.
                cv2.drawContours(img, c, -1, (0, 0, 255), 3)
                cv2.drawContours(edged,c, -1, (255, 0, 0), 3)
                perimeter = cv2.arcLength(c, True)
                M = cv2.moments(c)
                cx = 0
                cy = 0
                if (M['m00'] != 0):
                    cx = int(M['m10']/M['m00']) # Center of MASS Coordinates
                    cy = int(M['m01']/M['m00'])
                # Tightest rotated bounding box around the chosen contour.
                rect = cv2.minAreaRect(c)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                cv2.drawContours(img, [box], 0, (255, 0, 0), 2)
                cv2.circle(img, (cx, cy), 7, (0, 0, 255), -1)
                # Line from the image centre to the contour centroid.
                cv2.line(img, (int(width1/2), int(length1/2)), (cx, cy), (255, 0, 0), 2)
                if(rect[1][1] != 0):
                    # rect[1][1] is one box side length in pixels; feed it
                    # to the pinhole model for a distance estimate.
                    inches = distance_to_camera(KNOWN_WIDTH, focalLength, rect[1][1])
                    #print(inches)
                    distance_data.append(inches)
        counter1+=1
        samples+=1
        # Display/debug windows disabled while benchmarking FPS.
        """
        cv2.namedWindow("Image w Contours")
        cv2.setMouseCallback("Image w Contours", onmouse)
        cv2.imshow('Image w Contours', img)
        cv2.namedWindow("HSV")
        cv2.setMouseCallback("HSV", onmouse)
        cv2.imshow('HSV', edged)
        if cv2.waitKey(1) & 0xFF == ord('x'):
        break
        """
    # When everything done, release the capture
    totTime = time.time() - start_time
    print("--- %s seconds ---" % (totTime))
    print('----%s fps ----' % (numFrames/totTime))
    cap.release()
    cv2.destroyAllWindows()
# Measured output from a sample run on the laptop:
# --- 13.469419717788696 seconds ---
# ----7.42422480665093 fps ----
# Plot the recorded per-frame distance estimates collected in the main loop.
plt.plot(distance_data)
plt.xlabel('TimeData')
plt.ylabel('Distance to Target(in) ')
plt.title('Distance vs Time From Camera')
plt.show()
這是我的線程代碼:它在主線程中抓取幀,並在另一個線程中過濾;我還想再加一個做輪廓擬合的線程,但即使只用這兩個線程,線程版代碼的FPS也與之前的代碼幾乎相同。這些結果同樣來自我的筆記本電腦,而不是Raspberry Pi。
import cv2
import threading
import datetime
import numpy as np
import queue
import time
# Unbounded queues shared between the capture (main) thread and the filter
# worker: raw BGR frames go in via `frame`, Canny edge maps come out on
# `canny`. maxsize=0 means the producer is never blocked by back-pressure.
frame = queue.Queue(0)
canny = queue.Queue(0)
# HSV threshold band selecting the green target.
lower_green = np.array([70, 50, 120])
upper_green = np.array([120, 200, 255])
class FilterFrames(threading.Thread):
    """Worker thread that consumes raw BGR frames from the module-global
    `frame` queue and publishes their Canny edge maps on the `canny` queue.

    The thread is a daemon (it will not keep the process alive at exit)
    and starts itself from the constructor.
    """

    def __init__(self, threadID, lock):
        threading.Thread.__init__(self)
        # NOTE(review): `lock` is never used in run(); kept only so the
        # constructor signature stays compatible with existing callers.
        self.lock = lock
        self.name = threadID
        # `daemon = True` replaces the deprecated setDaemon(True) call
        # (deprecated since Python 3.10); behavior is identical.
        self.daemon = True
        self.start()

    def run(self):
        # Runs forever; Queue.get() blocks until the producer enqueues a frame.
        while True:
            img1 = frame.get()
            img1 = cv2.GaussianBlur(img1, (5, 5), 0)
            hsv = cv2.cvtColor(img1.copy(), cv2.COLOR_BGR2HSV)
            # Mask to the green HSV band, then edge-detect the masked image.
            mask = cv2.inRange(hsv, lower_green, upper_green)
            res = cv2.bitwise_and(hsv, hsv, mask=mask)
            edged = cv2.Canny(res, 35, 125)
            canny.put(edged)
if __name__ == '__main__':
    lock = threading.Lock()
    numframes = 100  # frames to capture for the timing run
    frames = 0
    cap = cv2.VideoCapture(0)
    # Renamed from `filter` to avoid shadowing the builtin of that name.
    # The constructor starts the worker thread immediately.
    filter_thread = FilterFrames(lock=lock, threadID='Filter')
    start_time = time.time()
    # NOTE(review): this loop only times capture + enqueue. The filter
    # thread may still be draining the unbounded `frame` queue when the
    # timer stops, so the printed FPS measures the producer alone, not
    # the end-to-end pipeline throughput.
    while(frames < numframes):
        ret,img = cap.read()
        frame.put(img)
        frames+=1
    totTime = time.time() - start_time
    print("--- %s seconds ---" % (totTime))
    print('----%s fps ----' % (numframes/totTime))
    """
    Results were:
    --- 13.590131759643555 seconds ---
    ----7.358280388197121 fps ----
    """
    cap.release()
我想知道我是不是做錯了什麼,對隊列的訪問是否拖慢了代碼,以及對於這種應用我是否應該使用multiprocessing模塊而不是threading模塊。