
OpenCV video capture and playback problem on a Raspberry Pi

I am trying to implement a lane tracking system with a Raspberry Pi, so I am processing the video on the Raspberry Pi itself using the OpenCV library (Python code). However, when I capture video it lags and does not play back smoothly. The same code works fine on Windows, but not on the Raspberry Pi. I would appreciate any help.

Raspberry Pi hardware specifications

Raspberry Pi 3 Model B (1.2 GHz, 1 GB RAM), Camera Module V2 (8 MP)

import numpy as np 
import cv2 
import RPi.GPIO as GPIO 
import os 
import sys 
import time 
import picamera 
import picamera.array 

motorPosition = 0   # 0 = center, 1 = left, 2 = right
rightMotorPin = 17  # BCM 17, physical pin 11
leftMotorPin = 27   # BCM 27, physical pin 13

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(rightMotorPin, GPIO.OUT)
GPIO.setup(leftMotorPin, GPIO.OUT)

# start in the centre position (both motor outputs HIGH)
GPIO.output(leftMotorPin, GPIO.HIGH)
GPIO.output(rightMotorPin, GPIO.HIGH)

def grayscale(img):
    # frames are captured as BGR, so convert with the BGR flag
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)


def canny(img, low_threshold, high_threshold): 
    return cv2.Canny(img, low_threshold, high_threshold) 


def gaussian_blur(img, kernel_size): 
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) 


def region_of_interest(img, vertices):
    mask = np.zeros_like(img)

    if len(img.shape) > 2:
        channel_count = img.shape[2]
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    cv2.fillPoly(mask, vertices, ignore_mask_color)

    masked_image = cv2.bitwise_and(img, mask)
    return masked_image


def get_slope(x1, y1, x2, y2):
    # use float division so integer pixel coordinates do not floor-divide under Python 2
    return float(y1 - y2)/(x1 - x2)


def draw_lines(img, lines, color=[255, 255, 0], thickness=20): 
    global motorPosition 
    top = 200 
    bottom = 540 

    left_x1 = [] 
    left_y1 = [] 
    left_x2 = [] 
    left_y2 = [] 

    right_x1 = [] 
    right_y1 = [] 
    right_x2 = [] 
    right_y2 = [] 

    for line in lines:
        for x1, y1, x2, y2 in line:

            slope = get_slope(x1, y1, x2, y2)

            if slope < 0:
                left_x1.append(x1)
                left_y1.append(y1)
                left_x2.append(x2)
                left_y2.append(y2)
            else:
                right_x1.append(x1)
                right_y1.append(y1)
                right_x2.append(x2)
                right_y2.append(y2)

    try:
        avg_right_x1 = int(np.mean(right_x1))
        avg_right_y1 = int(np.mean(right_y1))
        avg_right_x2 = int(np.mean(right_x2))
        avg_right_y2 = int(np.mean(right_y2))

        right_slope = get_slope(avg_right_x1, avg_right_y1, avg_right_x2, avg_right_y2)

        right_y1 = top
        right_x1 = int(avg_right_x1 + (right_y1 - avg_right_y1)/right_slope)
        right_y2 = bottom
        right_x2 = int(avg_right_x2 + (right_y2 - avg_right_y2)/right_slope)

        cv2.line(img, (right_x1, right_y1), (right_x2, right_y2), color, thickness)
    except ValueError:
        pass

    try:
        avg_left_x1 = int(np.mean(left_x1))
        avg_left_y1 = int(np.mean(left_y1))
        avg_left_x2 = int(np.mean(left_x2))
        avg_left_y2 = int(np.mean(left_y2))

        left_slope = get_slope(avg_left_x1, avg_left_y1, avg_left_x2, avg_left_y2)

        left_y1 = top
        left_x1 = int(avg_left_x1 + (left_y1 - avg_left_y1)/left_slope)
        left_y2 = bottom
        left_x2 = int(avg_left_x2 + (left_y2 - avg_left_y2)/left_slope)

        cv2.line(img, (left_x1, left_y1), (left_x2, left_y2), color, thickness)
    except ValueError:
        pass

    left_border = (0, left_y1) 
    right_border = (img.shape[1], right_y1) 

    color3 = [0, 0, 255] 
    color4 = [0, 255, 0] 
    thickness3 = 25 

    left_diff = left_x1 - left_border[0] 
    right_diff = right_border[0] - right_x1 
    deviation = left_diff - right_diff 

    # steer right when drifting left, steer left when drifting right, otherwise centre
    if deviation < -13 and motorPosition != 2:
        motorPosition = 2
        GPIO.output(leftMotorPin, GPIO.HIGH)  # Left = 0
        GPIO.output(rightMotorPin, GPIO.LOW)  # Right = 1
        print("leaning right")
        cv2.line(img, (img.shape[1], 0), (img.shape[1], img.shape[0]), color3, thickness3)

    elif deviation > 13 and motorPosition != 1:
        motorPosition = 1
        GPIO.output(leftMotorPin, GPIO.LOW)   # Left = 1
        GPIO.output(rightMotorPin, GPIO.HIGH) # Right = 0
        print("leaning left")
        cv2.line(img, (0, 0), (0, img.shape[0]), color3, thickness3)

    elif (-15 <= deviation <= 15) and motorPosition != 0:
        motorPosition = 0
        GPIO.output(leftMotorPin, GPIO.HIGH)  # Left = 0
        GPIO.output(rightMotorPin, GPIO.HIGH) # Right = 0
        print("center")
        cv2.line(img, (0, 0), (0, img.shape[0]), color4, thickness3)
        cv2.line(img, (img.shape[1], 0), (img.shape[1], img.shape[0]), color4, thickness3)


def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                            minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    # HoughLinesP returns None when no segments are found
    if lines is not None:
        draw_lines(line_img, lines)
    return line_img


def weighted_img(img, initial_img, a=0.8, b=1.0, g=0.0): 
    return cv2.addWeighted(initial_img, a, img, b, g) 


def process_image(image): 
    kernel_size = 5 

    low_threshold = 200 
    high_threshold = 210 

    gray = grayscale(image) 
    blur = gaussian_blur(gray, kernel_size) 
    edges = canny(blur, low_threshold, high_threshold) 

    rho = 2 
    theta = np.pi/180 
    threshold = 20 
    min_line_len = 7 
    max_line_gap = 10 
    line_image = hough_lines(edges, rho, theta, threshold, min_line_len, max_line_gap) 
    result = weighted_img(line_image, image) 

    return result 
with picamera.PiCamera() as camera:
    with picamera.array.PiRGBArray(camera) as output:
        camera.resolution = (320, 240)
        camera.framerate = 80
        while True:
            print('\n\n------------------\n\n')
            camera.capture(output, 'bgr')
            try:
                img = output.array
                # run the lane-detection pipeline and display the result
                result = process_image(img)
                cv2.imshow('lanes', result)
                output.truncate(0)

                # mask the key code before comparing; Esc (27) quits
                if (cv2.waitKey(5) & 0xFF) == 27:
                    break
            except KeyboardInterrupt:
                print('KB interrupted')
                print('Process Aborted!')
                break
            except Exception as e:
                exc_type, exc_obj, tb = sys.exc_info()
                lineno = tb.tb_lineno
                print('Error : ' + str(e) + ' @ line ' + str(lineno))
cv2.destroyAllWindows() 
GPIO.cleanup() 
print ('Aborted') 

The Raspberry Pi's computing power is not that great. Try measuring how long each algorithmic sub-component takes. – Micka
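A minimal way to act on that suggestion is to time each stage of the pipeline separately. The sketch below reuses the helper functions defined in the question, so it has to live in the same script; the `timed` helper, `profile_pipeline`, and the stage labels are illustrative names, not part of the original code:

import time
import numpy as np

def timed(label, fn, *args):
    # run one stage and report how long it took in milliseconds
    t0 = time.time()
    result = fn(*args)
    print('%-12s %.1f ms' % (label, (time.time() - t0) * 1000.0))
    return result

def profile_pipeline(frame):
    # frame is one captured BGR image, e.g. output.array from the capture loop
    gray  = timed('grayscale', grayscale, frame)
    blur  = timed('blur', gaussian_blur, gray, 5)
    edges = timed('canny', canny, blur, 200, 210)
    lines = timed('hough+draw', hough_lines, edges, 2, np.pi/180, 20, 7, 10)
    timed('overlay', weighted_img, lines, frame)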

Answer


I lowered the resolution and frame rate, and now it works fine:

camera.resolution = (176, 144) 
camera.framerate = 30
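Beyond lowering the resolution and frame rate, another common picamera speed-up (not mentioned in this thread, so treat it as an assumption) is to stream frames from the camera's video port with capture_continuous() instead of calling capture() once per frame, since still-port captures carry much more per-frame overhead. A rough sketch combining that with the settings above:

import cv2
import picamera
import picamera.array

with picamera.PiCamera() as camera:
    camera.resolution = (176, 144)
    camera.framerate = 30
    with picamera.array.PiRGBArray(camera, size=camera.resolution) as output:
        # use_video_port=True pulls frames from the video port, which is
        # considerably faster than repeated still captures
        for _ in camera.capture_continuous(output, format='bgr', use_video_port=True):
            frame = output.array
            result = process_image(frame)   # pipeline from the question
            cv2.imshow('lanes', result)
            output.truncate(0)              # reset the buffer for the next frame
            if (cv2.waitKey(1) & 0xFF) == 27:   # Esc quits
                break

cv2.destroyAllWindows()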