
Error when running solvePnP in OpenCV (Python)

I am trying to write an augmented reality program in OpenCV, but whenever I call solvePnP I get an error.

What I want to do is build an augmented reality program in OpenCV that maps the homography points of a selected, cropped image onto the whole image, then feeds those homography points into solvePnP to get a pose estimate.

I managed to get the homography working, but for some reason I cannot get solvePnP to run. I suspect my inputs are not formatted correctly, but I am not sure.

If you want to run the code yourself, clone this repository (it contains the files needed to run it): https://github.com/vanstorm9/SLAM-experiments.git

and run the file /augmented-reality/sample-scripts/test.py

Can anyone help me solve this?

The error:

Traceback (most recent call last): 
    File "/augmented-reality/sample-scripts/test.py", line 315, in <module> 
    (ret, rvecs, tvecs) = cv2.solvePnP(objp, corners2, mtx, dist) 
error: /opencv/modules/calib3d/src/solvepnp.cpp:61: error: (-215) npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) in function solvePnP 

The inputs to solvePnP and their shapes:

(42, 3)  # objp 
(143, 2, 1) # corners2 
(3, 3)  # mtx 
(1, 5)  # dist 
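For reference, the (-215) assertion checks that the image points are float32 or float64 values in an (N, 2) or (N, 1, 2) layout, and that N equals the number of object points. Here objp has 42 points while corners2 has 143, so the counts already disagree. Below is a minimal sketch of the expected shape contract, reusing the variable names above (an illustration only, not a fix for the matching logic):

corners2 = np.float32(corners2).reshape(-1, 1, 2)  # one 2D image point per 3D point
assert objp.shape[0] == corners2.shape[0]          # point counts must match
(ret, rvecs, tvecs) = cv2.solvePnP(objp, corners2, mtx, dist)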

# objp 
[[ 0. 0. 0.] 
[ 1. 0. 0.] 
[ 2. 0. 0.] 
[ 3. 0. 0.] 
[ 4. 0. 0.] 
[ 5. 0. 0.] 
[ 6. 0. 0.] 
[ 0. 1. 0.] 
[ 1. 1. 0.] 
[ 2. 1. 0.] 
[ 3. 1. 0.] 
[ 4. 1. 0.] 
[ 5. 1. 0.] 
[ 6. 1. 0.] 
[ 0. 2. 0.] 
[ 1. 2. 0.] 
[ 2. 2. 0.] 
[ 3. 2. 0.] 
[ 4. 2. 0.] 
[ 5. 2. 0.] 
[ 6. 2. 0.] 
[ 0. 3. 0.] 
[ 1. 3. 0.] 
[ 2. 3. 0.] 
[ 3. 3. 0.] 
[ 4. 3. 0.] 
[ 5. 3. 0.] 
[ 6. 3. 0.] 
[ 0. 4. 0.] 
[ 1. 4. 0.] 
[ 2. 4. 0.] 
[ 3. 4. 0.] 
[ 4. 4. 0.] 
[ 5. 4. 0.] 
[ 6. 4. 0.] 
[ 0. 5. 0.] 
[ 1. 5. 0.] 
[ 2. 5. 0.] 
[ 3. 5. 0.] 
[ 4. 5. 0.] 
[ 5. 5. 0.] 
[ 6. 5. 0.]] 

# corners2 
[[[ 0.] 
    [ 0.]] 

[[ 1.] 
    [ 1.]] 

[[ 2.] 
    [ 2.]] 

[[ 3.] 
    [ 3.]] 

[[ 4.] 
    [ 4.]] 

[[ 5.] 
    [ 5.]] 

[[ 6.] 
    [ 6.]] 

[[ 7.] 
    [ 7.]] 

[[ 8.] 
    [ 8.]] 

[[ 9.] 
    [ 9.]] 

[[ 10.] 
    [ 10.]] 

[[ 11.] 
    [ 11.]] 

[[ 12.] 
    [ 12.]] 

[[ 13.] 
    [ 13.]] 

[[ 14.] 
    [ 14.]] 

[[ 15.] 
    [ 18.]] 

[[ 16.] 
    [ 19.]] 

[[ 17.] 
    [ 20.]] 

[[ 18.] 
    [ 28.]] 

[[ 19.] 
    [ 29.]] 

[[ 20.] 
    [ 30.]] 

[[ 21.] 
    [ 31.]] 

[[ 22.] 
    [ 32.]] 

[[ 23.] 
    [ 33.]] 

[[ 24.] 
    [ 35.]] 

[[ 25.] 
    [ 36.]] 

[[ 26.] 
    [ 39.]] 

[[ 27.] 
    [ 40.]] 

[[ 28.] 
    [ 41.]] 

[[ 29.] 
    [ 42.]] 

[[ 31.] 
    [ 52.]] 

[[ 32.] 
    [ 53.]] 

[[ 33.] 
    [ 54.]] 

[[ 34.] 
    [ 56.]] 

[[ 35.] 
    [ 57.]] 

[[ 36.] 
    [ 59.]] 

[[ 37.] 
    [ 60.]] 

[[ 38.] 
    [ 61.]] 

[[ 40.] 
    [ 69.]] 

[[ 41.] 
    [ 70.]] 

[[ 42.] 
    [ 71.]] 

[[ 43.] 
    [ 72.]] 

[[ 44.] 
    [ 75.]] 

[[ 45.] 
    [ 76.]] 

[[ 47.] 
    [ 78.]] 

[[ 49.] 
    [ 79.]] 

[[ 50.] 
    [ 86.]] 

[[ 51.] 
    [ 87.]] 

[[ 52.] 
    [ 88.]] 

[[ 53.] 
    [ 89.]] 

[[ 54.] 
    [ 90.]] 

[[ 55.] 
    [ 94.]] 

[[ 48.] 
    [ 95.]] 

[[ 56.] 
    [ 101.]] 

[[ 42.] 
    [ 105.]] 

[[ 61.] 
    [ 109.]] 

[[ 62.] 
    [ 110.]] 

[[ 57.] 
    [ 111.]] 

[[ 58.] 
    [ 112.]] 

[[ 61.] 
    [ 113.]] 

[[ 59.] 
    [ 115.]] 

[[ 58.] 
    [ 116.]] 

[[ 63.] 
    [ 117.]] 

[[ 60.] 
    [ 118.]] 

[[ 70.] 
    [ 119.]] 

[[ 71.] 
    [ 120.]] 

[[ 74.] 
    [ 125.]] 

[[ 75.] 
    [ 126.]] 

[[ 76.] 
    [ 128.]] 

[[ 77.] 
    [ 129.]] 

[[ 78.] 
    [ 131.]] 

[[ 66.] 
    [ 133.]] 

[[ 67.] 
    [ 134.]] 

[[ 69.] 
    [ 135.]] 

[[ 79.] 
    [ 136.]] 

[[ 72.] 
    [ 137.]] 

[[ 80.] 
    [ 139.]] 

[[ 73.] 
    [ 140.]] 

[[ 83.] 
    [ 141.]] 

[[ 82.] 
    [ 142.]] 

[[ 91.] 
    [ 143.]] 

[[ 92.] 
    [ 144.]] 

[[ 93.] 
    [ 145.]] 

[[ 94.] 
    [ 146.]] 

[[ 85.] 
    [ 147.]] 

[[ 86.] 
    [ 148.]] 

[[ 87.] 
    [ 149.]] 

[[ 95.] 
    [ 150.]] 

[[ 101.] 
    [ 153.]] 

[[ 102.] 
    [ 154.]] 

[[ 103.] 
    [ 155.]] 

[[ 104.] 
    [ 156.]] 

[[ 105.] 
    [ 157.]] 

[[ 106.] 
    [ 158.]] 

[[ 107.] 
    [ 159.]] 

[[ 108.] 
    [ 160.]] 

[[ 109.] 
    [ 161.]] 

[[ 110.] 
    [ 163.]] 

[[ 111.] 
    [ 164.]] 

[[ 112.] 
    [ 165.]] 

[[ 113.] 
    [ 166.]] 

[[ 114.] 
    [ 167.]] 

[[ 99.] 
    [ 168.]] 

[[ 100.] 
    [ 169.]] 

[[ 118.] 
    [ 171.]] 

[[ 119.] 
    [ 172.]] 

[[ 120.] 
    [ 173.]] 

[[ 121.] 
    [ 174.]] 

[[ 122.] 
    [ 175.]] 

[[ 123.] 
    [ 176.]] 

[[ 102.] 
    [ 177.]] 

[[ 103.] 
    [ 178.]] 

[[ 106.] 
    [ 180.]] 

[[ 107.] 
    [ 181.]] 

[[ 124.] 
    [ 182.]] 

[[ 115.] 
    [ 183.]] 

[[ 116.] 
    [ 184.]] 

[[ 117.] 
    [ 187.]] 

[[ 150.] 
    [ 188.]] 

[[ 128.] 
    [ 192.]] 

[[ 127.] 
    [ 194.]] 

[[ 129.] 
    [ 197.]] 

[[ 130.] 
    [ 198.]] 

[[ 131.] 
    [ 199.]] 

[[ 135.] 
    [ 200.]] 

[[ 136.] 
    [ 201.]] 

[[ 137.] 
    [ 202.]] 

[[ 138.] 
    [ 203.]] 

[[ 134.] 
    [ 204.]] 

[[ 113.] 
    [ 205.]] 

[[ 141.] 
    [ 206.]] 

[[ 142.] 
    [ 207.]] 

[[ 145.] 
    [ 208.]] 

[[ 143.] 
    [ 212.]] 

[[ 144.] 
    [ 213.]] 

[[ 149.] 
    [ 214.]] 

[[ 157.] 
    [ 216.]] 

[[ 159.] 
    [ 218.]] 

[[ 131.] 
    [ 220.]] 

[[ 112.] 
    [ 221.]] 

[[ 163.] 
    [ 223.]] 

[[ 164.] 
    [ 224.]] 

[[ 157.] 
    [ 228.]]] 

The code:

#!/usr/bin/python 
# -*- coding: utf-8 -*- 
import numpy as np 
import cv2 
import glob 

# Load previously saved data 

mtx = np.load('calib-matrix/mtx.npy') 
dist = np.load('calib-matrix/dist.npy') 

rect = (0, 0, 0, 0) 
startPoint = False 
endPoint = False 

selectedPoint = False 


def on_mouse(
    event, 
    x, 
    y, 
    flags, 
    params, 
    ): 

    global rect, startPoint, endPoint, selectedPoint 

    # get mouse click 

    if event == cv2.EVENT_LBUTTONDOWN: 

     if startPoint == True and endPoint == True: 

     # Resets and deletes the box once you are done 

      startPoint = False 
      endPoint = False 
      rect = (0, 0, 0, 0) 

     if startPoint == False: 

     # First click, waits for final click to create box 

      rect = (x, y, 0, 0) 
      startPoint = True 
     elif endPoint == False: 

     # creates the box (I think) 

      rect = (rect[0], rect[1], x, y) 
      print '________________' 
      print 'Rectangle location: ', rect[0], ' ', rect[1], ' ', \ 
       x, ' ', y 
      endPoint = True 

    return 


def drawCube(img, corners, imgpts): 
    imgpts = np.int32(imgpts).reshape(-1, 2) 

    # draw ground floor in green 

    img = cv2.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), -3) 

    # draw pillars in blue color 

    for (i, j) in zip(range(4), range(4, 8)): 
     img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]), 255, 3) 

    # draw top layer in red color 

    img = cv2.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3) 

    return img 


def draw(img, corners, imgpts): 
    corner = tuple(corners[0].ravel()) 
    img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 
        5) 
    img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 
        5) 
    img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 
        5) 
    return img 


def detectAndDescribe(image): 

    # convert the image to grayscale 
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 

    # detect and extract features from the image 

    descriptor = cv2.xfeatures2d.SIFT_create() 
    (kps, features) = descriptor.detectAndCompute(image, None) 

    # convert the keypoints from KeyPoint objects to NumPy 
    # arrays 

    kps = np.float32([kp.pt for kp in kps]) 

    # return a tuple of keypoints and features 

    return (kps, features) 


def matchKeypoints(
    kpsA, 
    kpsB, 
    featuresA, 
    featuresB, 
    ratio, 
    reprojThresh, 
    ): 

    # compute the raw matches and initialize the list of actual 
    # matches 

    matcher = cv2.DescriptorMatcher_create('BruteForce') 
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2) 
    matches = [] 
    match_ar = [] 

    i = 0 

    # loop over the raw matches 

    for m in rawMatches: 

     # ensure the distance is within a certain ratio of each 
     # other (i.e. Lowe's ratio test) 

     if len(m) == 2 and m[0].distance < m[1].distance * ratio: 
      matches.append((m[0].trainIdx, m[0].queryIdx)) 
      if i == 0: 
       match_ar = np.array([[[m[0].trainIdx], 
            [m[0].queryIdx]]], dtype=np.float) 
       match_ar = match_ar.transpose() 
       match_ar = list(match_ar) 
       print type(match_ar) 
       i = i + 1 
      else: 
       m_add = np.array([[m[0].trainIdx], 
           [m[0].queryIdx]])[None, :] 
       m_add = m_add.transpose() 
       match_ar = np.concatenate([match_ar, m_add]) 


    # computing a homography requires at least 4 matches 

    if len(matches) > 4: 

     # construct the two sets of points 

     ptsA = np.float32([kpsA[i] for (_, i) in matches]) 
     ptsB = np.float32([kpsB[i] for (i, _) in matches]) 

     # compute the homography between the two sets of points 

     (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, 
       reprojThresh) 

     # return the matches along with the homograpy matrix 
     # and status of each matched point 
     # return (matches, H, status) 

     return (matches, match_ar, H, status) 

    # otherwise, no homograpy could be computed 

    return None 


def drawMatches(
    imageA, 
    imageB, 
    kpsA, 
    kpsB, 
    matches, 
    status, 
    ): 

    # initialize the output visualization image 

    (hA, wA) = imageA.shape[:2] 
    (hB, wB) = imageB.shape[:2] 
    vis = np.zeros((max(hA, hB), wA + wB, 3), dtype='uint8') 
    print imageA.shape 
    print imageB.shape 
    vis[0:hA, 0:wA] = imageA 
    vis[0:hB, wA:] = imageB 

    # loop over the matches 

    for ((trainIdx, queryIdx), s) in zip(matches, status): 

     # only process the match if the keypoint was successfully 
     # matched 

     if s == 1: 

      # draw the match 

      ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1])) 
      ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1])) 
      cv2.line(vis, ptA, ptB, (0, 255, 0), 1) 

    # return the visualization 

    return vis 


criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 
      0.001) 
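# objp: a 7x6 grid of chessboard corners in the Z = 0 plane -- 42 points
# in total, which matches the (42, 3) shape printed above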
objp = np.zeros((6 * 7, 3), np.float32) 
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2) 



axis = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]]).reshape(-1, 3) 
cubeAxis = np.float32([ 
    [0, 0, 0], 
    [0, 3, 0], 
    [3, 3, 0], 
    [3, 0, 0], 
    [0, 0, -3], 
    [0, 3, -3], 
    [3, 3, -3], 
    [3, 0, -3], 
    ]) 
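# (axis and cubeAxis are 3D points in the same chessboard-square units as
# objp, for projectPoints to map into the image to draw the axes and cube)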

# Here we are going to try to define our corners 

#fname = 'images/left01.jpg' 
fname = 'images/checkerboard4.jpg' 

img = cv2.imread(fname) 

cv2.namedWindow('Label') 
cv2.setMouseCallback('Label', on_mouse) 

''' 
while 1: 
    if selectedPoint == True: 
     break 

    if startPoint == True and endPoint == True: 
     cv2.rectangle(img, (rect[0], rect[1]), (rect[2], rect[3]), 
         (255, 0, 255), 2) 
    cv2.imshow('Label', img) 
    if cv2.waitKey(20) & 255 == 27: 
     break 
cv2.destroyAllWindows() 
''' 


''' 
reference_color = img[rect[1]:rect[3], rect[0]:rect[2]] 

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
reference_img = gray[rect[1]:rect[3], rect[0]:rect[2]] 
''' 
reference_color = img[101:400, 92:574] 

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
reference_img = gray[101:400, 92:574] 



####### Attempting to perform homography ####### 

sift = cv2.xfeatures2d.SIFT_create() 

(kp1, des1) = detectAndDescribe(gray) 
(kp2, des2) = detectAndDescribe(reference_img) 


result = matchKeypoints(kp1, kp2, des1, des2, 0.75, 4.0) 

if result is None: 
    print 'No matches found' 
    exit() 

(M, M_ar, H, status) = result 

print 'Matches found' 

# vis = drawMatches(gray, reference_img, kp1, kp2, M, status) 

vis = drawMatches(img,reference_color,kp1,kp2,M,status) 
cv2.imshow('vis', vis) 
cv2.waitKey(0) 

corners2 = M_ar 
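# NOTE: M_ar (now corners2) holds the (trainIdx, queryIdx) descriptor-index
# pairs collected in matchKeypoints, not pixel coordinates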



if 1 == 1: 

    # Find the rotation and translation vectors. 
    print 'here' 
    (ret, rvecs, tvecs) = cv2.solvePnP(objp, corners2, mtx, dist) 

    # project 3D points to image plane 

    (imgpts, jac) = cv2.projectPoints(cubeAxis, rvecs, tvecs, mtx, dist) 

    img = drawCube(img, corners2, imgpts) 
    cv2.imshow('img', img) 
    k = cv2.waitKey(0) & 255 
else: 
    print 'No corners were detected' 

Answer


I don't know whether you ever solved this. Maybe you can fix it by using _, rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners2, mtx, dist), because solvePnP has changed but this is not documented in their own tutorial.
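For context, in OpenCV 3.x the Python binding of solvePnPRansac returns four values (retval, rvec, tvec, inliers) instead of the three it returned in 2.4. A sketch of how it would slot into the code above, assuming objp and corners2 have first been brought to matching point counts in a float32 (N, 1, 2) layout, since the RANSAC variant applies the same shape checks:

(ret, rvecs, tvecs, inliers) = cv2.solvePnPRansac(objp, corners2, mtx, dist) 
# rvecs/tvecs give the pose; inliers lists the correspondences RANSAC kept 
(imgpts, jac) = cv2.projectPoints(cubeAxis, rvecs, tvecs, mtx, dist) 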