I want to build an Android application that detects a known object, and I am using BoofCV to implement it. At runtime I load the object's image from the application resources, detect interest points with Fast Hessian, and describe them with SURF. Then, for every frame from the camera feed, I detect features in the frame with Fast Hessian and describe them with SURF as well. Once I have the features of both the source object and the current frame, I associate them and obtain the matches. At this point I am able to draw the matches. What I want to do next is use those matches to obtain the boundary of the detected object. To do that, I started following this example. Although it is an image-stitching example, the same concept should apply to finding the boundary of a known object. How do I obtain the object's boundary from its features?
I have implemented the example successfully. The problem is that lines do get drawn, but they are just arbitrary straight lines; in some frames only a single line is drawn, and the rectangle is never drawn.
Here is my code:
public class HomographyProcessing2<Desc extends TupleDesc> extends VideoRenderProcessing<ImageFloat32> {
DetectDescribePoint<ImageFloat32,Desc> detDesc;
AssociateDescription<Desc> associate;
FastQueue<Desc> listSrc;
FastQueue<Desc> listDst;
FastQueue<Point2D_F64> locationSrc = new FastQueue<Point2D_F64>(Point2D_F64.class,true);
FastQueue<Point2D_F64> locationDst = new FastQueue<Point2D_F64>(Point2D_F64.class,true);
// output image which is displayed by the GUI
private Bitmap outputGUI;
// storage used during image convert
private byte[] storage;
// output image which is modified by processing thread
private Bitmap output;
List<Point2D_F64> pointsSrc = new ArrayList<Point2D_F64>();
List<Point2D_F64> pointsDst = new ArrayList<Point2D_F64>();
private ImageFloat32 object;
private ImageFloat32 frame;
Paint paint = new Paint();
ModelManager<Homography2D_F64> manager = new ModelManagerHomography2D_F64();
GenerateHomographyLinear modelFitter = new GenerateHomographyLinear(true);
DistanceHomographySq distance = new DistanceHomographySq();
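// Ransac arguments, as I read them: random seed 123, up to 60 iterations,
// inlier threshold of 9 on the squared transfer error (about 3 pixels)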
ModelMatcher<Homography2D_F64,AssociatedPair> modelMatcher = new Ransac<Homography2D_F64,AssociatedPair>(123,manager,modelFitter,distance,60,9);
Homography2D_F64 homography2D_f64 = null;
// shared lock so the processing thread and the render thread actually exclude
// each other; synchronizing on a fresh 'new Object()' guards nothing
private final Object lockGui = new Object();
public HomographyProcessing2(Context context, int width, int height) {
super(ImageType.single(ImageFloat32.class));
Log.e("LEARN", String.valueOf(modelFitter.getMinimumPoints()));
detDesc = CreateDetectorDescriptor.create(CreateDetectorDescriptor.DETECT_FH, CreateDetectorDescriptor.DESC_SURF, ImageFloat32.class);
listSrc = UtilFeature.createQueue(detDesc, 100);
listDst = UtilFeature.createQueue(detDesc,100);
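// associate descriptors greedily with backwards validation;
// Double.MAX_VALUE means no score cutoff is applied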
ScoreAssociation&lt;Desc&gt; score = FactoryAssociation.scoreEuclidean(detDesc.getDescriptionType(), true);
associate = FactoryAssociation.greedy(score,Double.MAX_VALUE,true);
Bitmap bitmap = BitmapFactory.decodeResource(context.getResources(), R.drawable.camera_image);
object = new ImageFloat32(bitmap.getWidth(),bitmap.getHeight());
// declareStorage() returns the buffer it allocates; the return value must be
// kept, otherwise the array stays null
byte[] workBuffer = ConvertBitmap.declareStorage(bitmap, null);
ConvertBitmap.bitmapToGray(bitmap, object, workBuffer);
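// the object image is the static 'source'; each camera frame becomes the 'destination'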
detDesc.detect(object);
describeImage(listSrc, locationSrc);
associate.setSource(listSrc);
output = Bitmap.createBitmap(width,height,Bitmap.Config.ARGB_8888);
outputGUI = Bitmap.createBitmap(width,height,Bitmap.Config.ARGB_8888);
// allocate the conversion buffer used by process() and render()
storage = ConvertBitmap.declareStorage(output, null);
paint.setColor(Color.BLUE);
paint.setTextSize(20);
}
/**
* Using abstracted code, find a transform which minimizes the difference between corresponding features
* in both images. This code is completely model independent and contains the core algorithm.
*/
public static<T extends ImageSingleBand, FD extends TupleDesc> Homography2D_F64
computeTransform(T imageA , T imageB ,
DetectDescribePoint<T,FD> detDesc ,
AssociateDescription<FD> associate ,
ModelMatcher<Homography2D_F64,AssociatedPair> modelMatcher)
{
// storage for the feature locations and descriptions of each image
List<Point2D_F64> pointsA = new ArrayList<Point2D_F64>();
FastQueue<FD> descA = UtilFeature.createQueue(detDesc, 100);
List<Point2D_F64> pointsB = new ArrayList<Point2D_F64>();
FastQueue<FD> descB = UtilFeature.createQueue(detDesc,100);
// extract feature locations and descriptions from each image
describeImage(imageA, detDesc, pointsA, descA);
describeImage(imageB, detDesc, pointsB, descB);
// Associate features between the two images
associate.setSource(descA);
associate.setDestination(descB);
associate.associate();
// create a list of AssociatedPairs that tell the model matcher how a feature moved
FastQueue<AssociatedIndex> matches = associate.getMatches();
List<AssociatedPair> pairs = new ArrayList<AssociatedPair>();
for(int i = 0; i < matches.size(); i++) {
AssociatedIndex match = matches.get(i);
Point2D_F64 a = pointsA.get(match.src);
Point2D_F64 b = pointsB.get(match.dst);
pairs.add(new AssociatedPair(a,b,false));
}
// find the best fit model to describe the change between these images
if(!modelMatcher.process(pairs))
throw new RuntimeException("Model Matcher failed!");
// return the found image transform
return modelMatcher.getModelParameters().copy();
}
/**
* Detects features inside the two images and computes descriptions at those points.
*/
private static <T extends ImageSingleBand, FD extends TupleDesc>
void describeImage(T image,
DetectDescribePoint<T,FD> detDesc,
List<Point2D_F64> points,
FastQueue<FD> listDescs) {
detDesc.detect(image);
listDescs.reset();
for(int i = 0; i < detDesc.getNumberOfFeatures(); i++) {
points.add(detDesc.getLocation(i).copy());
listDescs.grow().setTo(detDesc.getDescription(i));
}
}
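// projects a pixel coordinate through the given homography and truncates to integer pixels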
private static Point2D_I32 renderPoint(int x0 , int y0 , Homography2D_F64 fromBtoWork)
{
Point2D_F64 result = new Point2D_F64();
HomographyPointOps_F64.transform(fromBtoWork, new Point2D_F64(x0, y0), result);
return new Point2D_I32((int)result.x,(int)result.y);
}
@Override
protected void process(ImageFloat32 imageFloat32) {
detDesc.detect(imageFloat32);
describeImage(listDst,locationDst);
associate.setDestination(listDst);
associate.associate();
synchronized (lockGui) {
ConvertBitmap.grayToBitmap(imageFloat32, output, storage);
outputGUI = output;
}
synchronized (lockGui)
{
pointsSrc.clear();
pointsDst.clear();
FastQueue<AssociatedIndex> matches = associate.getMatches();
List<AssociatedPair> pairs = new ArrayList<AssociatedPair>();
for(int i = 0; i < matches.size(); i++) {
AssociatedIndex match = matches.get(i);
Point2D_F64 a = locationSrc.get(match.src);
Point2D_F64 b = locationDst.get(match.dst);
pairs.add(new AssociatedPair(a,b,false));
pointsDst.add(locationDst.get(match.dst));
}
// a homography needs at least four correspondences (modelFitter.getMinimumPoints())
if(pairs.size() >= 4) {
// find the best fit model to describe the change between these images;
// if RANSAC fails on this frame, keep the previous homography instead of crashing
if(modelMatcher.process(pairs)) {
homography2D_f64 = modelMatcher.getModelParameters().copy();
}
}
}
}
@Override
protected void render(Canvas canvas, double v) {
synchronized (lockGui)
{
canvas.drawBitmap(outputGUI, 0, 0, null);
frame = ConvertBitmap.bitmapToGray(outputGUI,frame,storage);
if(homography2D_f64 != null)
{
// Convert into a BoofCV color format
MultiSpectral<ImageFloat32> colorA = new MultiSpectral<ImageFloat32>(ImageFloat32.class,3);
MultiSpectral<ImageFloat32> colorB = new MultiSpectral<ImageFloat32>(ImageFloat32.class,3);
colorB.setBands(new ImageFloat32[]{frame,frame,frame});
colorA.setBands(new ImageFloat32[]{object,object,object});
MultiSpectral<ImageFloat32> work = new MultiSpectral<ImageFloat32>(ImageFloat32.class,outputWidth,outputHeight,3);
// the stitching example uses scale = 0.5; inlined here so it compiles
Homography2D_F64 fromAToWork = new Homography2D_F64(0.5,0,colorA.width/4,0,0.5,colorA.height/4,0,0,1);
Homography2D_F64 fromWorkToA = fromAToWork.invert(null);
// Used to render the results onto an image
PixelTransformHomography_F32 model = new PixelTransformHomography_F32();
ImageDistort<MultiSpectral<ImageFloat32>,MultiSpectral<ImageFloat32>> distort =
DistortSupport.createDistortMS(ImageFloat32.class, model, new ImplBilinearPixel_F32(), false, null);
// Render first image
model.set(fromWorkToA);
distort.apply(colorA,work);
// Render second image
Homography2D_F64 fromWorkToB = fromWorkToA.concat(homography2D_f64,null);
model.set(fromWorkToB);
distort.apply(colorB,work);
Homography2D_F64 fromBtoWork = fromWorkToB.invert(null);
Point2D_I32 corners[] = new Point2D_I32[4];
corners[0] = renderPoint (0,0,fromBtoWork);
corners[1] = renderPoint(object.width,0,fromBtoWork);
corners[2] = renderPoint(object.width,object.height,fromBtoWork);
corners[3] = renderPoint(0,object.height,fromBtoWork);
paint.setColor(Color.GREEN);
paint.setStrokeWidth(5);
canvas.drawLine(corners[0].x,corners[0].y,corners[1].x,corners[1].y,paint);
canvas.drawLine(corners[1].x,corners[1].y,corners[2].x,corners[2].y,paint);
canvas.drawLine(corners[2].x,corners[2].y,corners[3].x,corners[3].y,paint);
canvas.drawLine(corners[3].x,corners[3].y,corners[0].x,corners[0].y,paint);
}
else {
for (int i = 0; i < pointsDst.size(); i++) {
Point2D_F64 p = pointsDst.get(i);
canvas.drawCircle((float) p.x, (float) p.y, 2, paint);
}
}
}
}
private void describeImage(FastQueue<Desc> listDesc, FastQueue<Point2D_F64> listLoc) {
listDesc.reset();
listLoc.reset();
int N = detDesc.getNumberOfFeatures();
for(int i = 0; i < N; i++) {
listLoc.grow().set(detDesc.getLocation(i));
listDesc.grow().setTo(detDesc.getDescription(i));
}
}
}
I do not clearly understand how homography works, and I suspect that is where the problem lies.
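My current understanding is that a homography H is a 3x3 matrix that maps a point (x, y) in the object image to homogeneous coordinates [x', y', w']^T = H * [x, y, 1]^T, i.e. to the pixel (x'/w', y'/w') in the frame. If that is right, it seems I should be able to skip the whole "work image" construction from the stitching example and instead transform the four corners of the object image by the estimated homography and connect them on the canvas. Here is a minimal sketch of that idea (drawBoundary is a hypothetical helper of my own; it assumes homography2D_f64 maps object coordinates to frame coordinates, which depends on the order the points were passed into AssociatedPair):

private void drawBoundary(Canvas canvas, Homography2D_F64 objectToFrame) {
Point2D_F64 result = new Point2D_F64();
float[] x = new float[4];
float[] y = new float[4];
// the four corners of the object image, in object pixel coordinates
int[][] corners = {{0, 0}, {object.width, 0}, {object.width, object.height}, {0, object.height}};
for (int i = 0; i < 4; i++) {
HomographyPointOps_F64.transform(objectToFrame, new Point2D_F64(corners[i][0], corners[i][1]), result);
x[i] = (float) result.x;
y[i] = (float) result.y;
}
// connect consecutive corners to close the quadrilateral
for (int i = 0; i < 4; i++) {
int j = (i + 1) % 4;
canvas.drawLine(x[i], y[i], x[j], y[j], paint);
}
}

Is that the correct way to get the boundary, or does the direction of the homography need to be inverted?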
Please help me fix this, or suggest a better way to detect the boundary of an object from its features.
Thanks!