I am trying to do real-time object detection and tracking in MATLAB, but it gives me errors.

function multiObjectTracking()

% Create objects for reading the video, detecting moving objects,
% and displaying the results.

obj = setupSystemObjects(); 

tracks = initializeTracks(); % create an empty array of tracks 

nextId = 1; % ID of the next track 

% Detect moving objects, and track them across video frames.

while ~isDone(obj.reader) 
    frame = readFrame(); 
    [centroids, bboxes, mask] = detectObjects(frame); 
    predictNewLocationsOfTracks(); 
    [assignments, unassignedTracks, unassignedDetections] = ... 
     detectionToTrackAssignment(); 

    updateAssignedTracks(); 
    updateUnassignedTracks(); 
    deleteLostTracks(); 
    createNewTracks(); 

    displayTrackingResults(); 
end 

%% Create System Objects
% Create System objects used for reading the video frames, detecting
% foreground objects, and displaying the results.

function obj = setupSystemObjects() 

     % Initialize Video I/O
     % Create objects for reading a video from a file, drawing the tracked
     % objects in each frame, and playing the video.

     vid = videoinput('winvideo', 1, 'YUY2_320x240');
     src = getselectedsource(vid); 

     vid.FramesPerTrigger = 1; 

     % TriggerRepeat is zero-based and is always one less than
     % the number of triggers.

     vid.TriggerRepeat = 899;

     preview(vid); 

     start(vid); 

     stoppreview(vid); 

     savedvideo = getdata(vid); 

     % Create a video file reader.

     obj.reader = vision.VideoFileReader(savedvideo);

     % Create two video players: one to display the video,
     % and one to display the foreground mask.

     obj.videoPlayer = vision.VideoPlayer('Position', [20, 400, 700, 400]);
     obj.maskPlayer = vision.VideoPlayer('Position', [740, 400, 700, 400]); 

     obj.detector = vision.ForegroundDetector('NumGaussians', 3, ... 
      'NumTrainingFrames', 40, 'MinimumBackgroundRatio', 0.7); 

     obj.blobAnalyser = vision.BlobAnalysis('BoundingBoxOutputPort', true, ... 
      'AreaOutputPort', true, 'CentroidOutputPort', true, ... 
      'MinimumBlobArea', 400); 
    end 

    function tracks = initializeTracks() 

     % Create an empty array of tracks.

     tracks = struct(...
      'id', {}, ... 
      'bbox', {}, ... 
      'kalmanFilter', {}, ... 
      'age', {}, ... 
      'totalVisibleCount', {}, ... 
      'consecutiveInvisibleCount', {}); 
    end 

%% Read a Video Frame
% Read the next video frame from the video file.

function frame = readFrame() 
     frame = obj.reader.step(); 
    end 


    function [centroids, bboxes, mask] = detectObjects(frame) 

     % Detect the foreground.

     mask = obj.detector.step(frame);

     % Apply morphological operations to remove noise and fill in holes.

     mask = imopen(mask, strel('rectangle', [3,3]));
     mask = imclose(mask, strel('rectangle', [15, 15])); 
     mask = imfill(mask, 'holes'); 

     % Perform blob analysis to find connected components.

     [~, centroids, bboxes] = obj.blobAnalyser.step(mask);
    end 

%% Predict New Locations of Existing Tracks
% Use the Kalman filter to predict the centroid of each track in the
% current frame, and update its bounding box accordingly.

function predictNewLocationsOfTracks() 
     for i = 1:length(tracks) 
      bbox = tracks(i).bbox; 

      % Predict the current location of the track.

      predictedCentroid = predict(tracks(i).kalmanFilter);

      % Shift the bounding box so that its center is at
      % the predicted location.

      predictedCentroid = int32(predictedCentroid) - bbox(3:4)/2;
      tracks(i).bbox = [predictedCentroid, bbox(3:4)]; 
     end 
    end 


    function [assignments, unassignedTracks, unassignedDetections] = ... 
      detectionToTrackAssignment() 

     nTracks = length(tracks); 
     nDetections = size(centroids, 1); 

     % Compute the cost of assigning each detection to each track.

     cost = zeros(nTracks, nDetections);
     for i = 1:nTracks 
      cost(i, :) = distance(tracks(i).kalmanFilter, centroids); 
     end 

     % Solve the assignment problem.

     costOfNonAssignment = 20;
     [assignments, unassignedTracks, unassignedDetections] = ... 
      assignDetectionsToTracks(cost, costOfNonAssignment); 
    end 

    function updateAssignedTracks() 
     numAssignedTracks = size(assignments, 1); 
     for i = 1:numAssignedTracks 
      trackIdx = assignments(i, 1); 
      detectionIdx = assignments(i, 2); 
      centroid = centroids(detectionIdx, :); 
      bbox = bboxes(detectionIdx, :); 

      % Correct the estimate of the object's location
      % using the new detection.

      correct(tracks(trackIdx).kalmanFilter, centroid);

      % Replace the predicted bounding box with the
      % detected bounding box.

      tracks(trackIdx).bbox = bbox;

      % Update the track's age.

      tracks(trackIdx).age = tracks(trackIdx).age + 1;

      % Update visibility.

      tracks(trackIdx).totalVisibleCount = ...
       tracks(trackIdx).totalVisibleCount + 1; 
      tracks(trackIdx).consecutiveInvisibleCount = 0; 
     end 
    end 

%% Update Unassigned Tracks
% Mark each unassigned track as invisible, and increase its age by 1.

function updateUnassignedTracks() 
     for i = 1:length(unassignedTracks) 
      ind = unassignedTracks(i); 
      tracks(ind).age = tracks(ind).age + 1; 
      tracks(ind).consecutiveInvisibleCount = ... 
       tracks(ind).consecutiveInvisibleCount + 1; 
     end 
    end 


    function deleteLostTracks() 
     if isempty(tracks) 
      return; 
     end 

     invisibleForTooLong = 10; 
     ageThreshold = 8; 

     % Compute the fraction of the track's age for which it was visible.

     ages = [tracks(:).age];
     totalVisibleCounts = [tracks(:).totalVisibleCount]; 
     visibility = totalVisibleCounts ./ ages; 

     % Find the indices of 'lost' tracks.

     lostInds = (ages < ageThreshold & visibility < 0.6) | ...
      [tracks(:).consecutiveInvisibleCount] >= invisibleForTooLong; 

     % Delete lost tracks.

     tracks = tracks(~lostInds);
    end 

    function createNewTracks() 
     centroids = centroids(unassignedDetections, :); 
     bboxes = bboxes(unassignedDetections, :); 

     for i = 1:size(centroids, 1) 

      centroid = centroids(i,:); 
      bbox = bboxes(i, :); 

      % Create a Kalman filter object.

      kalmanFilter = configureKalmanFilter('ConstantVelocity', ...
       centroid, [200, 50], [100, 25], 100); 

      % Create a new track.

      newTrack = struct(...
       'id', nextId, ... 
       'bbox', bbox, ... 
       'kalmanFilter', kalmanFilter, ... 
       'age', 1, ... 
       'totalVisibleCount', 1, ... 
       'consecutiveInvisibleCount', 0); 

      % Add it to the array of tracks.

      tracks(end + 1) = newTrack;

      % Increment the next id.

      nextId = nextId + 1;
     end 
    end 

    function displayTrackingResults() 

     % Convert the frame and the mask to uint8 RGB.

     frame = im2uint8(frame);
     mask = uint8(repmat(mask, [1, 1, 3])) .* 255; 

     minVisibleCount = 8; 
     if ~isempty(tracks) 

      % Noisy detections tend to result in short-lived tracks.
      % Only display tracks that have been visible for more than
      % a minimum number of frames.

      reliableTrackInds = ...
       [tracks(:).totalVisibleCount] > minVisibleCount; 
      reliableTracks = tracks(reliableTrackInds); 

      % Display the objects. If an object has not been detected
      % in this frame, display its predicted bounding box.

      if ~isempty(reliableTracks)

       % Get the bounding boxes.

       bboxes = cat(1, reliableTracks.bbox);

       % Get the ids.

       ids = int32([reliableTracks(:).id]);

       % Create labels for the objects, indicating the ones for which
       % we display the predicted rather than the actual location.

       labels = cellstr(int2str(ids'));
       predictedTrackInds = ... 
        [reliableTracks(:).consecutiveInvisibleCount] > 0; 
       isPredicted = cell(size(labels)); 
       isPredicted(predictedTrackInds) = {' predicted'}; 
       labels = strcat(labels, isPredicted); 
       % Draw the objects on the frame.

       frame = insertObjectAnnotation(frame, 'rectangle', ...
        bboxes, labels); 
       % Draw the objects on the mask.

       mask = insertObjectAnnotation(mask, 'rectangle', ...
        bboxes, labels); 
      end 
     end 

     % Display the mask and the frame.

     obj.maskPlayer.step(mask);
     obj.videoPlayer.step(frame); 
    end 

displayEndOfDemoMessage(mfilename) 
end 

By the way, there is no optical flow here. You may want to edit your question to avoid confusion. – Dima

Answer


Your problem is that you are trying to use vision.VideoFileReader while reading frames from a camera. vision.VideoFileReader is only for reading video files. If you are capturing frames from a camera, you do not need it at all. You should add the videoinput object to the obj structure, and you should try using getsnapshot inside readFrame().
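A minimal sketch of that change, assuming the same 'winvideo' adaptor and 'YUY2_320x240' format from your code; the obj.vid field name is only an illustration, not part of the original example:

function obj = setupSystemObjects()
    % Keep the camera object itself instead of a vision.VideoFileReader.
    % (Illustrative field name: obj.vid)
    obj.vid = videoinput('winvideo', 1, 'YUY2_320x240');
    obj.vid.ReturnedColorSpace = 'rgb'; % have getsnapshot return RGB frames

    % The players, detector and blob analyser stay exactly as in your code.
    obj.videoPlayer = vision.VideoPlayer('Position', [20, 400, 700, 400]);
    obj.maskPlayer = vision.VideoPlayer('Position', [740, 400, 700, 400]);

    obj.detector = vision.ForegroundDetector('NumGaussians', 3, ...
        'NumTrainingFrames', 40, 'MinimumBackgroundRatio', 0.7);

    obj.blobAnalyser = vision.BlobAnalysis('BoundingBoxOutputPort', true, ...
        'AreaOutputPort', true, 'CentroidOutputPort', true, ...
        'MinimumBlobArea', 400);
end

Note that the main loop condition, while ~isDone(obj.reader), also has to change once there is no file reader; looping over a fixed number of frames, or using while true, are the usual options.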


Please tell me how to add my live video to this code? – user3489036


I already did. You can call 'getsnapshot(vid)' to get each video frame. – Dima
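For example, readFrame() could then become something like this (assuming the camera is stored in obj.vid as sketched above):

function frame = readFrame()
    % Grab the current frame from the live camera.
    frame = getsnapshot(obj.vid);
end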
