mardi 3 mars 2015

OpenCV splitting camera feed into grid and determining colour


I've written a piece of code to take my camera feed, split it into a grid and evaluate each square for colour. My code is based on a multiple object tracking tutorial but has been modified heavily. It looks like this:



using namespace std;
using namespace cv;



//Standard Dilate and erode functions to improve white/black areas in Binary Image
// Pointer &thresh used so it affects threshImg so it can be used in tracking.
// Morphological clean-up of a binary mask, in place (thresh is passed by
// reference so the caller's threshImg is modified directly).
void morphOps(Mat &thresh){
    // Small 3x3 kernel: eroding grows the black background, stripping
    // isolated white specks that lie outside the object.
    Mat shrinkKernel = getStructuringElement(MORPH_RECT, Size(3, 3));

    // Larger 8x8 kernel: dilating grows the white foreground, closing
    // holes inside the object.
    Mat growKernel = getStructuringElement(MORPH_RECT, Size(8, 8));

    // Two passes of each operation, exactly as before.
    for (int pass = 0; pass < 2; pass++)
        erode(thresh, thresh, shrinkKernel);
    for (int pass = 0; pass < 2; pass++)
        dilate(thresh, thresh, growKernel);
}


//Tracking for the Filtered Object
void trackFilteredObject(int noteNum, string colourtype, Mat &thresh ,Mat HSVImage, Mat &cam){

vector<Brick> Bricks;

Mat temp;
thresh.copyTo(temp);

threshold(temp, thresh, 120, 255, 3); //3 = Threshold to Zero
int whitePixs = countNonZero(thresh);
int cols = thresh.cols;
int rows = thresh.rows;
int imgSize = (rows*cols)/0.75;

if(whitePixs > imgSize){


Brick Brick;

Brick.setColour(colourtype);
Brick.setnoteNum(noteNum);

Bricks.push_back(Brick);



}

int main(int argc, char* argv[])
{

/// Create a window
namedWindow("window", CV_WINDOW_AUTOSIZE );

while(1){
//initialtes camera, sets capture resolution
VideoCapture capture;
capture.open(1);
capture.set(CV_CAP_PROP_FPS, 30);
capture.set(CV_CAP_PROP_FRAME_WIDTH,640);
capture.set(CV_CAP_PROP_FRAME_HEIGHT,480);

Mat cam;


// Saves camera image to Matrix "cam"


capture.read(cam);


//Sets Widths and Heights based on camera resolution (cam.cols/cam.rows retrieves this)

int Width = cam.cols;
int gridWidth = Width/16;
int Height = cam.rows;
int gridHeight = Height/16;


//Splits image into 256 squares going left to right through rows and descending vertically. (16 squares per row for 4/4 pattern)

Mat BigImage;
Mat HSVImage;

// Converts cam to HSV pallete
cvtColor(cam, HSVImage, COLOR_BGR2HSV);

Size smallSize(gridWidth,gridHeight);
std::vector<Mat> smallImages;

for (int y = 0; y < HSVImage.rows; y += smallSize.height)
{
for (int x = 0; x < HSVImage.cols; x += smallSize.width)
{
cv::Rect rect = cv::Rect(x,y, smallSize.width, smallSize.height);
//Saves the matrix to vector
smallImages.push_back(cv::Mat(HSVImage, rect));

}
}

for (int i = 0; i < smallImages.size(); i++){

Mat HSV;
smallImages.at(i).copyTo(HSV);
int noteNum = i;
Mat threshImg;

inRange(HSV,Scalar(0,0,0),Scalar(255,255,255),threshImg);
morphOps(threshImg); //erodes image
string colour = "Red";
trackFilteredObject(noteNum,colour,threshImg,HSV,cam);

inRange(HSV,Scalar(0,0,0),Scalar(255,255,255),threshImg);
morphOps(threshImg); // threshold = mat after erosion/dilation
colour = "yellow";
trackFilteredObject(noteNum,colour,threshImg,HSV,cam);

inRange(HSV,Scalar(0,0,0),Scalar(255,255,255),threshImg);
morphOps(threshImg);
colour = "Black";
trackFilteredObject(noteNum,colour,threshImg,HSV,cam);

inRange(HSV,Scalar(0,0,0),Scalar(255,255,255),threshImg);
morphOps(threshImg); // threshold = mat after erosion/dilation
colour = "White";
trackFilteredObject(noteNum,colour,threshImg,HSV,cam);

inRange(HSV,Scalar(0,0,0),Scalar(255,255,255),threshImg);
morphOps(threshImg); // threshold = mat after erosion/dilation
colour = "Green";
trackFilteredObject(noteNum,colour,threshImg,HSV,cam);


}
imshow("window", cam);

}

return 0;
}


It works by taking the image and applying an HSV filter. It then divides the image's columns and rows by 16 to get the rect dimensions. After this it applies the rect as a region of interest at every possible position using a double "for" loop, and pushes back each segment into a vector of matrices. In another for loop, each matrix in the vector is unpacked and analysed for colour by applying an HSV filter and thresholding.


Finally, the white pixels are counted, and if the segment is more than 75% white it is saved in the "Brick" class as the colour filtered for. If it doesn't reach 75% white, the next colour filter is tried.


I am able to compile the code and I believe it runs as it should; HOWEVER, it is very, very slow. The camera feed seems to update only every 1.5 seconds or so.


Does anyone have any advice on how I could improve its speed, or can anyone suggest a more efficient way of doing this?


My class is as follows:



//Brick.h
#include <string>

using namespace std;


// Represents one grid square that passed a colour filter: it stores the
// colour label it matched and the grid-square index ("note number") it
// came from. Definitions live in Brick.cpp.
class Brick{

public:
Brick(void);
~Brick(void);


// Colour label, e.g. "Red" or "yellow", as last passed to setColour().
string getColour();
void setColour(string whatColour);

// Grid-square index (0-255 for the 16x16 grid), as set by setnoteNum().
int getnoteNum();
void setnoteNum(int whatnoteNum);


private:

// NOTE(review): neither member is initialised by the constructor, so the
// getters return indeterminate values until the setters have been called.
int noteNum;
string colour;



};


///



Brick.cpp
#include <stdio.h>
#include <Brick.h>


// Default constructor: members are populated later via the setters.
Brick::Brick(void){
}


// Destructor: nothing to release — Brick owns no resources of its own.
Brick::~Brick(void){
}



// get/set Colour
////////////////////////////////

// Return the colour label stored by setColour().
string Brick::getColour(){
    // Inside a member function the member is in scope directly; the
    // explicit Brick:: qualification of the original was redundant.
    return colour;
}



// Store the colour label for this brick.
void Brick::setColour(string whatColour){
    colour = whatColour;
}





// get/set Note Number
////////////////////////////////

int Brick::getnoteNum(){

return Brick::noteNum;


}



void Brick::setnoteNum(int whatnoteNum){


Brick::noteNum = whatnoteNum;

}


I will be so grateful to anyone who replies!


Thank you.


(P.S. please ignore scalar values and noteNum for now)




Aucun commentaire:

Enregistrer un commentaire