// vision.cc
#include "../Common/VisionObject.h"
#include "../Common/VisionLine.h"
#include "../Common/Common.h"
#include "Vision.h"
#include "ObjectRecognition.h"
#include "EdgeRecognition.h"
#include "BMP.h"
#include <OPENR/OPENRAPI.h>
#include <OPENR/OPENR.h>
#include <OPENR/core_macro.h>
#include <OPENR/OFbkImage.h>
#include <OPENR/RCRegion.h>
#include <iostream>
#include <math.h>
#include "Globals.h"
using namespace std;
Vision::Vision() {
    numVisionObjects_ = 0;
    numVisionLines_ = 0;
    visionObjects_ = new VisionObject[MAX_VISION_OBJECTS]; // MAX_VISION_OBJECTS defined in Common.h
    visionLines_ = new VisionLine[MAX_VISION_LINES];       // MAX_VISION_LINES defined in Common.h
    lut_ = new uchar[64*64*64];      // The colour lookup tables: one byte per 6-bit Y/U/V triple
    lutTight_ = new uchar[64*64*64];
    lutDown_ = new uchar[64*64*64];
}
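// Note on the three tables (inferred from how they are used elsewhere in this file):
// lut_ is the standard colour table, lutTight_ is a more conservative table used to
// fill classifiedTight_ in the double-resolution classifier, and lutDown_ is an
// alternative table selected by the head tilt/pan check in ProcessFrame() (its name
// suggests it is tuned for frames where the camera is looking down at the field).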
void Vision::LoadParameters(char* parameterFile) {
    configuration_.ParseFile(parameterFile); // configuration_ reads key/value settings from the parameter file
    OPrimitiveID cameraID;
    OPENR::OpenPrimitive("PRM:/r1/c1/c2/c3/i1-FbkImageSensor:F1", &cameraID);
    // Set the camera white balance from the parameter file
    char* cwbSetting = configuration_.GetAsString("CameraWhiteBalance");
    OPrimitiveControl_CameraParam wb(ocamparamWB_FL_MODE);
    if (strcmp(cwbSetting, "INDOOR") == 0) {
        wb = ocamparamWB_INDOOR_MODE;
    } else if (strcmp(cwbSetting, "OUTDOOR") == 0) {
        wb = ocamparamWB_OUTDOOR_MODE;
    } else if (strcmp(cwbSetting, "FL") == 0) {
        wb = ocamparamWB_FL_MODE;
    } else {
        cout << "Vision: Camera white balance not found in config file!" << endl << flush;
    }
    OPENR::ControlPrimitive(cameraID, oprmreqCAM_SET_WHITE_BALANCE, &wb, sizeof(wb), 0, 0);
    // Set the camera gain from the parameter file
    char* cgSetting = configuration_.GetAsString("CameraGain");
    OPrimitiveControl_CameraParam gain(ocamparamGAIN_LOW);
    if (strcmp(cgSetting, "LOW") == 0) {
        gain = ocamparamGAIN_LOW;
    } else if (strcmp(cgSetting, "MID") == 0) {
        gain = ocamparamGAIN_MID;
    } else if (strcmp(cgSetting, "HIGH") == 0) {
        gain = ocamparamGAIN_HIGH;
    } else {
        cout << "Vision: Camera gain not found in config file!" << endl << flush;
    }
    OPENR::ControlPrimitive(cameraID, oprmreqCAM_SET_GAIN, &gain, sizeof(gain), 0, 0);
    // Set the camera shutter speed from the parameter file
    char* cssSetting = configuration_.GetAsString("CameraShutterSpeed");
    OPrimitiveControl_CameraParam shutter(ocamparamSHUTTER_FAST);
    if (strcmp(cssSetting, "SLOW") == 0) {
        shutter = ocamparamSHUTTER_SLOW;
    } else if (strcmp(cssSetting, "MID") == 0) {
        shutter = ocamparamSHUTTER_MID;
    } else if (strcmp(cssSetting, "FAST") == 0) {
        shutter = ocamparamSHUTTER_FAST;
    } else {
        cout << "Vision: Camera shutter speed not found in config file!" << endl << flush;
    }
    OPENR::ControlPrimitive(cameraID, oprmreqCAM_SET_SHUTTER_SPEED, &shutter, sizeof(shutter), 0, 0);
    // Read the per-frame processing options from the parameter file
    visionData.doEdgeDetection = configuration_.GetAsBool("DoEdgeDetection");
    visionData.findSideLineCorner = configuration_.GetAsBool("FindSideLineCorner");
    visionData.findCentreLineCorner = configuration_.GetAsBool("FindCentreLineCorner");
    visionData.findGoalCorner = configuration_.GetAsBool("FindGoalCorner");
    visionData.findCentreCircle = configuration_.GetAsBool("FindCentreCircle");
    visionData.findSideLineAngle = configuration_.GetAsBool("FindSideLineAngle");
    visionData.useSanityFactors = configuration_.GetAsBool("UseSanityFactors");
    visionData.useWhiteBelowBeaconCheck = configuration_.GetAsBool("UseWhiteBelowBeaconCheck");
    visionData.maxHeight = configuration_.GetAsDouble("BarrierHeight");
    isSavingImages = configuration_.GetAsBool("SaveImages");
    isSavingBMPImages = configuration_.GetAsBool("SaveBMPImages");
    imageSaveCounter = 0;
    // Load the three colour lookup tables
    LoadLut(configuration_.GetAsString("LookupTable"), lut_);
    LoadLut(configuration_.GetAsString("LookupTableTight"), lutTight_);
    LoadLut(configuration_.GetAsString("LookupTableDown"), lutDown_);
    obstructionModel_.LoadParameters();
}
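// Illustrative sketch of the vision-related entries LoadParameters() expects in the
// parameter file (the exact syntax and paths depend on the Parse/configuration_ class
// and the memory-stick layout; the values and file names below are examples only):
//   CameraWhiteBalance   FL
//   CameraGain           MID
//   CameraShutterSpeed   FAST
//   DoEdgeDetection      true
//   BarrierHeight        20.0
//   SaveImages           false
//   SaveBMPImages        false
//   LookupTable          /ms/normal.lut
//   LookupTableTight     /ms/tight.lut
//   LookupTableDown      /ms/down.lut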
// Loads a colour lookup table from the given file into lutable
void Vision::LoadLut(char* fileName, uchar* lutable) {
    FILE* lutFile = fopen(fileName, "rb"); // Open the binary LUT file for reading
    long lSize;
    if (lutFile == NULL) {
        cout << "Vision: Unable to load lookup table (" << fileName << ")." << endl << flush;
        return;
    }
    // Obtain the file size
    fseek(lutFile, 0, SEEK_END);
    lSize = ftell(lutFile);
    rewind(lutFile);
    // Copy the whole file into the lookup table in a single read
    fread(lutable, 1, lSize, lutFile);
    fclose(lutFile);
}
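// Note on the lookup-table layout (inferred from the indexing used below): each table
// holds 64*64*64 = 262144 one-byte colour classes, indexed by the top 6 bits of Y, U
// and V packed as (Y>>2)<<12 | (U>>2)<<6 | (V>>2). For example, a pixel with
// Y=200, U=120, V=96 would map to index (50<<12)+(30<<6)+24 = 206744.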
// Earlier version of ColourClassifier, kept commented out for reference: runs through
// the YUV image, classifying each (Y,U,V) pixel to a colour class using the lookup table.
/*
void Vision::ColourClassifier() {
    uchar* p = visionData.classified_;
    uchar* y = visionData.unclassified_;
    uchar* u = visionData.unclassified_+FULL_IMAGE_WIDTH;
    uchar* v = visionData.unclassified_+FULL_IMAGE_WIDTH*2;
    int h = 0;
    int w = 0;
    while (h < FULL_IMAGE_HEIGHT) {
        w = 0;
        while (w < FULL_IMAGE_WIDTH) {
            *(p++)=*(lut_+(*y++ >> 2 << 12)+(*u++ >> 2 << 6)+(*v++ >> 2));
            w++;
        }
        y+=FULL_IMAGE_WIDTH*5;
        u+=FULL_IMAGE_WIDTH*5;
        v+=FULL_IMAGE_WIDTH*5;
        h++;
    }
}*/
// Clamps an integer to the valid byte range [0, 255]
byte ClipRange(int val) {
    if (val < 0) { return 0; }
    else if (val > 255) { return 255; }
    else { return (byte)val; }
}
// Runs through the YUV image, classifying each (Y,U,V) pixel into a colour class using the lookup table.
// This method consumes a large amount of processing time; if we run short of processor, it may be worth converting to assembly.
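// Note on the double-resolution path (a sketch of what the arithmetic below appears to
// do): the raw buffer carries a low-resolution Y band (yLL) plus three difference bands
// (yLH, yHL, yHH) stored with a +128 bias. For each low-resolution pixel the loop
// recombines these into four full-resolution luminance samples (y00, y10, y01, y11),
// clips them to [0,255] with ClipRange(), and classifies each against the lookup tables
// while reusing the single shared U and V sample.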
void Vision::ColourClassifier(bool doubleRes, uchar* lutable) {
    if (!doubleRes) {
        uchar* p = visionData.classified_;
        // uchar* p2 = visionData.classifiedTight_;
        uchar* y = visionData.unclassified_;
        uchar* u = visionData.unclassified_ + FULL_IMAGE_WIDTH;
        uchar* v = visionData.unclassified_ + FULL_IMAGE_WIDTH*2;
        int h = 0;
        int w = 0;
        while (h < FULL_IMAGE_HEIGHT) {
            w = 0;
            while (w < FULL_IMAGE_WIDTH) {
                // Index the LUT with the top 6 bits of each of Y, U and V
                int offset = (*y++ >> 2 << 12) + (*u++ >> 2 << 6) + (*v++ >> 2);
                *(p++) = *(lutable + offset);
                //*(p2++) = *(lutTight_ + offset);
                w++;
            }
            // Advance to the same band in the next raw image row
            y += FULL_IMAGE_WIDTH*(RAW_MULTIPLIER-1);
            u += FULL_IMAGE_WIDTH*(RAW_MULTIPLIER-1);
            v += FULL_IMAGE_WIDTH*(RAW_MULTIPLIER-1);
            h++;
        }
    } else {
        // Double-resolution path: produce two classified output rows per raw image row,
        // writing both the normal (classified_) and tight (classifiedTight_) images
        uchar* pCurrentLine = visionData.classified_;
        uchar* pNextLine = visionData.classified_ + DOUBLE_IMAGE_WIDTH;
        uchar* pCurrentLineTight = visionData.classifiedTight_;
        uchar* pNextLineTight = visionData.classifiedTight_ + DOUBLE_IMAGE_WIDTH;
        uchar* yLL = visionData.unclassified_;
        uchar* u = visionData.unclassified_ + FULL_IMAGE_WIDTH;
        uchar* v = visionData.unclassified_ + FULL_IMAGE_WIDTH*2;
        uchar* yLH = visionData.unclassified_ + FULL_IMAGE_WIDTH*3;
        uchar* yHL = visionData.unclassified_ + FULL_IMAGE_WIDTH*4;
        uchar* yHH = visionData.unclassified_ + FULL_IMAGE_WIDTH*5;
        int h = 0;
        int w = 0;
        while (h < FULL_IMAGE_HEIGHT) {
            w = 0;
            while (w < FULL_IMAGE_WIDTH) {
                int yLLint = *yLL++;
                int yHLint = (*yHL++) - 128;
                int yLHint = (*yLH++) - 128;
                int yHHint = (*yHH++) - 128;
                int y11 = (yLLint + yLHint + yHLint + yHHint);
                int y00 = ClipRange((((yLLint + yHHint) << 1) - y11)) >> 2 << 12;
                int y10 = ClipRange((((yLLint + yHLint) << 1) - y11)) >> 2 << 12;
                int y01 = ClipRange((((yLLint + yLHint) << 1) - y11)) >> 2 << 12;
                y11 = ClipRange(y11) >> 2 << 12;
                int all = (*u++ >> 2 << 6) + (*v++ >> 2); // U and V are shared by all four output pixels
                *(pCurrentLine++) = *(lutable + (y00 + all));
                *(pCurrentLine++) = *(lutable + (y10 + all));
                *(pNextLine++) = *(lutable + (y01 + all));
                *(pNextLine++) = *(lutable + (y11 + all));
                *(pCurrentLineTight++) = *(lutTight_ + (y00 + all));
                *(pCurrentLineTight++) = *(lutTight_ + (y10 + all));
                *(pNextLineTight++) = *(lutTight_ + (y01 + all));
                *(pNextLineTight++) = *(lutTight_ + (y11 + all));
                w++;
            }
            // Advance the input pointers past the remaining bands of this raw image row
            yLL += FULL_IMAGE_WIDTH*5;
            u += FULL_IMAGE_WIDTH*5;
            v += FULL_IMAGE_WIDTH*5;
            yLH += FULL_IMAGE_WIDTH*5;
            yHL += FULL_IMAGE_WIDTH*5;
            yHH += FULL_IMAGE_WIDTH*5;
            h++;
            // Skip the output row that was already filled in as the "next" line
            pCurrentLine += DOUBLE_IMAGE_WIDTH;
            pNextLine += DOUBLE_IMAGE_WIDTH;
            pCurrentLineTight += DOUBLE_IMAGE_WIDTH;
            pNextLineTight += DOUBLE_IMAGE_WIDTH;
        }
    }
}
// Periodically saves a raw dump and/or a BMP of the current camera frame to the memory stick
void Vision::SaveImages(OFbkImageVectorData* fbkImageVectorData) {
    byte* unclassified = fbkImageVectorData->GetData(0);
    bool saved = false;
    if ((frame_ % 25) == 0) {
        if (isSavingImages) {
            char* fileName = (char*)malloc(128);
            sprintf(fileName, "/ms/save%d.rvs", imageSaveCounter);
            FILE* imageFile = fopen(fileName, "wb"); // Open the image file for writing
            if (imageFile == NULL) {
                cout << "Vision: Unable to save image (" << fileName << ")." << endl << flush;
            } else {
                fwrite(unclassified, 1, FULL_IMAGE_WIDTH*FULL_IMAGE_HEIGHT*6, imageFile);
                fclose(imageFile);
                saved = true;
            }
            free(fileName);
        }
        if (isSavingBMPImages) {
            char* fileName = (char*)malloc(128);
            sprintf(fileName, "/ms/save%d.bmp", imageSaveCounter);
            BMP bmp;
            bmp.SaveYCrCb2RGB(fileName, fbkImageVectorData, ofbkimageLAYER_H);
            free(fileName);
        }
    }
    if (saved) imageSaveCounter++;
}
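// Note: the .rvs dump written above appears to be the raw camera buffer exactly as it is
// handed to ProcessFrame() -- FULL_IMAGE_WIDTH*FULL_IMAGE_HEIGHT*6 bytes, i.e. six
// interleaved bands per image row (Y, U, V plus the extra Y layers consumed by the
// double-resolution classifier).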
// Processes a single frame retrieved from the camera
bool Vision::ProcessFrame(byte* unclassified) {
    visionData.unclassified_ = unclassified;
#ifdef ERS_7
    visionData.UpdateLookupTrigonometry(sensorValues_[S_INFRARED_FAR], MICRO_TO_RAD(sensorValues_[S_HEAD_TILT_BIG]), MICRO_TO_RAD(sensorValues_[S_HEAD_PAN]), MICRO_TO_RAD(sensorValues_[S_HEAD_TILT_SMALL]), averagedAccData[0], averagedAccData[1], averagedAccData[2]);
#endif
#ifdef ERS_210
    visionData.UpdateLookupTrigonometry(sensorValues_[S_INFRARED_FAR], MICRO_TO_RAD(sensorValues_[S_HEAD_TILT]), MICRO_TO_RAD(sensorValues_[S_HEAD_PAN]), MICRO_TO_RAD(sensorValues_[S_HEAD_ROLL]), averagedAccData[0], averagedAccData[1], averagedAccData[2]);
#endif
    visionData.GenerateHorizon(); // Generates the horizon, which is used by both the ObjectRecognition and EdgeRecognition classes
    // Cycle through the image and assign a known colour (if possible) to each pixel,
    // choosing the lookup table based on the head tilt and pan
    if (visionData.headTiltCombined_ /*- visionData.brodyTilt_*/ > DEG_TO_RAD(DOWNCHECK) || ABS(visionData.headPan_) > DEG_TO_RAD(20)) {
        ColourClassifier(false, lut_);
    } else {
        ColourClassifier(false, lutDown_);
    }
    currentImageWidth_ = FULL_IMAGE_WIDTH;
    currentImageHeight_ = FULL_IMAGE_HEIGHT;
    numVisionObjects_ = 0;
    numVisionLines_ = 0;
    objectRecognition_.FindVisionObjects(&visionData);
    edgeRecognition_.FindVanishingPoint(&visionData); // Edge recognition MUST run after object recognition as it depends on the objects found
    //obstructionModel_.ProcessBlobs(&visionData);
    robotRecognition_.FindRobot(&visionData); // Robot recognition MUST run after object recognition as it depends on the objects found
    return true;
}
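// Expected usage (inferred, not defined in this file): the owning OPEN-R module calls
// LoadParameters() once at start-up and then ProcessFrame() on every new camera frame;
// SaveImages() can additionally be called each frame to periodically archive images to
// the memory stick.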