// capclassify.cpp
}
//----------------------------------------------------------
//----------------------------------------------------------
// Convert an RGB8 camera frame to the packed YUYV layout used downstream.
// Two adjacent RGB pixels (6 source bytes) are converted to YUV and merged
// into one YUYV element: the chroma (u/v) of the pixel pair is averaged,
// the two luma samples are kept individually.
// dst : destination buffer of YUYV elements (one per pixel pair)
// src : raw RGB8 frame as delivered by the camera driver
void convCameraRGB_2_YUYV(image_pixel *dst, unsigned char *src) {
    unsigned int BytesPerRow = (unsigned int)(frameWidth*3);
    yuv pairYUV[2];                       /* YUV of the two pixels of a pair */
    unsigned int k = 0;                   /* running index into dst */
    for(unsigned int row = 0; row < dims[0] /* height */; row++) {
        /* byte offset of the first requested pixel within this source row */
        unsigned int base = (origY+row)*BytesPerRow + firstPacketStart;
        for(unsigned int col = 0; col < processFrameWidth; col += 6, k++) {
            /* RGB8 -> YUV for both pixels of the pair */
            pairYUV[0] = rgb_to_yuv(src[base+col    ],
                                    src[base+col + 1],
                                    src[base+col + 2]);
            pairYUV[1] = rgb_to_yuv(src[base+col + 3],
                                    src[base+col + 4],
                                    src[base+col + 5]);
            /* YUV -> YUYV: average chroma over the pair, keep both lumas */
            dst[k].u  = (unsigned char)((pairYUV[0].u + pairYUV[1].u) >> 1); // u
            dst[k].y1 = pairYUV[0].y;                                        // y1
            dst[k].v  = (unsigned char)((pairYUV[0].v + pairYUV[1].v) >> 1); // v
            dst[k].y2 = pairYUV[1].y;                                        // y2
        }
    }
    //mexPrintf("yuyv buffer contains %d elements\n", k);
}
//----------------------------------------------------------
//----------------------------------------------------------
// CMU driver: The YUV4,4,4 format is UYV
// -> 'convert' YUV444 (u1 y1 v1 u2 y2 v2) image to YUYV (u y1 v y2) equivalent (copy)
// Adjacent u-components and adjacent v-components are each averaged into
// the single chroma sample of the destination YUYV element.
// dst : destination buffer of YUYV elements (one per pixel pair)
// src : raw YUV444 (UYV) frame as delivered by the camera driver
void convYUV444_2_YUYV(image_pixel *dst, unsigned char *src) {
unsigned int row, col, k;
unsigned int u, v;
unsigned int BytesPerRow = (unsigned int)(frameWidth*3);
k = 0;
for(row=0; row<dims[0] /* height */; row++) {
for(col = 0; col<processFrameWidth; col+=6, k++) {
/* average adjacent 'u-components' */
u = src[(origY+row)*BytesPerRow + firstPacketStart+col ];
dst[k].u = (unsigned char)((u + src[(origY+row)*BytesPerRow + firstPacketStart+col + 3]) >> 1); // u
dst[k].y1 = src[(origY+row)*BytesPerRow + firstPacketStart+col + 1]; // y1
/* average adjacent 'v-components' */
v = src[(origY+row)*BytesPerRow + firstPacketStart+col + 2];
/* BUGFIX: average v with the neighbouring v (was 'u + ...', which mixed
 * the u-component into the v channel and left 'v' unused) */
dst[k].v = (unsigned char)((v + src[(origY+row)*BytesPerRow + firstPacketStart+col + 5]) >> 1); // v
dst[k].y2 = src[(origY+row)*BytesPerRow + firstPacketStart+col + 4]; // y2
}
}
//mexPrintf("yuyv buffer contains %d elements\n", k);
}
//----------------------------------------------------------
//----------------------------------------------------------
// CMU driver: The YUV4,2,2 format is UYUV for two adjacent pixels
// -> 'convert' YUV422 (u y1 v y2) image to YUYV (u y1 v y2) equivalent (copy)
// The source layout already matches the destination layout, so this is a
// plain field-by-field copy of each 4-byte packet.
// dst : destination buffer of YUYV elements (one per pixel pair)
// src : raw YUV422 frame as delivered by the camera driver
void convYUV422_2_YUYV(image_pixel *dst, unsigned char *src) {
    unsigned int BytesPerRow = (unsigned int)(frameWidth*2);
    unsigned int k = 0;                   /* running index into dst */
    for(unsigned int row = 0; row < dims[0] /* height */; row++) {
        /* byte offset of the first requested packet within this source row */
        unsigned int base = (origY+row)*BytesPerRow + firstPacketStart;
        for(unsigned int col = 0; col < processFrameWidth; col += 4, k++) {
            dst[k].u  = src[base+col    ];   // u
            dst[k].y1 = src[base+col + 1];   // y1
            dst[k].v  = src[base+col + 2];   // v
            dst[k].y2 = src[base+col + 3];   // y2
        }
    }
    //mexPrintf("yuyv buffer contains %d elements\n", k);
}
//----------------------------------------------------------
//----------------------------------------------------------
// CMU driver: The YUV4,1,1 format is UYYVYY for four adjacent pixels
// -> convert YUV411 (u y1 y2 v y3 y4) image to YUYV (u y1 v y2, u y3 v y4) equivalent
// note: 6 input values represent 4 pixels -> bytes per row: frameWidth/4*6 = frameWidth*3/2
//
// Each 6-byte source packet (u y1 y2 v y3 y4) produces TWO destination
// elements that share the packet's single u/v sample:
//   dst[k]   = (u, y1, v, y2)
//   dst[k+1] = (u, y3, v, y4)
//
// NOTE: 'firstPacketStart', 'lastPacketStart' and 'processFrameWidth' are
// precomputed (elsewhere) so that the column loop always starts and ends on
// a 6-byte packet boundary — e.g. requesting 3 pixels starting at pixel #4
// needs the two packets covering y4..y10, giving firstPacketStart = 0,
// lastPacketStart = 6, processFrameWidth = (1 - 0 + 1) * 6 = 12.
void convYUV411_2_YUYV(image_pixel *dst, unsigned char *src) {
    unsigned int BytesPerRow = (unsigned int)(frameWidth*3/2);
    unsigned int k = 0;                   /* running index into dst */
    for(unsigned int row = 0; row < dims[0] /* height */; row++) {
        /* byte offset of the first requested packet within this source row */
        unsigned int base = (origY+row)*BytesPerRow + firstPacketStart;
        /* two destination elements per 6-byte packet */
        for(unsigned int col = 0; col < processFrameWidth; col += 6, k += 2) {
            unsigned char uVal = src[base+col    ];   // u (shared)
            unsigned char vVal = src[base+col + 3];   // v (shared)
            dst[k].u    = uVal;
            dst[k].y1   = src[base+col + 1];          // y1
            dst[k].y2   = src[base+col + 2];          // y2
            dst[k].v    = vVal;
            dst[k+1].u  = uVal;
            dst[k+1].y1 = src[base+col + 4];          // y3
            dst[k+1].y2 = src[base+col + 5];          // y4
            dst[k+1].v  = vVal;
        }
    }
    //mexPrintf("yuyv buffer contains %d elements\n", k);
}
//----------------------------------------------------------
//----------------------------------------------------------
// dispatcher for all conv<TYPE>_2_YUYV methods (above):
// fetches the raw frame buffer from 'theCamera' and converts it to the
// packed YUYV layout according to the active video 'mode'
void convCurrentFrame2YUYV(myCameraModes mode, image_pixel *yuyvDest) {
unsigned char *cameraBuf; /* pointer raw data (camera) */
unsigned long cameraBufLength; /* number of bytes stored in the camera buffer (not used) */
/* get pointer to raw data (irrespective of the chosen format) */
cameraBuf = theCamera.GetRawData(&cameraBufLength);
switch(mode) {
case CAMERA_YUV444_160x120:
// convert YUV444 (UYV) to YUYV
convYUV444_2_YUYV(yuyvDest, cameraBuf);
break;
case CAMERA_YUV422_320x240:
case CAMERA_YUV422_640x480:
// convert YUV422 (UYUV) to YUYV (a mere copy - probably done inefficiently)
convYUV422_2_YUYV(yuyvDest, cameraBuf);
break;
case CAMERA_YUV411_640x480:
// convert YUV411 (UYYVYY) to YUYV
convYUV411_2_YUYV(yuyvDest, cameraBuf);
break;
case CAMERA_RGB8_640x480:
// convert RGB8 (RGB) to YUYV
convCameraRGB_2_YUYV(yuyvDest, cameraBuf);
break;
case CAMERA_Y8_640x480:
// convert B&W (Y8) to YUYV
convY8_2_YUYV(yuyvDest, cameraBuf);
break;
case CAMERA_Y16_640x480:
// convert B&W (Y16) to YUYV
// not tested (fire-i does not support Y16, fw-01-08)
convY16_2_YUYV(yuyvDest, cameraBuf);
break;
default:
// do nothing (e.g. mode == -1)
;
} /* switch */
}
//----------------------------------------------------------
/* mySetFeature =========================================================== */
/* Helper to set a named camera feature (e.g. 'Brightness') to a specific
 * value.
 * feature : feature name as registered with the dc1394/CMU driver
 * vLo/vHi : manual low/high values; a negative value in EITHER requests
 *           automatic mode instead (if the feature supports it)
 * Unknown feature names are reported to the MATLAB console. */
static void mySetFeature(const char *feature, int vLo, int vHi = 0) {
    int fID = dc1394GetFeatureId(feature);
    if((pControl = theCamera.GetCameraControl(CAMERA_FEATURE(fID))) != NULL) {
        /* found the named feature -> check if manual setting has been requested */
        if(vLo < 0 || vHi < 0) {
            /* request for automatic mode (if available) */
            if(pControl->HasAutoMode()) {
                /* (re-)activate automatic mode */
                pControl->SetAutoMode(TRUE);
            }
        } else {
            /* manual mode -> set low and high value */
            pControl->SetValue(vLo, vHi);
            /* if the feature is automatically controllable, force automatic
             * mode off so the manual values are not overridden */
            if(pControl->HasAutoMode()) {
                pControl->SetAutoMode(FALSE);
            }
        }
    } else {
        /* feature name not registered */
        mexPrintf("Unknown camera feature '%s'\n", feature);  /* BUGFIX: typo 'cameara' */
    }
} /* mySetFeature ========================================================== */
/* dumpCameraStats ========================================================= */
/* dump some info about the selected camera to the workspace window */
static void dumpCameraStats(void) {
#define maxBufLen 100
char myBuf[maxBufLen];
int myBufLen = maxBufLen;
int myNode, numCameras;
#ifdef DEBUGONLY /* ============================================= */
/* display list of all features as registered by the driver... */
for(int i=0; i<FEATURE_NUM_FEATURES; i++) {
/* current feature */
CAMERA_FEATURE fID = (CAMERA_FEATURE)(i);
if((pControl = theCamera.GetCameraControl(fID)) != NULL) {
/* call 1394camapi function 'dc1394GetFeatureName' to determine feature name */
mexPrintf("Camera feature %d is registered as '%s'\n", fID, dc1394GetFeatureName(fID));
} else {
/* feature name not registered */
mexPrintf("Camera feature %d has no registered name\n", fID);
}
}
#endif /* DEBUGONLY ============================================ */
theCamera.RefreshCameraList();
numCameras = theCamera.GetNumberCameras();
mexPrintf("\n%-30s %d\n", "Number of cameras found:", numCameras);
myNode = theCamera.GetNode();
theCamera.GetNodeDescription(myNode, myBuf, myBufLen);
mexPrintf("Device description (node %d): %s\n", myNode, myBuf);
theCamera.GetCameraName(myBuf, myBufLen);
mexPrintf("%-30s %s\n", "Camera name:", myBuf);
theCamera.GetCameraVendor(myBuf, myBufLen);
mexPrintf("%-30s %s\n", "Vendor name:", myBuf);
mexPrintf("%-30s %ld\n", "Version number:", theCamera.GetVersion());
mexPrintf("%-30s %d\n", "Max speed:", theCamera.GetMaxSpeed());
mexPrintf("%-30s %d\n", "Link status", theCamera.CheckLink());
mexPrintf("%-30s %d\n", "Has power control:", theCamera.HasPowerControl());
mexPrintf("%-30s %d\n", "Status power control:", theCamera.StatusPowerControl());
//int SetPowerControl(BOOL on);
mexPrintf("%-30s %d\n", "Has 1394b:", theCamera.Has1394b());
mexPrintf("%-30s %d\n", "Status 1394b:", theCamera.Status1394b());
//int Set1394b(BOOL on);
// read out camera eeprom
mexPrintf("%-30s %d\n", "Number of channels (camera):", theCamera.MemGetNumChannels());
mexPrintf("%-30s %d\n", "Current channel (camera):", theCamera.MemGetCurrentChannel());
// read out registry
//mexPrintf("%-30s %d\n", "Default brightness (registry):", theCamera.RegLoadSettings("HKEY_LOCAL_MACHINE/SOFTWARE/CMU/1394Camera/6143140808066302/ControlPanes/DefaultView/Brightness"));
// check all video modes supported by the CMU driver for availability on the currently connected camera
//
// NOTE: 'format' always set to '0' (= max resolution: 640 x 480 .... unibrain fire-i)
//
// fw-01-08
//
for(int ii=0; ii<numMYCAMERAMODES; ii++) {
VIDEO_MODE_DESCRIPTOR myModeDesc;
/* get video mode description, format: 0L, */
dc1394GetModeDescriptor(0L, (ULONG)ii, &myModeDesc);
dc1394GetModeString(0L, (ULONG)ii, myBuf, myBufLen);
if(theCamera.HasVideoMode(0L, ii)) {
mexPrintf("%-30s %s [%d bits per pixel]\n", "Camera supports video mode:", myBuf, (int)dc1394GetBitsPerPixel(myModeDesc.colorcode));
} else {
mexPrintf("%-30s %s\n", "Unsupported video mode:", myBuf);
}
}
// display selected features
{
int fID;
unsigned short aa, bb;
/* display current settings of the above features... */
for(int i=0; i<numMYFEATURES; i++) {
fID = dc1394GetFeatureId(myFeatures[i].name);
mexPrintf("\nFeature '%s' (ID %d)\n", myFeatures[i].name, fID);
// NOTE(review): the remainder of dumpCameraStats() (and any following code)
// was lost to a web-scrape artifact (keyboard-shortcut help text from the
// hosting page); the feature-printout loop above is truncated here.