// AIGesture.cpp: implementation of the CAIGesture class.
// Author: 苏树庆
// Category: gesture recognition
//////////////////////////////////////////////////////////////////////
#include "stdafx.h"
#include "AIGesture.h"
#include "GestrueInfo.h"
//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////
#define max(a,b) (((a) > (b)) ? (a) : (b))
CAIGesture::CAIGesture()
{
}
CAIGesture::~CAIGesture()
{
}
void CAIGesture::ColorRegulate(IplImage* src,IplImage* dst)
{
IplImage* R=cvCreateImage(cvGetSize(src),8,1);
IplImage* G=cvCreateImage(cvGetSize(src),8,1);
IplImage* B=cvCreateImage(cvGetSize(src),8,1);
cvCvtPixToPlane(src,R,G,B,0);//split into single-channel planes (note: for a BGR source, plane 0 is actually blue; the per-plane gains below are unaffected)
CvScalar aveR=cvAvg(R,NULL);
CvScalar aveG=cvAvg(G,NULL);
CvScalar aveB=cvAvg(B,NULL);
float aGray=(float)(aveR.val[0]+aveG.val[0]+aveB.val[0])/3;//gray-world assumption: the mean of the three channel averages is the target gray level
float aR=(float)(aGray/aveR.val[0]);
float aG=(float)(aGray/aveG.val[0]);
float aB=(float)(aGray/aveB.val[0]);
int width=src->width;//see the IplImage struct for these fields
int height=src->height;
int stepR=R->widthStep/sizeof(uchar);
int stepG=G->widthStep/sizeof(uchar);
int stepB=B->widthStep/sizeof(uchar);
uchar* dataR=(uchar*)R->imageData;
uchar* dataG=(uchar*)G->imageData;
uchar* dataB=(uchar*)B->imageData;
for(int i=0;i<height;i++)
{
for(int j=0;j<width;j++)
{
dataR[i*stepR+j]=(unsigned char)(aR*dataR[i*stepR+j]);
dataG[i*stepG+j]=(unsigned char)(aG*dataG[i*stepG+j]);
dataB[i*stepB+j]=(unsigned char)(aB*dataB[i*stepB+j]);
}
}
cvMerge(R,G,B,0,dst);
cvReleaseImage(&R);
cvReleaseImage(&G);
cvReleaseImage(&B);
}
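// --------------------------------------------------------------------------
// Usage sketch (not part of the original file): how the gray-world balance
// above could be called on its own. The file names and DemoColorRegulate are
// hypothetical, and ColorRegulate is assumed to be public in AIGesture.h.
static void DemoColorRegulate()
{
	IplImage* src=cvLoadImage("sample.jpg",1);//hypothetical input image
	if(src==NULL)
		return;
	IplImage* dst=cvCreateImage(cvGetSize(src),8,3);
	CAIGesture g;
	g.ColorRegulate(src,dst);//scale each channel toward the mean gray level
	cvSaveImage("balanced.jpg",dst);//hypothetical output path
	cvReleaseImage(&src);
	cvReleaseImage(&dst);
}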
void CAIGesture::EqualImage(IplImage* src,IplImage* dst)
{
IplImage* gray=cvCreateImage(cvGetSize(src),8,1);
cvCvtColor(src,gray,CV_BGR2GRAY);
cvSaveImage("gray.jpg",gray);
cvEqualizeHist(gray,gray);
cvSaveImage("gray2.jpg",gray);
cvCvtColor(gray,dst,CV_GRAY2BGR);//GRAY2BGR only replicates the equalized gray channel into all three channels; the original colours are not recovered
cvReleaseImage(&gray);
}
void CAIGesture::SkinDetect(IplImage* src,IplImage* dst)
{
IplImage* hsv = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);//intermediate HSV image used for channel splitting
IplImage* tmpH1 = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1);//single-channel temporaries for the skin-colour thresholds
IplImage* tmpS1 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage* tmpH2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage* tmpS3 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage* tmpH3 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage* tmpS2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage* H = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage* S = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage* V = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage* src_tmp1=cvCreateImage(cvGetSize(src),8,3);
cvSmooth(src,src_tmp1,CV_GAUSSIAN,3,3); //Gaussian blur to suppress noise
cvCvtColor(src_tmp1, hsv, CV_BGR2HSV );//convert BGR to HSV
cvCvtPixToPlane(hsv,H,S,V,0);//split into the H, S and V planes
/********************* skin-colour thresholding **************/
// Skin Hue with higher Saturation
// Hue 0 to 40 degree and Sat 75 to 200
cvInRangeS(H,cvScalar(0.0,0.0,0,0),cvScalar(20.0,0.0,0,0),tmpH1);
cvInRangeS(S,cvScalar(75.0,0.0,0,0),cvScalar(200.0,0.0,0,0),tmpS1);
cvAnd(tmpH1,tmpS1,tmpH1,0);
// Red Hue with Low Saturation
// Hue 0 to 26 degree and Sat 20 to 90
cvInRangeS(H,cvScalar(0.0,0.0,0,0),cvScalar(13.0,0.0,0,0),tmpH2);
cvInRangeS(S,cvScalar(20.0,0.0,0,0),cvScalar(90.0,0.0,0,0),tmpS2);
cvAnd(tmpH2,tmpS2,tmpH2,0);
// Red Hue to Pink with Low Saturation
// Hue 340 to 360 degree and Sat 15 to 90
cvInRangeS(H,cvScalar(170.0,0.0,0,0),cvScalar(180.0,0.0,0,0),tmpH3);
cvInRangeS(S,cvScalar(15.0,0.0,0,0),cvScalar(90.,0.0,0,0),tmpS3);
cvAnd(tmpH3,tmpS3,tmpH3,0);
// Combine the Hue and Sat detections
cvOr(tmpH3,tmpH2,tmpH2,0);
cvOr(tmpH1,tmpH2,tmpH1,0);
cvCopy(tmpH1,dst);
cvReleaseImage(&hsv);
cvReleaseImage(&tmpH1);
cvReleaseImage(&tmpS1);
cvReleaseImage(&tmpH2);
cvReleaseImage(&tmpS2);
cvReleaseImage(&tmpH3);
cvReleaseImage(&tmpS3);
cvReleaseImage(&H);
cvReleaseImage(&S);
cvReleaseImage(&V);
cvReleaseImage(&src_tmp1);
}
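// --------------------------------------------------------------------------
// Usage sketch (not part of the original file): SkinDetect produces a binary
// mask (255 = skin, 0 = background) that the functions below operate on. The
// file names and DemoSkinDetect are hypothetical.
static void DemoSkinDetect()
{
	IplImage* src=cvLoadImage("hand.jpg",1);//hypothetical input image
	if(src==NULL)
		return;
	IplImage* mask=cvCreateImage(cvGetSize(src),8,1);//single-channel output mask
	CAIGesture g;
	g.SkinDetect(src,mask);
	cvSaveImage("skin_mask.jpg",mask);//hypothetical output path
	cvReleaseImage(&src);
	cvReleaseImage(&mask);
}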
void CAIGesture::FindBigContour(IplImage* src,CvSeq* (&contour),CvMemStorage* storage)//extract the contour with the largest bounding box
{
CvSeq* contour_tmp,*contourPos=NULL;
int contourcount=cvFindContours(src, storage, &contour_tmp, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE );
if(contourcount==0)
return;
CvRect bndRect = cvRect(0,0,0,0);
double contourArea,maxcontArea=0;
for( ; contour_tmp != 0; contour_tmp = contour_tmp->h_next )
{
bndRect = cvBoundingRect( contour_tmp, 0 );
contourArea=bndRect.width*bndRect.height;
if(contourArea>=maxcontArea)//keep the contour whose bounding box covers the largest area
{
maxcontArea=contourArea;
contourPos=contour_tmp;
}
}
contour=contourPos;
}
void CAIGesture::ComputeCenter(CvSeq* (&contour),CvPoint& center,float& radius)//compute the centroid and palm radius
{
CvMoments m;
double M00,X,Y;//,XX,YY;
cvMoments(contour,&m,0);
M00=cvGetSpatialMoment(&m,0,0);
X=cvGetSpatialMoment(&m,1,0)/M00;
Y=cvGetSpatialMoment(&m,0,1)/M00;
center.x=(int)X;
center.y=(int)Y;
/******************* compute the radius from the convex hull ****************/
int hullcount;
CvSeq* hull;
CvPoint pt;
float tmpr1,r=0,R=0;
hull=cvConvexHull2(contour,0,CV_COUNTER_CLOCKWISE,0);
hullcount=hull->total;
for(int i=0;i<hullcount;i++)//iterate over every hull point
{
pt=**CV_GET_SEQ_ELEM(CvPoint*,hull,i);//the hull stores pointers to the original contour points
tmpr1=cvSqrt((float)(center.x-pt.x)*(center.x-pt.x)+(center.y-pt.y)*(center.y-pt.y));//distance from the hull point to the centroid
if(tmpr1>r)//keep the largest distance as the radius
r=tmpr1;
}
radius=r;
}
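// --------------------------------------------------------------------------
// Usage sketch (not part of the original file): chaining SkinDetect,
// FindBigContour and ComputeCenter to draw the estimated palm circle. Note
// that cvFindContours (called inside FindBigContour) modifies the mask, which
// is acceptable here because the mask is discarded afterwards. DemoPalmCircle
// and the file names are hypothetical.
static void DemoPalmCircle()
{
	IplImage* src=cvLoadImage("hand.jpg",1);//hypothetical input image
	if(src==NULL)
		return;
	IplImage* mask=cvCreateImage(cvGetSize(src),8,1);
	CAIGesture g;
	g.SkinDetect(src,mask);
	CvMemStorage* storage=cvCreateMemStorage(0);
	CvSeq* contour=NULL;
	g.FindBigContour(mask,contour,storage);//largest skin blob
	if(contour!=NULL)
	{
		CvPoint center=cvPoint(0,0);
		float radius=0.0f;
		g.ComputeCenter(contour,center,radius);//centroid + farthest hull point
		cvCircle(src,center,(int)radius,CV_RGB(0,255,0),2,8,0);
		cvSaveImage("palm_circle.jpg",src);//hypothetical output path
	}
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&mask);
	cvReleaseImage(&src);
}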
void CAIGesture::GetFeature(IplImage* src,
CvPoint& center,
float radius,
float angle[FeatureNum][10],
float anglecha[FeatureNum][10],
float count[FeatureNum])
{
int width=src->width;//see the IplImage struct for these fields
int height=src->height;
int step=src->widthStep/sizeof(uchar);
uchar* data=(uchar*)src->imageData;
int i,j;
float R=0.0;
int a1,b1,x1,y1,a2,b2,x2,y2;//offsets from the centre and the resulting pixel coordinates
float angle1_tmp[200]={0},angle2_tmp[200]={0},angle1[50]={0},angle2[50]={0};//temporaries for the transition angles
int angle1_tmp_count=0,angle2_tmp_count=0,angle1count=0,angle2count=0,anglecount=0;//counters for the angle arrays
for(i=0;i<FeatureNum;i++)//extract features on FeatureNum (i.e. five) concentric circles
{
R=(i+4)*radius/9;//circle radii range from 4/9 to 8/9 of the palm radius
for(j=0;j<=3600;j++)
{
if(j<=900)
{
a1=(int)(R*sin(j*3.14/1800));//per-quadrant offsets; sketching the circle makes the sign conventions clear
b1=(int)(R*cos(j*3.14/1800));
x1=center.x-b1;
y1=center.y-a1;
a2=(int)(R*sin((j+1)*3.14/1800));
b2=(int)(R*cos((j+1)*3.14/1800));
x2=center.x-b2;
y2=center.y-a2;
}
else if(j>900&&j<=1800)
{
a1=(int)(R*sin((j-900)*3.14/1800));
b1=(int)(R*cos((j-900)*3.14/1800));
x1=center.x+a1;
y1=center.y-b1;
a2=(int)(R*sin((j+1-900)*3.14/1800));
b2=(int)(R*cos((j+1-900)*3.14/1800));
x2=center.x+a2;
y2=center.y-b2;
}
else if(j>1800&&j<2700)
{
a1=(int)(R*sin((j-1800)*3.14/1800));
b1=(int)(R*cos((j-1800)*3.14/1800));
x1=center.x+b1;
y1=center.y+a1;
a2=(int)(R*sin((j+1-1800)*3.14/1800));
b2=(int)(R*cos((j+1-1800)*3.14/1800));
x2=center.x+b2;
y2=center.y+a2;
}
else
{
a1=(int)(R*sin((j-2700)*3.14/1800));
b1=(int)(R*cos((j-2700)*3.14/1800));
x1=center.x-a1;
y1=center.y+b1;
a2=(int)(R*sin((j+1-2700)*3.14/1800));
b2=(int)(R*cos((j+1-2700)*3.14/1800));
x2=center.x-a2;
y2=center.y+b2;
}
if(x1>0&&x1<width&&x2>0&&x2<width&&y1>0&&y1<height&&y2>0&&y2<height)
{
if((int)data[y1*step+x1]==255&&(int)data[y2*step+x2]==0)
{
angle1_tmp[angle1_tmp_count]=(float)(j*0.1);//angle where the scan leaves the skin region (255 -> 0)
angle1_tmp_count++;
}
else if((int)data[y1*step+x1]==0&&(int)data[y2*step+x2]==255)
{
angle2_tmp[angle2_tmp_count]=(float)(j*0.1);//angle where the scan enters the skin region (0 -> 255)
angle2_tmp_count++;
}
}
}
for(j=0;j<angle1_tmp_count;j++)
{
if(j>0&&angle1_tmp[j]-angle1_tmp[j-1]<0.2)//skip transitions less than 0.2 degrees apart; the j>0 guard avoids reading index -1
continue;
angle1[angle1count]=angle1_tmp[j];
angle1count++;
}
for(j=0;j<angle2_tmp_count;j++)
{
if(j>0&&angle2_tmp[j]-angle2_tmp[j-1]<0.2)//skip transitions less than 0.2 degrees apart
continue;
angle2[angle2count]=angle2_tmp[j];
angle2count++;
}
for(j=0;j<max(angle1count,angle2count);j++)
{
if(angle1[0]>angle2[0])
{
if(angle1[j]-angle2[j]<7)//ignore skin arcs narrower than 7 degrees; a finger normally subtends more than that
continue;
angle[i][anglecount]=(float)((angle1[j]-angle2[j])*0.01);//width of the skin arc (e.g. a finger)
anglecha[i][anglecount]=(float)((angle2[j+1]-angle1[j])*0.01);//width of the non-skin arc, e.g. the gap between fingers
anglecount++;
}
else
{
if(angle1[j+1]-angle2[j]<7)
continue;
anglecount++;
angle[i][anglecount]=(float)((angle1[j+1]-angle2[j])*0.01);
anglecha[i][anglecount]=(float)((angle2[j]-angle1[j])*0.01);
}
}
if(angle1[0]<angle2[0])//handle the arc that wraps past 0/360 degrees
{
angle[i][0]=(float)((angle1[0]+360-angle2[angle2count-1])*0.01);
}
else
{
anglecha[i][0]=(float)((angle2[0]+360-angle1[angle1count-1])*0.01);
}
count[i]=(float)anglecount;//number of skin arcs found on this circle
angle1_tmp_count=0;//reset the temporaries for the next circle
angle2_tmp_count=0;
angle1count=0;
angle2count=0;
anglecount=0;
for(j=0;j<200;j++)
{
angle1_tmp[j]=0;
angle2_tmp[j]=0;
}
for(j=0;j<50;j++)
{
angle1[j]=0;
angle2[j]=0;
}
}
}
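// --------------------------------------------------------------------------
// GetFeature scans FeatureNum concentric circles (radii from 4/9 to 8/9 of
// the palm radius) in 0.1-degree steps over the filled contour mask. Every
// 255->0 / 0->255 transition marks the edge of a skin arc, so angle[i][k]
// holds the width (in degrees, scaled by 0.01) of the k-th skin arc on circle
// i (roughly a finger), anglecha[i][k] the width of the following gap, and
// count[i] the number of arcs on that circle. The sketch below (not part of
// the original file) continues from DemoPalmCircle above; DemoGetFeature is
// hypothetical and mask is assumed to be the filled contour image produced
// with cvDrawContours, as in OneGestureTrain below.
static void DemoGetFeature(CAIGesture& g,IplImage* mask,CvPoint center,float radius)
{
	float angle[FeatureNum][10]={0};//skin-arc widths per circle
	float anglecha[FeatureNum][10]={0};//gap widths per circle
	float count[FeatureNum]={0};//number of skin arcs per circle
	g.GetFeature(mask,center,radius,angle,anglecha,count);
	//count[0]..count[FeatureNum-1] can now be compared against the averaged
	//values stored by OneGestureTrain below.
}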
void CAIGesture::OneGestureTrain(CString GesturePath,CvFileStorage *fs,GestureStruct gesture)//train one gesture from every .jpg image in its folder
{
IplImage* TrainImage=0;
IplImage* dst=0;
CvSeq* contour=NULL;
CvMemStorage* storage;
storage = cvCreateMemStorage(0);
CvPoint center=cvPoint(0,0);
float radius=0.0;
float angle[FeatureNum][10]={0},anglecha[FeatureNum][10]={0},anglesum[FeatureNum][10]={0},anglechasum[FeatureNum][10]={0};
float count[FeatureNum]={0},countsum[FeatureNum]={0};
int FileCount=0, i, j;
CFileFind f;
BOOL IsFind=f.FindFile(GesturePath+"*.jpg");
while(IsFind)
{
IsFind=f.FindNextFile();
if(!f.IsDots())
{
TrainImage=cvLoadImage(f.GetFilePath(),1);
if(TrainImage==NULL)
{
AfxMessageBox("Failed to load image");
cvReleaseMemStorage(&storage);
cvReleaseImage(&dst);
cvReleaseImage(&TrainImage);
return;
}
if(dst==NULL&&TrainImage!=NULL)
dst=cvCreateImage(cvGetSize(TrainImage),8,1);
SkinDetect(TrainImage,dst);
FindBigContour(dst,contour,storage);
cvZero(dst);
cvDrawContours( dst, contour, CV_RGB(255,255,255),CV_RGB(255,255,255), -1, -1, 8 );
ComputeCenter(contour,center,radius);
GetFeature(dst,center,radius,angle,anglecha,count);
for(j=0;j<FeatureNum;j++)
{
countsum[j]+=count[j];
for(int k=0;k<10;k++)
{
anglesum[j][k]+=angle[j][k];
anglechasum[j][k]+=anglecha[j][k];
}
}
FileCount++;
cvReleaseImage(&TrainImage);
}
}
if(FileCount==0)//no .jpg files found: avoid dividing by zero below
{
cvReleaseMemStorage(&storage);
return;
}
for(i=0;i<FeatureNum;i++)
{
gesture.count[i]=countsum[i]/FileCount;
for(j=0;j<10;j++)
{
gesture.angle[i][j]=anglesum[i][j]/FileCount;
gesture.anglecha[i][j]=anglechasum[i][j]/FileCount;
}
}
cvStartWriteStruct(fs,gesture.angleName,CV_NODE_SEQ,NULL);//start writing this gesture's feature nodes to the yml file
for(i=0;i<FeatureNum;i++)
cvWriteRawData(fs,&gesture.angle[i][0],10,"f");//write the averaged skin-arc angles
cvEndWriteStruct(fs);
cvStartWriteStruct(fs,gesture.anglechaName,CV_NODE_SEQ,NULL);
for(i=0;i<FeatureNum;i++)
cvWriteRawData(fs,&gesture.anglecha[i][0],10,"f");//write the averaged non-skin (gap) angles
cvEndWriteStruct(fs);
cvStartWriteStruct(fs,gesture.countName,CV_NODE_SEQ,NULL);
cvWriteRawData(fs,&gesture.count[0],FeatureNum,"f");//write the averaged number of skin arcs per circle
cvEndWriteStruct(fs);
cvReleaseMemStorage(&storage);
cvReleaseImage(&dst);
}
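// --------------------------------------------------------------------------
// Read-back sketch (not part of the original file): how the nodes written by
// OneGestureTrain could be loaded again. The node name "FistangleName" is
// hypothetical; Train() below builds the names as <folder name> + "angleName",
// "anglechaName" and "anglecountName".
static void DemoReadFeature()
{
	CvFileStorage* fs=cvOpenFileStorage("手势特征文件.yml",0,CV_STORAGE_READ);
	if(fs==NULL)
		return;
	float angle[FeatureNum][10]={0};
	CvFileNode* node=cvGetFileNodeByName(fs,0,"FistangleName");//hypothetical node name
	if(node!=NULL)
		cvReadRawData(fs,node,&angle[0][0],"f");//read the FeatureNum*10 floats back
	cvReleaseFileStorage(&fs);
}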
void CAIGesture::Train()//train every gesture folder found under the training-sample directory
{
TCHAR *szTemp = new TCHAR[400];
::GetCurrentDirectory( 400 , szTemp );
CString FolderPath=szTemp;
FolderPath+="\\训练样本\\";
delete [] szTemp;
CvFileStorage *GestureFeature=cvOpenFileStorage("手势特征文件.yml",0,CV_STORAGE_WRITE);//open the yml feature file for writing
FILE* fp;
fp=fopen("手势文件.txt","w+");
int FolderCount=0;
CFileFind f;
BOOL IsFind=f.FindFile(FolderPath+"*.");//each sub-folder of the training directory holds one gesture
GestureStruct gesture;
while(IsFind)
{
IsFind=f.FindNextFile();
if(!f.IsDots())
{
fprintf(fp,"%s\n",f.GetFileName());
gesture.angleName=f.GetFileName()+"angleName";
gesture.anglechaName=f.GetFileName()+"anglechaName";
gesture.countName=f.GetFileName()+"anglecountName";
OneGestureTrain(f.GetFilePath()+"\\",GestureFeature,gesture);
FolderCount++;
}
}
fprintf(fp,"%s%d","手势个数是:",FolderCount);
fclose(fp);
cvReleaseFileStorage(&GestureFeature);
}
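// --------------------------------------------------------------------------
// Usage sketch (not part of the original file): the expected calling order of
// Train() and Recognise(). DemoTrainAndRecognise and the test image path are
// hypothetical; result presumably receives the name of the matched gesture.
static void DemoTrainAndRecognise()
{
	CAIGesture g;
	g.Train();//writes 手势特征文件.yml and 手势文件.txt into the working directory
	IplImage* test=cvLoadImage("test.jpg",1);//hypothetical test image
	if(test==NULL)
		return;
	CString result;
	g.Recognise(test,result);
	cvReleaseImage(&test);
}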
//recognition of a static image
void CAIGesture::Recognise(IplImage* src,CString& result)
{
FILE* fp;//C-style file handle for the gesture list written by Train()
fp=fopen("手势文件.txt","r");
if (fp==NULL)
return ;
fseek(fp,-2,SEEK_END);//seek near the end of the file, where Train() wrote the gesture count
int GestureNum=0;
int i;
fscanf(fp,"%d",&GestureNum);
if(GestureNum==0)
{
fseek(fp,-1,SEEK_END);
fscanf(fp,"%d",&GestureNum);
}
rewind(fp);//back to the beginning of the file
char* buff=new char[20];//buffer for one gesture name per line
CString* GestureName=new CString[GestureNum];
for(i=0;i<GestureNum;i++)
{
fscanf(fp,"%s\n",buff);//将文件格式读取到buff
GestureName[i]=buff;
}
delete [] buff;//release the buffer
fclose(fp);//close the file
float angle[FeatureNum][10]={0},anglecha[FeatureNum][10]={0};//temporaries for the skin-colour angle features
float Sbangle[FeatureNum][10]={0},Sbanglecha[FeatureNum][10]={0};//temporaries for the non-skin angle features
float angleresult[FeatureNum]={0},anglecharesult[FeatureNum]={0};//temporaries for the matching result
float count[FeatureNum]={0};
float Sbcount[FeatureNum]={0};
int mask[10]={0},maskcount=0,mask1[10]={0},maskcount1=0;
double mask_tmp=0,mask_tmp1=0;
CvSeq* contour=NULL;
CvMemStorage* storage;
storage = cvCreateMemStorage(0);
CvPoint center=cvPoint(0,0);
float radius=0.0;
IplImage* dst=cvCreateImage(cvGetSize(src),8,1);
SkinDetect(src,dst);
FindBigContour(dst,contour,storage);
if(contour==0)
{
cvReleaseImage(&dst);
cvReleaseMemStorage(&storage);
return;
}
CvRect bndRect=cvBoundingRect( contour, 0 );
//CvRect bndRect=cvRect( 50, 50, 200, 100 );
cvZero(dst);
cvDrawContours( dst, contour, CV_RGB(255,255,255),CV_RGB(255,255,255), -1, -1, 8 );
ComputeCenter(contour,center,radius);
cvRectangle(src,cvPoint(bndRect.x,bndRect.y),
cvPoint(bndRect.x+bndRect.width,bndRect.y+bndRect.height),