functions.java
 * below the center).
 *
 * EX: Window Size = 3, where c is the center of the window
 *
 *   X X X X X X X
 *   X X X X X X X
 *   X X X X X X X
 *   X X X c X X X
 *   X X X X X X X
 *   X X X X X X X
 *   X X X X X X X
 *
 * The function applies a window to every pixel (starting the window size in from
 * the left and top of the image and stopping the window size short of the right
 * and bottom, thereby avoiding going beyond the bounds of the image). All of the
 * values in the window are put into a list, sorted, and the median (middle) value
 * is used as the intensity for the center pixel. This algorithm is good for
 * reducing minor artifacts in the image such as noise.
 *
 * @param s The subject whose eye image is to have the median filter applied
 */
public static void applyMedianFilter(Subject s)
{
    GrayscaleImage gmg_Img = s.getGrayscaleImage(); //get the grayscale version of the eye image
    MedianImage mim_MedianImage = new MedianImage(gmg_Img.getWidth(), gmg_Img.getHeight());
    int int_WindowSize = Globals.MEDIAN_FILTER_WINDOW_SIZE;
    int [] int_Arr_Buffer = new int[((2*int_WindowSize)+1) * ((2*int_WindowSize)+1)];
    int int_MedianIndex = int_Arr_Buffer.length / 2;
    int int_Index;

    //For each row in the image
    for(int y = int_WindowSize; y < gmg_Img.getHeight() - int_WindowSize; y++)
    {
        //For each column in the image
        for(int x = int_WindowSize; x < gmg_Img.getWidth() - int_WindowSize; x++)
        {
            int_Index = 0;

            //Add each of the values from the neighboring area to the array
            for(int i = x - int_WindowSize; i <= x + int_WindowSize; i++)
            {
                for(int j = y - int_WindowSize; j <= y + int_WindowSize; j++)
                {
                    int_Arr_Buffer[int_Index++] = gmg_Img.getIntensity(i, j);
                }
            }

            //Sort the array so that we can find the median
            Arrays.sort(int_Arr_Buffer);
            mim_MedianImage.setIntensity(x, y, int_Arr_Buffer[int_MedianIndex]);
        }
    }

    //Set the median image of the subject to the median image created
    s.setMedianImage(mim_MedianImage);
}

/**
 * Applies a mean (averaging) filter to the unwrapped iris image stored in the
 * given identity and returns the smoothed image. Each pixel is replaced by the
 * average intensity of its surrounding window.
 *
 * @param img The identity whose unwrapped iris image is to be smoothed
 * @return GrayscaleImage The mean-filtered image
 */
public static GrayscaleImage meanFilter(Identity img)
{
    UnwrappedImage uwr_Img = img.getIdentityIris();
    MedianImage mim_MeanImage = new MedianImage(uwr_Img.getWidth(), uwr_Img.getHeight());
    int int_WindowSize = 2;
    int int_Sum;
    int int_NumPixels = ((2*int_WindowSize)+1) * ((2*int_WindowSize)+1);

    //For each row in the image
    for(int y = int_WindowSize; y < uwr_Img.getHeight() - int_WindowSize; y++)
    {
        //For each column in the image
        for(int x = int_WindowSize; x < uwr_Img.getWidth() - int_WindowSize; x++)
        {
            int_Sum = 0;

            //Add each of the values from the neighboring area to the sum
            for(int i = x - int_WindowSize; i <= x + int_WindowSize; i++)
            {
                for(int j = y - int_WindowSize; j <= y + int_WindowSize; j++)
                {
                    int_Sum += uwr_Img.getIntensity(i, j);
                }
            }

            //Set the pixel to the average value of the window
            mim_MeanImage.setIntensity(x, y, int_Sum / int_NumPixels);
        }
    }

    return mim_MeanImage;
}
/**
 * Finds the center of the pupil of the given subject. The algorithm scans through
 * the median image from top left to bottom right and makes no assumptions about the
 * position of the pupil (or eye, for that matter). The algorithm begins by finding
 * a pixel that is below the threshold (a combination of the lowest intensity in the
 * current image and some variance) and counts the number of adjacent pixels to the
 * right that have an intensity below the threshold as well. This count is called the
 * block size, and the center of the block is the suspected center of the pupil.
 * If this block is the largest observed so far, the algorithm checks whether a block
 * of pixels running vertically up and down from the center of the block (effectively
 * forming a cross) is also below the threshold. If so, the maximum block size is
 * updated and the center to return is reset to the new center.
 *
 * @param s The subject
 */
public static void detectPupilCenter(Subject s)
{
    MedianImage mim_Img = s.getMedianImage(); //get the median-filtered version of the eye image
    Point pnt_Center = new Point(mim_Img.getWidth()/2, mim_Img.getHeight()/2); //default to the center of the subject's eye image
    int int_IntensityBase = lowestIntensity(mim_Img);
    int int_MaxBlockSize = 0;
    int int_BlockSize = 0;

    //For each row in the image
    for(int y = 0; y < mim_Img.getHeight(); y++)
    {
        //For each column in the image
        for(int x = 0; x < mim_Img.getWidth(); x++)
        {
            //If the intensity at the current pixel is less than the threshold
            if(mim_Img.getIntensity(x, y) < (int_IntensityBase + Globals.PUPIL_VARIANCE))
            {
                //Find the number of pixels to the right with similar intensity
                int_BlockSize = getCountPixelsToRightBelowThreshhold(mim_Img, x, y, (int_IntensityBase + Globals.PUPIL_VARIANCE));

                //If the block is larger than the maximum block accepted so far and the
                //vertical band spanning up and down from the block's center is also dark
                if(int_BlockSize > int_MaxBlockSize && verticalBlockBelowThreshhold(mim_Img, x + (int_BlockSize/2), y, int_BlockSize))
                {
                    //Set the maximum block size
                    int_MaxBlockSize = int_BlockSize;

                    //Save the center of the block
                    pnt_Center = new Point(x + (int_BlockSize/2), y);
                }

                //Advance over the pixels that have already been checked (the rest of the block)
                x += int_BlockSize;
            }
        }
    }

    //Slightly overestimate the pupil radius to reduce minor errors
    s.setPupilRadius((int_MaxBlockSize/2)+2);
    s.setPupilCenter(pnt_Center);
}
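/*
 * getCountPixelsToRightBelowThreshhold() and verticalBlockBelowThreshhold(), used by
 * detectPupilCenter() above, are defined elsewhere in this class and are not part of
 * this listing. The nested sketch below only illustrates the behaviour the Javadoc
 * describes, not the original code; the signatures (in particular passing the
 * threshold explicitly to the vertical check) are assumptions.
 */
static class ThresholdHelperSketch
{
    //Counts the contiguous pixels at and to the right of (x, y) whose intensity is below the threshold
    static int countPixelsToRightBelowThreshold(MedianImage img, int x, int y, int threshold)
    {
        int count = 0;
        while(x + count < img.getWidth() && img.getIntensity(x + count, y) < threshold)
        {
            count++;
        }
        return count;
    }

    //Checks that a vertical strip of blockSize pixels centered on (x, y) is entirely below the threshold
    static boolean verticalBlockBelowThreshold(MedianImage img, int x, int y, int blockSize, int threshold)
    {
        for(int j = y - (blockSize / 2); j <= y + (blockSize / 2); j++)
        {
            if(j < 0 || j >= img.getHeight() || img.getIntensity(x, j) >= threshold)
            {
                return false;
            }
        }
        return true;
    }
}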
/**
 * Using the Canny edge detection method, detects the edges of the image
 * based on the current threshold and sigma values. The Canny edge detector
 * generates a binary image (black and white) that shows the edges of the
 * image. This image is saved into the subject as the edge image.
 *
 * @param s The subject for which the eye image is to have its edges detected
 */
public static void detectEdges(Subject s)
{
    CannyEdgeDetector ced_EdgeDetector = new CannyEdgeDetector();
    ced_EdgeDetector.setSourceImage(s.getMedianImage()); //Use the median image to detect edges

    try
    {
        ced_EdgeDetector.process(); //Process computes the edges
    }
    catch(EdgeDetectorException e)
    {
        //Swallow edge detection errors; continue with whatever edge image the detector produced
    }

    s.setEdgeImage(ced_EdgeDetector.getEdgeImage());
}

/**
 * Determines the approximate radius of the iris of the given subject. The
 * algorithm starts just outside the radius found by the pupil detection and
 * grows the radius until the circle in the edge image has at least a certain
 * percentage of black pixels (edges) on or nearly on the circle. If the ratio
 * of the pupil radius to the candidate iris radius falls between two bounds,
 * the iris radius is accepted; otherwise the required percentage match is
 * relaxed and the search repeats.
 *
 * @param s The subject
 */
public static void detectIrisRadius(Subject s)
{
    GrayscaleImage gmg_EdgeImage;
    Point pnt_PupilCenter = s.getPupilCenter();
    double dbl_IrisRadius = 0;
    double dbl_Radius = 0;
    double dbl_Ratio = 0.0;
    boolean bln_IrisFound = false;
    double dbl_StartPercentMatch = Globals.CIRCLE_PERCENT_MATCH;

    //While the ratio of the pupil radius to the iris radius is not within the
    //acceptable bounds and the percentage match is still greater than zero
    while((dbl_Ratio < .20 || dbl_Ratio > .50) && Globals.CIRCLE_PERCENT_MATCH > 0) //need a ratio of pupil radius to iris radius of roughly .2 to .5
    {
        //Detect the edges
        detectEdges(s);

        //Save the edge image of the subject locally
        gmg_EdgeImage = s.getEdgeImage();

        //Reset vars
        bln_IrisFound = false;
        dbl_IrisRadius = 0;

        //Start 5% of the total picture width away from the pupil radius
        dbl_Radius = s.getPupilRadius() + (gmg_EdgeImage.getWidth() * .05);

        try
        {
            //While the iris is not found
            while(!bln_IrisFound)
            {
                //Increment radius
                dbl_Radius++;

                //If the percentage of the pixels along the circle defined by the current radius
                //and the pupil center that are black is greater than the given threshold percentage,
                //the iris has been found
                if(Functions.circleOfBlackPixels(gmg_EdgeImage, pnt_PupilCenter, dbl_Radius) > Globals.CIRCLE_PERCENT_MATCH)
                {
                    bln_IrisFound = true;
                    dbl_IrisRadius = dbl_Radius;
                }
            }
        }
        catch(Exception e)
        {
            //The radius grew past the image bounds without a match; fall through and relax the match percentage
        }

        if(dbl_IrisRadius > 0) //protect against divide by zero
            dbl_Ratio = s.getPupilRadius() / dbl_IrisRadius;
        else
            dbl_Ratio = 0;

        Globals.CIRCLE_PERCENT_MATCH -= .05; //relax the percentage match in case the ratio is not accepted
    }

    Globals.CIRCLE_PERCENT_MATCH = dbl_StartPercentMatch;
    s.setIrisRadius(dbl_IrisRadius - 2); //Take a little off the radius
}
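/*
 * Functions.circleOfBlackPixels(...), used by detectIrisRadius() above, is defined
 * elsewhere in this class. The nested sketch below is an assumption based on the call
 * site: sample points around the circle of the given radius and return the fraction
 * that land on a black (edge) pixel in the binary edge image. Taking the center as raw
 * coordinates and treating intensity 0 as "black" are both assumptions.
 */
static class CircleMatchSketch
{
    //Returns the fraction (0.0 to 1.0) of sampled circle points that fall on a black pixel
    static double circleOfBlackPixels(GrayscaleImage edgeImage, int centerX, int centerY, double radius)
    {
        int samples = 360;
        int black = 0;

        for(int i = 0; i < samples; i++)
        {
            double theta = 2 * Math.PI * i / samples;
            int x = (int) Math.round(centerX + radius * Math.cos(theta));
            int y = (int) Math.round(centerY + radius * Math.sin(theta));

            if(x >= 0 && x < edgeImage.getWidth() && y >= 0 && y < edgeImage.getHeight()
                    && edgeImage.getIntensity(x, y) == 0)
            {
                black++;
            }
        }

        return (double) black / samples;
    }
}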
/**
 * Unrolls the iris of the subject, defined as the area between the pupil radius and
 * the iris radius. The iris, a circular object, is transformed polarly into a
 * rectangular image, which counteracts the distortion caused by the iris wrapping
 * around the pupil.
 *
 * @param s The subject to have their iris unrolled
 */
public static void unrollIris(Subject s)
{
    GrayscaleImage gmg_Img = s.getGrayscaleImage();
    UnwrappedImage uwr_Unwrapped = new UnwrappedImage(Constants.INT_UNWRAPPED_WIDTH, Constants.INT_UNWRAPPED_HEIGHT);
    Point pnt_PupilCenter = s.getPupilCenter();
    double dbl_PupilRadius = s.getPupilRadius();
    double dbl_IrisRadius = s.getIrisRadius();
    double dbl_Theta = 0.0;
    double dbl_DeltaR = 0.0;
    double dbl_RelativeR = 0.0;
    Point pnt_MapTo = null;
    Point pnt_MapFrom = null;

    //Iterate over the Y values in the output image
    for(double MapToY = 0.0; MapToY < uwr_Unwrapped.getHeight(); MapToY++)
    {
        //Iterate over the X values in the output image
        for(double MapToX = 0.0; MapToX < uwr_Unwrapped.getWidth(); MapToX++)
        {
            //Determine the polar angle to the current coordinate using the formula:
            //ANG = 2 * pi * (X in output image / width of output image)
            dbl_Theta = 2 * Math.PI * (MapToX / uwr_Unwrapped.getWidth());

            pnt_MapTo = new Point((int)MapToX, (int)MapToY);

            //Find the distance between the radius of the iris and the pupil
            dbl_DeltaR = dbl_IrisRadius - dbl_PupilRadius;

            //Compute the relative distance from the pupil radius to the map-from point
            dbl_RelativeR = dbl_DeltaR * (MapToY * 1.0 / uwr_Unwrapped.getHeight());

            //The point to map from is the point located along theta at the pupil radius
            //plus the relative radius addition
            pnt_MapFrom = Functions.polarToCartesian(pnt_PupilCenter, dbl_Theta, dbl_PupilRadius + dbl_RelativeR);

            uwr_Unwrapped.setIntensity(pnt_MapTo, gmg_Img.getIntensity(pnt_MapFrom));
        }
    }

    s.setUnwrappedImage(uwr_Unwrapped);
}

/**
 * Attempts to match the subject against the identity. The unwrapped image of the
 * subject is compared pixel by pixel against the (mean-filtered) unwrapped image
 * stored in the identity, and the average absolute difference per pixel, scaled by
 * the maximum intensity of 255, is subtracted from 1. The result is a match score
 * between 0 and 1, where 1 indicates identical images.
 *
 * @param s The subject to be matched from
 * @param ident The identity to be matched against
 * @return double The match score between the subject and the identity
 */
public static double matchIdentity(Subject s, Identity ident)
{
    GrayscaleImage gim_Base = s.getUnwrappedImage();
    GrayscaleImage gim_CompareImage;
    int int_Sum = 0;

    //Apply the mean filter to the identity
    gim_CompareImage = meanFilter(ident);

    //Subtract the compare image from the base image (absolute values)
    gim_CompareImage = subtractAbsolute(gim_Base, gim_CompareImage);

    //For each column in the image
    for(int x = 0; x < gim_CompareImage.getWidth(); x++)
    {
        //For each row in the image
        for(int y = 0; y < gim_CompareImage.getHeight(); y++)
        {
            //Add to the difference
            int_Sum += gim_CompareImage.getIntensity(x, y);
        }
    }

    //Return 1 minus the average difference per pixel, scaled by the maximum
    //per-pixel intensity of 255
    return 1 - ((double)int_Sum / (255 * gim_CompareImage.getWidth() * gim_CompareImage.getHeight()));
}
}
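/*
 * Functions.polarToCartesian(...), used by unrollIris() above, is defined elsewhere in
 * this class. The sketch below only illustrates the assumed mapping: step out from the
 * pupil center by `radius` along the angle `theta`. The real method takes the center as
 * a Point; raw coordinates are used here to avoid assuming Point's accessors.
 */
class PolarToCartesianSketch
{
    static Point polarToCartesian(int centerX, int centerY, double theta, double radius)
    {
        int x = (int) Math.round(centerX + radius * Math.cos(theta));
        int y = (int) Math.round(centerY + radius * Math.sin(theta));
        return new Point(x, y);
    }
}

/*
 * Hypothetical usage sketch (not part of the original listing) showing the order in
 * which the steps above are intended to be chained for a single comparison. Subject
 * and Identity construction is assumed to happen elsewhere in the application.
 */
class PipelineExample
{
    static double compare(Subject s, Identity ident)
    {
        Functions.applyMedianFilter(s);           //smooth the grayscale eye image
        Functions.detectPupilCenter(s);           //locate the pupil center and radius
        Functions.detectIrisRadius(s);            //calls detectEdges() internally
        Functions.unrollIris(s);                  //polar-to-rectangular transform of the iris band
        return Functions.matchIdentity(s, ident); //match score between 0 and 1
    }
}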