📄 usbvideo.c

📁 Kernel
💻 C
	up(&uvd->lock);
	return count;
}

/*
 * Make all of the blocks of data contiguous
 */
static int usbvideo_CompressIsochronous(struct uvd *uvd, struct urb *urb)
{
	char *cdata;
	int i, totlen = 0;

	for (i = 0; i < urb->number_of_packets; i++) {
		int n = urb->iso_frame_desc[i].actual_length;
		int st = urb->iso_frame_desc[i].status;

		cdata = urb->transfer_buffer + urb->iso_frame_desc[i].offset;

		/* Detect and ignore errored packets */
		if (st < 0) {
			if (uvd->debug >= 1)
				err("Data error: packet=%d. len=%d. status=%d.", i, n, st);
			uvd->stats.iso_err_count++;
			continue;
		}

		/* Detect and ignore empty packets */
		if (n <= 0) {
			uvd->stats.iso_skip_count++;
			continue;
		}
		totlen += n;	/* Little local accounting */
		RingQueue_Enqueue(&uvd->dp, cdata, n);
	}
	return totlen;
}

static void usbvideo_IsocIrq(struct urb *urb, struct pt_regs *regs)
{
	int i, ret, len;
	struct uvd *uvd = urb->context;

	/* We don't want to do anything if we are about to be removed! */
	if (!CAMERA_IS_OPERATIONAL(uvd))
		return;
#if 0
	if (urb->actual_length > 0) {
		info("urb=$%p status=%d. errcount=%d. length=%d.",
		     urb, urb->status, urb->error_count, urb->actual_length);
	} else {
		static int c = 0;
		if (c++ % 100 == 0)
			info("No Isoc data");
	}
#endif

	if (!uvd->streaming) {
		if (uvd->debug >= 1)
			info("Not streaming, but interrupt!");
		return;
	}

	uvd->stats.urb_count++;
	if (urb->actual_length <= 0)
		goto urb_done_with;

	/* Copy the data received into ring queue */
	len = usbvideo_CompressIsochronous(uvd, urb);
	uvd->stats.urb_length = len;
	if (len <= 0)
		goto urb_done_with;

	/* Here we got some data */
	uvd->stats.data_count += len;
	RingQueue_WakeUpInterruptible(&uvd->dp);

urb_done_with:
	for (i = 0; i < FRAMES_PER_DESC; i++) {
		urb->iso_frame_desc[i].status = 0;
		urb->iso_frame_desc[i].actual_length = 0;
	}
	urb->status = 0;
	urb->dev = uvd->dev;
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		err("usb_submit_urb error (%d)", ret);
	return;
}
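The two routines above form the receive path: the URB completion handler accounts for the transfer, while usbvideo_CompressIsochronous() walks the isochronous frame descriptors, drops errored or empty packets, and appends the surviving payloads to the driver's ring queue so the decoder sees one contiguous byte stream. The stand-alone sketch below models only that filtering step in user space; demo_packet, demo_ring and demo_compress() are hypothetical stand-ins, not part of the usbvideo API.

/* Illustrative only: a user-space model of the "compress isochronous" step.
 * Per-packet status and length gate what gets appended to the stream. */
#include <stdio.h>
#include <string.h>

struct demo_packet {
	int status;		/* < 0 means the packet had an error */
	int actual_length;	/* 0 means the packet was empty */
	const char *data;
};

struct demo_ring {
	char buf[256];
	int len;
};

static int demo_compress(struct demo_ring *rq,
			 const struct demo_packet *pkt, int npackets)
{
	int i, totlen = 0;

	for (i = 0; i < npackets; i++) {
		if (pkt[i].status < 0)
			continue;		/* drop errored packets */
		if (pkt[i].actual_length <= 0)
			continue;		/* drop empty packets */
		if (rq->len + pkt[i].actual_length > (int)sizeof(rq->buf))
			break;			/* sketch: stop when the demo buffer is full */
		memcpy(rq->buf + rq->len, pkt[i].data, pkt[i].actual_length);
		rq->len += pkt[i].actual_length;
		totlen += pkt[i].actual_length;
	}
	return totlen;				/* bytes made contiguous */
}

int main(void)
{
	struct demo_packet pkts[] = {
		{ 0, 3, "abc" }, { -71, 3, "xxx" }, { 0, 0, "" }, { 0, 2, "de" },
	};
	struct demo_ring rq = { {0}, 0 };
	int n = demo_compress(&rq, pkts, 4);

	printf("%d bytes kept: %.*s\n", n, rq.len, rq.buf);	/* 5 bytes kept: abcde */
	return 0;
}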
/*
 * usbvideo_StartDataPump()
 *
 * History:
 * 27-Jan-2000 Used ibmcam->iface, ibmcam->ifaceAltActive instead
 *             of hardcoded values. Simplified by using for loop,
 *             allowed any number of URBs.
 */
static int usbvideo_StartDataPump(struct uvd *uvd)
{
	struct usb_device *dev = uvd->dev;
	int i, errFlag;

	if (uvd->debug > 1)
		info("%s($%p)", __FUNCTION__, uvd);

	if (!CAMERA_IS_OPERATIONAL(uvd)) {
		err("%s: Camera is not operational", __FUNCTION__);
		return -EFAULT;
	}
	uvd->curframe = -1;

	/* Alternate interface 1 is the biggest frame size */
	i = usb_set_interface(dev, uvd->iface, uvd->ifaceAltActive);
	if (i < 0) {
		err("%s: usb_set_interface error", __FUNCTION__);
		uvd->last_error = i;
		return -EBUSY;
	}
	if (VALID_CALLBACK(uvd, videoStart))
		GET_CALLBACK(uvd, videoStart)(uvd);
	else
		err("%s: videoStart not set", __FUNCTION__);

	/* We double buffer the Iso lists */
	for (i = 0; i < USBVIDEO_NUMSBUF; i++) {
		int j, k;
		struct urb *urb = uvd->sbuf[i].urb;

		urb->dev = dev;
		urb->context = uvd;
		urb->pipe = usb_rcvisocpipe(dev, uvd->video_endp);
		urb->interval = 1;
		urb->transfer_flags = URB_ISO_ASAP;
		urb->transfer_buffer = uvd->sbuf[i].data;
		urb->complete = usbvideo_IsocIrq;
		urb->number_of_packets = FRAMES_PER_DESC;
		urb->transfer_buffer_length = uvd->iso_packet_len * FRAMES_PER_DESC;
		for (j = k = 0; j < FRAMES_PER_DESC; j++, k += uvd->iso_packet_len) {
			urb->iso_frame_desc[j].offset = k;
			urb->iso_frame_desc[j].length = uvd->iso_packet_len;
		}
	}

	/* Submit all URBs */
	for (i = 0; i < USBVIDEO_NUMSBUF; i++) {
		errFlag = usb_submit_urb(uvd->sbuf[i].urb, GFP_KERNEL);
		if (errFlag)
			err("%s: usb_submit_isoc(%d) ret %d", __FUNCTION__, i, errFlag);
	}

	uvd->streaming = 1;
	if (uvd->debug > 1)
		info("%s: streaming=1 video_endp=$%02x", __FUNCTION__, uvd->video_endp);
	return 0;
}

/*
 * usbvideo_StopDataPump()
 *
 * This procedure stops streaming and deallocates URBs. Then it
 * activates zero-bandwidth alt. setting of the video interface.
 *
 * History:
 * 22-Jan-2000 Corrected order of actions to work after surprise removal.
 * 27-Jan-2000 Used uvd->iface, uvd->ifaceAltInactive instead of hardcoded values.
 */
static void usbvideo_StopDataPump(struct uvd *uvd)
{
	int i, j;

	if (uvd->debug > 1)
		info("%s($%p)", __FUNCTION__, uvd);

	if ((uvd == NULL) || (!uvd->streaming) || (uvd->dev == NULL))
		return;

	/* Unschedule all of the iso td's */
	for (i = 0; i < USBVIDEO_NUMSBUF; i++) {
		usb_kill_urb(uvd->sbuf[i].urb);
	}
	if (uvd->debug > 1)
		info("%s: streaming=0", __FUNCTION__);
	uvd->streaming = 0;

	if (!uvd->remove_pending) {
		/* Invoke minidriver's magic to stop the camera */
		if (VALID_CALLBACK(uvd, videoStop))
			GET_CALLBACK(uvd, videoStop)(uvd);
		else
			err("%s: videoStop not set", __FUNCTION__);

		/* Set packet size to 0 */
		j = usb_set_interface(uvd->dev, uvd->iface, uvd->ifaceAltInactive);
		if (j < 0) {
			err("%s: usb_set_interface() error %d.", __FUNCTION__, j);
			uvd->last_error = j;
		}
	}
}
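usbvideo_StartDataPump() double-buffers USBVIDEO_NUMSBUF isochronous URBs and carves each transfer buffer into FRAMES_PER_DESC equal slots: descriptor j gets offset j * iso_packet_len and length iso_packet_len, and the whole buffer is iso_packet_len * FRAMES_PER_DESC bytes. The user-space sketch below only reproduces that layout arithmetic; demo_iso_desc and the DEMO_* constants are made-up values, not the driver's.

/* Illustrative only: models the offset/length layout the j/k loop in
 * usbvideo_StartDataPump() writes into urb->iso_frame_desc[]. */
#include <stdio.h>

#define DEMO_FRAMES_PER_DESC	4	/* hypothetical; the real count is driver-specific */
#define DEMO_ISO_PACKET_LEN	960	/* hypothetical packet size in bytes */

struct demo_iso_desc {
	unsigned int offset;
	unsigned int length;
};

int main(void)
{
	struct demo_iso_desc desc[DEMO_FRAMES_PER_DESC];
	unsigned int j, k;

	/* Same pattern as the kernel loop: k advances by one packet per descriptor */
	for (j = 0, k = 0; j < DEMO_FRAMES_PER_DESC; j++, k += DEMO_ISO_PACKET_LEN) {
		desc[j].offset = k;
		desc[j].length = DEMO_ISO_PACKET_LEN;
	}

	for (j = 0; j < DEMO_FRAMES_PER_DESC; j++)
		printf("desc[%u]: offset=%u length=%u\n", j, desc[j].offset, desc[j].length);

	/* Total buffer needed, matching transfer_buffer_length above */
	printf("transfer_buffer_length=%u\n", DEMO_FRAMES_PER_DESC * DEMO_ISO_PACKET_LEN);
	return 0;
}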
/*
 * usbvideo_NewFrame()
 *
 * History:
 * 29-Mar-00 Added copying of previous frame into the current one.
 * 6-Aug-00  Added model 3 video sizes, removed redundant width, height.
 */
static int usbvideo_NewFrame(struct uvd *uvd, int framenum)
{
	struct usbvideo_frame *frame;
	int n;

	if (uvd->debug > 1)
		info("usbvideo_NewFrame($%p,%d.)", uvd, framenum);

	/* If we're not grabbing a frame right now and the other frame is */
	/* ready to be grabbed into, then use it instead. */
	if (uvd->curframe != -1)
		return 0;

	/* If necessary we adjust picture settings between frames */
	if (!uvd->settingsAdjusted) {
		if (VALID_CALLBACK(uvd, adjustPicture))
			GET_CALLBACK(uvd, adjustPicture)(uvd);
		uvd->settingsAdjusted = 1;
	}

	n = (framenum + 1) % USBVIDEO_NUMFRAMES;
	if (uvd->frame[n].frameState == FrameState_Ready)
		framenum = n;

	frame = &uvd->frame[framenum];

	frame->frameState = FrameState_Grabbing;
	frame->scanstate = ScanState_Scanning;
	frame->seqRead_Length = 0;	/* Accumulated in xxx_parse_data() */
	frame->deinterlace = Deinterlace_None;
	frame->flags = 0;		/* No flags yet, up to minidriver (or us) to set them */
	uvd->curframe = framenum;

	/*
	 * Normally we would want to copy the previous frame into the current one
	 * before we even start filling it with data; this allows us to stop
	 * filling at any moment; the top portion of the frame will be new and
	 * the bottom portion will stay as it was in the previous frame. If we
	 * don't do that, then missing chunks of the video stream will show up as
	 * flickering portions of old data, whatever it was before.
	 *
	 * If we choose not to copy the previous frame (to, for example, save a
	 * few bus cycles - the frame can be pretty large!) then we have the
	 * option of clearing the frame before use. If we experience losses in
	 * this mode then the missing picture will be black (no flickering).
	 *
	 * Finally, if the user chooses not to clean the current frame before
	 * filling it with data then the old data will be visible if we fail
	 * to refill the entire frame with new data.
	 */
	if (!(uvd->flags & FLAGS_SEPARATE_FRAMES)) {
		/* This copies previous frame into this one to mask losses */
		int prev = (framenum - 1 + USBVIDEO_NUMFRAMES) % USBVIDEO_NUMFRAMES;
		memmove(frame->data, uvd->frame[prev].data, uvd->max_frame_size);
	} else {
		if (uvd->flags & FLAGS_CLEAN_FRAMES) {
			/* This provides a "clean" frame but slows things down */
			memset(frame->data, 0, uvd->max_frame_size);
		}
	}
	return 0;
}
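The comment inside usbvideo_NewFrame() describes three recovery policies for partially filled frames: copy the previous frame in (the default), clear the frame (FLAGS_SEPARATE_FRAMES together with FLAGS_CLEAN_FRAMES), or leave stale data in place (FLAGS_SEPARATE_FRAMES alone). The sketch below models that decision with plain buffers; DEMO_SEPARATE_FRAMES, DEMO_CLEAN_FRAMES and demo_prime_frame() are hypothetical names, not the driver's.

/* Illustrative only: the frame-priming policy chosen in usbvideo_NewFrame(). */
#include <stdio.h>
#include <string.h>

#define DEMO_SEPARATE_FRAMES	0x01	/* stand-in for FLAGS_SEPARATE_FRAMES */
#define DEMO_CLEAN_FRAMES	0x02	/* stand-in for FLAGS_CLEAN_FRAMES */
#define DEMO_FRAME_SIZE		8

static void demo_prime_frame(unsigned char *cur, const unsigned char *prev,
			     unsigned int flags)
{
	if (!(flags & DEMO_SEPARATE_FRAMES)) {
		/* Mask losses: start from a copy of the previous frame */
		memmove(cur, prev, DEMO_FRAME_SIZE);
	} else if (flags & DEMO_CLEAN_FRAMES) {
		/* Losses show up as black instead of flicker */
		memset(cur, 0, DEMO_FRAME_SIZE);
	}
	/* Otherwise: leave the buffer alone; stale data may show through */
}

int main(void)
{
	unsigned char prev[DEMO_FRAME_SIZE] = { 9, 9, 9, 9, 9, 9, 9, 9 };
	unsigned char cur[DEMO_FRAME_SIZE]  = { 1, 2, 3, 4, 5, 6, 7, 8 };
	int i;

	demo_prime_frame(cur, prev, 0);		/* default policy: copy previous frame */
	for (i = 0; i < DEMO_FRAME_SIZE; i++)
		printf("%d ", cur[i]);		/* prints 9 9 9 9 9 9 9 9 */
	printf("\n");
	return 0;
}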
/*
 * usbvideo_CollectRawData()
 *
 * This procedure can be used instead of the 'processData' callback if you
 * only want to dump the raw data from the camera into the output
 * device (frame buffer). You can look at it with a V4L client, but the
 * image will be unwatchable. The main purpose of this code, and of the
 * mode FLAGS_NO_DECODING, is debugging and capturing of datastreams from
 * new, unknown cameras. This procedure will be automatically invoked
 * instead of the specified callback handler when uvd->flags has the bit
 * FLAGS_NO_DECODING set. Therefore, any regular build of any driver
 * based on usbvideo can use this feature at any time.
 */
static void usbvideo_CollectRawData(struct uvd *uvd, struct usbvideo_frame *frame)
{
	int n;

	assert(uvd != NULL);
	assert(frame != NULL);

	/* Try to move data from queue into frame buffer */
	n = RingQueue_GetLength(&uvd->dp);
	if (n > 0) {
		int m;
		/* See how much space we have left */
		m = uvd->max_frame_size - frame->seqRead_Length;
		if (n > m)
			n = m;
		/* Now move that much data into frame buffer */
		RingQueue_Dequeue(
			&uvd->dp,
			frame->data + frame->seqRead_Length,
			m);
		frame->seqRead_Length += m;
	}
	/* See if we filled the frame */
	if (frame->seqRead_Length >= uvd->max_frame_size) {
		frame->frameState = FrameState_Done;
		uvd->curframe = -1;
		uvd->stats.frame_num++;
	}
}
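The key arithmetic in usbvideo_CollectRawData() is the clamp: take the number of queued bytes, limit it to the space still free in the frame, then advance seqRead_Length. The small sketch below isolates that bound computation; demo_collect() is a hypothetical helper and, as a simplification, uses the clamped count uniformly for both the copy size and the accounting.

/* Illustrative only: the space-clamping arithmetic used when raw data is
 * copied into the frame buffer. */
#include <stdio.h>

/* Returns how many bytes may be copied, given the queue length and the
 * space still free in the frame. */
static int demo_collect(int queued, int frame_size, int already_read)
{
	int space = frame_size - already_read;	/* room left in the frame */
	int n = queued;

	if (n > space)
		n = space;			/* never overrun the frame */
	if (n < 0)
		n = 0;
	return n;
}

int main(void)
{
	/* 100 bytes queued, but only 40 bytes of frame space remain */
	printf("copy %d bytes\n", demo_collect(100, 640, 600));	/* copy 40 bytes */
	/* 10 bytes queued, plenty of space: copy them all */
	printf("copy %d bytes\n", demo_collect(10, 640, 0));	/* copy 10 bytes */
	return 0;
}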
static int usbvideo_GetFrame(struct uvd *uvd, int frameNum)
{
	struct usbvideo_frame *frame = &uvd->frame[frameNum];

	if (uvd->debug >= 2)
		info("%s($%p,%d.)", __FUNCTION__, uvd, frameNum);

	switch (frame->frameState) {
	case FrameState_Unused:
		if (uvd->debug >= 2)
			info("%s: FrameState_Unused", __FUNCTION__);
		return -EINVAL;
	case FrameState_Ready:
	case FrameState_Grabbing:
	case FrameState_Error:
	{
		int ntries, signalPending;
	redo:
		if (!CAMERA_IS_OPERATIONAL(uvd)) {
			if (uvd->debug >= 2)
				info("%s: Camera is not operational (1)", __FUNCTION__);
			return -EIO;
		}
		ntries = 0;
		do {
			RingQueue_InterruptibleSleepOn(&uvd->dp);
			signalPending = signal_pending(current);
			if (!CAMERA_IS_OPERATIONAL(uvd)) {
				if (uvd->debug >= 2)
					info("%s: Camera is not operational (2)", __FUNCTION__);
				return -EIO;
			}
			assert(uvd->fbuf != NULL);
			if (signalPending) {
				if (uvd->debug >= 2)
					info("%s: Signal=$%08x", __FUNCTION__, signalPending);
				if (uvd->flags & FLAGS_RETRY_VIDIOCSYNC) {
					usbvideo_TestPattern(uvd, 1, 0);
					uvd->curframe = -1;
					uvd->stats.frame_num++;
					if (uvd->debug >= 2)
						info("%s: Forced test pattern screen", __FUNCTION__);
					return 0;
				} else {
					/* Standard answer: Interrupted! */
					if (uvd->debug >= 2)
						info("%s: Interrupted!", __FUNCTION__);
					return -EINTR;
				}
			} else {
				/* No signals - we just got new data in the dp queue */
				if (uvd->flags & FLAGS_NO_DECODING)
					usbvideo_CollectRawData(uvd, frame);
				else if (VALID_CALLBACK(uvd, processData))
					GET_CALLBACK(uvd, processData)(uvd, frame);
				else
					err("%s: processData not set", __FUNCTION__);
			}
		} while (frame->frameState == FrameState_Grabbing);

		if (uvd->debug >= 2) {
			info("%s: Grabbing done; state=%d. (%lu. bytes)",
			     __FUNCTION__, frame->frameState, frame->seqRead_Length);
		}
		if (frame->frameState == FrameState_Error) {
			int ret = usbvideo_NewFrame(uvd, frameNum);
			if (ret < 0) {
				err("%s: usbvideo_NewFrame() failed (%d.)", __FUNCTION__, ret);
				return ret;
			}
			goto redo;
		}
		/* Note that we fall through to meet our destiny below */
	}
	case FrameState_Done:
		/*
		 * Do all necessary postprocessing of the data prepared in the
		 * "interrupt" code and the collecting code above. The frame
		 * gets marked as FrameState_Done by the queue parsing code.
		 * This status means that we collected enough data and most
		 * likely processed it as we went through. However the data
		 * may need postprocessing, such as deinterlacing or picture
		 * adjustments implemented in software (horror!).
		 *
		 * As soon as the frame becomes "final" it gets promoted to
		 * FrameState_Done_Hold status, where it will remain until the
		 * caller has consumed all the video data from the frame. Then
		 * the empty shell of the ex-frame is thrown out for dogs to eat.
		 * But we, worried about pets, will recycle the frame!
		 */
		uvd->stats.frame_num++;
		if ((uvd->flags & FLAGS_NO_DECODING) == 0) {
			if (VALID_CALLBACK(uvd, postProcess))
				GET_CALLBACK(uvd, postProcess)(uvd, frame);
			if (frame->flags & USBVIDEO_FRAME_FLAG_SOFTWARE_CONTRAST)
				usbvideo_SoftwareContrastAdjustment(uvd, frame);
		}
		frame->frameState = FrameState_Done_Hold;
		if (uvd->debug >= 2)
			info("%s: Entered FrameState_Done_Hold state.", __FUNCTION__);
		return 0;

	case FrameState_Done_Hold:
		/*
		 * We stay in this state indefinitely until someone external,
		 * like an ioctl() or read() call, finishes digesting the frame
		 * data. Then it will mark the frame as FrameState_Unused and
		 * it will be released back into the wild to roam freely.
		 */
		if (uvd->debug >= 2)
			info("%s: FrameState_Done_Hold state.", __FUNCTION__);
		return 0;
	}

	/* Catch-all for other cases. We shall not be here. */
	err("%s: Invalid state %d.", __FUNCTION__, frame->frameState);
	frame->frameState = FrameState_Unused;
	return 0;
}

/*
 * usbvideo_DeinterlaceFrame()
 *
 * This procedure deinterlaces the given frame. Some cameras produce
 * only half of the scanlines - sometimes only even lines, sometimes only
 * odd lines. The deinterlacing method is stored in the frame->deinterlace
 * variable.
 *
 * Here we scan the frame vertically and replace missing scanlines with
 * the average of the surrounding ones - before and after. If we have no
 * line above then we just copy the next line. Similarly, if we need to
 * create the last line then the preceding line is used.
 */
void usbvideo_DeinterlaceFrame(struct uvd *uvd, struct usbvideo_frame *frame)
{
	if ((uvd == NULL) || (frame == NULL))
		return;

	if ((frame->deinterlace == Deinterlace_FillEvenLines) ||
	    (frame->deinterlace == Deinterlace_FillOddLines))
	{
		const int v4l_linesize = VIDEOSIZE_X(frame->request) * V4L_BYTES_PER_PIXEL;
		int i = (frame->deinterlace == Deinterlace_FillEvenLines) ? 0 : 1;

		for (; i < VIDEOSIZE_Y(frame->request); i += 2) {
			const unsigned char *fs1, *fs2;
			unsigned char *fd;
			int ip, in, j;	/* Previous and next lines */

			/*
			 * Need to average lines before and after 'i'.
			 * If we go out of bounds seeking those lines then
			 * we point back to an existing line.
			 */
			ip = i - 1;	/* First, get rough numbers */
			in = i + 1;

			/* Now validate */
			if (ip < 0)
				ip = in;
			if (in >= VIDEOSIZE_Y(frame->request))
				in = ip;

			/* Sanity check */
			if ((ip < 0) || (in < 0) ||
			    (ip >= VIDEOSIZE_Y(frame->request)) ||
			    (in >= VIDEOSIZE_Y(frame->request)))
			{
				err("Error: ip=%d. in=%d. req.height=%ld.",
				    ip, in, VIDEOSIZE_Y(frame->request));
				break;
			}

			/* Now we need to average lines 'ip' and 'in' to produce line 'i' */
			fs1 = frame->data + (v4l_linesize * ip);
			fs2 = frame->data + (v4l_linesize * in);
			fd = frame->data + (v4l_linesize * i);

			/* Average lines around destination */
			for (j = 0; j < v4l_linesize; j++) {
				fd[j] = (unsigned char)((((unsigned) fs1[j]) +
							 ((unsigned) fs2[j])) >> 1);
			}
		}
	}

	/* Optionally display statistics on the screen */
	if (uvd->flags & FLAGS_OVERLAY_STATS)
		usbvideo_OverlayStats(uvd, frame);
}

EXPORT_SYMBOL(usbvideo_DeinterlaceFrame);
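The deinterlacer above fills each missing scanline with the per-byte average of the lines directly above and below, falling back to a copy at the top or bottom edge. The sketch below runs that averaging on a tiny three-line grayscale "frame"; the 4-byte line width and the demo_* names are made up for the example.

/* Illustrative only: per-byte averaging of neighbouring scanlines, as done
 * for each missing line in usbvideo_DeinterlaceFrame(). */
#include <stdio.h>

#define DEMO_LINESIZE	4	/* hypothetical bytes per scanline */

static void demo_fill_line(unsigned char *dst,
			   const unsigned char *above, const unsigned char *below)
{
	int j;

	for (j = 0; j < DEMO_LINESIZE; j++)
		dst[j] = (unsigned char)(((unsigned)above[j] + (unsigned)below[j]) >> 1);
}

int main(void)
{
	/* Line 1 is "missing" (all zeros) and gets rebuilt from lines 0 and 2 */
	unsigned char frame[3][DEMO_LINESIZE] = {
		{ 10, 20, 30, 40 },
		{  0,  0,  0,  0 },
		{ 30, 40, 50, 60 },
	};
	int j;

	demo_fill_line(frame[1], frame[0], frame[2]);
	for (j = 0; j < DEMO_LINESIZE; j++)
		printf("%d ", frame[1][j]);	/* prints 20 30 40 50 */
	printf("\n");
	return 0;
}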
/*
 * usbvideo_SoftwareContrastAdjustment()
 *
 * This code adjusts the contrast of the frame, assuming RGB24 format.
 * As with most software image processing, this job is CPU-intensive.
 * Get a camera that supports hardware adjustment!
 *
 * History:
 * 09-Feb-2001  Created.
 */
static void usbvideo_SoftwareContrastAdjustment(struct uvd *uvd,
						struct usbvideo_frame *frame)
{
	int i, j, v4l_linesize;
	signed long adj;
	const int ccm = 128; /* Color correction median - see below */

	if ((uvd == NULL) || (frame == NULL)) {
		err("%s: Illegal call.", __FUNCTION__);
		return;
	}
	adj = (uvd->vpic.contrast - 0x8000) >> 8; /* -128..+127 = -ccm..+(ccm-1) */
	RESTRICT_TO_RANGE(adj, -ccm, ccm+1);
	if (adj == 0) {
		/* In the rare case of no adjustment, do nothing */
		return;
	}
	v4l_linesize = VIDEOSIZE_X(frame->request) * V4L_BYTES_PER_PIXEL;
	for (i = 0; i < VIDEOSIZE_Y(frame->request); i++) {
		unsigned char *fd = frame->data + (v4l_linesize * i);

		for (j = 0; j < v4l_linesize; j++) {
			signed long v = (signed long) fd[j];

			/* Magnify up to 2 times, reduce down to zero */
			v = 128 + ((ccm + adj) * (v - 128)) / ccm;
			RESTRICT_TO_RANGE(v, 0, 0xFF); /* Must flatten tails */
			fd[j] = (unsigned char) v;
		}
	}
}

MODULE_LICENSE("GPL");
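The contrast routine rescales every byte around the mid-level 128: v' = 128 + ((ccm + adj) * (v - 128)) / ccm, where adj in -128..+127 is derived from the V4L contrast setting and the result is clipped to 0..255. The short sketch below evaluates that formula for a few sample values; demo_contrast() is a hypothetical helper, not the driver's function.

/* Illustrative only: the pixel-level contrast formula used by
 * usbvideo_SoftwareContrastAdjustment(), applied to a few sample bytes. */
#include <stdio.h>

static unsigned char demo_contrast(unsigned char in, long adj)
{
	const long ccm = 128;			/* color correction median */
	long v = 128 + ((ccm + adj) * ((long)in - 128)) / ccm;

	if (v < 0)				/* clip ("flatten tails") */
		v = 0;
	if (v > 255)
		v = 255;
	return (unsigned char)v;
}

int main(void)
{
	unsigned char samples[] = { 0, 64, 128, 192, 255 };
	int i;

	/* adj = +64 stretches values away from 128 by a factor of 1.5 */
	for (i = 0; i < 5; i++)
		printf("%d -> %d\n", samples[i], demo_contrast(samples[i], 64));
	return 0;
}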
