/* usbvideo.c */
	return;

	/* Unschedule all of the iso td's */
	for (i=0; i < USBVIDEO_NUMSBUF; i++) {
		j = usb_unlink_urb(uvd->sbuf[i].urb);
		if (j < 0)
			err("%s: usb_unlink_urb() error %d.", proc, j);
	}
	if (uvd->debug > 1)
		info("%s: streaming=0", proc);
	uvd->streaming = 0;

	if (!uvd->remove_pending) {
		/* Invoke minidriver's magic to stop the camera */
		if (VALID_CALLBACK(uvd, videoStop))
			GET_CALLBACK(uvd, videoStop)(uvd);
		else
			err("%s: videoStop not set", proc);

		/* Set packet size to 0 */
		j = usb_set_interface(uvd->dev, uvd->iface, uvd->ifaceAltInactive);
		if (j < 0) {
			err("%s: usb_set_interface() error %d.", proc, j);
			uvd->last_error = j;
		}
	}
}

/*
 * usbvideo_NewFrame()
 *
 * History:
 * 29-Mar-00 Added copying of previous frame into the current one.
 * 6-Aug-00  Added model 3 video sizes, removed redundant width, height.
 */
int usbvideo_NewFrame(uvd_t *uvd, int framenum)
{
	usbvideo_frame_t *frame;
	int n;

	if (uvd->debug > 1)
		info("usbvideo_NewFrame($%p,%d.)", uvd, framenum);

	/* If we're not grabbing a frame right now and the other frame is */
	/* ready to be grabbed into, then use it instead */
	if (uvd->curframe != -1)
		return 0;

	/* If necessary we adjust picture settings between frames */
	if (!uvd->settingsAdjusted) {
		if (VALID_CALLBACK(uvd, adjustPicture))
			GET_CALLBACK(uvd, adjustPicture)(uvd);
		uvd->settingsAdjusted = 1;
	}

	n = (framenum - 1 + USBVIDEO_NUMFRAMES) % USBVIDEO_NUMFRAMES;
	if (uvd->frame[n].frameState == FrameState_Ready)
		framenum = n;

	frame = &uvd->frame[framenum];

	frame->frameState = FrameState_Grabbing;
	frame->scanstate = ScanState_Scanning;
	frame->seqRead_Length = 0;	/* Accumulated in xxx_parse_data() */
	frame->deinterlace = Deinterlace_None;
	frame->flags = 0;		/* No flags yet, up to minidriver (or us) to set them */
	uvd->curframe = framenum;

	/*
	 * Normally we would want to copy previous frame into the current one
	 * before we even start filling it with data; this allows us to stop
	 * filling at any moment; top portion of the frame will be new and
	 * bottom portion will stay as it was in previous frame. If we don't
	 * do that then missing chunks of video stream will result in flickering
	 * portions of old data whatever it was before.
	 *
	 * If we choose not to copy previous frame (to, for example, save few
	 * bus cycles - the frame can be pretty large!) then we have an option
	 * to clear the frame before using. If we experience losses in this
	 * mode then missing picture will be black (no flickering).
	 *
	 * Finally, if user chooses not to clean the current frame before
	 * filling it with data then the old data will be visible if we fail
	 * to refill entire frame with new data.
	 */
	if (!(uvd->flags & FLAGS_SEPARATE_FRAMES)) {
		/* This copies previous frame into this one to mask losses */
		memmove(frame->data, uvd->frame[1-framenum].data, uvd->max_frame_size);
	} else {
		if (uvd->flags & FLAGS_CLEAN_FRAMES) {
			/* This provides a "clean" frame but slows things down */
			memset(frame->data, 0, uvd->max_frame_size);
		}
	}
	return 0;
}
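/*
 * Illustrative sketch (not compiled into the driver): the frame-filling
 * policy described above is selected through bits in uvd->flags. A
 * minidriver that prefers black fill over stale data on packet loss could
 * set both flags from its setup code; the function name below is
 * hypothetical.
 */
#if 0
static void xxx_configure_frame_policy(uvd_t *uvd)
{
	/* Don't copy the previous frame; clear the new one instead */
	uvd->flags |= FLAGS_SEPARATE_FRAMES | FLAGS_CLEAN_FRAMES;
}
#endif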
/*
 * usbvideo_CollectRawData()
 *
 * This procedure can be used instead of 'processData' callback if you
 * only want to dump the raw data from the camera into the output
 * device (frame buffer). You can look at it with V4L client, but the
 * image will be unwatchable. The main purpose of this code and of the
 * mode FLAGS_NO_DECODING is debugging and capturing of datastreams from
 * new, unknown cameras. This procedure will be automatically invoked
 * instead of the specified callback handler when uvd->flags has bit
 * FLAGS_NO_DECODING set. Therefore, any regular build of any driver
 * based on usbvideo can use this feature at any time.
 */
void usbvideo_CollectRawData(uvd_t *uvd, usbvideo_frame_t *frame)
{
	int n;

	assert(uvd != NULL);
	assert(frame != NULL);

	/* Try to move data from queue into frame buffer */
	n = RingQueue_GetLength(&uvd->dp);
	if (n > 0) {
		int m;
		/* See how much space we have left */
		m = uvd->max_frame_size - frame->seqRead_Length;
		if (n > m)
			n = m;
		/* Now move that much data into frame buffer */
		RingQueue_Dequeue(
			&uvd->dp,
			frame->data + frame->seqRead_Length,
			m);
		frame->seqRead_Length += m;
	}

	/* See if we filled the frame */
	if (frame->seqRead_Length >= uvd->max_frame_size) {
		frame->frameState = FrameState_Done;
		uvd->curframe = -1;
		uvd->stats.frame_num++;
	}
}
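/*
 * Illustrative sketch (not compiled into the driver): to capture the raw,
 * undecoded datastream from an unknown camera, a minidriver only needs to
 * set FLAGS_NO_DECODING; usbvideo_GetFrame() then routes the data through
 * usbvideo_CollectRawData() instead of the processData callback. The
 * function name below is hypothetical.
 */
#if 0
static void xxx_enable_raw_capture(uvd_t *uvd)
{
	/* Dump undecoded camera data into the frame buffer for analysis */
	uvd->flags |= FLAGS_NO_DECODING;
}
#endif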
int usbvideo_GetFrame(uvd_t *uvd, int frameNum)
{
	static const char proc[] = "usbvideo_GetFrame";
	usbvideo_frame_t *frame = &uvd->frame[frameNum];

	if (uvd->debug >= 2)
		info("%s($%p,%d.)", proc, uvd, frameNum);

	switch (frame->frameState) {
	case FrameState_Unused:
		if (uvd->debug >= 2)
			info("%s: FrameState_Unused", proc);
		return -EINVAL;
	case FrameState_Ready:
	case FrameState_Grabbing:
	case FrameState_Error:
	{
		int ntries, signalPending;
	redo:
		if (!CAMERA_IS_OPERATIONAL(uvd)) {
			if (uvd->debug >= 2)
				info("%s: Camera is not operational (1)", proc);
			return -EIO;
		}
		ntries = 0;
		do {
			RingQueue_InterruptibleSleepOn(&uvd->dp);
			signalPending = signal_pending(current);
			if (!CAMERA_IS_OPERATIONAL(uvd)) {
				if (uvd->debug >= 2)
					info("%s: Camera is not operational (2)", proc);
				return -EIO;
			}
			assert(uvd->fbuf != NULL);
			if (signalPending) {
				if (uvd->debug >= 2)
					info("%s: Signal=$%08x", proc, signalPending);
				if (uvd->flags & FLAGS_RETRY_VIDIOCSYNC) {
					usbvideo_TestPattern(uvd, 1, 0);
					uvd->curframe = -1;
					uvd->stats.frame_num++;
					if (uvd->debug >= 2)
						info("%s: Forced test pattern screen", proc);
					return 0;
				} else {
					/* Standard answer: Interrupted! */
					if (uvd->debug >= 2)
						info("%s: Interrupted!", proc);
					return -EINTR;
				}
			} else {
				/* No signals - we just got new data in dp queue */
				if (uvd->flags & FLAGS_NO_DECODING)
					usbvideo_CollectRawData(uvd, frame);
				else if (VALID_CALLBACK(uvd, processData))
					GET_CALLBACK(uvd, processData)(uvd, frame);
				else
					err("%s: processData not set", proc);
			}
		} while (frame->frameState == FrameState_Grabbing);

		if (uvd->debug >= 2) {
			info("%s: Grabbing done; state=%d. (%lu. bytes)",
			     proc, frame->frameState, frame->seqRead_Length);
		}
		if (frame->frameState == FrameState_Error) {
			int ret = usbvideo_NewFrame(uvd, frameNum);
			if (ret < 0) {
				err("%s: usbvideo_NewFrame() failed (%d.)", proc, ret);
				return ret;
			}
			goto redo;
		}
		/* Note that we fall through to meet our destiny below */
	}
	case FrameState_Done:
		/*
		 * Do all necessary postprocessing of data prepared in
		 * "interrupt" code and the collecting code above. The
		 * frame gets marked as FrameState_Done by queue parsing code.
		 * This status means that we collected enough data and
		 * most likely processed it as we went through. However
		 * the data may need postprocessing, such as deinterlacing
		 * or picture adjustments implemented in software (horror!)
		 *
		 * As soon as the frame becomes "final" it gets promoted to
		 * FrameState_Done_Hold status where it will remain until the
		 * caller consumed all the video data from the frame. Then
		 * the empty shell of ex-frame is thrown out for dogs to eat.
		 * But we, worried about pets, will recycle the frame!
		 */
		uvd->stats.frame_num++;
		if ((uvd->flags & FLAGS_NO_DECODING) == 0) {
			if (VALID_CALLBACK(uvd, postProcess))
				GET_CALLBACK(uvd, postProcess)(uvd, frame);
			if (frame->flags & USBVIDEO_FRAME_FLAG_SOFTWARE_CONTRAST)
				usbvideo_SoftwareContrastAdjustment(uvd, frame);
		}
		frame->frameState = FrameState_Done_Hold;
		if (uvd->debug >= 2)
			info("%s: Entered FrameState_Done_Hold state.", proc);
		return 0;

	case FrameState_Done_Hold:
		/*
		 * We stay in this state indefinitely until someone external,
		 * like ioctl() or read() call finishes digesting the frame
		 * data. Then it will mark the frame as FrameState_Unused and
		 * it will be released back into the wild to roam freely.
		 */
		if (uvd->debug >= 2)
			info("%s: FrameState_Done_Hold state.", proc);
		return 0;
	}

	/* Catch-all for other cases. We shall not be here. */
	err("%s: Invalid state %d.", proc, frame->frameState);
	frame->frameState = FrameState_Unused;
	return 0;
}
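/*
 * Illustrative sketch (not compiled into the driver): usbvideo_GetFrame()
 * is meant to be driven from the read()/VIDIOCSYNC paths. A caller that has
 * scheduled a frame with usbvideo_NewFrame() waits for it roughly like this
 * (error handling trimmed; the function name below is hypothetical):
 */
#if 0
static int xxx_wait_for_frame(uvd_t *uvd, int frameNum)
{
	/* Blocks until the frame reaches FrameState_Done_Hold, a signal, or an error */
	int ret = usbvideo_GetFrame(uvd, frameNum);

	if ((ret == 0) &&
	    (uvd->frame[frameNum].frameState == FrameState_Done_Hold)) {
		/* ... copy uvd->frame[frameNum].data out to the caller ... */
		uvd->frame[frameNum].frameState = FrameState_Unused;	/* Recycle */
	}
	return ret;
}
#endif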
/*
 * usbvideo_DeinterlaceFrame()
 *
 * This procedure deinterlaces the given frame. Some cameras produce
 * only half of scanlines - sometimes only even lines, sometimes only
 * odd lines. The deinterlacing method is stored in frame->deinterlace
 * variable.
 *
 * Here we scan the frame vertically and replace missing scanlines with
 * average between surrounding ones - before and after. If we have no
 * line above then we just copy next line. Similarly, if we need to
 * create a last line then preceding line is used.
 */
void usbvideo_DeinterlaceFrame(uvd_t *uvd, usbvideo_frame_t *frame)
{
	if ((uvd == NULL) || (frame == NULL))
		return;

	if ((frame->deinterlace == Deinterlace_FillEvenLines) ||
	    (frame->deinterlace == Deinterlace_FillOddLines))
	{
		const int v4l_linesize = VIDEOSIZE_X(frame->request) * V4L_BYTES_PER_PIXEL;
		int i = (frame->deinterlace == Deinterlace_FillEvenLines) ? 0 : 1;

		for (; i < VIDEOSIZE_Y(frame->request); i += 2) {
			const unsigned char *fs1, *fs2;
			unsigned char *fd;
			int ip, in, j;	/* Previous and next lines */

			/*
			 * Need to average lines before and after 'i'.
			 * If we go out of bounds seeking those lines then
			 * we point back to existing line.
			 */
			ip = i - 1;	/* First, get rough numbers */
			in = i + 1;

			/* Now validate */
			if (ip < 0)
				ip = in;
			if (in >= VIDEOSIZE_Y(frame->request))
				in = ip;

			/* Sanity check */
			if ((ip < 0) || (in < 0) ||
			    (ip >= VIDEOSIZE_Y(frame->request)) ||
			    (in >= VIDEOSIZE_Y(frame->request)))
			{
				err("Error: ip=%d. in=%d. req.height=%ld.",
				    ip, in, VIDEOSIZE_Y(frame->request));
				break;
			}

			/* Now we need to average lines 'ip' and 'in' to produce line 'i' */
			fs1 = frame->data + (v4l_linesize * ip);
			fs2 = frame->data + (v4l_linesize * in);
			fd = frame->data + (v4l_linesize * i);

			/* Average lines around destination */
			for (j=0; j < v4l_linesize; j++) {
				fd[j] = (unsigned char)((((unsigned) fs1[j]) +
							 ((unsigned) fs2[j])) >> 1);
			}
		}
	}

	/* Optionally display statistics on the screen */
	if (uvd->flags & FLAGS_OVERLAY_STATS)
		usbvideo_OverlayStats(uvd, frame);
}

/*
 * usbvideo_SoftwareContrastAdjustment()
 *
 * This code adjusts the contrast of the frame, assuming RGB24 format.
 * As most software image processing, this job is CPU-intensive.
 * Get a camera that supports hardware adjustment!
 *
 * History:
 * 09-Feb-2001 Created.
 */
void usbvideo_SoftwareContrastAdjustment(uvd_t *uvd, usbvideo_frame_t *frame)
{
	static const char proc[] = "usbvideo_SoftwareContrastAdjustment";
	int i, j, v4l_linesize;
	signed long adj;
	const int ccm = 128; /* Color correction median - see below */

	if ((uvd == NULL) || (frame == NULL)) {
		err("%s: Illegal call.", proc);
		return;
	}
	adj = (uvd->vpic.contrast - 0x8000) >> 8; /* -128..+127 = -ccm..+(ccm-1) */
	RESTRICT_TO_RANGE(adj, -ccm, ccm+1);
	if (adj == 0) {
		/* In rare case of no adjustment */
		return;
	}
	v4l_linesize = VIDEOSIZE_X(frame->request) * V4L_BYTES_PER_PIXEL;
	for (i=0; i < VIDEOSIZE_Y(frame->request); i++) {
		unsigned char *fd = frame->data + (v4l_linesize * i);
		for (j=0; j < v4l_linesize; j++) {
			signed long v = (signed long) fd[j];
			/* Magnify up to 2 times, reduce down to zero */
			v = 128 + ((ccm + adj) * (v - 128)) / ccm;
			RESTRICT_TO_RANGE(v, 0, 0xFF); /* Must flatten tails */
			fd[j] = (unsigned char) v;
		}
	}
}
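/*
 * Worked example of the adjustment above, with values assumed purely for
 * illustration: if uvd->vpic.contrast = 0xC000 then
 * adj = (0xC000 - 0x8000) >> 8 = 64, and a pixel component v = 200 maps to
 *   128 + ((128 + 64) * (200 - 128)) / 128 = 128 + (192 * 72) / 128 = 236,
 * i.e. values are pushed away from the midpoint 128 (higher contrast),
 * while a negative adj pulls them toward 128 instead.
 */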
/*
 * /proc interface
 *
 * We will be creating directories and entries under /proc/video using
 * external 'video_proc_entry' directory which is exported by videodev.o
 * module. Within that directory we will create $driver/ directory to
 * uniquely and uniformly refer to our specific $driver. Within that
 * directory we will finally create an entry that is named after the
 * video device node - video3, for example. The format of that file
 * is determined by callbacks that the minidriver may provide. If no
 * callbacks are provided (neither read nor write) then we don't create
 * the entry.
 *
 * Here is a sample directory entry: /proc/video/ibmcam/video3
 *
 * The "file" video3 (in example above) is readable and writeable, in
 * theory. If the minidriver provides callbacks to do reading and
 * writing then both those procedures are supported. However if the
 * driver leaves callbacks in default (NULL) state the default
 * read and write handlers are used. The default read handler reports
 * that the driver does not support /proc fs. The default write handler
 * returns error code on any write attempt.
 */
#if USES_PROC_FS

extern struct proc_dir_entry *video_proc_entry;

static void usbvideo_procfs_level1_create(usbvideo_t *ut)
{
	static const char proc[] = "usbvideo_procfs_level1_create";
	if (ut == NULL) {
		err("%s: ut == NULL", proc);
		return;
	}
	if (video_proc_entry == NULL) {
		err("%s: /proc/video/ doesn't exist.", proc);
		return;
	}
	ut->procfs_dEntry = create_proc_entry(ut->drvName, S_IFDIR, video_proc_entry);
	if (ut->procfs_dEntry != NULL) {
		if (ut->md_module != NULL)
			ut->procfs_dEntry->owner = ut->md_module;
	} else {
		err("%s: Unable to initialize /proc/video/%s", proc, ut->drvName);
	}
}

static void usbvideo_procfs_level1_destroy(usbvideo_t *ut)
{
	static const char proc[] = "usbvideo_procfs_level1_destroy";
	if (ut == NULL) {
		err("%s: ut == NULL", proc);
		return;
	}
	if (ut->procfs_dEntry != NULL) {
		remove_proc_entry(ut->drvName, video_proc_entry);
		ut->procfs_dEntry = NULL;
	}
}

static void usbvideo_procfs_level2_create(uvd_t *uvd)
{
	static const char proc[] = "usbvideo_procfs_level2_create";
	if (uvd == NULL) {
		err("%s: uvd == NULL", proc);
		return;
	}
	assert(uvd->handle != NULL);
	if (uvd->handle->procfs_dEntry == NULL) {
		err("%s: uvd->handle->procfs_dEntry == NULL", proc);
		return;
	}

	sprintf(uvd->videoName, "video%d", uvd->vdev.minor);
	uvd->procfs_vEntry = create_proc_entry(
		uvd->videoName,
		S_IFREG | S_IRUGO | S_IWUSR,
		uvd->handle->procfs_dEntry);
	if (uvd->procfs_vEntry != NULL) {
		uvd->procfs_vEntry->data = uvd;
		uvd->procfs_vEntry->read_proc = uvd->handle->cb.procfs_read;
		uvd->procfs_vEntry->write_proc = uvd->handle->cb.procfs_write;
	} else {
		err("%s: Failed to create entry \"%s\"", proc, uvd->videoName);
	}
}

static void usbvideo_procfs_level2_destroy(uvd_t *uvd)
{
	static const char proc[] = "usbvideo_procfs_level2_destroy";
	if (uvd == NULL) {
		err("%s: uvd == NULL", proc);
		return;
	}
	if (uvd->procfs_vEntry != NULL) {
		remove_proc_entry(uvd->videoName, uvd->procfs_vEntry);
		uvd->procfs_vEntry = NULL;
	}
}

static int usbvideo_default_procfs_read_proc(
	char *page, char **start, off_t off, int count,
	int *eof, void *data)
{
	char *out = page;
	int len;

	/* Stay under PAGE_SIZE or else