v4l2.c

/*
 *  drivers/media/video/mx2ads/v4l2.c
 *
 *  Description:
 *      Video for Linux Two Interface for mx2ads Capture device.
 *
 *      This driver is based on the capture driver for the OMAP camera.
 *      Conceptual Usage: (exact syntax may vary)
 *        1. insmod videodevX
 *        2. insmod camera [unit_video=0]
 *           driver registers major 81, minor 0, which
 *           on most systems equates to either /dev/video0
 *           or sometimes simply /dev/video
 *        3. Now you can run apps that use the v4l2 interface
 *           of the installed camera driver. (see apps/v4l2/<>.c)
 *
 *
 *  Author: MontaVista Software, Inc.
 *          source@mvista.com
 *
 *  Created 2002, Copyright (C) 2002 RidgeRun, Inc.  All rights reserved.
 *  Created 2002, Copyright (C) 2002 Texas Instruments  All rights reserved.
 *  2003 - 2004 (c) MontaVista Software, Inc.  This file is licensed under
 *  the terms of the GNU General Public License version 2.  This program
 *  is licensed "as is" without any warranty of any kind, whether express
 *  or implied.
 *
 *  Original Author: Bill Dirks <bdirks@pacbell.net>
 *    based on code by Alan Cox, <alan@cymru.net>
 */

#include <linux/config.h>       /* retrieve the CONFIG_* macros */
#include <linux/module.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/videodev.h>
#include <linux/fb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/arch/pll.h>

#define MODULE_NAME "v4l2-mx2ads"
#include "common.h"
#include "camif.h"
#include "v4l2.h"

#ifdef CONFIG_PM
#include <linux/pm.h>
#endif

static struct tq_struct fbinfo_tsk_q_entry;
static void update_fbinfo_task (void *);
extern long sys_ioctl (unsigned int fd, unsigned int cmd, unsigned long arg);

#define DEFAULT_FRAME_BUFF "/dev/fb0"

#define NUM_CAPFMT (4)
#define MAX_BPP 2               /* max bytes per pixel */

/*
 * Supported pixel formats. All the underlying cameras must support
 * these pixel formats. If the camera doesn't support a pixel format
 * in hardware, it will program the camera for the closest supported
 * format and then use its convert_image() method.
 */
static struct v4l2_fmtdesc capfmt[CAMIF_CHANNELS_NUM][NUM_CAPFMT] = {
        {
         {0, {"RGB-16 (5-5-5)"}, V4L2_PIX_FMT_RGB555, 0, 16, {0, 0}, },
         {1, {"RGB-16 (5-6-5)"}, V4L2_PIX_FMT_RGB565, 0, 16, {0, 0}, },
         {2, {"YUV 4:2:2 (Y-U-Y-V)"}, V4L2_PIX_FMT_YUYV, V4L2_FMT_CS_601YUV, 16, {0, 0}, },
         {3, {"YUV 4:2:2 (U-Y-V-Y)"}, V4L2_PIX_FMT_YUYV, V4L2_FMT_CS_601YUV, 16, {0, 0}, },
         },
        {
         {0, {"YUV 4:2:0 (Planar)"}, V4L2_PIX_FMT_YUV420, 0, 12, {0, 0}, },
         {1, {"YUV 4:2:2 (Planar)"}, V4L2_PIX_FMT_YUV422P, 0, 16, {0, 0}, },
         {2, {"YUV 4:2:0 (Planar)"}, V4L2_PIX_FMT_YVU420, 0, 12, {0, 0}, },
         {3, {"YUV 4:2:2 (Planar)"}, V4L2_PIX_FMT_YVU422P, 0, 16, {0, 0}, },
         }
#if 0
         {4, {"RGB-32 (B-G-R-?)"}, V4L2_PIX_FMT_BGR32, 0, 32, {0, 0}, },
         {5, {"Greyscale-8"}, V4L2_PIX_FMT_GREY, V4L2_FMT_CS_601YUV, 8, {0, 0}, },
         {6, {"YUV 4:2:0 (planar)"}, V4L2_PIX_FMT_YUV420, V4L2_FMT_CS_601YUV, 12, {0, 0}, },
#endif
};
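
/*
 * Editorial note (not part of the original driver): the positional
 * initializers above are hard to read.  Assuming the 2.4-era v4l2_fmtdesc
 * layout this file appears to be written against (index, description,
 * pixelformat, flags, depth, reserved[2]), the first YUYV entry would be
 * equivalent to the following designated-initializer form:
 *
 *      {
 *              .index       = 2,
 *              .description = {"YUV 4:2:2 (Y-U-Y-V)"},
 *              .pixelformat = V4L2_PIX_FMT_YUYV,
 *              .flags       = V4L2_FMT_CS_601YUV,
 *              .depth       = 16,
 *              .reserved    = {0, 0},
 *      }
 */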

/*
 * Array of image formats supported by the various cameras used on
 * OMAP. These must be ordered from smallest image size to largest.
 * The specific camera will support all or a subset of these.
 */
const struct image_size mx2ads_image_size[] = {
        { 88, 72 },             /* QQCIF */
        { 160, 120 },           /* QQVGA */
        { 176, 144 },           /* QCIF */
        { 320, 240 },           /* QVGA */
        { 352, 288 },           /* CIF */
        { 640, 480 },           /* VGA */
        { 1280, 960 },          /* SXGA */
};

/*
 * Array of pixel formats supported by the various cameras used on
 * OMAP. The camera uses its convert_image() method to convert from
 * a native pixel format to one of the above capfmt[] formats.
 */
const int mx2ads_pixfmt_depth[] = {
        16,                     /* YUV */
        16,                     /* RGB565 */
        15                      /* RGB555 */
};

/* Extreme video dimensions */
#define MIN_WIDTH       32
#define MIN_HEIGHT      24
#define MAX_WIDTH       (mx2ads_image_size[SXGA].width)
#define MAX_HEIGHT      (mx2ads_image_size[SXGA].height)
#define MAX_IMAGE_SIZE  (MAX_WIDTH * MAX_HEIGHT * MAX_BPP)
#define MAX_FRAME_AGE   (200)   /* ms */

/*
 * The Capture device structure array. This is the only global
 * variable in the module besides those used by the device probing
 * and enumeration routines (command line overrides)
 */
static struct capture_device capture[CAMIF_CHANNELS_NUM];
static struct capture_pwm_t capture_pwm;
static const int unit_video = 0;

static int
get_framebuffer_info (struct capture_device *dev)
{
        int fbfd, retcode;

        dev->fbinfo_valid = 0;
        fbfd = sys_open (DEFAULT_FRAME_BUFF, O_RDWR, 0);
        if (fbfd < 0) {
                err ("Error: cannot open framebuffer device.\n");
                return fbfd;
        }
        /* Get fixed screen information */
        if ((retcode = sys_ioctl (fbfd, FBIOGET_FSCREENINFO,
                                  (unsigned long) (&dev->fbfix)))) {
                err ("Error reading fb fixed information.\n");
                sys_close (fbfd);
                return retcode;
        }
        /* Get variable screen information */
        if ((retcode = sys_ioctl (fbfd, FBIOGET_VSCREENINFO,
                                  (unsigned long) (&dev->fbvar)))) {
                err ("Error reading fb var information.\n");
                sys_close (fbfd);
                return retcode;
        }
        sys_close (fbfd);
        dev->fbinfo_valid = 1;
        return 0;
}

static inline struct capture_device *
capture_device_from_file (struct file *file)
{
        return (struct capture_device *) v4l2_device_from_file (file);
}

static int
isqrt (unsigned int q)
{
        /* A little integer square root routine */
        int i;
        int r;
        unsigned int b2 = 0x40000000;
        unsigned int t;

        for (i = 16, r = 0; i > 0 && q; --i) {
                t = ((unsigned int) r << i) + b2;
                if (t <= q) {
                        q -= t;
                        r |= (1 << (i - 1));
                }
                b2 >>= 2;
        }
        return r;
}

static unsigned long
current_time_ms (void)
{
        struct timeval now;

        do_gettimeofday (&now);
        return now.tv_sec * 1000 + now.tv_usec / 1000;
}

/*
 *
 *      V I D E O   D E C O D E R S
 *
 */
static int
decoder_initialize (struct capture_device *dev)
{
        /* Video decoder information fields */
        dev->videc.standards = (1 << V4L2_STD_NTSC) | (1 << V4L2_STD_PAL);
        dev->videc.ntsc_hskip = 30;
        dev->videc.ntsc_vskip = 12;
        dev->videc.ntsc_width = 640;
        dev->videc.ntsc_height = 480;
        dev->videc.ntsc_field_order = 0;
        dev->videc.pal_hskip = 62;
        dev->videc.pal_vskip = 14;
        dev->videc.pal_width = 640;
        dev->videc.pal_height = 480;
        dev->videc.pal_field_order = 0;
        dev->videc.preferred_field = 0;
        dev->videc.num_inputs = 2;
        dev->videc.decoder_is_stable = 1;
        return 1;
}

static int
decoder_set_input (struct capture_device *dev, int i)
{
        ENTRY ();
        dev->input = i;
        /* dev->videc.decoder_is_stable = 0; */
        /* TODO: Switch the hardware to the new input */
        return 1;
}

static int
decoder_set_frame_period (struct capture_device *dev, int fp)
{
        int retcode = -EINVAL;

        if (dev->camif->set_frame_period)
                retcode = dev->camif->set_frame_period (fp);
        if (retcode >= 0) {
                dev->videc.frame_period = retcode;
                return 1;
        }
        dev->videc.frame_period = 333667;
        return retcode;
}
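
/*
 * Editorial note (not part of the original driver): the frame-period
 * constants used here and in decoder_set_standard() below appear to be
 * expressed in 100 ns units, the V4L2 convention of the time:
 *
 *      NTSC: 1 s / 29.97 fps ~= 33.3667 ms -> 333667 * 100 ns
 *      PAL : 1 s / 25    fps  = 40.0000 ms -> 400000 * 100 ns
 */
#if 0
/* Hypothetical helper, for illustration only: convert a frame rate given
 * in hundredths of a frame per second (e.g. 2997 for NTSC, 2500 for PAL)
 * into a frame period in 100 ns units. */
static inline int
fps_x100_to_frame_period (int fps_x100)
{
        /* 10^7 ticks of 100 ns per second, times 100 for the fps scaling */
        return 1000000000 / fps_x100;
}
#endif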

static int
decoder_set_standard (struct capture_device *dev, int x)
{
        int fp;

        dev->videc.standard = x;
        switch (x) {
        case V4L2_STD_NTSC:
                fp = 333667;
                break;
        case V4L2_STD_PAL:
        case V4L2_STD_SECAM:
                fp = 400000;
                break;
        default:
                fp = 333667;
                break;
        }
        return decoder_set_frame_period (dev, fp);
}

static int
decoder_set_vcrmode (struct capture_device *dev, int x)
{
        dev->source[dev->input].vcrmode = x;
        /* TODO: Switch decoder to VCR sync timing mode */
        return 1;
}

static int
decoder_is_stable (struct capture_device *dev)
{
        /* TODO: Check if decoder is synced to input */
        return 1;
}

static int
decoder_probe (struct capture_device *dev)
{
        /* TODO: Probe I2C bus or whatever for the video decoder */
        /* Fill in the method fields */
        dev->videc.initialize = decoder_initialize;
        dev->videc.set_input = decoder_set_input;
        dev->videc.set_standard = decoder_set_standard;
        dev->videc.set_vcrmode = decoder_set_vcrmode;
        dev->videc.is_stable = decoder_is_stable;
        dev->videc.set_frame_period = decoder_set_frame_period;
        dbg("Found decoder chip\n");
        return 1;               /* Found */
}

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 *
 * Probe I2C bus for video decoder and fill in the device fields
 */
static int
find_decoder (struct capture_device *dev)
{
        if (!decoder_probe (dev))
                return 0;       /* Failure */
        return 1;
}

static void
set_video_input (struct capture_device *dev, int i)
{
        if (i < 0 || i >= dev->videc.num_inputs)
                return;
        dev->videc.set_input (dev, i);
        dev->videc.set_vcrmode (dev, dev->source[i].vcrmode);
}

/*
 *
 *      V I D E O   C A P T U R E   F U N C T I O N S
 *
 */

/* Stop the music! */
static void
capture_abort (struct capture_device *dev)
{
        ENTRY();
        /* Turn off the capture hardware */
        dev->camif->abort (dev->id);
        dev->capture_started = 0;
        /*
         * Wake up any processes that might be waiting for a frame
         * and let them return an error
         */
        wake_up_interruptible (&dev->new_video_frame);
        EXIT();
}
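
/*
 * Editorial note (not part of the original driver): capture_new_format()
 * below clamps the requested size so that width * height * bytes-per-pixel
 * fits in max_image_size while keeping the requested aspect ratio.  With
 * max_pixels = max_image_size / bypp and a requested aspect ratio of w:h,
 * the largest conforming size is roughly
 *
 *      width  <= isqrt(max_pixels * w / h)
 *      height <= isqrt(max_pixels * h / w)
 *
 * since then width * height <= max_pixels and width/height == w/h.
 * The hypothetical stand-alone sketch below restates that inline
 * calculation; it is not used by the driver.
 */
#if 0
static void
clamp_to_max_image_size (unsigned int *width, unsigned int *height,
                         unsigned int bypp, unsigned int max_image_size)
{
        unsigned int max_pixels = max_image_size / bypp;
        unsigned int wmax = isqrt ((max_pixels * *width) / *height);
        unsigned int hmax = isqrt ((max_pixels * *height) / *width);

        if (*width > wmax)
                *width = wmax;
        if (*height > hmax)
                *height = hmax;
}
#endif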

/* The image format has changed, width, height, pixel format.
 * Decide if the format is ok or take the closest valid format.
 */
static int
capture_new_format (struct capture_device *dev)
{
        int max_image_size;
        int max_height;
        int max_width;
        int max_pixels;
        int t, retcode = 0;

        ENTRY();
        dev->ready_to_capture = 0;
        max_width = MAX_WIDTH;
        max_height = MAX_HEIGHT;
        dev->clientfmt.flags = V4L2_FMT_CS_601YUV;      /* desired default. */
        dev->clientfmt.flags |= V4L2_FMT_FLAG_ODDFIELD;

        switch (dev->clientfmt.pixelformat) {
        case V4L2_PIX_FMT_GREY:
                dev->clientfmt.depth = 8;
                break;
        case V4L2_PIX_FMT_YUV420:
                dev->clientfmt.depth = 12;
                break;
        case V4L2_PIX_FMT_RGB555:
        case V4L2_PIX_FMT_RGB565:
                dev->clientfmt.flags = 0;
                /* fall thru */
        case V4L2_PIX_FMT_YUYV:
        case V4L2_PIX_FMT_UYVY:
                dev->clientfmt.depth = 16;
                break;
        case V4L2_PIX_FMT_BGR24:
                dev->clientfmt.depth = 24;
                dev->clientfmt.flags = 0;
                break;
        case V4L2_PIX_FMT_BGR32:
                dev->clientfmt.depth = 32;
                dev->clientfmt.flags = 0;
                break;
        default:
                dbg("unknown format %4.4s\n",
                    (char *) &dev->clientfmt.pixelformat);
                dev->clientfmt.depth = 16;
                dev->clientfmt.pixelformat = V4L2_PIX_FMT_YUYV;
                dev->clientfmt.flags = 0;
                break;
        }
        dev->capture_bypp = (dev->clientfmt.depth + 7) >> 3;

        if (dev->clientfmt.width < MIN_WIDTH)
                dev->clientfmt.width = MIN_WIDTH;
        if (dev->clientfmt.height < MIN_HEIGHT)
                dev->clientfmt.height = MIN_HEIGHT;

        max_image_size = MAX_IMAGE_SIZE;
        if (dev->stream_buffers_mapped) {
                /* Limited by size of existing buffers */
                max_image_size = dev->stream_buf[0].vidbuf.length;
        }
        max_pixels = max_image_size / dev->capture_bypp;
        t = isqrt ((max_pixels * dev->clientfmt.width) / dev->clientfmt.height);
        if (t < max_width)
                max_width = t;
        t = isqrt ((max_pixels * dev->clientfmt.height) / dev->clientfmt.width);
        if (t < max_height)
                max_height = t;
        if (dev->clientfmt.width > max_width)
                dev->clientfmt.width = max_width;
        if (dev->clientfmt.height > max_height)
                dev->clientfmt.height = max_height;
        dev->clientfmt.width &= ~3;
        dev->clientfmt.height &= ~3;

        /* tell the camera about the format, it may modify width and height. */
        if (dev->camera) {
                if ((retcode = dev->camif->set_format (dev->id, &dev->clientfmt))) {
                        EXIT();
                        return retcode;
                }
        }

        dev->clientfmt.sizeimage =
                (dev->clientfmt.width * dev->clientfmt.height *
                 dev->clientfmt.depth) / 8;
        dev->capture_size =
                dev->clientfmt.width * dev->clientfmt.height * dev->capture_bypp;
        EXIT();
        return 0;
}

/****************************
 * Routine: DeallocateBuffer
 * Description: Free the capture buffer, if any, using the allocator
 *              that created it (consistent_free for the DMA buffer,
 *              vfree for the test-pattern buffer).
 ****************************/
static void
DeallocateBuffer (struct capture_device *dev)
{
        if (dev->capture_buffer != NULL) {
                if (dev->camera && dev->input == 0) {
                        consistent_free(dev->capture_buffer,
                                        dev->capture_buffer_size,
                                        dev->capture_buffer_phys);
                } else {
                        vfree (dev->capture_buffer);
                }
                dev->capture_buffer = NULL;
        }
}

/****************************
 * Routine: AllocateBuffer
 * Description: (Re)allocate the capture buffer: DMA-coherent memory
 *              for the real camera input, page-aligned vmalloc memory
 *              for the color bar test pattern.
 ****************************/
static void
AllocateBuffer (struct capture_device *dev)
{
        DeallocateBuffer (dev);

        /* dev->input=0 is camera image (if real h/w present). */
        /* dev->input=1 is color bar test pattern. */
        if (dev->camera && dev->input == 0) {
                dev->capture_buffer_size = dev->capture_size;
                dev->capture_buffer =
                        consistent_alloc(GFP_KERNEL | GFP_DMA,
                                         dev->capture_buffer_size,
                                         &dev->capture_buffer_phys);
        } else {
                dev->capture_buffer_size =
                        (dev->capture_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
                dev->capture_buffer =
                        (__u8 *) vmalloc (dev->capture_buffer_size);
        }
}
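
/*
 * Editorial note (not part of the original driver): AllocateBuffer() above
 * chooses the allocator based on the selected input.  The real camera path
 * (input == 0) is filled by DMA, so it needs a physically contiguous,
 * cache-coherent buffer; the 2.4-era ARM consistent_alloc()/consistent_free()
 * calls provide that and also return the physical address that gets
 * programmed into the capture hardware.  The color-bar test pattern
 * (input == 1) is written by the CPU only, so virtually contiguous vmalloc()
 * memory is sufficient.  On later kernels the rough equivalent of the DMA
 * path would be dma_alloc_coherent(); the fragment below is illustrative
 * only and passes a NULL struct device, which a real conversion would
 * replace with the platform device for the capture interface.
 */
#if 0
        dev->capture_buffer =
                dma_alloc_coherent (NULL, dev->capture_buffer_size,
                                    &dev->capture_buffer_phys,
                                    GFP_KERNEL | GFP_DMA);
#endif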

/* Allocate buffers, and get everything ready to capture
 * an image, but don't start capturing yet.
 */
static int
capture_begin (struct capture_device *dev)
{
        ENTRY();
        capture_abort (dev);
        if (dev->ready_to_capture) {
                EXIT();
                return dev->ready_to_capture;
        }

        if ((dev->capture_buffer_size < dev->capture_size) || dev->SwitchInputs) {
                dev->SwitchInputs = 0;
                AllocateBuffer (dev);
                if (dev->capture_buffer == NULL) {
                        dev->capture_buffer_size = 0;
                        err ("Can't allocate capture buffer"
                             " %d bytes\n", dev->capture_size);
                        EXIT();
                        return dev->ready_to_capture;
                }
        }
        EXIT();
        return (dev->ready_to_capture = 1);
}

/* Start an image capture */
static void
capture_grab_frame (struct capture_device *dev)
{
        if (dev->ready_to_capture && dev->capture_started)
                return;

        capture_begin (dev);
        if (!dev->ready_to_capture)
                return;

        if (dev->camera && dev->input == 0) {
                /* Start the camera h/w. It will call us back on image completion. */