📄 dispc.c
	int plane = (int)vma->vm_private_data;

	atomic_dec(&dispc.map_count[plane]);
}

static struct vm_operations_struct mmap_user_ops = {
	.open  = mmap_user_open,
	.close = mmap_user_close,
};

static int omap_dispc_mmap_user(struct fb_info *info,
				struct vm_area_struct *vma)
{
	struct omapfb_plane_struct *plane = info->par;
	unsigned long off;
	unsigned long start;
	u32 len;

	if (vma->vm_end - vma->vm_start == 0)
		return 0;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;
	off = vma->vm_pgoff << PAGE_SHIFT;

	start = info->fix.smem_start;
	len = info->fix.smem_len;
	if (off >= len)
		return -EINVAL;
	if ((vma->vm_end - vma->vm_start + off) > len)
		return -EINVAL;
	off += start;
	vma->vm_pgoff = off >> PAGE_SHIFT;
	vma->vm_flags |= VM_IO | VM_RESERVED;
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &mmap_user_ops;
	vma->vm_private_data = (void *)plane->idx;
	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot))
		return -EAGAIN;
	/* vm_ops.open won't be called for mmap itself. */
	atomic_inc(&dispc.map_count[plane->idx]);
	return 0;
}

static void unmap_kern(struct omapfb_mem_region *region)
{
	vunmap(region->vaddr);
}

static int alloc_palette_ram(void)
{
	dispc.palette_vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
		MAX_PALETTE_SIZE, &dispc.palette_paddr, GFP_KERNEL);
	if (dispc.palette_vaddr == NULL) {
		dev_err(dispc.fbdev->dev, "failed to alloc palette memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_palette_ram(void)
{
	dma_free_writecombine(dispc.fbdev->dev, MAX_PALETTE_SIZE,
			      dispc.palette_vaddr, dispc.palette_paddr);
}

static int alloc_fbmem(struct omapfb_mem_region *region)
{
	region->vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
			region->size, &region->paddr, GFP_KERNEL);

	if (region->vaddr == NULL) {
		dev_err(dispc.fbdev->dev, "unable to allocate FB DMA memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_fbmem(struct omapfb_mem_region *region)
{
	dma_free_writecombine(dispc.fbdev->dev, region->size,
			      region->vaddr, region->paddr);
}

static struct resmap *init_resmap(unsigned long start, size_t size)
{
	unsigned page_cnt;
	struct resmap *res_map;

	page_cnt = PAGE_ALIGN(size) / PAGE_SIZE;
	res_map = kzalloc(sizeof(struct resmap) + RESMAP_SIZE(page_cnt),
			  GFP_KERNEL);
	if (res_map == NULL)
		return NULL;
	res_map->start = start;
	res_map->page_cnt = page_cnt;
	res_map->map = (unsigned long *)(res_map + 1);

	return res_map;
}

static void cleanup_resmap(struct resmap *res_map)
{
	kfree(res_map);
}

static inline int resmap_mem_type(unsigned long start)
{
	if (start >= OMAP2_SRAM_START &&
	    start < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
		return OMAPFB_MEMTYPE_SRAM;
	else
		return OMAPFB_MEMTYPE_SDRAM;
}

static inline int resmap_page_reserved(struct resmap *res_map, unsigned page_nr)
{
	return *RESMAP_PTR(res_map, page_nr) & RESMAP_MASK(page_nr) ? 1 : 0;
}

static inline void resmap_reserve_page(struct resmap *res_map, unsigned page_nr)
{
	BUG_ON(resmap_page_reserved(res_map, page_nr));
	*RESMAP_PTR(res_map, page_nr) |= RESMAP_MASK(page_nr);
}

static inline void resmap_free_page(struct resmap *res_map, unsigned page_nr)
{
	BUG_ON(!resmap_page_reserved(res_map, page_nr));
	*RESMAP_PTR(res_map, page_nr) &= ~RESMAP_MASK(page_nr);
}

static void resmap_reserve_region(unsigned long start, size_t size)
{
	struct resmap *res_map;
	unsigned start_page;
	unsigned end_page;
	int mtype;
	unsigned i;

	mtype = resmap_mem_type(start);
	res_map = dispc.res_map[mtype];
	dev_dbg(dispc.fbdev->dev, "reserve mem type %d start %08lx size %d\n",
		mtype, start, size);
	start_page = (start - res_map->start) / PAGE_SIZE;
	end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
	for (i = start_page; i < end_page; i++)
		resmap_reserve_page(res_map, i);
}

static void resmap_free_region(unsigned long start, size_t size)
{
	struct resmap *res_map;
	unsigned start_page;
	unsigned end_page;
	unsigned i;
	int mtype;

	mtype = resmap_mem_type(start);
	res_map = dispc.res_map[mtype];
	dev_dbg(dispc.fbdev->dev, "free mem type %d start %08lx size %d\n",
		mtype, start, size);
	start_page = (start - res_map->start) / PAGE_SIZE;
	end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
	for (i = start_page; i < end_page; i++)
		resmap_free_page(res_map, i);
}

static unsigned long resmap_alloc_region(int mtype, size_t size)
{
	unsigned i;
	unsigned total;
	unsigned start_page;
	unsigned long start;
	struct resmap *res_map = dispc.res_map[mtype];

	BUG_ON(mtype >= DISPC_MEMTYPE_NUM || res_map == NULL || !size);

	size = PAGE_ALIGN(size) / PAGE_SIZE;
	start_page = 0;
	total = 0;
	for (i = 0; i < res_map->page_cnt; i++) {
		if (resmap_page_reserved(res_map, i)) {
			start_page = i + 1;
			total = 0;
		} else if (++total == size)
			break;
	}
	if (total < size)
		return 0;

	start = res_map->start + start_page * PAGE_SIZE;
	resmap_reserve_region(start, size * PAGE_SIZE);

	return start;
}

/* Note that this will only work for user mappings, we don't deal with
 * kernel mappings here, so fbcon will keep using the old region.
 */
static int omap_dispc_setup_mem(int plane, size_t size, int mem_type,
				unsigned long *paddr)
{
	struct omapfb_mem_region *rg;
	unsigned long new_addr = 0;

	if ((unsigned)plane > dispc.mem_desc.region_cnt)
		return -EINVAL;
	if (mem_type >= DISPC_MEMTYPE_NUM)
		return -EINVAL;
	if (dispc.res_map[mem_type] == NULL)
		return -ENOMEM;
	rg = &dispc.mem_desc.region[plane];
	if (size == rg->size && mem_type == rg->type)
		return 0;
	if (atomic_read(&dispc.map_count[plane]))
		return -EBUSY;
	if (rg->size != 0)
		resmap_free_region(rg->paddr, rg->size);
	if (size != 0) {
		new_addr = resmap_alloc_region(mem_type, size);
		if (!new_addr) {
			/* Reallocate old region. */
			resmap_reserve_region(rg->paddr, rg->size);
			return -ENOMEM;
		}
	}
	rg->paddr = new_addr;
	rg->size = size;
	rg->type = mem_type;

	*paddr = new_addr;

	return 0;
}

static int setup_fbmem(struct omapfb_mem_desc *req_md)
{
	struct omapfb_mem_region *rg;
	int i;
	int r;
	unsigned long mem_start[DISPC_MEMTYPE_NUM];
	unsigned long mem_end[DISPC_MEMTYPE_NUM];

	if (!req_md->region_cnt) {
		dev_err(dispc.fbdev->dev, "no memory regions defined\n");
		return -ENOENT;
	}

	rg = &req_md->region[0];
	memset(mem_start, 0xff, sizeof(mem_start));
	memset(mem_end, 0, sizeof(mem_end));

	for (i = 0; i < req_md->region_cnt; i++, rg++) {
		int mtype;
		if (rg->paddr) {
			rg->alloc = 0;
			if (rg->vaddr == NULL) {
				rg->map = 1;
				if ((r = mmap_kern(rg)) < 0)
					return r;
			}
		} else {
			if (rg->type != OMAPFB_MEMTYPE_SDRAM) {
				dev_err(dispc.fbdev->dev,
					"unsupported memory type\n");
				return -EINVAL;
			}
			rg->alloc = rg->map = 1;
			if ((r = alloc_fbmem(rg)) < 0)
				return r;
		}
		mtype = rg->type;

		if (rg->paddr < mem_start[mtype])
			mem_start[mtype] = rg->paddr;
		if (rg->paddr + rg->size > mem_end[mtype])
			mem_end[mtype] = rg->paddr + rg->size;
	}

	for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
		unsigned long start;
		size_t size;
		if (mem_end[i] == 0)
			continue;
		start = mem_start[i];
		size = mem_end[i] - start;
		dispc.res_map[i] = init_resmap(start, size);
		r = -ENOMEM;
		if (dispc.res_map[i] == NULL)
			goto fail;
		/* Initial state is that everything is reserved. This
		 * includes possible holes as well, which will never be
		 * freed.
		 */
		resmap_reserve_region(start, size);
	}

	dispc.mem_desc = *req_md;

	return 0;
fail:
	for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
		if (dispc.res_map[i] != NULL)
			cleanup_resmap(dispc.res_map[i]);
	}
	return r;
}

static void cleanup_fbmem(void)
{
	struct omapfb_mem_region *rg;
	int i;

	for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
		if (dispc.res_map[i] != NULL)
			cleanup_resmap(dispc.res_map[i]);
	}
	rg = &dispc.mem_desc.region[0];
	for (i = 0; i < dispc.mem_desc.region_cnt; i++, rg++) {
		if (rg->alloc)
			free_fbmem(rg);
		else {
			if (rg->map)
				unmap_kern(rg);
		}
	}
}
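The resmap_* helpers above form a page-granularity bitmap allocator: resmap_alloc_region() does a linear first-fit scan for a run of free pages and then marks the run reserved, returning 0 when no run is long enough (address 0 is never a valid region). The following standalone sketch mirrors that first-fit scan outside the driver; all names and the page count are illustrative, not part of dispc.c.

/* Illustrative sketch only -- not part of dispc.c. */
#include <stdio.h>

#define NPAGES 16
static unsigned long bitmap[(NPAGES + 8 * sizeof(unsigned long) - 1) /
			    (8 * sizeof(unsigned long))];

static int page_reserved(unsigned nr)
{
	return (bitmap[nr / (8 * sizeof(unsigned long))] >>
		(nr % (8 * sizeof(unsigned long)))) & 1;
}

static void reserve_page(unsigned nr)
{
	bitmap[nr / (8 * sizeof(unsigned long))] |=
		1UL << (nr % (8 * sizeof(unsigned long)));
}

/* Find 'count' consecutive free pages, reserve them and return the first
 * page number, or -1 if no run is long enough.  The scan logic is the same
 * as in resmap_alloc_region(): a reserved page restarts the run. */
static int first_fit_alloc(unsigned count)
{
	unsigned i, start = 0, total = 0;

	for (i = 0; i < NPAGES; i++) {
		if (page_reserved(i)) {
			start = i + 1;
			total = 0;
		} else if (++total == count)
			break;
	}
	if (total < count)
		return -1;
	for (i = start; i < start + count; i++)
		reserve_page(i);
	return start;
}

int main(void)
{
	reserve_page(2);				/* pretend page 2 is in use */
	printf("3 pages at %d\n", first_fit_alloc(3));	/* -> 3 (skips the hole at 2) */
	printf("2 pages at %d\n", first_fit_alloc(2));	/* -> 0 (pages 0-1 still free) */
	return 0;
}
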
static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
			   struct omapfb_mem_desc *req_vram)
{
	int r;
	u32 l;
	struct lcd_panel *panel = fbdev->panel;
	int tmo = 10000;
	int skip_init = 0;
	int i;

	memset(&dispc, 0, sizeof(dispc));

	dispc.base = ioremap(DISPC_BASE, SZ_1K);
	if (!dispc.base) {
		dev_err(fbdev->dev, "can't ioremap DISPC\n");
		return -ENOMEM;
	}

	dispc.fbdev = fbdev;
	dispc.ext_mode = ext_mode;

	init_completion(&dispc.frame_done);

	if ((r = get_dss_clocks()) < 0)
		goto fail0;

	enable_interface_clocks(1);
	enable_lcd_clocks(1);

#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
	l = dispc_read_reg(DISPC_CONTROL);
	/* LCD enabled ? */
	if (l & 1) {
		pr_info("omapfb: skipping hardware initialization\n");
		skip_init = 1;
	}
#endif

	if (!skip_init) {
		/* Reset monitoring works only w/ the 54M clk */
		enable_digit_clocks(1);

		/* Soft reset */
		MOD_REG_FLD(DISPC_SYSCONFIG, 1 << 1, 1 << 1);

		while (!(dispc_read_reg(DISPC_SYSSTATUS) & 1)) {
			if (!--tmo) {
				dev_err(dispc.fbdev->dev,
					"soft reset failed\n");
				r = -ENODEV;
				enable_digit_clocks(0);
				goto fail1;
			}
		}

		enable_digit_clocks(0);
	}

	/* Enable smart idle and autoidle */
	l = dispc_read_reg(DISPC_CONTROL);
	l &= ~((3 << 12) | (3 << 3));
	l |= (2 << 12) | (2 << 3) | (1 << 0);
	dispc_write_reg(DISPC_SYSCONFIG, l);
	omap_writel(1 << 0, DSS_BASE + DSS_SYSCONFIG);

	/* Set functional clock autogating */
	l = dispc_read_reg(DISPC_CONFIG);
	l |= 1 << 9;
	dispc_write_reg(DISPC_CONFIG, l);

	l = dispc_read_reg(DISPC_IRQSTATUS);
	dispc_write_reg(l, DISPC_IRQSTATUS);

	/* Enable those that we handle always */
	omap_dispc_enable_irqs(DISPC_IRQ_FRAMEMASK);

	if ((r = request_irq(INT_24XX_DSS_IRQ, omap_dispc_irq_handler,
			     0, MODULE_NAME, fbdev)) < 0) {
		dev_err(dispc.fbdev->dev, "can't get DSS IRQ\n");
		goto fail1;
	}

	/* L3 firewall setting: enable access to OCM RAM */
	__raw_writel(0x402000b0, IO_ADDRESS(0x680050a0));

	if ((r = alloc_palette_ram()) < 0)
		goto fail2;

	if ((r = setup_fbmem(req_vram)) < 0)
		goto fail3;

	if (!skip_init) {
		for (i = 0; i < dispc.mem_desc.region_cnt; i++) {
			memset(dispc.mem_desc.region[i].vaddr, 0,
			       dispc.mem_desc.region[i].size);
		}

		/* Set logic clock to fck, pixel clock to fck/2 for now */
		MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(16, 8), 1 << 16);
		MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(0, 8), 2 << 0);

		setup_plane_fifo(0, ext_mode);
		setup_plane_fifo(1, ext_mode);
		setup_plane_fifo(2, ext_mode);

		setup_color_conv_coef();

		set_lcd_tft_mode(panel->config & OMAP_LCDC_PANEL_TFT);
		set_load_mode(DISPC_LOAD_FRAME_ONLY);

		if (!ext_mode) {
			set_lcd_data_lines(panel->data_lines);
			omap_dispc_set_lcd_size(panel->x_res, panel->y_res);
			set_lcd_timings();
		} else
			set_lcd_data_lines(panel->bpp);
		enable_rfbi_mode(ext_mode);
	}

	l = dispc_read_reg(DISPC_REVISION);
	pr_info("omapfb: DISPC version %d.%d initialized\n",
		l >> 4 & 0x0f, l & 0x0f);

	enable_lcd_clocks(0);

	return 0;
fail3:
	free_palette_ram();
fail2:
	free_irq(INT_24XX_DSS_IRQ, fbdev);
fail1:
	enable_lcd_clocks(0);
	enable_interface_clocks(0);
	put_dss_clocks();
fail0:
	iounmap(dispc.base);
	return r;
}

static void omap_dispc_cleanup(void)
{
	int i;

	omap_dispc_set_update_mode(OMAPFB_UPDATE_DISABLED);
	/* This will also disable clocks that are on */
	for (i = 0; i < dispc.mem_desc.region_cnt; i++)
		omap_dispc_enable_plane(i, 0);

	cleanup_fbmem();
	free_palette_ram();
	free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
	enable_interface_clocks(0);
	put_dss_clocks();
	iounmap(dispc.base);
}

const struct lcd_ctrl omap2_int_ctrl = {
	.name			= "internal",
	.init			= omap_dispc_init,
	.cleanup		= omap_dispc_cleanup,
	.get_caps		= omap_dispc_get_caps,
	.set_update_mode	= omap_dispc_set_update_mode,
	.get_update_mode	= omap_dispc_get_update_mode,
	.update_window		= omap_dispc_update_window,
	.suspend		= omap_dispc_suspend,
	.resume			= omap_dispc_resume,
	.setup_plane		= omap_dispc_setup_plane,
	.setup_mem		= omap_dispc_setup_mem,
	.set_scale		= omap_dispc_set_scale,
	.enable_plane		= omap_dispc_enable_plane,
	.set_color_key		= omap_dispc_set_color_key,
	.get_color_key		= omap_dispc_get_color_key,
	.mmap			= omap_dispc_mmap_user,
};
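Since omap2_int_ctrl registers omap_dispc_mmap_user() as the .mmap handler, an mmap() on the plane's fbdev node ends up there: the framebuffer is remapped write-combined into user space and the plane's map_count is bumped so omap_dispc_setup_mem() refuses to move the region while it is mapped. A minimal user-space sketch through the standard fbdev interface follows; the /dev/fb0 path and the lack of error recovery are illustrative assumptions.

/* Illustrative user-space sketch -- not part of dispc.c. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/fb.h>

int main(void)
{
	struct fb_var_screeninfo var;
	struct fb_fix_screeninfo fix;
	unsigned char *fb;
	int fd;

	fd = open("/dev/fb0", O_RDWR);		/* plane 0, assumed path */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0 ||
	    ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0) {
		perror("ioctl");
		return 1;
	}

	/* Served by the driver's mmap handler: the DMA region is mapped
	 * write-combined and the plane's map_count is incremented. */
	fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Fill the visible area with white. */
	memset(fb, 0xff, var.yres * fix.line_length);

	munmap(fb, fix.smem_len);	/* vm_ops.close drops map_count again */
	close(fd);
	return 0;
}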