drm_vm.h
From the "YL-2410 Linux 2.6.8 kernel source" — C header file, 664 lines in total, page 1 of 2.
return DRM(do_vm_nopage)(vma, address);}

/** nopage wrapper for shared-memory (SHM) mappings; delegates to do_vm_shm_nopage(). */
static struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
				       unsigned long address, int unused)
{
	return DRM(do_vm_shm_nopage)(vma, address);
}

/** nopage wrapper for DMA buffer mappings; delegates to do_vm_dma_nopage(). */
static struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
				       unsigned long address, int unused)
{
	return DRM(do_vm_dma_nopage)(vma, address);
}

/** nopage wrapper for scatter-gather mappings; delegates to do_vm_sg_nopage(). */
static struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
				       unsigned long address, int unused)
{
	return DRM(do_vm_sg_nopage)(vma, address);
}
#endif

/** AGP virtual memory operations */
static struct vm_operations_struct DRM(vm_ops) = {
	.nopage = DRM(vm_nopage),
	.open   = DRM(vm_open),
	.close  = DRM(vm_close),
};

/** Shared virtual memory operations */
static struct vm_operations_struct DRM(vm_shm_ops) = {
	.nopage = DRM(vm_shm_nopage),
	.open   = DRM(vm_open),
	/* NOTE: SHM is the only type with its own close method (vm_shm_close). */
	.close  = DRM(vm_shm_close),
};

/** DMA virtual memory operations */
static struct vm_operations_struct DRM(vm_dma_ops) = {
	.nopage = DRM(vm_dma_nopage),
	.open   = DRM(vm_open),
	.close  = DRM(vm_close),
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct DRM(vm_sg_ops) = {
	.nopage = DRM(vm_sg_nopage),
	.open   = DRM(vm_open),
	.close  = DRM(vm_close),
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void DRM(vm_open)(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	/* Record this VMA in the device's list of live mappings.  Allocation
	 * failure is tolerated silently: vma_count was already bumped, and the
	 * entry is only used for bookkeeping. */
	vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		/* struct_sem serializes access to dev->vmalist. */
		down(&dev->struct_sem);
		vma_entry->vma = vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid = current->pid;
		dev->vmalist = vma_entry;
		up(&dev->struct_sem);
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
*
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
void DRM(vm_close)(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	/* struct_sem serializes access to dev->vmalist; singly-linked list
	 * removal, so the predecessor is tracked while scanning. */
	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	up(&dev->struct_sem);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_device_dma_t *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	/* BKL guards the dev->dma lookup/validation below (2.4/2.6-era code). */
	lock_kernel();
	dev = priv->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();

	vma->vm_ops = &DRM(vm_dma_ops);
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
	vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	DRM(vm_open)(vma);
	return 0;
}

/* Driver-overridable hooks: how to derive a map's mmap offset, and the
 * register-space offset (Alpha needs the hose's dense-memory correction). */
#ifndef DRIVER_GET_MAP_OFS
#define DRIVER_GET_MAP_OFS()	(map->offset)
#endif

#ifndef DRIVER_GET_REG_OFS
#ifdef __alpha__
#define DRIVER_GET_REG_OFS()	(dev->hose->dense_mem_base - \
				 dev->hose->mem_space->start)
#else
#define DRIVER_GET_REG_OFS()	0
#endif
#endif

/**
 * mmap method. (NOTE(review): original heading said "mmap DMA memory" —
 * copy-paste from mmap_dma above; this comment documents DRM(mmap).)
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
* \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	unsigned long offset = 0;
	struct list_head *list;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	/* Only authenticated DRM clients may map device memory. */
	if ( !priv->authenticated )
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!VM_OFFSET(vma)
#if __REALLY_HAVE_AGP
	    && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return DRM(mmap_dma)(filp, vma);

	/* A sequential search of a linked list is
	   fine here because: 1) there will only be
	   about 5-10 entries in the list and, 2) a
	   DRI client only has to do this mapping
	   once, so it doesn't have to be optimized
	   for performance, even if the list was a
	   bit longer. */
	list_for_each(list, &dev->maplist->head) {
		unsigned long off;

		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
		if (!map)
			continue;
		off = DRIVER_GET_MAP_OFS();
		if (off == VM_OFFSET(vma))
			break;
	}

	/* No matching map, or map restricted to privileged clients. */
	if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Enforce read-only maps for unprivileged clients by stripping write
	 * permission from both the VMA flags and the page protection bits. */
	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
#if __REALLY_HAVE_AGP
		if (dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in nopage()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &DRM(vm_ops);
			break;
		}
#endif
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
			/* Mark the pages uncached (PCD set, PWT clear) on CPUs
			 * newer than a 386, except for AGP maps. */
			if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
				pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
				pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
			}
#elif defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
			vma->vm_flags |= VM_IO;	/* not in core dump */
		}
#if defined(__ia64__)
		if (map->type != _DRM_AGP)
			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
#endif
		offset = DRIVER_GET_REG_OFS();
		/* Map the physical range directly into the VMA; sparc needs
		 * the io_ variant (extra "space" argument). */
#ifdef __sparc__
		if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
					VM_OFFSET(vma) + offset,
					vma->vm_end - vma->vm_start,
					vma->vm_page_prot, 0))
#else
		if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
				     VM_OFFSET(vma) + offset,
				     vma->vm_end - vma->vm_start,
				     vma->vm_page_prot))
#endif
			return -EAGAIN;
		DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
		vma->vm_ops = &DRM(vm_ops);
		break;
	case _DRM_SHM:
		vma->vm_ops = &DRM(vm_shm_ops);
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
		vma->vm_flags |= VM_LOCKED;
#else
		vma->vm_flags |= VM_RESERVED;
#endif
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &DRM(vm_sg_ops);
		vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
		vma->vm_flags |= VM_LOCKED;
#else
		vma->vm_flags |= VM_RESERVED;
#endif
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
	vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	DRM(vm_open)(vma);
	return 0;
}
Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?