📄 drm_drv.h
/* drm_init is called via init_module at module load time.
 * (The opening of this function was truncated in the source page and is
 * reconstructed here from the declarations it plainly requires.) */
static int __init drm_init( void )
{
	drm_device_t *dev;
	int i;
#if __HAVE_CTX_BITMAP
	int retcode;
#endif
	DRM_DEBUG( "\n" );

#ifdef MODULE
	DRM(parse_options)( drm_opts );
#endif

	DRM(numdevs) = drm_count_cards();
	/* Force at least one instance. */
	if (DRM(numdevs) <= 0)
		DRM(numdevs) = 1;

	DRM(device) = kmalloc(sizeof(*DRM(device)) * DRM(numdevs), GFP_KERNEL);
	if (!DRM(device)) {
		return -ENOMEM;
	}
	DRM(minor) = kmalloc(sizeof(*DRM(minor)) * DRM(numdevs), GFP_KERNEL);
	if (!DRM(minor)) {
		kfree(DRM(device));
		return -ENOMEM;
	}

	DRIVER_PREINIT();

	DRM(mem_init)();

	for (i = 0; i < DRM(numdevs); i++) {
		dev = &(DRM(device)[i]);
		memset( (void *)dev, 0, sizeof(*dev) );
		dev->count_lock = SPIN_LOCK_UNLOCKED;
		sema_init( &dev->struct_sem, 1 );

		if ((DRM(minor)[i] = DRM(stub_register)(DRIVER_NAME, &DRM(fops), dev)) < 0)
			return -EPERM;
		dev->device = MKDEV(DRM_MAJOR, DRM(minor)[i] );
		dev->name   = DRIVER_NAME;

#if __REALLY_HAVE_AGP
		dev->agp = DRM(agp_init)();
#if __MUST_HAVE_AGP
		if ( dev->agp == NULL ) {
			DRM_ERROR( "Cannot initialize the agpgart module.\n" );
			DRM(stub_unregister)(DRM(minor)[i]);
			DRM(takedown)( dev );
			return -ENOMEM;
		}
#endif
#if __REALLY_HAVE_MTRR
		if (dev->agp)
			dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
						       dev->agp->agp_info.aper_size*1024*1024,
						       MTRR_TYPE_WRCOMB,
						       1 );
#endif
#endif

#if __HAVE_CTX_BITMAP
		retcode = DRM(ctxbitmap_init)( dev );
		if( retcode ) {
			DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
			DRM(stub_unregister)(DRM(minor)[i]);
			DRM(takedown)( dev );
			return retcode;
		}
#endif
		DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
			  DRIVER_NAME,
			  DRIVER_MAJOR,
			  DRIVER_MINOR,
			  DRIVER_PATCHLEVEL,
			  DRIVER_DATE,
			  DRM(minor)[i] );
	}

	DRIVER_POSTINIT();

	return 0;
}

/* drm_cleanup is called via cleanup_module at module unload time. */
static void __exit drm_cleanup( void )
{
	drm_device_t *dev;
	int i;

	DRM_DEBUG( "\n" );

	for (i = DRM(numdevs) - 1; i >= 0; i--) {
		dev = &(DRM(device)[i]);
		if ( DRM(stub_unregister)(DRM(minor)[i]) ) {
			DRM_ERROR( "Cannot unload module\n" );
		} else {
			DRM_DEBUG("minor %d unregistered\n", DRM(minor)[i]);
			if (i == 0) {
				DRM_INFO( "Module unloaded\n" );
			}
		}
#if __HAVE_CTX_BITMAP
		DRM(ctxbitmap_cleanup)( dev );
#endif

#if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
		if ( dev->agp && dev->agp->agp_mtrr >= 0) {
			int retval;
			retval = mtrr_del( dev->agp->agp_mtrr,
					   dev->agp->agp_info.aper_base,
					   dev->agp->agp_info.aper_size*1024*1024 );
			DRM_DEBUG( "mtrr_del=%d\n", retval );
		}
#endif

		DRM(takedown)( dev );

#if __REALLY_HAVE_AGP
		if ( dev->agp ) {
			DRM(agp_uninit)();
			DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
			dev->agp = NULL;
		}
#endif
	}
	DRIVER_POSTCLEANUP();
	kfree(DRM(minor));
	kfree(DRM(device));
	DRM(numdevs) = 0;
}

module_init( drm_init );
module_exit( drm_cleanup );

int DRM(version)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_version_t version;
	int len;

	if ( copy_from_user( &version,
			     (drm_version_t *)arg,
			     sizeof(version) ) )
		return -EFAULT;

	/* Copy a driver string out to userspace: at most name##_len bytes
	 * of value are copied, and name##_len is set to the full string
	 * length so the caller can size a buffer and retry. */
#define DRM_COPY( name, value )						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( copy_to_user( name, value, len ) )			\
			return -EFAULT;					\
	}

	version.version_major = DRIVER_MAJOR;
	version.version_minor = DRIVER_MINOR;
	version.version_patchlevel = DRIVER_PATCHLEVEL;

	DRM_COPY( version.name, DRIVER_NAME );
	DRM_COPY( version.date, DRIVER_DATE );
	DRM_COPY( version.desc, DRIVER_DESC );

	if ( copy_to_user( (drm_version_t *)arg,
			   &version,
			   sizeof(version) ) )
		return -EFAULT;
	return 0;
}
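Because DRM_COPY reports the full string length back in name##_len while copying at most the length the caller supplied, userspace typically invokes the version ioctl twice: once with zeroed lengths to learn the sizes, then again with buffers allocated to fit. A minimal sketch, assuming the drm_version_t layout and DRM_IOCTL_VERSION number from the same era's drm.h and a device node such as /dev/dri/card0 (error handling trimmed):

/* Example (not part of drm_drv.h): two-pass version query from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "drm.h"

int print_drm_version(const char *node)
{
	drm_version_t v = {0};
	int fd = open(node, O_RDWR);
	if (fd < 0) return -1;

	/* Pass 1: with NULL buffers, only the lengths come back. */
	if (ioctl(fd, DRM_IOCTL_VERSION, &v) < 0) return -1;

	v.name = malloc(v.name_len + 1);
	v.date = malloc(v.date_len + 1);
	v.desc = malloc(v.desc_len + 1);

	/* Pass 2: strings are copied out, up to the lengths we offered.
	 * DRM_COPY does not nul-terminate, so terminate by hand. */
	if (ioctl(fd, DRM_IOCTL_VERSION, &v) < 0) return -1;
	v.name[v.name_len] = v.date[v.date_len] = v.desc[v.desc_len] = '\0';

	printf("%s %d.%d.%d (%s)\n", v.name, v.version_major,
	       v.version_minor, v.version_patchlevel, v.date);
	close(fd);
	return 0;
}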
int DRM(open)( struct inode *inode, struct file *filp )
{
	drm_device_t *dev = NULL;
	int retcode = 0;
	int i;

	for (i = 0; i < DRM(numdevs); i++) {
		if (MINOR(inode->i_rdev) == DRM(minor)[i]) {
			dev = &(DRM(device)[i]);
			break;
		}
	}
	if (!dev) {
		return -ENODEV;
	}

	DRM_DEBUG( "open_count = %d\n", dev->open_count );

	retcode = DRM(open_helper)( inode, filp, dev );
	if ( !retcode ) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
		spin_lock( &dev->count_lock );
		if ( !dev->open_count++ ) {
			spin_unlock( &dev->count_lock );
			return DRM(setup)( dev );
		}
		spin_unlock( &dev->count_lock );
	}

	return retcode;
}

int DRM(release)( struct inode *inode, struct file *filp )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	int retcode = 0;

	lock_kernel();
	dev = priv->dev;

	DRM_DEBUG( "open_count = %d\n", dev->open_count );

	DRIVER_PRERELEASE();

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG( "pid = %d, device = 0x%x, open_count = %d\n",
		   current->pid, dev->device, dev->open_count );

	if ( dev->lock.hw_lock &&
	     _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
	     dev->lock.pid == current->pid ) {
		DRM_DEBUG( "Process %d dead, freeing lock for context %d\n",
			   current->pid,
			   _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
#if __HAVE_RELEASE
		DRIVER_RELEASE();
#endif
		DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
				_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );

		/* FIXME: may require heavy-handed reset of
		   hardware at this point, possibly
		   processed via a callback to the X
		   server. */
	}
#if __HAVE_RELEASE
	else if ( dev->lock.hw_lock ) {
		/* The lock is required to reclaim buffers */
		DECLARE_WAITQUEUE( entry, current );
		add_wait_queue( &dev->lock.lock_queue, &entry );
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if ( !dev->lock.hw_lock ) {
				/* Device has been unregistered */
				retcode = -EINTR;
				break;
			}
			if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
					     DRM_KERNEL_CONTEXT ) ) {
				dev->lock.pid       = priv->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
				break;	/* Got lock */
			}
			/* Contention */
#if 0
			atomic_inc( &dev->total_sleeps );
#endif
			schedule();
			if ( signal_pending( current ) ) {
				retcode = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue( &dev->lock.lock_queue, &entry );
		if( !retcode ) {
			DRIVER_RELEASE();
			DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
					DRM_KERNEL_CONTEXT );
		}
	}
#elif __HAVE_DMA
	DRM(reclaim_buffers)( dev, priv->pid );
#endif

	DRM(fasync)( -1, filp, 0 );

	down( &dev->struct_sem );
	if ( priv->remove_auth_on_close == 1 ) {
		drm_file_t *temp = dev->file_first;
		while ( temp ) {
			temp->authenticated = 0;
			temp = temp->next;
		}
	}
	if ( priv->prev ) {
		priv->prev->next = priv->next;
	} else {
		dev->file_first = priv->next;
	}
	if ( priv->next ) {
		priv->next->prev = priv->prev;
	} else {
		dev->file_last = priv->prev;
	}
	up( &dev->struct_sem );

	DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );

	/* ========================================================
	 * End inline drm_release
	 */

#if LINUX_VERSION_CODE < 0x020333
	MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
	atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
	spin_lock( &dev->count_lock );
	if ( !--dev->open_count ) {
		if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
			DRM_ERROR( "Device busy: %d %d\n",
				   atomic_read( &dev->ioctl_count ),
				   dev->blocked );
			spin_unlock( &dev->count_lock );
			unlock_kernel();
			return -EBUSY;
		}
		spin_unlock( &dev->count_lock );
		unlock_kernel();
		return DRM(takedown)( dev );
	}
	spin_unlock( &dev->count_lock );
	unlock_kernel();
	return retcode;
}
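The dispatcher below indexes a static DRM(ioctls) table by ioctl number and checks each entry's root_only and auth_needed flags before calling its handler. A sketch of what such entries look like, assuming the drm_ioctl_desc_t field order implied by the dispatcher (func, auth_needed, root_only); the handlers and flag values shown are illustrative, not the full table a real driver assembles:

/* Illustrative only: shape of the dispatch table DRM(ioctl) consults. */
static drm_ioctl_desc_t DRM(ioctls)[] = {
	/*                                    func           auth  root */
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version),    0,    0 },
	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]    = { DRM(lock),       1,    0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]  = { DRM(unlock),     1,    0 },
};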
/* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm. */
int DRM(ioctl)( struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t *func;
	int nr = DRM_IOCTL_NR(cmd);
	int retcode = 0;

	atomic_inc( &dev->ioctl_count );
	atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
	++priv->ioctl_count;

	DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%x, auth=%d\n",
		   current->pid, cmd, nr, dev->device, priv->authenticated );

	if ( nr >= DRIVER_IOCTL_COUNT ) {
		retcode = -EINVAL;
	} else {
		ioctl = &DRM(ioctls)[nr];
		func = ioctl->func;

		if ( !func ) {
			DRM_DEBUG( "no function\n" );
			retcode = -EINVAL;
		} else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) ) ||
			    ( ioctl->auth_needed && !priv->authenticated ) ) {
			retcode = -EACCES;
		} else {
			retcode = func( inode, filp, cmd, arg );
		}
	}

	atomic_dec( &dev->ioctl_count );
	return retcode;
}

int DRM(lock)( struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	DECLARE_WAITQUEUE( entry, current );
	drm_lock_t lock;
	int ret = 0;
#if __HAVE_MULTIPLE_DMA_QUEUES
	drm_queue_t *q;
#endif
#if __HAVE_DMA_HISTOGRAM
	cycles_t start;

	dev->lck_start = start = get_cycles();
#endif

	if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
		return -EFAULT;

	if ( lock.context == DRM_KERNEL_CONTEXT ) {
		DRM_ERROR( "Process %d using kernel context %d\n",
			   current->pid, lock.context );
		return -EINVAL;
	}

	DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		   lock.context, current->pid,
		   dev->lock.hw_lock->lock, lock.flags );

#if __HAVE_DMA_QUEUE
	if ( lock.context < 0 )
		return -EINVAL;
#elif __HAVE_MULTIPLE_DMA_QUEUES
	if ( lock.context < 0 || lock.context >= dev->queue_count )
		return -EINVAL;
	q = dev->queuelist[lock.context];
#endif

#if __HAVE_DMA_FLUSH
	ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
#endif
	if ( !ret ) {
		add_wait_queue( &dev->lock.lock_queue, &entry );
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if ( !dev->lock.hw_lock ) {
				/* Device has been unregistered */
				ret = -EINTR;
				break;
			}
			if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
					     lock.context ) ) {
				dev->lock.pid       = current->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
				break;	/* Got lock */
			}

			/* Contention */
			schedule();
			if ( signal_pending( current ) ) {
				ret = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue( &dev->lock.lock_queue, &entry );
	}

#if __HAVE_DMA_FLUSH
	DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
#endif

	if ( !ret ) {
		sigemptyset( &dev->sigmask );
		sigaddset( &dev->sigmask, SIGSTOP );
		sigaddset( &dev->sigmask, SIGTSTP );
		sigaddset( &dev->sigmask, SIGTTIN );
		sigaddset( &dev->sigmask, SIGTTOU );
		dev->sigdata.context = lock.context;
		dev->sigdata.lock    = dev->lock.hw_lock;
		block_all_signals( DRM(notifier), &dev->sigdata, &dev->sigmask );

#if __HAVE_DMA_READY
		if ( lock.flags & _DRM_LOCK_READY ) {
			DRIVER_DMA_READY();
		}
#endif
#if __HAVE_DMA_QUIESCENT
		if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
			DRIVER_DMA_QUIESCENT();
		}
#endif
#if __HAVE_KERNEL_CTX_SWITCH
		if ( dev->last_context != lock.context ) {
			DRM(context_switch)(dev, dev->last_context,
					    lock.context);
		}
#endif
	}

	DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );

#if __HAVE_DMA_HISTOGRAM
	atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);
#endif
	return ret;
}
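From userspace, DRM(lock) above and DRM(unlock) below bracket direct hardware access: a client takes the heavyweight lock with its own (never the kernel's) context handle, touches the hardware, and releases. A minimal sketch, assuming the drm_lock_t layout and the DRM_IOCTL_LOCK/DRM_IOCTL_UNLOCK numbers from the era's drm.h:

/* Example (not part of drm_drv.h): bracketing hardware access with the
 * DRM lock. `ctx` must be a context handle previously obtained from the
 * kernel, never DRM_KERNEL_CONTEXT (the lock ioctl rejects that). */
#include <sys/ioctl.h>
#include "drm.h"

int with_hw_lock(int fd, int ctx, void (*touch_hw)(void))
{
	drm_lock_t lock = {0};

	lock.context = ctx;
	lock.flags   = 0;	/* or _DRM_LOCK_READY / _DRM_LOCK_QUIESCENT */
	if (ioctl(fd, DRM_IOCTL_LOCK, &lock) < 0)
		return -1;	/* interrupted waits surface as errors here */

	touch_hw();		/* exclusive hardware access */

	if (ioctl(fd, DRM_IOCTL_UNLOCK, &lock) < 0)
		return -1;
	return 0;
}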
"interrupted" : "has lock" );#if __HAVE_DMA_HISTOGRAM atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);#endif return ret;}int DRM(unlock)( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ){ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_lock_t lock; if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) ) return -EFAULT; if ( lock.context == DRM_KERNEL_CONTEXT ) { DRM_ERROR( "Process %d using kernel context %d\n", current->pid, lock.context ); return -EINVAL; } atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );#if __HAVE_KERNEL_CTX_SWITCH /* We no longer really hold it, but if we are the next * agent to request it then we should just be able to * take it immediately and not eat the ioctl. */ dev->lock.pid = 0; { __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock; unsigned int old, new, prev, ctx; ctx = lock.context; do { old = *plock; new = ctx; prev = cmpxchg(plock, old, new); } while (prev != old); } wake_up_interruptible(&dev->lock.lock_queue);#else DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT );#if __HAVE_DMA_SCHEDULE DRM(dma_schedule)( dev, 1 );#endif /* FIXME: Do we ever really need to check this??? */ if ( 1 /* !dev->context_flag */ ) { if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT ) ) { DRM_ERROR( "\n" ); } }#endif /* !__HAVE_KERNEL_CTX_SWITCH */ unblock_all_signals(); return 0;}