/* sbull.c -- sample block device driver (non-source web-viewer header removed) */
#endif
{
while(1) {
INIT_REQUEST;
printk("<1>request %p: cmd %i sec %li (nr. %li)\n", CURRENT,
CURRENT->cmd,
CURRENT->sector,
CURRENT->current_nr_sectors);
end_request(1); /* success */
}
}
#elif defined(SBULL_MULTIQUEUE) /* 2.4 only */
/*
* Clean up this request.
*/
/*
 * Finish one segment of the given request.  Returns 1 while the request
 * still has outstanding segments; once the last segment is done, performs
 * final completion via end_that_request_last() and returns 0.
 */
int sbull_end_request(struct request *req, int status)
{
    int pending = end_that_request_first(req, status, DEVICE_NAME);

    if (pending)
        return 1;
    end_that_request_last(req);
    return 0;
}
/*
 * Request function for the SBULL_MULTIQUEUE (per-device queue) build.
 * Entered by the block layer with io_request_lock held (see the "no race"
 * comment below).  Dequeues and services every request on this queue; the
 * per-device "busy" flag keeps a second invocation from working the same
 * queue concurrently.
 */
void sbull_request(request_queue_t *q)
{
Sbull_Dev *device;
struct request *req;
int status;
/* Find our device */
device = sbull_locate_device (blkdev_entry_next_request(&q->queue_head));
if (device->busy) /* no race here - io_request_lock held */
return;
device->busy = 1;
/* Process requests in the queue */
while(! list_empty(&q->queue_head)) {
/* Pull the next request off the list. */
req = blkdev_entry_next_request(&q->queue_head);
blkdev_dequeue_request(req);
/* Drop the global io_request_lock during the transfer and take the
 * per-device lock instead, serializing against the other code paths
 * that use device->lock. */
spin_unlock_irq (&io_request_lock);
spin_lock(&device->lock);
/* Process all of the buffers in this (possibly clustered) request. */
do {
status = sbull_transfer(device, req);
} while (end_that_request_first(req, status, DEVICE_NAME));
spin_unlock(&device->lock);
/* Reacquire io_request_lock before final completion and before
 * touching the queue again at the top of the loop. */
spin_lock_irq (&io_request_lock);
end_that_request_last(req);
}
device->busy = 0;
}
/*
* Tell the block layer where to queue a request.
*/
/*
 * Tell the block layer which request queue serves a given device.
 * Out-of-range minors yield NULL, with a warning printed for at most
 * the first five such lookups.
 */
request_queue_t *sbull_find_queue(kdev_t device)
{
    static int complaints = 0;   /* cap the warning at five messages */
    int devno = DEVICE_NR(device);

    if (devno < sbull_devs)
        return &sbull_devices[devno].queue;
    if (complaints++ < 5)
        printk(KERN_WARNING "sbull: request for unknown device\n");
    return NULL;
}
#else /* not SBULL_MULTIQUEUE */
/*
 * Request function for the ordinary single-queue builds.  On 2.4 kernels
 * it receives the queue pointer; on older kernels it takes no arguments
 * and operates through the CURRENT macro.  Loops until INIT_REQUEST
 * returns (i.e. the queue is empty).
 */
#ifdef LINUX_24
void sbull_request(request_queue_t *q)
#else
void sbull_request()
#endif
{
Sbull_Dev *device;
int status;
while(1) {
INIT_REQUEST; /* returns when queue is empty */
/* Which "device" are we using? */
device = sbull_locate_device (CURRENT);
if (device == NULL) {
end_request(0); /* fail the request: unknown device */
continue;
}
/* Perform the transfer and clean up. */
spin_lock(&device->lock); /* serialize against other users of the device */
status = sbull_transfer(device, CURRENT);
spin_unlock(&device->lock);
end_request(status); /* complete with sbull_transfer's status */
}
}
#endif /* not SBULL_EMPTY_REQUEST nor SBULL_MULTIQUEUE */
/*
* Finally, the module stuff
*/
/*
 * sbull_init -- module initialization.
 *
 * Registers the block major (dynamic if `major' is 0), fills in the
 * per-major global arrays (read_ahead, blk_size, blksize_size,
 * hardsect_size), allocates and zeroes the Sbull_Dev array, initializes
 * each device's timer and spinlock, installs the request queue(s) for
 * whichever queueing mode was compiled in, and optionally brings up the
 * raw char interface.  Returns 0 on success or a negative errno.
 */
int sbull_init(void)
{
int result, i;
/*
 * Copy the (static) cfg variables to public prefixed ones to allow
 * snoozing with a debugger.
 */
sbull_major = major;
sbull_devs = devs;
sbull_rahead = rahead;
sbull_size = size;
sbull_blksize = blksize;
sbull_hardsect = hardsect;
#ifdef LINUX_20
/* Hardsect can't be changed :( */
if (hardsect != 512) {
printk(KERN_ERR "sbull: can't change hardsect size\n");
hardsect = sbull_hardsect = 512;
}
#endif
/*
 * Register your major, and accept a dynamic number
 */
result = register_blkdev(sbull_major, "sbull", &sbull_bdops);
if (result < 0) {
printk(KERN_WARNING "sbull: can't get major %d\n",sbull_major);
return result;
}
if (sbull_major == 0) sbull_major = result; /* dynamic */
major = sbull_major; /* Use `major' later on to save typing */
/*
 * Assign the other needed values: request, rahead, size, blksize,
 * hardsect. All the minor devices feature the same value.
 * Note that `sbull' defines all of them to allow testing non-default
 * values. A real device could well avoid setting values in global
 * arrays if it uses the default values.
 */
read_ahead[major] = sbull_rahead;
result = -ENOMEM; /* for the possible errors */
sbull_sizes = kmalloc(sbull_devs * sizeof(int), GFP_KERNEL);
if (!sbull_sizes)
goto fail_malloc;
for (i=0; i < sbull_devs; i++) /* all the same size */
sbull_sizes[i] = sbull_size;
blk_size[major]=sbull_sizes;
sbull_blksizes = kmalloc(sbull_devs * sizeof(int), GFP_KERNEL);
if (!sbull_blksizes)
goto fail_malloc;
for (i=0; i < sbull_devs; i++) /* all the same blocksize */
sbull_blksizes[i] = sbull_blksize;
blksize_size[major]=sbull_blksizes;
sbull_hardsects = kmalloc(sbull_devs * sizeof(int), GFP_KERNEL);
if (!sbull_hardsects)
goto fail_malloc;
for (i=0; i < sbull_devs; i++) /* all the same hardsect */
sbull_hardsects[i] = sbull_hardsect;
hardsect_size[major]=sbull_hardsects;
/* FIXME: max_readahead and max_sectors */
/*
 * allocate the devices -- we can't have them static, as the number
 * can be specified at load time
 */
sbull_devices = kmalloc(sbull_devs * sizeof (Sbull_Dev), GFP_KERNEL);
if (!sbull_devices)
goto fail_malloc;
memset(sbull_devices, 0, sbull_devs * sizeof (Sbull_Dev));
for (i=0; i < sbull_devs; i++) {
/* data and usage remain zeroed */
sbull_devices[i].size = 1024 * sbull_size;
init_timer(&(sbull_devices[i].timer));
sbull_devices[i].timer.data = (unsigned long)(sbull_devices+i);
sbull_devices[i].timer.function = sbull_expires;
spin_lock_init(&sbull_devices[i].lock);
}
/*
 * Get the queue set up, and register our (nonexistent) partitions.
 */
#ifdef SBULL_MULTIQUEUE
for (i = 0; i < sbull_devs; i++) {
blk_init_queue(&sbull_devices[i].queue, sbull_request);
blk_queue_headactive(&sbull_devices[i].queue, 0);
}
blk_dev[major].queue = sbull_find_queue;
#else
# ifdef LINUX_24
if (noqueue)
blk_queue_make_request(BLK_DEFAULT_QUEUE(major), sbull_make_request);
else
# endif /* LINUX_24 */
blk_init_queue(BLK_DEFAULT_QUEUE(major), sbull_request);
#endif
/* A no-op in 2.4.0, but all drivers seem to do it anyway */
for (i = 0; i < sbull_devs; i++)
register_disk(NULL, MKDEV(major, i), 1, &sbull_bdops,
sbull_size << 1);
#ifndef SBULL_DEBUG
EXPORT_NO_SYMBOLS; /* otherwise, leave global symbols visible */
#endif
printk ("<1>sbull: init complete, %d devs, size %d blks %d hs %d\n",
sbull_devs, sbull_size, sbull_blksize, sbull_hardsect);
#ifdef SBULL_MULTIQUEUE
printk ("<1>sbull: Using multiqueue request\n");
#elif defined(LINUX_24)
if (noqueue)
printk (KERN_INFO "sbull: using direct make_request\n");
#endif
#ifdef DO_RAW_INTERFACE
sbullr_init();
#endif
return 0; /* succeed */
/*
 * Error unwind: reset the global arrays and free whatever got allocated.
 * NOTE(review): the `if (ptr) kfree(ptr)' guards are kept deliberately --
 * kfree(NULL) may not have been safe on the oldest kernels this file
 * supports; confirm before simplifying.
 */
fail_malloc:
read_ahead[major] = 0;
if (sbull_sizes) kfree(sbull_sizes);
blk_size[major] = NULL;
if (sbull_blksizes) kfree(sbull_blksizes);
blksize_size[major] = NULL;
if (sbull_hardsects) kfree(sbull_hardsects);
hardsect_size[major] = NULL;
if (sbull_devices) kfree(sbull_devices);
unregister_blkdev(major, "sbull");
return result;
}
/*
 * sbull_cleanup -- module teardown.
 *
 * Stops the per-device timers, releases the raw interface (if built),
 * flushes each device, unregisters the major, tears down the request
 * queue(s), resets the per-major global arrays, and frees all memory.
 * Ordering matters: timers first, queues before the arrays they use.
 */
void sbull_cleanup(void)
{
int i;
/*
 * Before anything else, get rid of the timer functions. Set the "usage"
 * flag on each device as well, under lock, so that if the timer fires up
 * just before we delete it, it will either complete or abort. Otherwise
 * we have nasty race conditions to worry about.
 */
for (i = 0; i < sbull_devs; i++) {
Sbull_Dev *dev = sbull_devices + i;
del_timer(&dev->timer);
spin_lock(&dev->lock);
dev->usage++;
spin_unlock(&dev->lock);
}
#ifdef DO_RAW_INTERFACE
sbullr_release();
#endif
/* flush it all and reset all the data structures */
for (i=0; i<sbull_devs; i++)
fsync_dev(MKDEV(sbull_major, i)); /* flush the devices */
unregister_blkdev(major, "sbull");
/*
 * Fix up the request queue(s)
 */
#ifdef SBULL_MULTIQUEUE
for (i = 0; i < sbull_devs; i++)
blk_cleanup_queue(&sbull_devices[i].queue);
blk_dev[major].queue = NULL;
#else
blk_cleanup_queue(BLK_DEFAULT_QUEUE(major));
#endif
/* Clean up the global arrays (allocated in sbull_init) */
read_ahead[major] = 0;
kfree(blk_size[major]);
blk_size[major] = NULL;
kfree(blksize_size[major]);
blksize_size[major] = NULL;
kfree(hardsect_size[major]);
hardsect_size[major] = NULL;
/* FIXME: max_readahead and max_sectors */
/* finally, the usual cleanup: per-device data areas, then the array */
for (i=0; i < sbull_devs; i++) {
if (sbull_devices[i].data)
vfree(sbull_devices[i].data);
}
kfree(sbull_devices);
}
/*
* Below here is the "raw device" implementation, available only
* in 2.4.
*/
#ifdef DO_RAW_INTERFACE
/*
* Transfer an iovec
*/
/*
 * Transfer an iovec: do sector-at-a-time I/O over each mapped page of
 * the kiobuf by faking up a request structure and handing it to
 * sbull_transfer().  Returns the number of sectors completed, which may
 * be short of nsectors if a transfer fails partway through.
 */
static int sbullr_rw_iovec(Sbull_Dev *dev, struct kiobuf *iobuf, int rw,
int sector, int nsectors)
{
struct request fakereq;
struct page *page;
int offset = iobuf->offset, ndone = 0, pageno, result;
/* Perform I/O on each sector */
fakereq.sector = sector;
fakereq.current_nr_sectors = 1;
fakereq.cmd = rw;
for (pageno = 0; pageno < iobuf->nr_pages; pageno++) {
page = iobuf->maplist[pageno];
while (ndone < nsectors) {
/* Fake up a request structure for the operation; the page must be
 * kmapped for the duration of the transfer only. */
fakereq.buffer = (void *) (kmap(page) + offset);
result = sbull_transfer(dev, &fakereq);
kunmap(page);
if (result == 0) /* transfer failed: report how far we got */
return ndone;
/* Move on to the next one */
ndone++;
fakereq.sector++;
offset += SBULLR_SECTOR;
if (offset >= PAGE_SIZE) {
/* Crossed a page boundary: continue at offset 0 of the next page */
offset = 0;
break;
}
}
}
return ndone;
}
/*
* Handle actual transfers of data.
*/
/*
 * Handle actual transfers of data for the raw interface.
 *
 * Rejects any offset, count, or user-buffer address that is not
 * sector-aligned.  Maps the user buffer with a kiobuf, performs the I/O
 * under the device lock via sbullr_rw_iovec(), then unmaps and frees the
 * kiovec.  On success advances *offset by the bytes moved and returns
 * the byte count; on failure returns a negative errno.
 */
static int sbullr_transfer (Sbull_Dev *dev, char *buf, size_t count,
loff_t *offset, int rw)
{
struct kiobuf *iobuf;
int result;
/* Only block alignment and size allowed */
if ((*offset & SBULLR_SECTOR_MASK) || (count & SBULLR_SECTOR_MASK))
return -EINVAL;
if ((unsigned long) buf & SBULLR_SECTOR_MASK)
return -EINVAL;
/* Allocate an I/O vector */
result = alloc_kiovec(1, &iobuf);
if (result)
return result;
/* Map the user I/O buffer and do the I/O. */
result = map_user_kiobuf(rw, iobuf, (unsigned long) buf, count);
if (result) {
free_kiovec(1, &iobuf);
return result;
}
spin_lock(&dev->lock);
result = sbullr_rw_iovec(dev, iobuf, rw, *offset >> SBULLR_SECTOR_SHIFT,
count >> SBULLR_SECTOR_SHIFT);
spin_unlock(&dev->lock);
/* Clean up and return.  result is a sector count here; convert to bytes. */
unmap_kiobuf(iobuf);
free_kiovec(1, &iobuf);
if (result > 0)
*offset += result << SBULLR_SECTOR_SHIFT;
return result << SBULLR_SECTOR_SHIFT;
}
/*
* Read and write syscalls.
*/
/*
 * read() method for the raw device: locate the Sbull_Dev for this minor
 * and delegate to sbullr_transfer() with direction READ.
 */
ssize_t sbullr_read(struct file *filp, char *buf, size_t size, loff_t *off)
{
    int minor = MINOR(filp->f_dentry->d_inode->i_rdev);

    return sbullr_transfer(sbull_devices + minor, buf, size, off, READ);
}
/*
 * write() method for the raw device: locate the Sbull_Dev for this minor
 * and delegate to sbullr_transfer() with direction WRITE.  The cast drops
 * const because sbullr_transfer takes a plain char * for both directions.
 */
ssize_t sbullr_write(struct file *filp, const char *buf, size_t size,
loff_t *off)
{
    int minor = MINOR(filp->f_dentry->d_inode->i_rdev);

    return sbullr_transfer(sbull_devices + minor, (char *) buf, size, off,
                           WRITE);
}
/* Nonzero once register_chrdev() for the raw interface has succeeded;
 * checked by sbullr_release() so we only unregister what we registered. */
static int sbullr_registered = 0;
/*
 * File operations for the raw (char) side.  read/write use the direct
 * kiobuf path; open, release and ioctl are shared with the block device.
 */
static struct file_operations sbullr_fops = {
read: sbullr_read,
write: sbullr_write,
open: sbull_open,
release: sbull_release,
ioctl: sbull_ioctl,
};
static void sbullr_init()
{
int result;
/* Simplify the math */
if (sbull_hardsect != SBULLR_SECTOR) {
printk(KERN_NOTICE "Sbullr requires hardsect = %d\n", SBULLR_SECTOR);
return;
}
SET_MODULE_OWNER(&sbullr_fops);
result = register_chrdev(sbullr_major, "sbullr", &sbullr_fops);
if (result >= 0)
sbullr_registered = 1;
if (sbullr_major == 0)
sbullr_major = result;
}
/*
 * Tear down the raw char interface; a no-op when registration never
 * succeeded in sbullr_init().
 */
static void sbullr_release()
{
    if (!sbullr_registered)
        return;
    unregister_chrdev(sbullr_major, "sbullr");
}
#endif /* DO_RAW_INTERFACE */
module_init(sbull_init);
module_exit(sbull_cleanup);
/* (non-source web-viewer UI text removed -- end of file) */