spull.c
return 0;
}
/*
* Block-driver specific functions
*/
/*
* Find the device for this request.
*/
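/*
 * DEVICE_NR comes from spull.h (not part of this excerpt); in the
 * LDD2 sample it strips the partition bits from the minor number,
 * essentially MINOR(dev) >> SPULL_SHIFT, so every partition of a
 * unit maps to the same Spull_Dev.
 */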
static Spull_Dev *spull_locate_device(const struct request *req)
{
int devno;
Spull_Dev *device;
/* Check if the minor number is in range */
devno = DEVICE_NR(req->rq_dev);
if (devno >= spull_devs) {
static int count = 0;
if (count++ < 5) /* print the message at most five times */
printk(KERN_WARNING "spull: request for unknown device\n");
return NULL;
}
device = spull_devices + devno;
return device;
}
/*
* Perform an actual transfer.
*/
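/*
 * Note on units: req->sector and start_sect both count sectors of
 * SPULL_HARDSECT bytes, so their sum times SPULL_HARDSECT is a byte
 * offset into the unit's in-memory data area.
 */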
static int spull_transfer(Spull_Dev *device, const struct request *req)
{
int size, minor = MINOR(req->rq_dev);
u8 *ptr;
ptr = device->data +
(spull_partitions[minor].start_sect + req->sector)*SPULL_HARDSECT;
size = req->current_nr_sectors*SPULL_HARDSECT;
/*
* Make sure that the transfer fits within the device.
*/
if (req->sector + req->current_nr_sectors >
spull_partitions[minor].nr_sects) {
static int count = 0;
if (count++ < 5)
printk(KERN_WARNING "spull: request past end of partition\n");
return 0;
}
/*
* Looks good, do the transfer.
*/
switch(req->cmd) {
case READ:
memcpy(req->buffer, ptr, size); /* from spull to buffer */
return 1;
case WRITE:
memcpy(ptr, req->buffer, size); /* from buffer to spull */
return 1;
default:
/* can't happen */
return 0;
}
}
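/*
 * The request function. Note the conditional prototype below: 2.4
 * kernels pass the request queue to the function, while older
 * kernels keep one global queue and call it with no arguments.
 */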
#ifdef LINUX_24
void spull_request(request_queue_t *q)
#else
void spull_request()
#endif
{
Spull_Dev *device;
int status;
    unsigned long flags;
while(1) {
INIT_REQUEST; /* returns when queue is empty */
        /* Which "device" are we using? (the device lock is taken below) */
device = spull_locate_device (CURRENT);
if (device == NULL) {
end_request(0);
continue;
}
spin_lock_irqsave(&device->lock, flags);
/* Perform the transfer and clean up. */
status = spull_transfer(device, CURRENT);
spin_unlock_irqrestore(&device->lock, flags);
        end_request(status); /* report the transfer's success or failure */
}
}
/*
* The fake interrupt-driven request
*/
struct timer_list spull_timer; /* the engine for async invocation */
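/*
 * The timer is what fakes the interrupt: the request function arms
 * it and returns without completing the request; spull_interrupt,
 * installed as the timer function in spull_init, completes the
 * request when the timer fires.
 */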
#ifdef LINUX_24
void spull_irqdriven_request(request_queue_t *q)
#else
void spull_irqdriven_request()
#endif
{
Spull_Dev *device;
int status;
    unsigned long flags;
/* If we are already processing requests, don't do any more now. */
if (spull_busy)
return;
while(1) {
INIT_REQUEST; /* returns when queue is empty */
/* Which "device" are we using? */
device = spull_locate_device (CURRENT);
if (device == NULL) {
end_request(0);
continue;
}
spin_lock_irqsave(&device->lock, flags);
/* Perform the transfer and clean up. */
status = spull_transfer(device, CURRENT);
spin_unlock_irqrestore(&device->lock, flags);
/* ... and wait for the timer to expire -- no end_request(1) */
spull_timer.expires = jiffies + spull_irq;
add_timer(&spull_timer);
spull_busy = 1;
return;
}
}
/* this is invoked when the timer expires */
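/*
 * This runs in timer (interrupt) context. end_request() manipulates
 * the request queue, which the 2.4 block layer protects with
 * io_request_lock; the request functions above are called with that
 * lock already held, but here we must take it ourselves.
 */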
void spull_interrupt(unsigned long unused)
{
unsigned long flags;
spin_lock_irqsave(&io_request_lock, flags);
end_request(1); /* This request is done - we always succeed */
spull_busy = 0; /* We have io_request_lock, no conflict with request */
if (! QUEUE_EMPTY) /* more of them? */
#ifdef LINUX_24
spull_irqdriven_request(NULL); /* Start the next transfer */
#else
spull_irqdriven_request();
#endif
spin_unlock_irqrestore(&io_request_lock, flags);
}
/*
* Finally, the module stuff
*/
int spull_init(void)
{
int result, i;
/*
* Copy the (static) cfg variables to public prefixed ones to allow
* snoozing with a debugger.
*/
spull_major = major;
spull_devs = devs;
spull_rahead = rahead;
spull_size = size;
spull_blksize = blksize;
/*
* Register your major, and accept a dynamic number
*/
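    /*
     * register_blkdev returns the assigned number when major is 0
     * (dynamic assignment) and 0 when a specific major is granted;
     * the checks below handle both cases.
     */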
result = register_blkdev(spull_major, "spull", &spull_bdops);
if (result < 0) {
printk(KERN_WARNING "spull: can't get major %d\n",spull_major);
return result;
}
if (spull_major == 0) spull_major = result; /* dynamic */
major = spull_major; /* Use `major' later on to save typing */
spull_gendisk.major = major; /* was unknown at load time */
/*
* allocate the devices -- we can't have them static, as the number
* can be specified at load time
*/
spull_devices = kmalloc(spull_devs * sizeof (Spull_Dev), GFP_KERNEL);
if (!spull_devices)
goto fail_malloc;
memset(spull_devices, 0, spull_devs * sizeof (Spull_Dev));
for (i=0; i < spull_devs; i++) {
/* data and usage remain zeroed */
spull_devices[i].size = blksize * spull_size;
init_timer(&(spull_devices[i].timer));
spull_devices[i].timer.data = (unsigned long)(spull_devices+i);
spull_devices[i].timer.function = spull_expires;
spin_lock_init(&spull_devices[i].lock);
}
/*
* Assign the other needed values: request, rahead, size, blksize,
* hardsect. All the minor devices feature the same value.
* Note that `spull' defines all of them to allow testing non-default
* values. A real device could well avoid setting values in global
* arrays if it uses the default values.
*/
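    /* read_ahead[] holds the per-major read-ahead limit, in sectors */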
read_ahead[major] = spull_rahead;
result = -ENOMEM; /* for the possible errors */
spull_sizes = kmalloc( (spull_devs << SPULL_SHIFT) * sizeof(int),
GFP_KERNEL);
if (!spull_sizes)
goto fail_malloc;
/* Start with zero-sized partitions, and correctly sized units */
memset(spull_sizes, 0, (spull_devs << SPULL_SHIFT) * sizeof(int));
for (i=0; i< spull_devs; i++)
spull_sizes[i<<SPULL_SHIFT] = spull_size;
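    /*
     * blk_size[major] is the kernel's per-minor array of device sizes
     * (in kilobytes in 2.4); aliasing it with gendisk->sizes keeps
     * the two views consistent.
     */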
blk_size[MAJOR_NR] = spull_gendisk.sizes = spull_sizes;
/* Allocate the partitions array. */
spull_partitions = kmalloc( (spull_devs << SPULL_SHIFT) *
sizeof(struct hd_struct), GFP_KERNEL);
if (!spull_partitions)
goto fail_malloc;
memset(spull_partitions, 0, (spull_devs << SPULL_SHIFT) *
sizeof(struct hd_struct));
/* fill in whole-disk entries */
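    /* nr_sects counts hardware sectors: spull_size blocks, each worth blksize/SPULL_HARDSECT sectors */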
for (i=0; i < spull_devs; i++)
spull_partitions[i << SPULL_SHIFT].nr_sects =
spull_size*(blksize/SPULL_HARDSECT);
spull_gendisk.part = spull_partitions;
spull_gendisk.nr_real = spull_devs;
#ifndef LINUX_24
spull_gendisk.max_nr = spull_devs;
#endif
/*
* Put our gendisk structure on the list.
*/
spull_gendisk.next = gendisk_head;
gendisk_head = &spull_gendisk;
/* dump the partition table to see it */
for (i=0; i < spull_devs << SPULL_SHIFT; i++)
PDEBUGG("part %i: beg %lx, size %lx\n", i,
spull_partitions[i].start_sect,
spull_partitions[i].nr_sects);
/*
* Allow interrupt-driven operation, if "irq=" has been specified
*/
spull_irq = irq; /* copy the static variable to the visible one */
if (spull_irq) {
PDEBUG("setting timer\n");
spull_timer.function = spull_interrupt;
blk_init_queue(BLK_DEFAULT_QUEUE(major), spull_irqdriven_request);
}
else
blk_init_queue(BLK_DEFAULT_QUEUE(major), spull_request);
#ifdef NOTNOW
for (i = 0; i < spull_devs; i++)
register_disk(NULL, MKDEV(major, i), 1, &spull_bdops,
spull_size << 1);
#endif
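    /*
     * register_disk would have the kernel scan the device for a
     * partition table; it is compiled out because a freshly loaded
     * spull unit is blank, and the full driver detects partitions
     * later, through revalidation.
     */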
#ifndef SPULL_DEBUG
EXPORT_NO_SYMBOLS; /* otherwise, leave global symbols visible */
#endif
printk ("<1>spull: init complete, %d devs, size %d blks %d\n",
spull_devs, spull_size, spull_blksize);
return 0; /* succeed */
fail_malloc:
read_ahead[major] = 0;
if (spull_sizes) kfree(spull_sizes);
if (spull_partitions) kfree(spull_partitions);
blk_size[major] = NULL;
if (spull_devices) kfree(spull_devices);
unregister_blkdev(major, "spull");
return result;
}
void spull_cleanup(void)
{
int i;
struct gendisk **gdp;
/*
* Before anything else, get rid of the timer functions. Set the "usage"
* flag on each device as well, under lock, so that if the timer fires up
* just before we delete it, it will either complete or abort. Otherwise
* we have nasty race conditions to worry about.
*/
for (i = 0; i < spull_devs; i++) {
Spull_Dev *dev = spull_devices + i;
del_timer(&dev->timer);
spin_lock(&dev->lock);
dev->usage++;
spin_unlock(&dev->lock);
}
/* flush it all and reset all the data structures */
/*
* Unregister the device now to avoid further operations during cleanup.
*/
unregister_blkdev(major, "spull");
for (i = 0; i < (spull_devs << SPULL_SHIFT); i++)
fsync_dev(MKDEV(spull_major, i)); /* flush the devices */
blk_cleanup_queue(BLK_DEFAULT_QUEUE(major));
read_ahead[major] = 0;
kfree(blk_size[major]); /* which is gendisk->sizes as well */
blk_size[major] = NULL;
kfree(spull_gendisk.part);
kfree(blksize_size[major]);
blksize_size[major] = NULL;
/*
* Get our gendisk structure off the list.
*/
for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next))
if (*gdp == &spull_gendisk) {
*gdp = (*gdp)->next;
break;
}
/* finally, the usual cleanup */
for (i=0; i < spull_devs; i++) {
if (spull_devices[i].data)
vfree(spull_devices[i].data);
}
kfree(spull_devices);
}
module_init(spull_init);
module_exit(spull_cleanup);
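/*
 * A quick usage sketch (node names and sizes are illustrative, and
 * assume the cfg variables above are exported as module parameters,
 * as in the LDD2 sample):
 *
 *   insmod spull.o size=2048
 *   major=$(awk '$2 == "spull" {print $1}' /proc/devices)
 *   mknod /dev/pda b $major 0    # whole first unit
 *   mke2fs /dev/pda
 *   mount /dev/pda /mnt
 */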