📄 i2o_block.c
		}

		/*
		 *	Everything ok, so pull from kernel queue onto our queue
		 */
		req->errors = 0;
		blkdev_dequeue_request(req);
		req->waiting = NULL;

		ireq = i2ob_queues[dev->unit]->i2ob_qhead;
		i2ob_queues[dev->unit]->i2ob_qhead = ireq->next;
		ireq->req = req;

		i2ob_send(m, dev, ireq, i2ob[unit].start_sect, (unit&0xF0));
	}
}

/*
 *	SCSI-CAM for ioctl geometry mapping
 *	Duplicated with SCSI - this should be moved into somewhere common
 *	perhaps genhd ?
 *
 *	LBA -> CHS mapping table taken from:
 *
 *	"Incorporating the I2O Architecture into BIOS for Intel Architecture
 *	 Platforms"
 *
 *	This is an I2O document that is only available to I2O members,
 *	not developers.
 *
 *	From my understanding, this is how all the I2O cards do this
 *
 *	Disk Size	 | Sectors | Heads | Cylinders
 *	-----------------+---------+-------+-------------------
 *	1 < X <= 528M	 | 63	   | 16	   | X/(63 * 16 * 512)
 *	528M < X <= 1G	 | 63	   | 32	   | X/(63 * 32 * 512)
 *	1G < X <= 21G	 | 63	   | 64	   | X/(63 * 64 * 512)
 *	21G < X <= 42G	 | 63	   | 128   | X/(63 * 128 * 512)
 *	42G < X		 | 63	   | 255   | X/(63 * 255 * 512)
 *
 */
#define	BLOCK_SIZE_528M		1081344
#define	BLOCK_SIZE_1G		2097152
#define	BLOCK_SIZE_21G		4403200
#define	BLOCK_SIZE_42G		8806400
#define	BLOCK_SIZE_84G		17612800

static void i2o_block_biosparam(
	unsigned long capacity,
	unsigned short *cyls,
	unsigned char *hds,
	unsigned char *secs)
{
	unsigned long heads, sectors, cylinders;

	sectors = 63L;				/* Maximize sectors per track */
	if(capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if(capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if(capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if(capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = capacity / (heads * sectors);

	*cyls = (unsigned short) cylinders;	/* Stuff return values */
	*secs = (unsigned char) sectors;
	*hds  = (unsigned char) heads;
}

/*
 *	Rescan the partition tables
 */

static int do_i2ob_revalidate(kdev_t dev, int maxu)
{
	int minor=MINOR(dev);
	int i;

	minor&=0xF0;

	i2ob_dev[minor].refcnt++;
	if(i2ob_dev[minor].refcnt>maxu+1)
	{
		i2ob_dev[minor].refcnt--;
		return -EBUSY;
	}

	for( i = 15; i>=0 ; i--)
	{
		int m = minor+i;
		invalidate_device(MKDEV(MAJOR_NR, m), 1);
		i2ob_gendisk.part[m].start_sect = 0;
		i2ob_gendisk.part[m].nr_sects = 0;
	}

	/*
	 *	Do a physical check and then reconfigure
	 */

	i2ob_install_device(i2ob_dev[minor].controller, i2ob_dev[minor].i2odev,
		minor);
	i2ob_dev[minor].refcnt--;
	return 0;
}
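/*
 * Worked example of the mapping above (an illustrative, stand-alone
 * sketch, not part of the driver): the capacity argument is a count of
 * 512-byte sectors, which is why the HDIO_GETGEO handler below passes
 * i2ob_sizes[u]<<1 (KiB * 2). For a 1GB disk, capacity == 2097152 ==
 * BLOCK_SIZE_1G, so heads = 32, sectors = 63 and
 * cylinders = 2097152 / (32 * 63) = 1040.
 */
#if 0	/* user-space demonstration; compiles on its own */
#include <stdio.h>

int main(void)
{
	unsigned long capacity = 2097152UL;	/* 1GB in 512-byte sectors */
	unsigned long heads, sectors = 63, cylinders;

	if (capacity <= 1081344UL)		/* BLOCK_SIZE_528M */
		heads = 16;
	else if (capacity <= 2097152UL)		/* BLOCK_SIZE_1G */
		heads = 32;
	else if (capacity <= 4403200UL)		/* BLOCK_SIZE_21G */
		heads = 64;
	else if (capacity <= 8806400UL)		/* BLOCK_SIZE_42G */
		heads = 128;
	else
		heads = 255;

	cylinders = capacity / (heads * sectors);
	printf("C/H/S = %lu/%lu/%lu\n", cylinders, heads, sectors);	/* 1040/32/63 */
	return 0;
}
#endif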
/*
 *	Issue device specific ioctl calls.
 */

static int i2ob_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct i2ob_device *dev;
	int minor;

	/* Anyone capable of this syscall can do *real bad* things */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!inode)
		return -EINVAL;
	minor = MINOR(inode->i_rdev);
	if (minor >= (MAX_I2OB<<4))
		return -ENODEV;

	dev = &i2ob_dev[minor];
	switch (cmd) {
		case BLKGETSIZE:
			return put_user(i2ob[minor].nr_sects, (long *) arg);
		case BLKGETSIZE64:
			return put_user((u64)i2ob[minor].nr_sects << 9, (u64 *)arg);

		case HDIO_GETGEO:
		{
			struct hd_geometry g;
			int u=minor&0xF0;
			i2o_block_biosparam(i2ob_sizes[u]<<1,
				&g.cylinders, &g.heads, &g.sectors);
			g.start = i2ob[minor].start_sect;
			return copy_to_user((void *)arg, &g, sizeof(g))?-EFAULT:0;
		}

		case BLKRRPART:
			if(!capable(CAP_SYS_ADMIN))
				return -EACCES;
			return do_i2ob_revalidate(inode->i_rdev,1);

		case BLKFLSBUF:
		case BLKROSET:
		case BLKROGET:
		case BLKRASET:
		case BLKRAGET:
		case BLKPG:
			return blk_ioctl(inode->i_rdev, cmd, arg);

		default:
			return -EINVAL;
	}
}

/*
 *	Close the block device down
 */

static int i2ob_release(struct inode *inode, struct file *file)
{
	struct i2ob_device *dev;
	int minor;

	minor = MINOR(inode->i_rdev);
	if (minor >= (MAX_I2OB<<4))
		return -ENODEV;
	dev = &i2ob_dev[(minor&0xF0)];

	/*
	 * This is to deal with the case of an application
	 * opening a device and then the device disappearing while
	 * it's in use, and then the application trying to release
	 * it.  ex: Unmounting a deleted RAID volume at reboot.
	 * If we send messages, it will just cause FAILs since
	 * the TID no longer exists.
	 */
	if(!dev->i2odev)
		return 0;

	if (dev->refcnt <= 0)
		printk(KERN_ALERT "i2ob_release: refcount(%d) <= 0\n", dev->refcnt);
	dev->refcnt--;
	if(dev->refcnt==0)
	{
		/*
		 *	Flush the onboard cache on unmount
		 */
		u32 msg[5];
		int *query_done = &dev->done_flag;
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid;
		msg[2] = i2ob_context|0x40000000;
		msg[3] = (u32)query_done;
		msg[4] = 60<<16;
		DEBUG("Flushing...");
		i2o_post_wait(dev->controller, msg, 20, 60);

		/*
		 *	Unlock the media
		 */
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid;
		msg[2] = i2ob_context|0x40000000;
		msg[3] = (u32)query_done;
		msg[4] = -1;
		DEBUG("Unlocking...");
		i2o_post_wait(dev->controller, msg, 20, 2);
		DEBUG("Unlocked.\n");

		/*
		 * Now unclaim the device.
		 */

		if (i2o_release_device(dev->i2odev, &i2o_block_handler))
			printk(KERN_ERR "i2ob_release: controller rejected unclaim.\n");

		DEBUG("Unclaim\n");
	}
	return 0;
}
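/*
 * The message headers built in i2ob_release() above (and i2ob_open()
 * below) all pack msg[1] the same way: the command code in bits 31-24,
 * the initiator TID (HOST_TID) in bits 23-12 and the target TID in
 * bits 11-0. A minimal stand-alone sketch of that packing follows; the
 * bit widths are inferred from the msg[] setup in this file, and the
 * command value 0x37 is only a placeholder for the real
 * I2O_CMD_BLOCK_* constants from the i2o headers.
 */
#if 0	/* illustrative sketch, not part of the driver */
#include <stdio.h>

#define SKETCH_HOST_TID		1	/* assumed host initiator TID */

static unsigned int msg_word1(unsigned int cmd, unsigned int tid)
{
	return (cmd << 24) | (SKETCH_HOST_TID << 12) | (tid & 0xfff);
}

int main(void)
{
	unsigned int w = msg_word1(0x37, 0x20a);	/* hypothetical TID */

	printf("cmd=0x%02x itid=0x%03x ttid=0x%03x\n",
	       w >> 24, (w >> 12) & 0xfff, w & 0xfff);
	return 0;
}
#endif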
/*
 *	Open the block device.
 */

static int i2ob_open(struct inode *inode, struct file *file)
{
	int minor;
	struct i2ob_device *dev;

	if (!inode)
		return -EINVAL;
	minor = MINOR(inode->i_rdev);
	if (minor >= MAX_I2OB<<4)
		return -ENODEV;
	dev=&i2ob_dev[(minor&0xF0)];

	if(!dev->i2odev)
		return -ENODEV;

	if(dev->refcnt++==0)
	{
		u32 msg[6];

		DEBUG("Claim ");
		if(i2o_claim_device(dev->i2odev, &i2o_block_handler))
		{
			dev->refcnt--;
			printk(KERN_INFO "I2O Block: Could not open device\n");
			return -EBUSY;
		}
		DEBUG("Claimed ");

		/*
		 *	Mount the media if needed. Note that we don't use
		 *	the lock bit. Since we have to issue a lock if it
		 *	refuses a mount (quite possible) then we might as
		 *	well just send two messages out.
		 */
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_MMOUNT<<24|HOST_TID<<12|dev->tid;
		msg[4] = -1;
		msg[5] = 0;
		DEBUG("Mount ");
		i2o_post_wait(dev->controller, msg, 24, 2);

		/*
		 *	Lock the media
		 */
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_MLOCK<<24|HOST_TID<<12|dev->tid;
		msg[4] = -1;
		DEBUG("Lock ");
		i2o_post_wait(dev->controller, msg, 20, 2);
		DEBUG("Ready.\n");
	}
	return 0;
}

/*
 *	Issue a device query
 */

static int i2ob_query_device(struct i2ob_device *dev, int table,
	int field, void *buf, int buflen)
{
	return i2o_query_scalar(dev->controller, dev->tid,
		table, field, buf, buflen);
}
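/*
 * Exercising the HDIO_GETGEO handler from i2ob_ioctl() above (a
 * stand-alone user-space sketch; the device node name /dev/i2o/hda is
 * an assumption and depends on how the nodes were created locally):
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry g;
	int fd = open("/dev/i2o/hda", O_RDONLY);	/* hypothetical node */

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &g) < 0) {
		perror("HDIO_GETGEO");
		return 1;
	}
	printf("C/H/S = %u/%u/%u, start = %lu\n",
	       g.cylinders, g.heads, g.sectors, g.start);
	close(fd);
	return 0;
}
#endif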
/*
 *	Install the I2O block device we found.
 */

static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, int unit)
{
	u64 size;
	u32 blocksize;
	u32 limit;
	u8 type;
	u32 flags, status;
	struct i2ob_device *dev=&i2ob_dev[unit];
	int i;

	/*
	 * For logging purposes...
	 */
	printk(KERN_INFO "i2ob: Installing tid %d device at unit %d\n",
			d->lct_data.tid, unit);

	/*
	 *	Ask for the current media data. If that isn't supported
	 *	then we ask for the device capacity data
	 */
	if(i2ob_query_device(dev, 0x0004, 1, &blocksize, 4) != 0
	  || i2ob_query_device(dev, 0x0004, 0, &size, 8) !=0 )
	{
		i2ob_query_device(dev, 0x0000, 3, &blocksize, 4);
		i2ob_query_device(dev, 0x0000, 4, &size, 8);
	}

	i2ob_query_device(dev, 0x0000, 5, &flags, 4);
	i2ob_query_device(dev, 0x0000, 6, &status, 4);
	i2ob_sizes[unit] = (int)(size>>10);
	for(i=unit; i <= unit+15 ; i++)
		i2ob_hardsizes[i] = blocksize;
	i2ob_gendisk.part[unit].nr_sects = size>>9;
	i2ob[unit].nr_sects = (int)(size>>9);

	/* Set limit based on inbound frame size */
	limit = (d->controller->status_block->inbound_frame_size - 8)/2;
	limit = limit<<9;

	/*
	 * Max number of Scatter-Gather Elements
	 */
	for(i=unit;i<=unit+15;i++)
	{
		i2ob_max_sectors[i] = 256;
		i2ob_dev[i].max_segments = (d->controller->status_block->inbound_frame_size - 8)/2;

		if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.queue_buggy == 2)
			i2ob_dev[i].depth = 32;

		if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.queue_buggy == 1)
		{
			i2ob_max_sectors[i] = 32;
			i2ob_dev[i].max_segments = 8;
			i2ob_dev[i].depth = 4;
		}

		if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.short_req)
		{
			i2ob_max_sectors[i] = 8;
			i2ob_dev[i].max_segments = 8;
		}
	}

	sprintf(d->dev_name, "%s%c", i2ob_gendisk.major_name, 'a' + (unit>>4));

	printk(KERN_INFO "%s: Max segments %d, queue depth %d, byte limit %d.\n",
		 d->dev_name, i2ob_dev[unit].max_segments, i2ob_dev[unit].depth, limit);

	i2ob_query_device(dev, 0x0000, 0, &type, 1);

	printk(KERN_INFO "%s: ", d->dev_name);
	switch(type)
	{
		case 0: printk("Disk Storage");break;
		case 4: printk("WORM");break;
		case 5: printk("CD-ROM");break;
		case 7:	printk("Optical device");break;
		default:
			printk("Type %d", type);
	}
	if(status&(1<<10))
		printk("(RAID)");

	if(((flags & (1<<3)) && !(status & (1<<3))) ||
	   ((flags & (1<<4)) && !(status & (1<<4))))
	{
		printk(KERN_INFO " Not loaded.\n");
		return 1;
	}

	printk(": %dMB, %d byte sectors", (int)(size>>20), blocksize);

	if(status&(1<<0))
	{
		u32 cachesize;
		i2ob_query_device(dev, 0x0003, 0, &cachesize, 4);
		cachesize>>=10;
		if(cachesize>4095)
			printk(", %dMb cache", cachesize>>10);
		else
			printk(", %dKb cache", cachesize);
	}
	printk(".\n");

	printk(KERN_INFO "%s: Maximum sectors/read set to %d.\n",
		d->dev_name, i2ob_max_sectors[unit]);

	/*
	 * If this is the first I2O block device found on this IOP,
	 * we need to initialize all the queue data structures
	 * before any I/O can be performed. If it fails, this
	 * device is useless.
	 */
	if(!i2ob_queues[c->unit])
	{
		if(i2ob_init_iop(c->unit))
			return 1;
	}

	/*
	 * This will save one level of lookup/indirection in critical
	 * code so that we can directly get the queue ptr from the
	 * device instead of having to go through the IOP data structure.
	 */
	dev->req_queue = &i2ob_queues[c->unit]->req_queue;

	grok_partitions(&i2ob_gendisk, unit>>4, 1<<4, (long)(size>>9));

	/*
	 * Register for the events we're interested in and that the
	 * device actually supports.
	 */
	i2o_event_register(c, d->lct_data.tid, i2ob_context, unit,
		(I2OB_EVENT_MASK & d->lct_data.event_capabilities));
	return 0;
}

/*
 * Initialize IOP specific queue structures.  This is called
 * once for each IOP that has a block device sitting behind it.
 */
static int i2ob_init_iop(unsigned int unit)
{
	int i;

	i2ob_queues[unit] = (struct i2ob_iop_queue*) kmalloc(sizeof(struct i2ob_iop_queue), GFP_ATOMIC);
	if(!i2ob_queues[unit])
	{
		printk(KERN_WARNING "Could not allocate request queue for I2O block device!\n");
		return -1;
	}

	for(i = 0; i< MAX_I2OB_DEPTH; i++)
	{
		i2ob_queues[unit]->request_queue[i].next = &i2ob_queues[unit]->request_queue[i+1];
		i2ob_queues[unit]->request_queue[i].num = i;
	}

	/* Queue is MAX_I2OB + 1... */
	i2ob_queues[unit]->request_queue[i].next = NULL;
	i2ob_queues[unit]->i2ob_qhead = &i2ob_queues[unit]->request_queue[0];
	atomic_set(&i2ob_queues[unit]->queue_depth, 0);

	blk_init_queue(&i2ob_queues[unit]->req_queue, i2ob_request);
	blk_queue_headactive(&i2ob_queues[unit]->req_queue, 0);
	i2ob_queues[unit]->req_queue.back_merge_fn = i2ob_back_merge;
	i2ob_queues[unit]->req_queue.front_merge_fn = i2ob_front_merge;
	i2ob_queues[unit]->req_queue.merge_requests_fn = i2ob_merge_requests;
	i2ob_queues[unit]->req_queue.queuedata = &i2ob_queues[unit];

	return 0;
}

/*
 * Get the request queue for the given device.
 */
static request_queue_t* i2ob_get_queue(kdev_t dev)
{
	int unit = MINOR(dev)&0xF0;
	return i2ob_dev[unit].req_queue;
}

/*
 * Probe the I2O subsystem for block class devices
 */
static void i2ob_scan(int bios)
{
	int i;
	int warned = 0;

	struct i2o_device *d, *b=NULL;
	struct i2o_controller *c;
	struct i2ob_device *dev;

	for(i=0; i< MAX_I2O_CONTROLLERS; i++)
	{
		c=i2o_find_controller(i);

		if(c==NULL)
			continue;

		/*