/* i2o_block.c — I2O Block Storage OSM (extracted fragment) */
}static int i2ob_merge_requests(request_queue_t *q, struct request *req, struct request *next, int __max_segments){ int max_segments = i2ob_dev[MINOR(req->rq_dev)].max_segments; int total_segments = req->nr_segments + next->nr_segments; if (__max_segments < max_segments) max_segments = __max_segments; if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) total_segments--; if (total_segments > max_segments) return 0; req->nr_segments = total_segments; return 1;}static int i2ob_flush(struct i2o_controller *c, struct i2ob_device *d, int unit){ unsigned long msg; u32 m = i2ob_get(d); if(m == 0xFFFFFFFF) return -1; msg = c->mem_offset + m; /* * Ask the controller to write the cache back. This sorts out * the supertrak firmware flaw and also does roughly the right * thing for other cases too. */ i2o_raw_writel(FIVE_WORD_MSG_SIZE|SGL_OFFSET_0, msg); i2o_raw_writel(I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|d->tid, msg+4); i2o_raw_writel(i2ob_context|(unit<<8), msg+8); i2o_raw_writel(0, msg+12); i2o_raw_writel(60<<16, msg+16); DEBUG("FLUSH"); i2o_post_message(c,m); return 0;} /* * OSM reply handler. This gets all the message replies */static void i2o_block_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg){ unsigned long flags; struct i2ob_request *ireq = NULL; u8 st; u32 *m = (u32 *)msg; u8 unit = (m[2]>>8)&0xF0; /* low 4 bits are partition */ struct i2ob_device *dev = &i2ob_dev[(unit&0xF0)]; /* * Pull the lock over ready */ spin_lock_prefetch(&io_request_lock); /* * FAILed message */ if(m[0] & (1<<13)) { DEBUG("FAIL"); /* * FAILed message from controller * We increment the error count and abort it * * In theory this will never happen. 
The I2O block class * specification states that block devices never return * FAILs but instead use the REQ status field...but * better be on the safe side since no one really follows * the spec to the book :) */ ireq=&i2ob_queues[c->unit]->request_queue[m[3]]; ireq->req->errors++; spin_lock_irqsave(&io_request_lock, flags); i2ob_unhook_request(ireq, c->unit); i2ob_end_request(ireq->req); spin_unlock_irqrestore(&io_request_lock, flags); /* Now flush the message by making it a NOP */ m[0]&=0x00FFFFFF; m[0]|=(I2O_CMD_UTIL_NOP)<<24; i2o_post_message(c,virt_to_bus(m)); return; } if(msg->function == I2O_CMD_UTIL_EVT_REGISTER) { spin_lock(&i2ob_evt_lock); memcpy(evt_msg, msg, (m[0]>>16)<<2); spin_unlock(&i2ob_evt_lock); up(&i2ob_evt_sem); return; } if(!dev->i2odev) { /* * This is HACK, but Intel Integrated RAID allows user * to delete a volume that is claimed, locked, and in use * by the OS. We have to check for a reply from a * non-existent device and flag it as an error or the system * goes kaput... */ ireq=&i2ob_queues[c->unit]->request_queue[m[3]]; ireq->req->errors++; printk(KERN_WARNING "I2O Block: Data transfer to deleted device!\n"); spin_lock_irqsave(&io_request_lock, flags); i2ob_unhook_request(ireq, c->unit); i2ob_end_request(ireq->req); spin_unlock_irqrestore(&io_request_lock, flags); return; } /* * Lets see what is cooking. We stuffed the * request in the context. */ ireq=&i2ob_queues[c->unit]->request_queue[m[3]]; st=m[4]>>24; if(st!=0) { int err; char *bsa_errors[] = { "Success", "Media Error", "Failure communicating to device", "Device Failure", "Device is not ready", "Media not present", "Media is locked by another user", "Media has failed", "Failure communicating to device", "Device bus failure", "Device is locked by another user", "Device is write protected", "Device has reset", "Volume has changed, waiting for acknowledgement" }; err = m[4]&0xFFFF; /* * Device not ready means two things. 
One is that the * the thing went offline (but not a removal media) * * The second is that you have a SuperTrak 100 and the * firmware got constipated. Unlike standard i2o card * setups the supertrak returns an error rather than * blocking for the timeout in these cases. * * Don't stick a supertrak100 into cache aggressive modes */ printk(KERN_ERR "\n/dev/%s error: %s", dev->i2odev->dev_name, bsa_errors[m[4]&0XFFFF]); if(m[4]&0x00FF0000) printk(" - DDM attempted %d retries", (m[4]>>16)&0x00FF ); printk(".\n"); ireq->req->errors++; } else ireq->req->errors = 0; /* * Dequeue the request. We use irqsave locks as one day we * may be running polled controllers from a BH... */ spin_lock_irqsave(&io_request_lock, flags); i2ob_unhook_request(ireq, c->unit); i2ob_end_request(ireq->req); atomic_dec(&i2ob_queues[c->unit]->queue_depth); /* * We may be able to do more I/O */ i2ob_request(dev->req_queue); spin_unlock_irqrestore(&io_request_lock, flags);}/* * Event handler. Needs to be a separate thread b/c we may have * to do things like scan a partition table, or query parameters * which cannot be done from an interrupt or from a bottom half. */static int i2ob_evt(void *dummy){ unsigned int evt; unsigned long flags; int unit; int i; //The only event that has data is the SCSI_SMART event. 
struct i2o_reply { u32 header[4]; u32 evt_indicator; u8 ASC; u8 ASCQ; u16 pad; u8 data[16]; } *evt_local; lock_kernel(); daemonize(); unlock_kernel(); strcpy(current->comm, "i2oblock"); evt_running = 1; while(1) { if(down_interruptible(&i2ob_evt_sem)) { evt_running = 0; printk("exiting..."); break; } /* * Keep another CPU/interrupt from overwriting the * message while we're reading it * * We stuffed the unit in the TxContext and grab the event mask * None of the BSA we care about events have EventData */ spin_lock_irqsave(&i2ob_evt_lock, flags); evt_local = (struct i2o_reply *)evt_msg; spin_unlock_irqrestore(&i2ob_evt_lock, flags); unit = le32_to_cpu(evt_local->header[3]); evt = le32_to_cpu(evt_local->evt_indicator); switch(evt) { /* * New volume loaded on same TID, so we just re-install. * The TID/controller don't change as it is the same * I2O device. It's just new media that we have to * rescan. */ case I2O_EVT_IND_BSA_VOLUME_LOAD: { i2ob_install_device(i2ob_dev[unit].i2odev->controller, i2ob_dev[unit].i2odev, unit); break; } /* * No media, so set all parameters to 0 and set the media * change flag. The I2O device is still valid, just doesn't * have media, so we don't want to clear the controller or * device pointer. */ case I2O_EVT_IND_BSA_VOLUME_UNLOAD: { for(i = unit; i <= unit+15; i++) { i2ob_sizes[i] = 0; i2ob_hardsizes[i] = 0; i2ob_max_sectors[i] = 0; i2ob[i].nr_sects = 0; i2ob_gendisk.part[i].nr_sects = 0; } i2ob_media_change_flag[unit] = 1; break; } case I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ: printk(KERN_WARNING "%s: Attempt to eject locked media\n", i2ob_dev[unit].i2odev->dev_name); break; /* * The capacity has changed and we are going to be * updating the max_sectors and other information * about this disk. We try a revalidate first. If * the block device is in use, we don't want to * do that as there may be I/Os bound for the disk * at the moment. 
In that case we read the size * from the device and update the information ourselves * and the user can later force a partition table * update through an ioctl. */ case I2O_EVT_IND_BSA_CAPACITY_CHANGE: { u64 size; if(do_i2ob_revalidate(MKDEV(MAJOR_NR, unit),0) != -EBUSY) continue; if(i2ob_query_device(&i2ob_dev[unit], 0x0004, 0, &size, 8) !=0 ) i2ob_query_device(&i2ob_dev[unit], 0x0000, 4, &size, 8); spin_lock_irqsave(&io_request_lock, flags); i2ob_sizes[unit] = (int)(size>>10); i2ob_gendisk.part[unit].nr_sects = size>>9; i2ob[unit].nr_sects = (int)(size>>9); spin_unlock_irqrestore(&io_request_lock, flags); break; } /* * We got a SCSI SMART event, we just log the relevant * information and let the user decide what they want * to do with the information. */ case I2O_EVT_IND_BSA_SCSI_SMART: { char buf[16]; printk(KERN_INFO "I2O Block: %s received a SCSI SMART Event\n",i2ob_dev[unit].i2odev->dev_name); evt_local->data[16]='\0'; sprintf(buf,"%s",&evt_local->data[0]); printk(KERN_INFO " Disk Serial#:%s\n",buf); printk(KERN_INFO " ASC 0x%02x \n",evt_local->ASC); printk(KERN_INFO " ASCQ 0x%02x \n",evt_local->ASCQ); break; } /* * Non event */ case 0: break; /* * An event we didn't ask for. Call the card manufacturer * and tell them to fix their firmware :) */ case 0x20: /* * If a promise card reports 0x20 event then the brown stuff * hit the fan big time. The card seems to recover but loses * the pending writes. Deeply ungood except for testing fsck */ if(i2ob_dev[unit].i2odev->controller->bus.pci.promise) panic("I2O controller firmware failed. Reboot and force a filesystem check.\n"); default: printk(KERN_INFO "%s: Received event 0x%X we didn't register for\n" KERN_INFO " Blame the I2O card manufacturer 8)\n", i2ob_dev[unit].i2odev->dev_name, evt); break; } }; complete_and_exit(&i2ob_thread_dead,0); return 0;}/* * The I2O block driver is listed as one of those that pulls the * front entry off the queue before processing it. This is important * to remember here. 
If we drop the io lock then CURRENT will change * on us. We must unlink CURRENT in this routine before we return, if * we use it. */static void i2ob_request(request_queue_t *q){ struct request *req; struct i2ob_request *ireq; int unit; struct i2ob_device *dev; u32 m; while (!list_empty(&q->queue_head)) { /* * On an IRQ completion if there is an inactive * request on the queue head it means it isnt yet * ready to dispatch. */ req = blkdev_entry_next_request(&q->queue_head); if(req->rq_status == RQ_INACTIVE) return; unit = MINOR(req->rq_dev); dev = &i2ob_dev[(unit&0xF0)]; /* * Queue depths probably belong with some kind of * generic IOP commit control. Certainly its not right * its global! */ if(atomic_read(&i2ob_queues[dev->unit]->queue_depth) >= dev->depth) break; /* Get a message */ m = i2ob_get(dev); if(m==0xFFFFFFFF) { if(atomic_read(&i2ob_queues[dev->unit]->queue_depth) == 0) printk(KERN_ERR "i2o_block: message queue and request queue empty!!\n"); break; } /* * Everything ok, so pull from kernel queue onto our queue */ req->errors = 0; blkdev_dequeue_request(req); req->waiting = NULL; ireq = i2ob_queues[dev->unit]->i2ob_qhead; i2ob_queues[dev->unit]->i2ob_qhead = ireq->next; ireq->req = req; i2ob_send(m, dev, ireq, i2ob[unit].start_sect, (unit&0xF0)); }}/* * SCSI-CAM for ioctl geometry mapping * Duplicated with SCSI - this should be moved into somewhere common * perhaps genhd ? * * LBA -> CHS mapping table taken from: * * "Incorporating the I2O Architecture into BIOS for Intel Architecture * Platforms" * * This is an I2O document that is only available to I2O members, * not developers. 
* * From my understanding, this is how all the I2O cards do this * * Disk Size | Sectors | Heads | Cylinders * ---------------+---------+-------+------------------- * 1 < X <= 528M | 63 | 16 | X/(63 * 16 * 512) * 528M < X <= 1G | 63 | 32 | X/(63 * 32 * 512) * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) * */#define BLOCK_SIZE_528M 1081344#define BLOCK_SIZE_1G 2097152#define BLOCK_SIZE_21G 4403200#define BLOCK_SIZE_42G 8806400#define BLOCK_SIZE_84G 17612800static void i2o_block_biosparam( unsigned long capacity, unsigned short *cyls, unsigned char *hds, unsigned char *secs) { unsigned long heads, sectors, cylinders; sectors = 63L; /* Maximize sectors per track */ if(capacity <= BLOCK_SIZE_528M) heads = 16; else if(capacity <= BLOCK_SIZE_1G) heads = 32; else if(capacity <= BLOCK_SIZE_21G) heads = 64; else if(capacity <= BLOCK_SIZE_42G) heads = 128; else heads = 255; cylinders = capacity / (heads * sectors); *cyls = (unsigned short) cylinders; /* Stuff return values */ *secs = (unsigned char) sectors;