📄 i2o_block.c
		case CACHE_SMARTFETCH:
			if (req->nr_sectors > 16)
				writel(0x201F0008, &msg->body[0]);
			else
				writel(0x001F0000, &msg->body[0]);
			break;
		}
	} else {
		writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
		       &msg->u.head[1]);

		sg_flags = 0x14000000;

		switch (dev->wcache) {
		case CACHE_NULL:
			writel(0, &msg->body[0]);
			break;
		case CACHE_WRITETHROUGH:
			writel(0x001F0008, &msg->body[0]);
			break;
		case CACHE_WRITEBACK:
			writel(0x001F0010, &msg->body[0]);
			break;
		case CACHE_SMARTBACK:
			if (req->nr_sectors > 16)
				writel(0x001F0004, &msg->body[0]);
			else
				writel(0x001F0010, &msg->body[0]);
			break;
		case CACHE_SMARTTHROUGH:
			if (req->nr_sectors > 16)
				writel(0x001F0004, &msg->body[0]);
			else
				writel(0x001F0010, &msg->body[0]);
		}
	}

	for (i = sgnum; i > 0; i--) {
		if (i == 1)
			sg_flags |= 0x80000000;
		writel(sg_flags | sg_dma_len(sg), mptr);
		writel(sg_dma_address(sg), mptr + 4);
		mptr += 8;
		sg++;
	}

	writel(I2O_MESSAGE_SIZE(((unsigned long)mptr -
				 (unsigned long)&msg->u.head[0]) >> 2) |
	       SGL_OFFSET_8, &msg->u.head[0]);

	list_add_tail(&ireq->queue, &dev->open_queue);
	dev->open_queue_depth++;

	i2o_msg_post(c, m);

	return 0;

context_remove:
	i2o_cntxt_list_remove(c, req);

nop_msg:
	i2o_msg_nop(c, m);

exit:
	return rc;
};

/**
 *	i2o_block_request_fn - request queue handling function
 *	@q: request queue from which the request could be fetched
 *
 *	Takes the next request from the queue, transfers it and, if no error
 *	occurs, dequeues it from the queue. On arrival of the reply the message
 *	will be processed further. If an error occurs the request is requeued.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
	struct request *req;

	while (!blk_queue_plugged(q)) {
		req = elv_next_request(q);
		if (!req)
			break;

		if (blk_fs_request(req)) {
			struct i2o_block_delayed_request *dreq;
			struct i2o_block_request *ireq = req->special;
			unsigned int queue_depth;

			queue_depth = ireq->i2o_blk_dev->open_queue_depth;

			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
				if (!i2o_block_transfer(req)) {
					blkdev_dequeue_request(req);
					continue;
				}

			if (queue_depth)
				break;

			/* stop the queue and retry later */

			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
			if (!dreq)
				continue;

			dreq->queue = q;
			INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
				  dreq);

			osm_info("transfer error\n");

			if (!queue_delayed_work(i2o_block_driver.event_queue,
						&dreq->work,
						I2O_BLOCK_RETRY_TIME))
				kfree(dreq);
			else {
				blk_stop_queue(q);
				break;
			}
		} else
			end_request(req, 0);
	}
};

/* I2O Block device operations definition */
static struct block_device_operations i2o_block_fops = {
	.owner = THIS_MODULE,
	.open = i2o_block_open,
	.release = i2o_block_release,
	.ioctl = i2o_block_ioctl,
	.media_changed = i2o_block_media_changed
};

/**
 *	i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 *	Allocate memory for the i2o_block_device struct, gendisk and request
 *	queue and initialize them as far as no additional information is
 *	needed.
 *
 *	Returns a pointer to the allocated I2O Block device on success or a
 *	negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
	struct i2o_block_device *dev;
	struct gendisk *gd;
	struct request_queue *queue;
	int rc;

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		osm_err("Insufficient memory to allocate I2O Block disk.\n");
		rc = -ENOMEM;
		goto exit;
	}
	memset(dev, 0, sizeof(*dev));

	INIT_LIST_HEAD(&dev->open_queue);
	spin_lock_init(&dev->lock);
	dev->rcache = CACHE_PREFETCH;
	dev->wcache = CACHE_WRITEBACK;

	/* allocate a gendisk with 16 partitions */
	gd = alloc_disk(16);
	if (!gd) {
		osm_err("Insufficient memory to allocate gendisk.\n");
		rc = -ENOMEM;
		goto cleanup_dev;
	}

	/* initialize the request queue */
	queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
	if (!queue) {
		osm_err("Insufficient memory to allocate request queue.\n");
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

	gd->major = I2O_MAJOR;
	gd->queue = queue;
	gd->fops = &i2o_block_fops;
	gd->private_data = dev;

	dev->gd = gd;

	return dev;

cleanup_queue:
	put_disk(gd);

cleanup_dev:
	kfree(dev);

exit:
	return ERR_PTR(rc);
};

/**
 *	i2o_block_probe - verify if dev is an I2O Block device and install it
 *	@dev: device to verify if it is an I2O Block device
 *
 *	We only verify if the user_tid of the device is 0xfff and then install
 *	the device. Otherwise it is used by some other device (e.g. RAID).
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_block_device *i2o_blk_dev;
	struct i2o_controller *c = i2o_dev->iop;
	struct gendisk *gd;
	struct request_queue *queue;
	static int unit = 0;
	int rc;
	u64 size;
	u32 blocksize;
	u16 power;
	u32 flags, status;
	int segments;

	/* skip devices which are used by IOP */
	if (i2o_dev->lct_data.user_tid != 0xfff) {
		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
		return -ENODEV;
	}

	osm_info("New device detected (TID: %03x)\n", i2o_dev->lct_data.tid);

	if (i2o_device_claim(i2o_dev)) {
		osm_warn("Unable to claim device. Installation aborted\n");
		rc = -EFAULT;
		goto exit;
	}

	i2o_blk_dev = i2o_block_device_alloc();
	if (IS_ERR(i2o_blk_dev)) {
		osm_err("could not alloc a new I2O block device");
		rc = PTR_ERR(i2o_blk_dev);
		goto claim_release;
	}

	i2o_blk_dev->i2o_dev = i2o_dev;
	dev_set_drvdata(dev, i2o_blk_dev);

	/* setup gendisk */
	gd = i2o_blk_dev->gd;
	gd->first_minor = unit << 4;
	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
	sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
	gd->driverfs_dev = &i2o_dev->device;

	/* setup request queue */
	queue = gd->queue;
	queue->queuedata = i2o_blk_dev;

	blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
	blk_queue_max_sectors(queue, I2O_MAX_SECTORS);

	if (c->short_req)
		segments = 8;
	else {
		i2o_status_block *sb;

		sb = c->status_block.virt;

		segments = (sb->inbound_frame_size -
			    sizeof(struct i2o_message) / 4 - 4) / 2;
	}

	blk_queue_max_hw_segments(queue, segments);

	osm_debug("max sectors = %d\n", I2O_MAX_SECTORS);
	osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS);
	osm_debug("hw segments = %d\n", segments);

	/*
	 *	Ask for the current media data. If that isn't supported
	 *	then we ask for the device capacity data
	 */
	if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0
	    || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) {
		i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4);
		i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8);
	}
	osm_debug("blocksize = %d\n", blocksize);

	if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
		power = 0;
	i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
	i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
	set_capacity(gd, size >> 9);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

	add_disk(gd);

	unit++;

	return 0;

claim_release:
	i2o_device_claim_release(i2o_dev);

exit:
	return rc;
};

/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
	.name = OSM_NAME,
	.event = i2o_block_event,
	.reply = i2o_block_reply,
	.classes = i2o_block_class_id,
	.driver = {
		   .probe = i2o_block_probe,
		   .remove = i2o_block_remove,
		   },
};

/**
 *	i2o_block_init - Block OSM initialization function
 *
 *	Allocates the slab and mempool for request structs, registers the
 *	i2o_block block device and finally registers the Block OSM with the
 *	I2O core.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
	int rc;
	int size;

	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

	/* Allocate request mempool and slab */
	size = sizeof(struct i2o_block_request);
	i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
						  SLAB_HWCACHE_ALIGN, NULL,
						  NULL);
	if (!i2o_blk_req_pool.slab) {
		osm_err("can't init request slab\n");
		rc = -ENOMEM;
		goto exit;
	}

	i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
					       mempool_alloc_slab,
					       mempool_free_slab,
					       i2o_blk_req_pool.slab);
	if (!i2o_blk_req_pool.pool) {
		osm_err("can't init request mempool\n");
		rc = -ENOMEM;
		goto free_slab;
	}

	/* Register the block device interfaces */
	rc = register_blkdev(I2O_MAJOR, "i2o_block");
	if (rc) {
		osm_err("unable to register block device\n");
		goto free_mempool;
	}
#ifdef MODULE
	osm_info("registered device at major %d\n", I2O_MAJOR);
#endif

	/* Register Block OSM into I2O core */
	rc = i2o_driver_register(&i2o_block_driver);
	if (rc) {
		osm_err("Could not register Block driver\n");
		goto unregister_blkdev;
	}

	return 0;

unregister_blkdev:
	unregister_blkdev(I2O_MAJOR, "i2o_block");

free_mempool:
	mempool_destroy(i2o_blk_req_pool.pool);

free_slab:
	kmem_cache_destroy(i2o_blk_req_pool.slab);

exit:
	return rc;
};

/**
 *	i2o_block_exit - Block OSM exit function
 *
 *	Unregisters the Block OSM from the I2O core, unregisters the i2o_block
 *	block device and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
	/* Unregister I2O Block OSM from I2O core */
	i2o_driver_unregister(&i2o_block_driver);

	/* Unregister block device */
	unregister_blkdev(I2O_MAJOR, "i2o_block");

	/* Free request mempool and slab */
	mempool_destroy(i2o_blk_req_pool.pool);
	kmem_cache_destroy(i2o_blk_req_pool.slab);
};

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);
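Note: when a transfer fails, i2o_block_request_fn() above stops the queue and schedules i2o_block_delayed_request_fn() through the driver's event workqueue. That handler is defined earlier in the file and is not part of this excerpt; the sketch below shows only its typical shape (restart the stopped queue, then free the bookkeeping struct), assuming the 2.6-era workqueue and block-layer API, and is not the file's verbatim definition.

/*
 * Sketch only (not the original source): approximate shape of the delayed
 * retry handler scheduled from i2o_block_request_fn().
 */
static void i2o_block_delayed_request_fn(void *delayed_request)
{
	struct i2o_block_delayed_request *dreq = delayed_request;
	struct request_queue *q = dreq->queue;

	/* blk_start_queue() must be called with the queue lock held;
	 * we run in process context here, so a plain irq lock is fine. */
	spin_lock_irq(q->queue_lock);
	blk_start_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* the work item was allocated in i2o_block_request_fn() */
	kfree(dreq);
}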