
i2o_block.c (C source, excerpt) - from linux-2.6.15.6
		writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid,
		       &msg->u.head[1]);
		writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
		writel(tid, mptr++);

		/*
		 * ENABLE_DISCONNECT
		 * SIMPLE_TAG
		 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
		 */
		if (rq_data_dir(req) == READ) {
			cmd[0] = 0x28;
			scsi_flags = 0x60a0000a;
		} else {
			cmd[0] = 0x2A;
			scsi_flags = 0xa0a0000a;
		}

		writel(scsi_flags, mptr++);

		*((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
		*((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);

		memcpy_toio(mptr, cmd, 10);
		mptr += 4;
		writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
	} else
#endif
	{
		writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
		writel(ctl_flags, mptr++);
		writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
		writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++);
		writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++);
	}

	if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
		rc = -ENOMEM;
		goto context_remove;
	}

	writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) |
	       sgl_offset, &msg->u.head[0]);

	list_add_tail(&ireq->queue, &dev->open_queue);
	dev->open_queue_depth++;

	i2o_msg_post(c, m);

	return 0;

      context_remove:
	i2o_cntxt_list_remove(c, req);

      nop_msg:
	i2o_msg_nop(c, m);

      exit:
	return rc;
};

/**
 *	i2o_block_request_fn - request queue handling function
 *	@q: request queue from which the request could be fetched
 *
 *	Takes the next request from the queue, transfers it and, if no error
 *	occurs, dequeues it from the queue. On arrival of the reply the message
 *	will be processed further. If an error occurs the request is requeued.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
	struct request *req;

	while (!blk_queue_plugged(q)) {
		req = elv_next_request(q);
		if (!req)
			break;

		if (blk_fs_request(req)) {
			struct i2o_block_delayed_request *dreq;
			struct i2o_block_request *ireq = req->special;
			unsigned int queue_depth;

			queue_depth = ireq->i2o_blk_dev->open_queue_depth;

			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
				if (!i2o_block_transfer(req)) {
					blkdev_dequeue_request(req);
					continue;
				} else
					osm_info("transfer error\n");
			}

			if (queue_depth)
				break;

			/* stop the queue and retry later */
			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
			if (!dreq)
				continue;

			dreq->queue = q;
			INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
				  dreq);

			if (!queue_delayed_work(i2o_block_driver.event_queue,
						&dreq->work,
						I2O_BLOCK_RETRY_TIME))
				kfree(dreq);
			else {
				blk_stop_queue(q);
				break;
			}
		} else
			end_request(req, 0);
	}
};

/* I2O Block device operations definition */
static struct block_device_operations i2o_block_fops = {
	.owner = THIS_MODULE,
	.open = i2o_block_open,
	.release = i2o_block_release,
	.ioctl = i2o_block_ioctl,
	.media_changed = i2o_block_media_changed
};
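/*
 * Editor's sketch, not part of this excerpt: i2o_block_request_fn above
 * defers a stalled queue through struct i2o_block_delayed_request and
 * i2o_block_delayed_request_fn, both of which are defined earlier in
 * i2o_block.c. A minimal version under the 2.6.15-era workqueue API
 * (three-argument INIT_WORK carrying a data pointer) could look like this:
 */
struct i2o_block_delayed_request {
	struct work_struct work;
	struct request_queue *queue;
};

static void i2o_block_delayed_request_fn(void *delayed_request)
{
	struct i2o_block_delayed_request *dreq = delayed_request;
	struct request_queue *q = dreq->queue;
	unsigned long flags;

	/* blk_start_queue() must be called with the queue lock held */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(dreq);
}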
/**
 *	i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 *	Allocates memory for the i2o_block_device struct, gendisk and request
 *	queue and initializes them as far as no additional information is
 *	needed.
 *
 *	Returns a pointer to the allocated I2O Block device on success or a
 *	negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
	struct i2o_block_device *dev;
	struct gendisk *gd;
	struct request_queue *queue;
	int rc;

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		osm_err("Insufficient memory to allocate I2O Block disk.\n");
		rc = -ENOMEM;
		goto exit;
	}
	memset(dev, 0, sizeof(*dev));

	INIT_LIST_HEAD(&dev->open_queue);
	spin_lock_init(&dev->lock);
	dev->rcache = CACHE_PREFETCH;
	dev->wcache = CACHE_WRITEBACK;

	/* allocate a gendisk with 16 partitions */
	gd = alloc_disk(16);
	if (!gd) {
		osm_err("Insufficient memory to allocate gendisk.\n");
		rc = -ENOMEM;
		goto cleanup_dev;
	}

	/* initialize the request queue */
	queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
	if (!queue) {
		osm_err("Insufficient memory to allocate request queue.\n");
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
	blk_queue_issue_flush_fn(queue, i2o_block_issue_flush);

	gd->major = I2O_MAJOR;
	gd->queue = queue;
	gd->fops = &i2o_block_fops;
	gd->private_data = dev;

	dev->gd = gd;

	return dev;

      cleanup_queue:
	put_disk(gd);

      cleanup_dev:
	kfree(dev);

      exit:
	return ERR_PTR(rc);
};
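/*
 * Editor's sketch, not part of this excerpt: i2o_block_prep_req_fn,
 * registered with blk_queue_prep_rq() in i2o_block_device_alloc() above,
 * is defined earlier in i2o_block.c. On 2.6.15 a prep_rq callback of this
 * kind attaches the driver-private i2o_block_request to the request;
 * i2o_block_request_alloc() is the mempool-backed allocator assumed from
 * elsewhere in the file.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
	struct i2o_block_device *i2o_blk_dev = q->queuedata;
	struct i2o_block_request *ireq;

	/* already prepared by us: skip further preparation */
	if (req->flags & REQ_SPECIAL) {
		req->flags |= REQ_DONTPREP;
		return BLKPREP_OK;
	}

	/* connect the i2o_block_request to the request */
	if (!req->special) {
		ireq = i2o_block_request_alloc();	/* assumed helper */
		if (IS_ERR(ireq))
			return BLKPREP_DEFER;	/* retry when memory is freed */

		ireq->i2o_blk_dev = i2o_blk_dev;
		req->special = ireq;
		ireq->req = req;
	} else
		ireq = req->special;

	/* do not come back here */
	req->flags |= REQ_DONTPREP | REQ_SPECIAL;

	return BLKPREP_OK;
}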
/**
 *	i2o_block_probe - verify if dev is an I2O Block device and install it
 *	@dev: device to verify if it is an I2O Block device
 *
 *	We only verify that the user_tid of the device is 0xfff and then
 *	install the device. Otherwise it is used by some other device
 *	(e.g. RAID).
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_block_device *i2o_blk_dev;
	struct gendisk *gd;
	struct request_queue *queue;
	static int unit = 0;
	int rc;
	u64 size;
	u32 blocksize;
	u32 flags, status;
	u16 body_size = 4;
	unsigned short max_sectors;

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec)
		body_size = 8;
#endif

	if (c->limit_sectors)
		max_sectors = I2O_MAX_SECTORS_LIMITED;
	else
		max_sectors = I2O_MAX_SECTORS;

	/* skip devices which are used by IOP */
	if (i2o_dev->lct_data.user_tid != 0xfff) {
		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
		return -ENODEV;
	}

	if (i2o_device_claim(i2o_dev)) {
		osm_warn("Unable to claim device. Installation aborted\n");
		rc = -EFAULT;
		goto exit;
	}

	i2o_blk_dev = i2o_block_device_alloc();
	if (IS_ERR(i2o_blk_dev)) {
		osm_err("could not alloc a new I2O block device");
		rc = PTR_ERR(i2o_blk_dev);
		goto claim_release;
	}

	i2o_blk_dev->i2o_dev = i2o_dev;
	dev_set_drvdata(dev, i2o_blk_dev);

	/* setup gendisk */
	gd = i2o_blk_dev->gd;
	gd->first_minor = unit << 4;
	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
	sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
	gd->driverfs_dev = &i2o_dev->device;

	/* setup request queue */
	queue = gd->queue;
	queue->queuedata = i2o_blk_dev;

	blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(queue, max_sectors);
	blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));

	osm_debug("max sectors = %d\n", queue->max_sectors);
	osm_debug("phys segments = %d\n", queue->max_phys_segments);
	osm_debug("max hw segments = %d\n", queue->max_hw_segments);

	/*
	 *      Ask for the current media data. If that isn't supported
	 *      then we ask for the device capacity data
	 */
	if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
	    i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
		blk_queue_hardsect_size(queue, blocksize);
	} else
		osm_warn("unable to get blocksize of %s\n", gd->disk_name);

	if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
	    i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
		set_capacity(gd, size >> KERNEL_SECTOR_SHIFT);
	} else
		osm_warn("could not get size of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2))
		i2o_blk_dev->power = 0;
	i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
	i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);

	/* subscribe to all event indications from this device (mask 0xffffffff) */
	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

	add_disk(gd);

	unit++;

	osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	return 0;

      claim_release:
	i2o_device_claim_release(i2o_dev);

      exit:
	return rc;
};

/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
	.name = OSM_NAME,
	.event = i2o_block_event,
	.reply = i2o_block_reply,
	.classes = i2o_block_class_id,
	.driver = {
		   .probe = i2o_block_probe,
		   .remove = i2o_block_remove,
		   },
};

/**
 *	i2o_block_init - Block OSM initialization function
 *
 *	Allocates the slab and mempool for request structs, registers the
 *	i2o_block block device and finally registers the Block OSM with the
 *	I2O core.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
	int rc;
	int size;

	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

	/* Allocate request mempool and slab */
	size = sizeof(struct i2o_block_request);
	i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
						  SLAB_HWCACHE_ALIGN, NULL,
						  NULL);
	if (!i2o_blk_req_pool.slab) {
		osm_err("can't init request slab\n");
		rc = -ENOMEM;
		goto exit;
	}

	i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE,
					       mempool_alloc_slab,
					       mempool_free_slab,
					       i2o_blk_req_pool.slab);
	if (!i2o_blk_req_pool.pool) {
		osm_err("can't init request mempool\n");
		rc = -ENOMEM;
		goto free_slab;
	}

	/* Register the block device interfaces */
	rc = register_blkdev(I2O_MAJOR, "i2o_block");
	if (rc) {
		osm_err("unable to register block device\n");
		goto free_mempool;
	}
#ifdef MODULE
	osm_info("registered device at major %d\n", I2O_MAJOR);
#endif

	/* Register Block OSM into I2O core */
	rc = i2o_driver_register(&i2o_block_driver);
	if (rc) {
		osm_err("Could not register Block driver\n");
		goto unregister_blkdev;
	}

	return 0;

      unregister_blkdev:
	unregister_blkdev(I2O_MAJOR, "i2o_block");

      free_mempool:
	mempool_destroy(i2o_blk_req_pool.pool);

      free_slab:
	kmem_cache_destroy(i2o_blk_req_pool.slab);

      exit:
	return rc;
};

/**
 *	i2o_block_exit - Block OSM exit function
 *
 *	Unregisters Block OSM from I2O core, unregisters the i2o_block block
 *	device and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
	/* Unregister I2O Block OSM from I2O core */
	i2o_driver_unregister(&i2o_block_driver);

	/* Unregister block device */
	unregister_blkdev(I2O_MAJOR, "i2o_block");

	/* Free request mempool and slab */
	mempool_destroy(i2o_blk_req_pool.pool);
	kmem_cache_destroy(i2o_blk_req_pool.slab);
};

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);
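/*
 * Editor's sketch, not part of this excerpt: i2o_blk_req_pool, used by
 * i2o_block_init() and i2o_block_exit() above, is declared near the top of
 * i2o_block.c. On 2.6.15 it plausibly pairs the slab cache with the mempool
 * built on top of it:
 */
struct i2o_block_mempool {
	kmem_cache_t *slab;	/* 2.6-era typedef for the slab cache */
	mempool_t *pool;
};

static struct i2o_block_mempool i2o_blk_req_pool;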
