⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 i2o_block.c

📁 这个linux源代码是很全面的~基本完整了~使用c编译的~由于时间问题我没有亲自测试~但就算用来做参考资料也是非常好的
💻 C
📖 第 1 页 / 共 4 页
字号:
	*hds  = (unsigned char) heads;
}

/*
 *	Rescan the partition tables.
 *	Re-reads the physical configuration of the unit owning minor 'dev'
 *	and rebuilds its 16 partition slots.  Returns 0 on success or
 *	-EBUSY if more than 'maxu' users currently hold the device open.
 */
static int do_i2ob_revalidate(kdev_t dev, int maxu)
{
	int minor=MINOR(dev);
	int i;

	/* Each unit owns 16 consecutive minors; round down to the base. */
	minor&=0xF0;

	/* Busy check: temporarily take a reference while we look. */
	i2ob_dev[minor].refcnt++;
	if(i2ob_dev[minor].refcnt>maxu+1)
	{
		i2ob_dev[minor].refcnt--;
		return -EBUSY;
	}

	/* Invalidate buffers and clear every partition slot of the unit. */
	for( i = 15; i>=0 ; i--)
	{
		int m = minor+i;
		invalidate_device(MKDEV(MAJOR_NR, m), 1);
		i2ob_gendisk.part[m].start_sect = 0;
		i2ob_gendisk.part[m].nr_sects = 0;
	}

	/*
	 *	Do a physical check and then reconfigure
	 */
	i2ob_install_device(i2ob_dev[minor].controller, i2ob_dev[minor].i2odev,
		minor);
	i2ob_dev[minor].refcnt--;
	return 0;
}

/*
 *	Issue device specific ioctl calls.
 *	Handles geometry queries, the I2O cache-strategy get/set ioctls and
 *	partition rescans; everything else is forwarded to blk_ioctl().
 */
static int i2ob_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct i2ob_device *dev;
	int minor;

	/* Anyone capable of this syscall can do *real bad* things */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!inode)
		return -EINVAL;
	minor = MINOR(inode->i_rdev);
	if (minor >= (MAX_I2OB<<4))
		return -ENODEV;
	/* NOTE: per-minor slot, not masked to the unit base. */
	dev = &i2ob_dev[minor];
	switch (cmd) {
		case HDIO_GETGEO:
		{
			struct hd_geometry g;
			int u=minor&0xF0;
			/* Size is stored in KiB; <<1 converts to 512-byte sectors. */
			i2o_block_biosparam(i2ob_sizes[u]<<1,
				&g.cylinders, &g.heads, &g.sectors);
			g.start = i2ob[minor].start_sect;
			return copy_to_user((void *)arg,&g, sizeof(g))?-EFAULT:0;
		}

		case BLKI2OGRSTRAT:
			return put_user(dev->rcache, (int *)arg);
		case BLKI2OGWSTRAT:
			return put_user(dev->wcache, (int *)arg);
		case BLKI2OSRSTRAT:
			/* NOTE(review): arg is unsigned long, so arg<0 is always
			   false here -- the check only rejects values above
			   CACHE_SMARTFETCH. */
			if(arg<0||arg>CACHE_SMARTFETCH)
				return -EINVAL;
			dev->rcache = arg;
			break;
		case BLKI2OSWSTRAT:
			if(arg!=0 && (arg<CACHE_WRITETHROUGH || arg>CACHE_SMARTBACK))
				return -EINVAL;
			dev->wcache = arg;
			break;

		case BLKRRPART:
			if(!capable(CAP_SYS_ADMIN))
				return -EACCES;
			return do_i2ob_revalidate(inode->i_rdev,1);

		default:
			return blk_ioctl(inode->i_rdev, cmd, arg);
	}
	return 0;
}

/*
 *	Close the block device down.
 *	On last release: flush the onboard cache, unlock and power down the
 *	media, then unclaim the device from the I2O core.
 */
static int i2ob_release(struct inode *inode, struct file *file)
{
	struct i2ob_device *dev;
	int minor;

	minor =
MINOR(inode->i_rdev);
	if (minor >= (MAX_I2OB<<4))
		return -ENODEV;
	dev = &i2ob_dev[(minor&0xF0)];

	/*
	 * This is to deal with the case of an application
	 * opening a device and then the device disappears while
	 * it's in use, and then the application tries to release
	 * it.  ex: Unmounting a deleted RAID volume at reboot.
	 * If we send messages, it will just cause FAILs since
	 * the TID no longer exists.
	 */
	if(!dev->i2odev)
		return 0;

	if (dev->refcnt <= 0)
		printk(KERN_ALERT "i2ob_release: refcount(%d) <= 0\n", dev->refcnt);
	dev->refcnt--;
	if(dev->refcnt==0)
	{
		/*
		 *	Flush the onboard cache on unmount
		 */
		u32 msg[5];
		int *query_done = &dev->done_flag;
		msg[0] = (FIVE_WORD_MSG_SIZE|SGL_OFFSET_0);
		msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid;
		msg[2] = i2ob_context|0x40000000;
		msg[3] = (u32)query_done;
		msg[4] = 60<<16;	/* 60 second flush timeout */
		DEBUG("Flushing...");
		i2o_post_wait(dev->controller, msg, 20, 60);

		/*
		 *	Unlock the media
		 */
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid;
		msg[2] = i2ob_context|0x40000000;
		msg[3] = (u32)query_done;
		msg[4] = -1;	/* -1 = currently held lock handle */
		DEBUG("Unlocking...");
		i2o_post_wait(dev->controller, msg, 20, 2);
		DEBUG("Unlocked.\n");

		/* Power down.  msg[2]/msg[3] deliberately keep the context and
		   done-flag words set up for the unlock message above. */
		msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_POWER<<24 | HOST_TID << 12 | dev->tid;
		if(dev->flags & (1<<3|1<<4))	/* Removable */
			msg[4] = 0x21 << 24;
		else
			msg[4] = 0x24 << 24;
		if(i2o_post_wait(dev->controller, msg, 20, 60)==0)
			dev->power = 0x24;

		/*
		 * Now unclaim the device.
		 */
		if (i2o_release_device(dev->i2odev, &i2o_block_handler))
			printk(KERN_ERR "i2ob_release: controller rejected unclaim.\n");

		DEBUG("Unclaim\n");
	}
	return 0;
}

/*
 *	Open the block device.
*/ static int i2ob_open(struct inode *inode, struct file *file){	int minor;	struct i2ob_device *dev;		if (!inode)		return -EINVAL;	minor = MINOR(inode->i_rdev);	if (minor >= MAX_I2OB<<4)		return -ENODEV;	dev=&i2ob_dev[(minor&0xF0)];	if(!dev->i2odev)			return -ENODEV;		if(dev->refcnt++==0)	{ 		u32 msg[6];				DEBUG("Claim ");		if(i2o_claim_device(dev->i2odev, &i2o_block_handler))		{			dev->refcnt--;			printk(KERN_INFO "I2O Block: Could not open device\n");			return -EBUSY;		}		DEBUG("Claimed ");		/*	 	 *	Power up if needed	 	 */		if(dev->power > 0x1f)		{			msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;			msg[1] = I2O_CMD_BLOCK_POWER<<24 | HOST_TID << 12 | dev->tid;			msg[4] = 0x02 << 24;			if(i2o_post_wait(dev->controller, msg, 20, 60) == 0)				dev->power = 0x02;		}		/*		 *	Mount the media if needed. Note that we don't use		 *	the lock bit. Since we have to issue a lock if it		 *	refuses a mount (quite possible) then we might as		 *	well just send two messages out.		 */		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;				msg[1] = I2O_CMD_BLOCK_MMOUNT<<24|HOST_TID<<12|dev->tid;		msg[4] = -1;		msg[5] = 0;		DEBUG("Mount ");		i2o_post_wait(dev->controller, msg, 24, 2);		/*		 *	Lock the media		 */		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;		msg[1] = I2O_CMD_BLOCK_MLOCK<<24|HOST_TID<<12|dev->tid;		msg[4] = -1;		DEBUG("Lock ");		i2o_post_wait(dev->controller, msg, 20, 2);		DEBUG("Ready.\n");	}			return 0;}/* *	Issue a device query */ static int i2ob_query_device(struct i2ob_device *dev, int table, 	int field, void *buf, int buflen){	return i2o_query_scalar(dev->controller, dev->tid,		table, field, buf, buflen);}/* *	Install the I2O block device we found. */ static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, int unit){	u64 size;	u32 blocksize;	u8 type;	u16 power;	u32 flags, status;	struct i2ob_device *dev=&i2ob_dev[unit];	int i;	/*	 * For logging purposes...	 
*/	printk(KERN_INFO "i2ob: Installing tid %d device at unit %d\n", 			d->lct_data.tid, unit);		/*	 *	Ask for the current media data. If that isn't supported	 *	then we ask for the device capacity data	 */	if(i2ob_query_device(dev, 0x0004, 1, &blocksize, 4) != 0	  || i2ob_query_device(dev, 0x0004, 0, &size, 8) !=0 )	{		i2ob_query_device(dev, 0x0000, 3, &blocksize, 4);		i2ob_query_device(dev, 0x0000, 4, &size, 8);	}		if(i2ob_query_device(dev, 0x0000, 2, &power, 2)!=0)		power = 0;	i2ob_query_device(dev, 0x0000, 5, &flags, 4);	i2ob_query_device(dev, 0x0000, 6, &status, 4);	i2ob_sizes[unit] = (int)(size>>10);	for(i=unit; i <= unit+15 ; i++)		i2ob_hardsizes[i] = blocksize;	i2ob_gendisk.part[unit].nr_sects = size>>9;	i2ob[unit].nr_sects = (int)(size>>9);	/*	 * Max number of Scatter-Gather Elements	 */		i2ob_dev[unit].power = power;	/* Save power state in device proper */	i2ob_dev[unit].flags = flags;	for(i=unit;i<=unit+15;i++)	{		i2ob_dev[i].power = power;	/* Save power state */		i2ob_dev[unit].flags = flags;	/* Keep the type info */		i2ob_max_sectors[i] = 96;	/* 256 might be nicer but many controllers 						   explode on 65536 or higher */		i2ob_dev[i].max_segments = (d->controller->status_block->inbound_frame_size - 7) / 2;				i2ob_dev[i].rcache = CACHE_SMARTFETCH;		i2ob_dev[i].wcache = CACHE_WRITETHROUGH;				if(d->controller->battery == 0)			i2ob_dev[i].wcache = CACHE_WRITETHROUGH;		if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.promise)			i2ob_dev[i].wcache = CACHE_WRITETHROUGH;		if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.short_req)		{			i2ob_max_sectors[i] = 8;			i2ob_dev[i].max_segments = 8;		}	}	sprintf(d->dev_name, "%s%c", i2ob_gendisk.major_name, 'a' + (unit>>4));	printk(KERN_INFO "%s: Max segments %d, queue depth %d, byte limit %d.\n",		 d->dev_name, i2ob_dev[unit].max_segments, i2ob_dev[unit].depth, i2ob_max_sectors[unit]<<9);	i2ob_query_device(dev, 0x0000, 0, &type, 1);	printk(KERN_INFO "%s: ", d->dev_name);	switch(type)	
{		case 0: printk("Disk Storage");break;		case 4: printk("WORM");break;		case 5: printk("CD-ROM");break;		case 7:	printk("Optical device");break;		default:			printk("Type %d", type);	}	if(status&(1<<10))		printk("(RAID)");	if((flags^status)&(1<<4|1<<3))	/* Missing media or device */	{		printk(KERN_INFO " Not loaded.\n");		/* Device missing ? */		if((flags^status)&(1<<4))			return 1;	}	else	{		printk(": %dMB, %d byte sectors",			(int)(size>>20), blocksize);	}	if(status&(1<<0))	{		u32 cachesize;		i2ob_query_device(dev, 0x0003, 0, &cachesize, 4);		cachesize>>=10;		if(cachesize>4095)			printk(", %dMb cache", cachesize>>10);		else			printk(", %dKb cache", cachesize);	}	printk(".\n");	printk(KERN_INFO "%s: Maximum sectors/read set to %d.\n", 		d->dev_name, i2ob_max_sectors[unit]);	/* 	 * If this is the first I2O block device found on this IOP,	 * we need to initialize all the queue data structures	 * before any I/O can be performed. If it fails, this	 * device is useless.	 */	if(!i2ob_queues[c->unit]) {		if(i2ob_init_iop(c->unit))			return 1;	}	/* 	 * This will save one level of lookup/indirection in critical 	 * code so that we can directly get the queue ptr from the	 * device instead of having to go the IOP data structure.	 */	dev->req_queue = &i2ob_queues[c->unit]->req_queue;	grok_partitions(&i2ob_gendisk, unit>>4, 1<<4, (long)(size>>9));	/*	 * Register for the events we're interested in and that the	 * device actually supports.	 */	i2o_event_register(c, d->lct_data.tid, i2ob_context, unit, 		(I2OB_EVENT_MASK & d->lct_data.event_capabilities));	return 0;}/* * Initialize IOP specific queue structures.  This is called * once for each IOP that has a block device sitting behind it. 
*/
static int i2ob_init_iop(unsigned int unit)
{
	int i;

	i2ob_queues[unit] = (struct i2ob_iop_queue *) kmalloc(sizeof(struct i2ob_iop_queue), GFP_ATOMIC);
	if(!i2ob_queues[unit])
	{
		printk(KERN_WARNING "Could not allocate request queue for I2O block device!\n");
		return -1;
	}

	/* Thread the request slots into a singly linked free list. */
	for(i = 0; i< MAX_I2OB_DEPTH; i++)
	{
		i2ob_queues[unit]->request_queue[i].next =  &i2ob_queues[unit]->request_queue[i+1];
		i2ob_queues[unit]->request_queue[i].num = i;
	}

	/* Queue is MAX_I2OB + 1... terminate the extra tail slot. */
	i2ob_queues[unit]->request_queue[i].next = NULL;
	i2ob_queues[unit]->i2ob_qhead = &i2ob_queues[unit]->request_queue[0];
	atomic_set(&i2ob_queues[unit]->queue_depth, 0);

	/* Hook our request function and merge callbacks into the block layer. */
	blk_init_queue(&i2ob_queues[unit]->req_queue, i2ob_request);
	blk_queue_headactive(&i2ob_queues[unit]->req_queue, 0);
	i2ob_queues[unit]->req_queue.back_merge_fn = i2ob_back_merge;
	i2ob_queues[unit]->req_queue.front_merge_fn = i2ob_front_merge;
	i2ob_queues[unit]->req_queue.merge_requests_fn = i2ob_merge_requests;
	/* NOTE(review): this stores the ADDRESS of the array slot
	   (struct i2ob_iop_queue **), not the pointer value itself --
	   verify that the queuedata consumers dereference it accordingly. */
	i2ob_queues[unit]->req_queue.queuedata = &i2ob_queues[unit];

	return 0;
}

/*
 * Get the request queue for the given device.
 */
static request_queue_t* i2ob_get_queue(kdev_t dev)
{
	/* Mask to the unit's base minor; all 16 minors share one queue. */
	int unit = MINOR(dev)&0xF0;
	return i2ob_dev[unit].req_queue;
}

/*
 * Probe the I2O subsystem for block class devices
 */
static void i2ob_scan(int bios)
{
	int i;
	int warned = 0;

	struct i2o_device *d, *b=NULL;
	struct i2o_controller *c;
	struct i2ob_device *dev;

	for(i=0; i< MAX_I2O_CONTROLLERS; i++)
	{
		c=i2o_find_controller(i);

		if(c==NULL)
			continue;

		/*
		 *    The device list connected to the I2O Controller is doubly linked
		 * Here we traverse the end of the list , and start claiming devices
		 * from that end. This assures that within an I2O controller atleast
		 * the newly created volumes get claimed after the older ones, thus
		 * mapping to same major/minor (and hence device file name) after 
		 * every reboot.
		 * The exception being: 
		 * 1. If there was a TID reuse.
		 * 2. There was more than one I2O controller. 
*/		if(!bios)		{			for (d=c->devices;d!=NULL;d=d->next)			if(d->next == NULL)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -