⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 i2o_block.c

📁 Linux内核源代码 为压缩文件 是<<Linux内核>>一书中的源代码
💻 C
📖 第 1 页 / 共 3 页
字号:
	strcpy(current->comm, "i2oblock");
	evt_running = 1;

	while(1)
	{
#warning "RACE"
		/* Sleep until the reply handler wakes us with a new event. */
		interruptible_sleep_on(&i2ob_evt_wait); 

		/* A pending signal is the cue to shut this thread down. */
		if(signal_pending(current)) {
			evt_running = 0;
			return 0;
		}

		printk(KERN_INFO "Doing something in i2o_block event thread\n");
		
		/*
		 * Keep another CPU/interrupt from overwriting the 
		 * message while we're reading it
		 *
		 * We stuffed the unit in the TxContext and grab the event mask
		 * None of the BSA we care about events have EventData
		 */
		spin_lock_irqsave(&i2ob_evt_lock, flags);
		unit = evt_msg[3];
		evt = evt_msg[4];
		spin_unlock_irqrestore(&i2ob_evt_lock, flags);

		switch(evt)
		{
			/*
			 * New volume loaded on same TID, so we just re-install.
			 * The TID/controller don't change as it is the same
			 * I2O device.  It's just new media that we have to
			 * rescan.
			 */
			case I2O_EVT_IND_BSA_VOLUME_LOAD:
			{
				i2ob_install_device(i2ob_dev[unit].i2odev->controller, 
					i2ob_dev[unit].i2odev, unit);
				break;
			}

			/*
			 * No media, so set all parameters to 0 and set the media
			 * change flag. The I2O device is still valid, just doesn't
			 * have media, so we don't want to clear the controller or
			 * device pointer.
			 */
			case I2O_EVT_IND_BSA_VOLUME_UNLOAD:
			{
				/* Zero the size info for the unit and its 15 partition minors. */
				for(i = unit; i <= unit+15; i++)
				{
					i2ob_sizes[i] = 0;
					i2ob_hardsizes[i] = 0;
					i2ob_max_sectors[i] = 0;
					i2ob[i].nr_sects = 0;
					i2ob_gendisk.part[i].nr_sects = 0;
				}
				i2ob_media_change_flag[unit] = 1;
				break;
			}

			case I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ:
				printk(KERN_WARNING "%s: Attempt to eject locked media\n", 
					i2ob_dev[unit].i2odev->dev_name);
				break;

			/*
			 * The capacity has changed and we are going to be
			 * updating the max_sectors and other information 
			 * about this disk.  We try a revalidate first. If
			 * the block device is in use, we don't want to
			 * do that as there may be I/Os bound for the disk
			 * at the moment.  In that case we read the size 
			 * from the device and update the information ourselves
			 * and the user can later force a partition table
			 * update through an ioctl.
			 */
			case I2O_EVT_IND_BSA_CAPACITY_CHANGE:
			{
				u64 size;

				if(do_i2ob_revalidate(MKDEV(MAJOR_NR, unit),0) != -EBUSY)
					continue;

				/* Prefer the media-data group (0x0004); fall back to the
				   device-capacity group (0x0000) if it isn't supported. */
	  			if(i2ob_query_device(&i2ob_dev[unit], 0x0004, 0, &size, 8) !=0 )
					i2ob_query_device(&i2ob_dev[unit], 0x0000, 4, &size, 8);

				/* size is in bytes: >>10 gives KB, >>9 gives 512-byte sectors. */
				spin_lock_irqsave(&io_request_lock, flags);	
				i2ob_sizes[unit] = (int)(size>>10);
				i2ob_gendisk.part[unit].nr_sects = size>>9;
				i2ob[unit].nr_sects = (int)(size>>9);
				spin_unlock_irqrestore(&io_request_lock, flags);	
				break;
			}

			/*
			 * An event we didn't ask for.  Call the card manufacturer
			 * and tell them to fix their firmware :)
			 */
			default:
				printk(KERN_INFO "%s: Received event we didn't register for\n"
					KERN_INFO "   Call I2O card manufacturer\n", 
					i2ob_dev[unit].i2odev->dev_name);
				break;
		}
	};

	return 0;
}

/*
 * The timer handler will attempt to restart requests 
 * that are queued to the driver.  This handler
 * currently only gets called if the controller
 * had no more room in its inbound fifo.  
 */
static void i2ob_timer_handler(unsigned long q)
{
	unsigned long flags;

	/*
	 * We cannot touch the request queue or the timer
	 * flag without holding the io_request_lock.
	 */
	spin_lock_irqsave(&io_request_lock,flags);

	/* 
	 * Clear the timer started flag so that 
	 * the timer can be queued again.
	 */
	i2ob_timer_started = 0;

	/* 
	 * Restart any requests.
	 */
	i2ob_request((request_queue_t*)q);

	/* 
	 * Free the lock.
	 */
	spin_unlock_irqrestore(&io_request_lock,flags);
}

/*
 *	The I2O block driver is listed as one of those that pulls the
 *	front entry off the queue before processing it. This is important
 *	to remember here. If we drop the io lock then CURRENT will change
 *	on us. We must unlink CURRENT in this routine before we return, if
 *	we use it.
*/static void i2ob_request(request_queue_t *q){	struct request *req;	struct i2ob_request *ireq;	int unit;	struct i2ob_device *dev;	u32 m;	// printk(KERN_INFO "i2ob_request() called with queue %p\n", q);	while (!list_empty(&q->queue_head)) {		/*		 *	On an IRQ completion if there is an inactive		 *	request on the queue head it means it isnt yet		 *	ready to dispatch.		 */		req = blkdev_entry_next_request(&q->queue_head);		if(req->rq_status == RQ_INACTIVE)			return;					unit = MINOR(req->rq_dev);		dev = &i2ob_dev[(unit&0xF0)];		/* 		 *	Queue depths probably belong with some kind of 		 *	generic IOP commit control. Certainly its not right 		 *	its global!  		 */		if(atomic_read(&i2ob_queues[dev->unit]->queue_depth)>=MAX_I2OB_DEPTH)			break;		/* Get a message */		m = i2ob_get(dev);		if(m==0xFFFFFFFF)		{			/* 			 * See if the timer has already been queued.			 */			if (!i2ob_timer_started)			{				printk(KERN_ERR "i2ob: starting timer\n");				/*				 * Set the timer_started flag to insure				 * that the timer is only queued once.				 * Queing it more than once will corrupt				 * the timer queue.				 */				i2ob_timer_started = 1;				/* 				 * Set up the timer to expire in				 * 500ms.				 */				i2ob_timer.expires = jiffies + (HZ >> 1);				i2ob_timer.data = (unsigned int)q;				/*				 * Start it.				 */				 				add_timer(&i2ob_timer);			}		}		/*		 * Everything ok, so pull from kernel queue onto our queue		 */		req->errors = 0;		blkdev_dequeue_request(req);			req->sem = NULL;		ireq = i2ob_queues[dev->unit]->i2ob_qhead;		i2ob_queues[dev->unit]->i2ob_qhead = ireq->next;		ireq->req = req;		i2ob_send(m, dev, ireq, i2ob[unit].start_sect, (unit&0xF0));	}}/* *	SCSI-CAM for ioctl geometry mapping *	Duplicated with SCSI - this should be moved into somewhere common *	perhaps genhd ? 
*
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture 
 *  Platforms" 
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * (Table rewritten to match the thresholds actually used by the code
 *  below; capacity X is measured in 512-byte sectors.)
 *
 * Disk Size       | Sectors | Heads | Cylinders
 * ----------------+---------+-------+-------------------
 * 0 < X <= 528M   | 63      | 16    | X/(63 * 16)
 * 528M < X <= 1G  | 63      | 32    | X/(63 * 32)
 * 1G < X <= "21G" | 63      | 64    | X/(63 * 64)
 * .. <= "42G"     | 63      | 128   | X/(63 * 128)
 * larger          | 63      | 255   | X/(63 * 255)
 *
 */

/*
 * Capacity thresholds in 512-byte sectors (1081344 * 512 = 528MB).
 * NOTE(review): the numeric values of the 21G/42G/84G constants do not
 * match their names (4403200 sectors is only ~2.1GB) - verify against
 * the I2O BIOS document before relying on the names.
 */
#define	BLOCK_SIZE_528M		1081344
#define	BLOCK_SIZE_1G		2097152
#define	BLOCK_SIZE_21G		4403200
#define	BLOCK_SIZE_42G		8806400
#define	BLOCK_SIZE_84G		17612800

/*
 * i2o_block_biosparam - map a capacity to a fake BIOS CHS geometry.
 * @capacity:	device size in 512-byte sectors
 * @cyls:	out: cylinder count (truncated to 16 bits for huge disks)
 * @hds:	out: head count (16/32/64/128/255 by capacity)
 * @secs:	out: sectors per track (always 63)
 */
static void i2o_block_biosparam(
	unsigned long capacity,
	unsigned short *cyls,
	unsigned char *hds,
	unsigned char *secs) 
{ 
	unsigned long heads, sectors, cylinders; 

	sectors = 63L;      			/* Maximize sectors per track */ 
	if(capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if(capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if(capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if(capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = capacity / (heads * sectors);

	*cyls = (unsigned short) cylinders;	/* Stuff return values */ 
	*secs = (unsigned char) sectors; 
	*hds  = (unsigned char) heads; 
}

/*
 *	Rescan the partition tables.
 *
 *	Returns 0 on success, -EBUSY if the device has more than @maxu
 *	other users (tracked via refcnt).  Flushes and invalidates every
 *	partition minor of the unit, then re-probes the device.
 */ 
static int do_i2ob_revalidate(kdev_t dev, int maxu)
{
	int minor=MINOR(dev);
	int i;
	
	/* Round down to the whole-disk minor (16 minors per unit). */
	minor&=0xF0;

	/* Claim a temporary reference; refuse if the disk is in use. */
	i2ob_dev[minor].refcnt++;
	if(i2ob_dev[minor].refcnt>maxu+1)
	{
		i2ob_dev[minor].refcnt--;
		return -EBUSY;
	}
	
	/* Flush and forget every partition of this unit. */
	for( i = 15; i>=0 ; i--)
	{
		int m = minor+i;
		kdev_t d = MKDEV(MAJOR_NR, m);
		struct super_block *sb = get_super(d);
		
		sync_dev(d);
		if(sb)
			invalidate_inodes(sb);
		invalidate_buffers(d);
		i2ob_gendisk.part[m].start_sect = 0;
		i2ob_gendisk.part[m].nr_sects = 0;
	}

	/*
	 *	Do a physical check and then reconfigure
	 */
	 
	i2ob_install_device(i2ob_dev[minor].controller, i2ob_dev[minor].i2odev,	
		minor);
	i2ob_dev[minor].refcnt--;
	return 0;
}

/*
 *	Issue device specific ioctl calls.
 */

static int i2ob_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct i2ob_device *dev;
	int minor;

	/* Anyone capable of this syscall can do *real bad* things */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!inode)
		return -EINVAL;
	minor = MINOR(inode->i_rdev);
	if (minor >= (MAX_I2OB<<4))
		return -ENODEV;

	dev = &i2ob_dev[minor];
	switch (cmd) {
		/* Size of this minor in 512-byte sectors. */
		case BLKGETSIZE:
			return put_user(i2ob[minor].nr_sects, (long *) arg);

		case HDIO_GETGEO:
		{
			struct hd_geometry g;
			int u=minor&0xF0;
			/* i2ob_sizes[] is in KB; <<1 converts to sectors. */
			i2o_block_biosparam(i2ob_sizes[u]<<1, 
				&g.cylinders, &g.heads, &g.sectors);
			g.start = i2ob[minor].start_sect;
			return copy_to_user((void *)arg,&g, sizeof(g))?-EFAULT:0;
		}
	
		case BLKRRPART:
			if(!capable(CAP_SYS_ADMIN))
				return -EACCES;
			return do_i2ob_revalidate(inode->i_rdev,1);
			
		/* Generic block-layer ioctls are handled centrally. */
		case BLKFLSBUF:
		case BLKROSET:
		case BLKROGET:
		case BLKRASET:
		case BLKRAGET:
		case BLKPG:
			return blk_ioctl(inode->i_rdev, cmd, arg);
			
		default:
			return -EINVAL;
	}
}

/*
 *	Close the block device down
 */
 
static int i2ob_release(struct inode *inode, struct file *file)
{
	struct i2ob_device *dev;
	int minor;

	minor = MINOR(inode->i_rdev);
	if (minor >= (MAX_I2OB<<4))
		return -ENODEV;
	dev = &i2ob_dev[(minor&0xF0)];

	/*
	 * This is to deal with the case of an application
	 * opening a device and then the device disappears while
	 * it's in use, and then the application tries to release
	 * it.  ex: Unmounting a deleted RAID volume at reboot. 
	 * If we send messages, it will just cause FAILs since
	 * the TID no longer exists.
	 */
	if(!dev->i2odev)
		return 0;

	/* Sync the device so we don't get errors */
	fsync_dev(inode->i_rdev);

	if (dev->refcnt <= 0)
		printk(KERN_ALERT "i2ob_release: refcount(%d) <= 0\n", dev->refcnt);
	dev->refcnt--;
	if(dev->refcnt==0)
	{
		/*
		 *	Flush the onboard cache on unmount
		 */
		u32 msg[5];
		/* NOTE(review): (u32)query_done truncates a kernel pointer on
		   64-bit; presumably this context is only echoed back by the
		   IOP - confirm against i2o_post_wait() usage. */
		int *query_done = &dev->done_flag;
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid;
		msg[2] = i2ob_context|0x40000000;
		msg[3] = (u32)query_done;
		msg[4] = 60<<16;
		i2o_post_wait(dev->controller, msg, 20, 2);

		/*
		 *	Unlock the media
		 */
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid;
		msg[2] = i2ob_context|0x40000000;
		msg[3] = (u32)query_done;
		msg[4] = -1;
		i2o_post_wait(dev->controller, msg, 20, 2);
	
		/* 
		 * Now unclaim the device.
		 */

		if (i2o_release_device(dev->i2odev, &i2o_block_handler))
			printk(KERN_ERR "i2ob_release: controller rejected unclaim.\n");
	}
	MOD_DEC_USE_COUNT;
	return 0;
}

/*
 *	Open the block device.
 *
 *	On first open of a unit we claim the I2O device, mount and lock
 *	the media; subsequent opens only bump the reference count.
 */
 
static int i2ob_open(struct inode *inode, struct file *file)
{
	int minor;
	struct i2ob_device *dev;
	
	if (!inode)
		return -EINVAL;
	minor = MINOR(inode->i_rdev);
	if (minor >= MAX_I2OB<<4)
		return -ENODEV;
	dev=&i2ob_dev[(minor&0xF0)];

	/* Device was removed (or never installed) - nothing to open. */
	if(!dev->i2odev)	
		return -ENODEV;
	
	if(dev->refcnt++==0)
	{ 
		u32 msg[6];
		
		if(i2o_claim_device(dev->i2odev, &i2o_block_handler))
		{
			dev->refcnt--;
			printk(KERN_INFO "I2O Block: Could not open device\n");
			return -EBUSY;
		}
		
		/*
		 *	Mount the media if needed. Note that we don't use
		 *	the lock bit. Since we have to issue a lock if it
		 *	refuses a mount (quite possible) then we might as
		 *	well just send two messages out.
		 *
		 *	NOTE(review): msg[2] and msg[3] are never written
		 *	here - presumably i2o_post_wait() fills in the
		 *	initiator context words itself; confirm against
		 *	the i2o_core implementation.
		 */
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;		
		msg[1] = I2O_CMD_BLOCK_MMOUNT<<24|HOST_TID<<12|dev->tid;
		msg[4] = -1;
		msg[5] = 0;
		i2o_post_wait(dev->controller, msg, 24, 2);

		/*
		 *	Lock the media
		 */
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_MLOCK<<24|HOST_TID<<12|dev->tid;
		msg[4] = -1;
		i2o_post_wait(dev->controller, msg, 20, 2);
	}		
	MOD_INC_USE_COUNT;
	return 0;
}

/*
 *	Issue a device query: read one scalar parameter field from the
 *	device's parameter group @table into @buf.
 */
 
static int i2ob_query_device(struct i2ob_device *dev, int table, 
	int field, void *buf, int buflen)
{
	return i2o_query_scalar(dev->controller, dev->tid,
		table, field, buf, buflen);
}

/*
 *	Install the I2O block device we found.
 */
 
static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, int unit)
{
	u64 size;
	u32 blocksize;
	u32 limit;
	u8 type;
	u32 flags, status;
	struct i2ob_device *dev=&i2ob_dev[unit];
	int i;

	/*
	 * For logging purposes...
	 */
	printk(KERN_INFO "i2ob: Installing tid %d device at unit %d\n", 
			d->lct_data.tid, unit);
	
	/*
	 *	Ask for the current media data.  If that isn't supported
	 *	then we ask for the device capacity data
	 */
	if(i2ob_query_device(dev, 0x0004, 1, &blocksize, 4) != 0
	  || i2ob_query_device(dev, 0x0004, 0, &size, 8) !=0 )
	{
		i2ob_query_device(dev, 0x0000, 3, &blocksize, 4);
		i2ob_query_device(dev, 0x0000, 4, &size, 8);
	}
	
	i2ob_query_device(dev, 0x0000, 5, &flags, 4);
	i2ob_query_device(dev, 0x0000, 6, &status, 4);
	/* size is in bytes: >>10 gives KB, >>9 gives 512-byte sectors. */
	i2ob_sizes[unit] = (int)(size>>10);
	i2ob_hardsizes[unit] = blocksize;
	i2ob_gendisk.part[unit].nr_sects = size>>9;
	i2ob[unit].nr_sects = (int)(size>>9);

	/* Set limit based on inbound frame size */
	limit = (d->controller->status_block->inbound_frame_size - 8)/2;
	limit = limit<<9;

	/*
	 * Max number of Scatter-Gather Elements
	 */	
	i2ob_dev[unit].max_segments = 
		(d->controller->status_block->inbound_frame_size - 8)/2;

	printk(KERN_INFO "Max Segments set to %d\n", 
				i2ob_dev[unit].max_segments);
	printk(KERN_INFO "Byte limit is %d.\n", limit);
	
	/* Propagate the limits to every partition minor of this unit. */
	for(i=unit;i<=unit+15;i++)
	{
		i2ob_max_sectors[i]=MAX_SECTORS;
		i2ob_dev[i].max_segments = 
			(d->controller->status_block->inbound_frame_size - 8)/2;
	}

	i2ob_query_device(dev, 0x0000, 0, &type, 1);
	
	sprintf(d->dev_name, "%s%c", i2ob_gendisk.major_name, 'a' + (unit>>4));

	printk(KERN_INFO "%s: ", d->dev_name);
	switch(type)
	{
		case 0: printk("Disk Storage");break;
		case 4: printk("WORM");break;
		case 5: printk("CD-ROM");break;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -