
📄 i2o_block.c

📁 Linux 2.4.20 kernel source, which can be patched with RTLinux 3.2 to build a real-time Linux system and then compiled (see the RTLinux sketch below).
💻 C
📖 Page 1 of 4
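The description above mentions pairing this 2.4.20 tree with the RTLinux 3.2 patch. For context, below is a minimal sketch of a periodic RTLinux 3.x kernel-module thread, assuming the RTLinux pthreads-style API (pthread_make_periodic_np, pthread_wait_np, gethrtime, rtl_printf); the 1 ms period and the task/periodic_fn names are illustrative only and are not part of this file.

#include <rtl.h>
#include <time.h>
#include <pthread.h>

static pthread_t task;

static void *periodic_fn(void *arg)
{
	/* Run once per millisecond from now on. */
	pthread_make_periodic_np(pthread_self(), gethrtime(), 1000000);
	while (1) {
		pthread_wait_np();	/* sleep until the next period */
		rtl_printf("tick\n");	/* real-time safe printk variant */
	}
	return NULL;
}

int init_module(void)
{
	return pthread_create(&task, NULL, periodic_fn, NULL);
}

void cleanup_module(void)
{
	pthread_cancel(task);
	pthread_join(task, NULL);
}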
	struct i2ob_request *ireq = NULL;
	u8 st;
	u32 *m = (u32 *)msg;
	u8 unit = (m[2]>>8)&0xF0;	/* low 4 bits are partition */
	struct i2ob_device *dev = &i2ob_dev[(unit&0xF0)];

	/*
	 *	Pull the lock over ready
	 */

	spin_lock_prefetch(&io_request_lock);

	/*
	 * FAILed message
	 */
	if(m[0] & (1<<13))
	{
		DEBUG("FAIL");
		/*
		 * FAILed message from controller
		 * We increment the error count and abort it
		 *
		 * In theory this will never happen.  The I2O block class
		 * specification states that block devices never return
		 * FAILs but instead use the REQ status field...but
		 * better be on the safe side since no one really follows
		 * the spec to the book :)
		 */
		ireq=&i2ob_queues[c->unit]->request_queue[m[3]];
		ireq->req->errors++;
		spin_lock_irqsave(&io_request_lock, flags);
		i2ob_unhook_request(ireq, c->unit);
		i2ob_end_request(ireq->req);
		spin_unlock_irqrestore(&io_request_lock, flags);

		/* Now flush the message by making it a NOP */
		m[0]&=0x00FFFFFF;
		m[0]|=(I2O_CMD_UTIL_NOP)<<24;
		i2o_post_message(c,virt_to_bus(m));
		return;
	}

	if(msg->function == I2O_CMD_UTIL_EVT_REGISTER)
	{
		spin_lock(&i2ob_evt_lock);
		memcpy(evt_msg, msg, (m[0]>>16)<<2);
		spin_unlock(&i2ob_evt_lock);
		up(&i2ob_evt_sem);
		return;
	}

	if(msg->function == I2O_CMD_BLOCK_CFLUSH)
	{
		spin_lock_irqsave(&io_request_lock, flags);
		dev->constipated=0;
		DEBUG("unconstipated\n");
		if(i2ob_backlog_request(c, dev)==0)
			i2ob_request(dev->req_queue);
		spin_unlock_irqrestore(&io_request_lock, flags);
		return;
	}

	if(!dev->i2odev)
	{
		/*
		 * This is a HACK, but Intel Integrated RAID allows the user
		 * to delete a volume that is claimed, locked, and in use
		 * by the OS. We have to check for a reply from a
		 * non-existent device and flag it as an error or the system
		 * goes kaput...
		 */
		ireq=&i2ob_queues[c->unit]->request_queue[m[3]];
		ireq->req->errors++;
		printk(KERN_WARNING "I2O Block: Data transfer to deleted device!\n");
		spin_lock_irqsave(&io_request_lock, flags);
		i2ob_unhook_request(ireq, c->unit);
		i2ob_end_request(ireq->req);
		spin_unlock_irqrestore(&io_request_lock, flags);
		return;
	}

	/*
	 *	Let's see what is cooking. We stuffed the
	 *	request in the context.
	 */

	ireq=&i2ob_queues[c->unit]->request_queue[m[3]];
	st=m[4]>>24;

	if(st!=0)
	{
		int err;
		char *bsa_errors[] =
		{
			"Success",
			"Media Error",
			"Failure communicating to device",
			"Device Failure",
			"Device is not ready",
			"Media not present",
			"Media is locked by another user",
			"Media has failed",
			"Failure communicating to device",
			"Device bus failure",
			"Device is locked by another user",
			"Device is write protected",
			"Device has reset",
			"Volume has changed, waiting for acknowledgement"
		};

		err = m[4]&0xFFFF;

		/*
		 *	Device not ready means two things. One is that the
		 *	thing went offline (but not a removable media).
		 *
		 *	The second is that you have a SuperTrak 100 and the
		 *	firmware got constipated. Unlike standard i2o card
		 *	setups the supertrak returns an error rather than
		 *	blocking for the timeout in these cases.
		 */

		spin_lock_irqsave(&io_request_lock, flags);
		if(err==4)
		{
			/*
			 *	Time to uncork stuff
			 */

			if(!dev->constipated)
			{
				dev->constipated = 1;
				DEBUG(("constipated\n"));
				/* Now pull the chain */
				if(i2ob_flush(c, dev, unit)<0)
				{
					DEBUG(("i2ob: Unable to queue flush. Retrying I/O immediately.\n"));
					dev->constipated=0;
				}
				DEBUG(("flushing\n"));
			}

			/*
			 *	Recycle the request
			 */
//			i2ob_unhook_request(ireq, c->unit);

			/*
			 *	Place it on the recycle queue
			 */

			ireq->next = NULL;
			if(i2ob_backlog_tail[c->unit]!=NULL)
				i2ob_backlog_tail[c->unit]->next = ireq;
			else
				i2ob_backlog[c->unit] = ireq;

			i2ob_backlog_tail[c->unit] = ireq;

			atomic_dec(&i2ob_queues[c->unit]->queue_depth);

			/*
			 *	If the constipator flush failed we want to
			 *	poke the queue again.
			 */

			i2ob_request(dev->req_queue);
			spin_unlock_irqrestore(&io_request_lock, flags);

			/*
			 *	and out
			 */

			return;
		}
		spin_unlock_irqrestore(&io_request_lock, flags);
		printk(KERN_ERR "\n/dev/%s error: %s", dev->i2odev->dev_name,
			bsa_errors[m[4]&0xFFFF]);
		if(m[4]&0x00FF0000)
			printk(" - DDM attempted %d retries", (m[4]>>16)&0x00FF );
		printk(".\n");
		ireq->req->errors++;
	}
	else
		ireq->req->errors = 0;

	/*
	 *	Dequeue the request. We use irqsave locks as one day we
	 *	may be running polled controllers from a BH...
	 */

	spin_lock_irqsave(&io_request_lock, flags);
	i2ob_unhook_request(ireq, c->unit);
	i2ob_end_request(ireq->req);
	atomic_dec(&i2ob_queues[c->unit]->queue_depth);

	/*
	 *	We may be able to do more I/O
	 */

	if(i2ob_backlog_request(c, dev)==0)
		i2ob_request(dev->req_queue);

	spin_unlock_irqrestore(&io_request_lock, flags);
}

/*
 * Event handler.  Needs to be a separate thread b/c we may have
 * to do things like scan a partition table, or query parameters
 * which cannot be done from an interrupt or from a bottom half.
 */
static int i2ob_evt(void *dummy)
{
	unsigned int evt;
	unsigned long flags;
	int unit;
	int i;
	// The only event that has data is the SCSI_SMART event.
	struct i2o_reply {
		u32 header[4];
		u32 evt_indicator;
		u8 ASC;
		u8 ASCQ;
		u8 data[16];
	} *evt_local;

	lock_kernel();
	daemonize();
	unlock_kernel();

	strcpy(current->comm, "i2oblock");
	evt_running = 1;

	while(1)
	{
		if(down_interruptible(&i2ob_evt_sem))
		{
			evt_running = 0;
			printk("exiting...");
			break;
		}

		/*
		 * Keep another CPU/interrupt from overwriting the
		 * message while we're reading it.
		 *
		 * We stuffed the unit in the TxContext and grab the event mask.
		 * None of the BSA events we care about have EventData.
		 */
		spin_lock_irqsave(&i2ob_evt_lock, flags);
		evt_local = (struct i2o_reply *)evt_msg;
		spin_unlock_irqrestore(&i2ob_evt_lock, flags);

		unit = evt_local->header[3];
		evt = evt_local->evt_indicator;

		switch(evt)
		{
			/*
			 * New volume loaded on same TID, so we just re-install.
			 * The TID/controller don't change as it is the same
			 * I2O device.  It's just new media that we have to
			 * rescan.
			 */
			case I2O_EVT_IND_BSA_VOLUME_LOAD:
			{
				i2ob_install_device(i2ob_dev[unit].i2odev->controller,
					i2ob_dev[unit].i2odev, unit);
				break;
			}

			/*
			 * No media, so set all parameters to 0 and set the media
			 * change flag. The I2O device is still valid, just doesn't
			 * have media, so we don't want to clear the controller or
			 * device pointer.
			 */
			case I2O_EVT_IND_BSA_VOLUME_UNLOAD:
			{
				for(i = unit; i <= unit+15; i++)
				{
					i2ob_sizes[i] = 0;
					i2ob_hardsizes[i] = 0;
					i2ob_max_sectors[i] = 0;
					i2ob[i].nr_sects = 0;
					i2ob_gendisk.part[i].nr_sects = 0;
				}
				i2ob_media_change_flag[unit] = 1;
				break;
			}

			case I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ:
				printk(KERN_WARNING "%s: Attempt to eject locked media\n",
					i2ob_dev[unit].i2odev->dev_name);
				break;

			/*
			 * The capacity has changed and we are going to be
			 * updating the max_sectors and other information
			 * about this disk.  We try a revalidate first. If
			 * the block device is in use, we don't want to
			 * do that as there may be I/Os bound for the disk
			 * at the moment.  In that case we read the size
			 * from the device and update the information ourselves
			 * and the user can later force a partition table
			 * update through an ioctl.
			 */
			case I2O_EVT_IND_BSA_CAPACITY_CHANGE:
			{
				u64 size;

				if(do_i2ob_revalidate(MKDEV(MAJOR_NR, unit),0) != -EBUSY)
					continue;

				if(i2ob_query_device(&i2ob_dev[unit], 0x0004, 0, &size, 8) != 0)
					i2ob_query_device(&i2ob_dev[unit], 0x0000, 4, &size, 8);

				spin_lock_irqsave(&io_request_lock, flags);
				i2ob_sizes[unit] = (int)(size>>10);
				i2ob_gendisk.part[unit].nr_sects = size>>9;
				i2ob[unit].nr_sects = (int)(size>>9);
				spin_unlock_irqrestore(&io_request_lock, flags);
				break;
			}

			/*
			 * We got a SCSI SMART event, we just log the relevant
			 * information and let the user decide what they want
			 * to do with the information.
			 */
			case I2O_EVT_IND_BSA_SCSI_SMART:
			{
				/*
				 * The serial number in the event data is up to
				 * 16 bytes and not NUL-terminated, so terminate
				 * a local copy rather than writing past data[15].
				 */
				char buf[17];
				printk(KERN_INFO "I2O Block: %s received a SCSI SMART Event\n",
					i2ob_dev[unit].i2odev->dev_name);
				memcpy(buf, evt_local->data, 16);
				buf[16] = '\0';
				printk(KERN_INFO "      Disk Serial#:%s\n", buf);
				printk(KERN_INFO "      ASC 0x%02x\n", evt_local->ASC);
				printk(KERN_INFO "      ASCQ 0x%02x\n", evt_local->ASCQ);
				break;
			}

			/*
			 *	Non event
			 */
			case 0:
				break;

			/*
			 * An event we didn't ask for.  Call the card manufacturer
			 * and tell them to fix their firmware :)
			 */
			default:
				printk(KERN_INFO "%s: Received event 0x%X we didn't register for\n"
					KERN_INFO "   Blame the I2O card manufacturer 8)\n",
					i2ob_dev[unit].i2odev->dev_name, evt);
				break;
		}
	}

	complete_and_exit(&i2ob_thread_dead,0);
	return 0;
}

/*
 * The timer handler will attempt to restart requests
 * that are queued to the driver.  This handler
 * currently only gets called if the controller
 * had no more room in its inbound fifo.
 */
static void i2ob_timer_handler(unsigned long q)
{
	unsigned long flags;

	/*
	 * We cannot touch the request queue or the timer
	 * flag without holding the io_request_lock.
	 */
	spin_lock_irqsave(&io_request_lock,flags);

	/*
	 * Clear the timer started flag so that
	 * the timer can be queued again.
	 */
	i2ob_timer_started = 0;

	/*
	 * Restart any requests.
	 */
	i2ob_request((request_queue_t*)q);

	/*
	 * Free the lock.
	 */
	spin_unlock_irqrestore(&io_request_lock,flags);
}

static int i2ob_backlog_request(struct i2o_controller *c, struct i2ob_device *dev)
{
	u32 m;
	struct i2ob_request *ireq;

	while((ireq=i2ob_backlog[c->unit])!=NULL)
	{
		int unit;

		if(atomic_read(&i2ob_queues[c->unit]->queue_depth) > dev->depth/4)
			break;

		m = i2ob_get(dev);
		if(m == 0xFFFFFFFF)
			break;

		i2ob_backlog[c->unit] = ireq->next;
		if(i2ob_backlog[c->unit] == NULL)
			i2ob_backlog_tail[c->unit] = NULL;

		unit = MINOR(ireq->req->rq_dev);
		i2ob_send(m, dev, ireq, i2ob[unit].start_sect, unit);
	}
	if(i2ob_backlog[c->unit])
		return 1;
	return 0;
}

/*
 *	The I2O block driver is listed as one of those that pulls the
 *	front entry off the queue before processing it. This is important
 *	to remember here. If we drop the io lock then CURRENT will change
 *	on us. We must unlink CURRENT in this routine before we return, if
 *	we use it.
 */
static void i2ob_request(request_queue_t *q)
{
	struct request *req;
	struct i2ob_request *ireq;
	int unit;
	struct i2ob_device *dev;
	u32 m;

	while (!list_empty(&q->queue_head)) {
		/*
		 *	On an IRQ completion, if there is an inactive
		 *	request on the queue head it means it isn't yet
		 *	ready to dispatch.
		 */
		req = blkdev_entry_next_request(&q->queue_head);
		if(req->rq_status == RQ_INACTIVE)
			return;

		unit = MINOR(req->rq_dev);
		dev = &i2ob_dev[(unit&0xF0)];

		/*
		 *	Queue depths probably belong with some kind of
		 *	generic IOP commit control. Certainly it's not right
		 *	that it's global!
		 */
		if(atomic_read(&i2ob_queues[dev->unit]->queue_depth) >= dev->depth)
			break;

		/*
		 *	Is the channel constipated?
		 */
		if(i2ob_backlog[dev->unit]!=NULL)
			break;

		/* Get a message */
		m = i2ob_get(dev);

		if(m==0xFFFFFFFF)
		{
			/*
			 * See if the timer has already been queued.
			 */
			if (!i2ob_timer_started)
			{
				DEBUG((KERN_ERR "i2ob: starting timer\n"));

				/*
				 * Set the timer_started flag to ensure
				 * that the timer is only queued once.
				 * Queueing it more than once will corrupt
				 * the timer queue.
				 */
				i2ob_timer_started = 1;

				/*
				 * Set up the timer to expire in
				 * 500ms.
				 */
				i2ob_timer.expires = jiffies + (HZ >> 1);
				i2ob_timer.data = (unsigned long)q;

				/*
				 * Start it.
				 */
				add_timer(&i2ob_timer);
				return;
			}
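
The backlog handling above relies on an intrusive singly linked FIFO with separate head and tail pointers (i2ob_backlog / i2ob_backlog_tail): the reply handler appends a recycled request at the tail when the controller pushes back, and i2ob_backlog_request replays from the head once it drains. Below is a minimal standalone sketch of that queue discipline; the breq struct and the backlog_push/backlog_pop names are illustrative stand-ins, not the driver's own types.

#include <stdio.h>
#include <stddef.h>

struct breq {
	int id;
	struct breq *next;
};

static struct breq *backlog_head = NULL;
static struct breq *backlog_tail = NULL;

/* Append at the tail, as the reply handler does when recycling a request. */
static void backlog_push(struct breq *r)
{
	r->next = NULL;
	if (backlog_tail != NULL)
		backlog_tail->next = r;
	else
		backlog_head = r;
	backlog_tail = r;
}

/* Pop from the head, as i2ob_backlog_request does when replaying. */
static struct breq *backlog_pop(void)
{
	struct breq *r = backlog_head;
	if (r != NULL) {
		backlog_head = r->next;
		if (backlog_head == NULL)
			backlog_tail = NULL;
	}
	return r;
}

int main(void)
{
	struct breq a = { 1, NULL }, b = { 2, NULL };
	struct breq *r;

	backlog_push(&a);
	backlog_push(&b);
	while ((r = backlog_pop()) != NULL)
		printf("replaying request %d\n", r->id);
	return 0;
}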

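The tail of i2ob_request shows the other recurring pattern here: a one-shot guard flag (i2ob_timer_started) around add_timer(), since in the 2.4 kernel calling add_timer() on a timer that is already pending corrupts the timer list. A sketch of the same guard, assuming 2.4-era timer APIs and using illustrative retry_* names:

#include <linux/timer.h>
#include <linux/sched.h>

static struct timer_list retry_timer;
static int retry_pending;	/* guarded by the same lock as the queue */

static void retry_handler(unsigned long data)
{
	retry_pending = 0;	/* clear first so the queue can re-arm us */
	/* ... restart any queued work here ... */
}

static void arm_retry(unsigned long data)
{
	if (retry_pending)
		return;		/* already queued; never add_timer() twice */
	retry_pending = 1;
	init_timer(&retry_timer);
	retry_timer.function = retry_handler;
	retry_timer.data = data;
	retry_timer.expires = jiffies + (HZ >> 1);	/* ~500 ms */
	add_timer(&retry_timer);
}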