/*
 * i2o_scsi.c — I2O SCSI OSM driver (Linux 2.6.8)
 *
 * NOTE(review): this capture is page 1 of 2 of the original 1,048-line
 * file, extracted from a web code viewer; the viewer's navigation text
 * has been replaced by this comment.
 */
/*  * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU * General Public License for more details. * * For the avoidance of doubt the "preferred form" of this code is one which * is in an open non patent encumbered format. Where cryptographic key signing * forms part of the process of creating an executable the information * including keys needed to generate an equivalently functional executable * are deemed to be part of the source code. * *  Complications for I2O scsi * *	o	Each (bus,lun) is a logical device in I2O. We keep a map *		table. We spoof failed selection for unmapped units *	o	Request sense buffers can come back for free.  *	o	Scatter gather is a bit dynamic. We have to investigate at *		setup time. *	o	Some of our resources are dynamically shared. The i2o core *		needs a message reservation protocol to avoid swap v net *		deadlocking. We need to back off queue requests. *	 *	In general the firmware wants to help. Where its help isn't performance *	useful we just ignore the aid. Its not worth the code in truth. * * Fixes/additions: *	Steve Ralston: *		Scatter gather now works *	Markus Lidel <Markus.Lidel@shadowconnect.com>: *		Minor fixes for 2.6. * * To Do: *	64bit cleanups *	Fix the resource management problems. 
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/pci.h>
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/i2o.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#define VERSION_STRING        "Version 0.1.2"

//#define DRIVERDEBUG

#ifdef DRIVERDEBUG
#define dprintk(s, args...) printk(s, ## args)
#else
#define dprintk(s, args...)
#endif

#define I2O_SCSI_CAN_QUEUE	4
#define MAXHOSTS		32

/*
 *	Per-host state: the (target,lun) -> I2O TID translation table
 *	built by i2o_scsi_init(). A task entry of -1 means "no such
 *	device" and is spoofed as a failed selection.
 */
struct i2o_scsi_host
{
	struct i2o_controller *controller;
	s16 task[16][8];		/* Allow 16 devices for now */
	unsigned long tagclock[16][8];	/* Tag clock for queueing */
	s16 bus_task;		/* The adapter TID */
};

static int scsi_context;	/* initiator context from i2o_install_handler() */
static int lun_done;		/* set by the reply handler on a LUN query event */
static int i2o_scsi_hosts;

/*
 *	Retry queue for congested frames: up to 32 pending messages,
 *	reposted from the retry_timer (see i2o_retry_run). All four
 *	variables are guarded by retry_lock.
 */
static u32 *retry[32];
static struct i2o_controller *retry_ctrl[32];
static struct timer_list retry_timer;
static spinlock_t retry_lock = SPIN_LOCK_UNLOCKED;
static int retry_ct = 0;

static atomic_t queue_depth;	/* commands currently outstanding to the IOPs */

/*
 *	SG Chain buffer support...
 */

#define SG_MAX_FRAGS		64

/*
 *	FIXME: we should allocate one of these per bus we find as we
 *	locate them not in a lump at boot.
 */

typedef struct _chain_buf
{
	u32 sg_flags_cnt[SG_MAX_FRAGS];
	u32 sg_buf[SG_MAX_FRAGS];
} chain_buf;

#define SG_CHAIN_BUF_SZ sizeof(chain_buf)

#define SG_MAX_BUFS		(i2o_num_controllers * I2O_SCSI_CAN_QUEUE)
#define SG_CHAIN_POOL_SZ	(SG_MAX_BUFS * SG_CHAIN_BUF_SZ)

static int max_sg_len = 0;
static chain_buf *sg_chain_pool = NULL;
static int sg_chain_tag = 0;
static int sg_max_frags = SG_MAX_FRAGS;

/**
 *	i2o_retry_run		-	retry on timeout
 *	@f: unused
 *
 *	Retry congested frames. This actually needs pushing down into
 *	i2o core.
We should only bother the OSM with this when we can't *	queue and retry the frame. Or perhaps we should call the OSM *	and its default handler should be this in the core, and this *	call a 2nd "I give up" handler in the OSM ? */ static void i2o_retry_run(unsigned long f){	int i;	unsigned long flags;		spin_lock_irqsave(&retry_lock, flags);	for(i=0;i<retry_ct;i++)		i2o_post_message(retry_ctrl[i], virt_to_bus(retry[i]));	retry_ct=0;	spin_unlock_irqrestore(&retry_lock, flags);}/** *	flush_pending		-	empty the retry queue * *	Turn each of the pending commands into a NOP and post it back *	to the controller to clear it. */ static void flush_pending(void){	int i;	unsigned long flags;		spin_lock_irqsave(&retry_lock, flags);	for(i=0;i<retry_ct;i++)	{		retry[i][0]&=~0xFFFFFF;		retry[i][0]|=I2O_CMD_UTIL_NOP<<24;		i2o_post_message(retry_ctrl[i],virt_to_bus(retry[i]));	}	retry_ct=0;	spin_unlock_irqrestore(&retry_lock, flags);}/** *	i2o_scsi_reply		-	scsi message reply processor *	@h: our i2o handler *	@c: controller issuing the reply *	@msg: the message from the controller (mapped) * *	Process reply messages (interrupts in normal scsi controller think). *	We can get a variety of messages to process. The normal path is *	scsi command completions. We must also deal with IOP failures, *	the reply to a bus reset and the reply to a LUN query. 
* *	Locks: the queue lock is taken to call the completion handler */static void i2o_scsi_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg){	struct scsi_cmnd *current_command;	spinlock_t *lock;	u32 *m = (u32 *)msg;	u8 as,ds,st;	unsigned long flags;	if(m[0] & (1<<13))	{		printk("IOP fail.\n");		printk("From %d To %d Cmd %d.\n",			(m[1]>>12)&0xFFF,			m[1]&0xFFF,			m[1]>>24);		printk("Failure Code %d.\n", m[4]>>24);		if(m[4]&(1<<16))			printk("Format error.\n");		if(m[4]&(1<<17))			printk("Path error.\n");		if(m[4]&(1<<18))			printk("Path State.\n");		if(m[4]&(1<<18))			printk("Congestion.\n");				m=(u32 *)bus_to_virt(m[7]);		printk("Failing message is %p.\n", m);				/* This isnt a fast path .. */		spin_lock_irqsave(&retry_lock, flags);				if((m[4]&(1<<18)) && retry_ct < 32)		{			retry_ctrl[retry_ct]=c;			retry[retry_ct]=m;			if(!retry_ct++)			{				retry_timer.expires=jiffies+1;				add_timer(&retry_timer);			}			spin_unlock_irqrestore(&retry_lock, flags);		}		else		{			spin_unlock_irqrestore(&retry_lock, flags);			/* Create a scsi error for this */			current_command = (struct scsi_cmnd *)i2o_context_list_get(m[3], c);			if(!current_command)				return;			lock = current_command->device->host->host_lock;			printk("Aborted %ld\n", current_command->serial_number);			spin_lock_irqsave(lock, flags);			current_command->result = DID_ERROR << 16;			current_command->scsi_done(current_command);			spin_unlock_irqrestore(lock, flags);						/* Now flush the message by making it a NOP */			m[0]&=0x00FFFFFF;			m[0]|=(I2O_CMD_UTIL_NOP)<<24;			i2o_post_message(c,virt_to_bus(m));		}		return;	}		prefetchw(&queue_depth);				/*	 *	Low byte is device status, next is adapter status,	 *	(then one byte reserved), then request status.	 
*/	ds=(u8)le32_to_cpu(m[4]);	as=(u8)le32_to_cpu(m[4]>>8);	st=(u8)le32_to_cpu(m[4]>>24);		dprintk(KERN_INFO "i2o got a scsi reply %08X: ", m[0]);	dprintk(KERN_INFO "m[2]=%08X: ", m[2]);	dprintk(KERN_INFO "m[4]=%08X\n", m[4]); 	if(m[2]&0x80000000)	{		if(m[2]&0x40000000)		{			dprintk(KERN_INFO "Event.\n");			lun_done=1;			return;		}		printk(KERN_INFO "i2o_scsi: bus reset completed.\n");		return;	}	current_command = (struct scsi_cmnd *)i2o_context_list_get(m[3], c);		/*	 *	Is this a control request coming back - eg an abort ?	 */	 	atomic_dec(&queue_depth);	if(current_command==NULL)	{		if(st)			dprintk(KERN_WARNING "SCSI abort: %08X", m[4]);		dprintk(KERN_INFO "SCSI abort completed.\n");		return;	}		dprintk(KERN_INFO "Completed %ld\n", current_command->serial_number);		if(st == 0x06)	{		if(le32_to_cpu(m[5]) < current_command->underflow)		{			int i;			printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X\n",				le32_to_cpu(m[5]), current_command->underflow);			printk("Cmd: ");			for(i=0;i<15;i++)				printk("%02X ", current_command->cmnd[i]);			printk(".\n");		}		else st=0;	}		if(st)	{		/* An error has occurred */		dprintk(KERN_WARNING "SCSI error %08X", m[4]);					if (as == 0x0E) 			/* SCSI Reset */			current_command->result = DID_RESET << 16;		else if (as == 0x0F)			current_command->result = DID_PARITY << 16;		else			current_command->result = DID_ERROR << 16;	}	else		/*		 *	It worked maybe ?		 
*/				current_command->result = DID_OK << 16 | ds;	if (current_command->use_sg) {		pci_unmap_sg(c->pdev,			(struct scatterlist *)current_command->buffer,			current_command->use_sg,			current_command->sc_data_direction);	} else if (current_command->request_bufflen) {		pci_unmap_single(c->pdev,			(dma_addr_t)((long)current_command->SCp.ptr),			current_command->request_bufflen,			current_command->sc_data_direction);	}	lock = current_command->device->host->host_lock;	spin_lock_irqsave(lock, flags);	current_command->scsi_done(current_command);	spin_unlock_irqrestore(lock, flags);	return;}struct i2o_handler i2o_scsi_handler = {	.reply	= i2o_scsi_reply,	.name	= "I2O SCSI OSM",	.class	= I2O_CLASS_SCSI_PERIPHERAL,};/** *	i2o_find_lun		-	report the lun of an i2o device *	@c: i2o controller owning the device *	@d: i2o disk device *	@target: filled in with target id *	@lun: filled in with target lun * *	Query an I2O device to find out its SCSI lun and target numbering. We *	don't currently handle some of the fancy SCSI-3 stuff although our *	querying is sufficient to do so. */ static int i2o_find_lun(struct i2o_controller *c, struct i2o_device *d, int *target, int *lun){	u8 reply[8];		if(i2o_query_scalar(c, d->lct_data.tid, 0, 3, reply, 4)<0)		return -1;			*target=reply[0];		if(i2o_query_scalar(c, d->lct_data.tid, 0, 4, reply, 8)<0)		return -1;	*lun=reply[1];	dprintk(KERN_INFO "SCSI (%d,%d)\n", *target, *lun);	return 0;}/** *	i2o_scsi_init		-	initialize an i2o device for scsi *	@c: i2o controller owning the device *	@d: scsi controller *	@shpnt: scsi device we wish it to become * *	Enumerate the scsi peripheral/fibre channel peripheral class *	devices that are children of the controller. From that we build *	a translation map for the command queue code. 
Since I2O works on *	its own tid's we effectively have to think backwards to get what *	the midlayer wants */ static void i2o_scsi_init(struct i2o_controller *c, struct i2o_device *d, struct Scsi_Host *shpnt){	struct i2o_device *unit;	struct i2o_scsi_host *h =(struct i2o_scsi_host *)shpnt->hostdata;	int lun;	int target;		h->controller=c;	h->bus_task=d->lct_data.tid;		for(target=0;target<16;target++)		for(lun=0;lun<8;lun++)			h->task[target][lun] = -1;				for(unit=c->devices;unit!=NULL;unit=unit->next)	{		dprintk(KERN_INFO "Class %03X, parent %d, want %d.\n",			unit->lct_data.class_id, unit->lct_data.parent_tid, d->lct_data.tid);					/* Only look at scsi and fc devices */		if (    (unit->lct_data.class_id != I2O_CLASS_SCSI_PERIPHERAL)		     && (unit->lct_data.class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL)		   )			continue;		/* On our bus ? */		dprintk(KERN_INFO "Found a disk (%d).\n", unit->lct_data.tid);		if ((unit->lct_data.parent_tid == d->lct_data.tid)		     || (unit->lct_data.parent_tid == d->lct_data.parent_tid)		   )		{			u16 limit;			dprintk(KERN_INFO "Its ours.\n");			if(i2o_find_lun(c, unit, &target, &lun)==-1)			{				printk(KERN_ERR "i2o_scsi: Unable to get lun for tid %d.\n", unit->lct_data.tid);				continue;			}			dprintk(KERN_INFO "Found disk %d %d.\n", target, lun);			h->task[target][lun]=unit->lct_data.tid;			h->tagclock[target][lun]=jiffies;			/* Get the max fragments/request */			i2o_query_scalar(c, d->lct_data.tid, 0xF103, 3, &limit, 2);						/* sanity */			if ( limit == 0 )			{				printk(KERN_WARNING "i2o_scsi: Ignoring unreasonable SG limit of 0 from IOP!\n");				limit = 1;			}						shpnt->sg_tablesize = limit;			dprintk(KERN_INFO "i2o_scsi: set scatter-gather to %d.\n",				shpnt->sg_tablesize);		}	}		}/** *	i2o_scsi_detect		-	probe for I2O scsi devices *	@tpnt: scsi layer template * *	I2O is a little odd here. The I2O core already knows what the *	devices are. It also knows them by disk and tape as well as *	by controller. 
We register each I2O scsi class object as a *	scsi controller and then let the enumeration fake up the rest */ static int i2o_scsi_detect(struct scsi_host_template * tpnt){	struct Scsi_Host *shpnt = NULL;	int i;	int count;	printk(KERN_INFO "i2o_scsi.c: %s\n", VERSION_STRING);	if(i2o_install_handler(&i2o_scsi_handler)<0)	{		printk(KERN_ERR "i2o_scsi: Unable to install OSM handler.\n");		return 0;	}	scsi_context = i2o_scsi_handler.context;		if((sg_chain_pool = kmalloc(SG_CHAIN_POOL_SZ, GFP_KERNEL)) == NULL)	{		printk(KERN_INFO "i2o_scsi: Unable to alloc %d byte SG chain buffer pool.\n", SG_CHAIN_POOL_SZ);		printk(KERN_INFO "i2o_scsi: SG chaining DISABLED!\n");		sg_max_frags = 11;	}	else	{		printk(KERN_INFO "  chain_pool: %d bytes @ %p\n", SG_CHAIN_POOL_SZ, sg_chain_pool);		printk(KERN_INFO "  (%d byte buffers X %d can_queue X %d i2o controllers)\n",				SG_CHAIN_BUF_SZ, I2O_SCSI_CAN_QUEUE, i2o_num_controllers);		sg_max_frags = SG_MAX_FRAGS;    // 64	}		init_timer(&retry_timer);	retry_timer.data = 0UL;	retry_timer.function = i2o_retry_run;	//	printk("SCSI OSM at %d.\n", scsi_context);	for (count = 0, i = 0; i < MAX_I2O_CONTROLLERS; i++)	{		struct i2o_controller *c=i2o_find_controller(i);		struct i2o_device *d;		/*		 *	This controller doesn't exist.		 */				if(c==NULL)			continue;					/*		 *	Fixme - we need some altered device locking. This		 *	is racing with device addition in theory. Easy to fix.		 */				for(d=c->devices;d!=NULL;d=d->next)

/*
 * NOTE(review): capture truncated here (end of page 1/2). The body of
 * i2o_scsi_detect() and the remainder of the file are missing from this
 * extract; the code viewer's keyboard-shortcut help text has been
 * replaced by this comment.
 */