/*
 * (Scraped-page header — not part of the original source file.)
 * sym_glue.c
 * 来自「优龙2410linux2.6.8内核源代码」· C语言 代码 · 共 2,552 行 · 第 1/5 页
 * (From the "YL2410 linux-2.6.8 kernel source" · C code · 2,552 lines total · page 1 of 5)
 */
/*
 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
 * of PCI-SCSI IO processors.
 *
 * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
 *
 * This driver is derived from the Linux sym53c8xx driver.
 * Copyright (C) 1998-2000  Gerard Roudier
 *
 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
 * a port of the FreeBSD ncr driver to Linux-1.2.13.
 *
 * The original ncr driver has been written for 386bsd and FreeBSD by
 *         Wolfgang Stanglmeier        <wolf@cologne.de>
 *         Stefan Esser                <se@mi.Uni-Koeln.de>
 * Copyright (C) 1994  Wolfgang Stanglmeier
 *
 * Other major contributions:
 *
 * NVRAM detection and reading.
 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
 *
 *-----------------------------------------------------------------------------
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Where this Software is combined with software released under the terms of
 * the GNU Public License ("GPL") and the terms of the GPL would require the
 * combined work to also be released under the terms of the GPL, the terms
 * and conditions of this License will apply in addition to those of the
 * GPL with the exception of any terms or conditions of this License that
 * conflict with, or are expressly prohibited by, the GPL.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define SYM_GLUE_C

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>

#include "sym_glue.h"
#include "sym_nvram.h"

/* Short and long driver name prefixes used for host naming. */
#define NAME53C		"sym53c"
#define NAME53C8XX	"sym53c8xx"

/*
 *  Read PCI BAR `index' of `pdev' into *base and return the index of
 *  the NEXT BAR to read.  A 64-bit memory BAR occupies two config
 *  dwords, so the return value advances by 2 in that case (the high
 *  dword is only merged into *base when the kernel is 64-bit;
 *  on a 32-bit build it is skipped but the index still advances).
 */
static int __devinit
pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
{
	u32 tmp;
#define PCI_BAR_OFFSET(index) (PCI_BASE_ADDRESS_0 + (index<<2))

	pci_read_config_dword(pdev, PCI_BAR_OFFSET(index), &tmp);
	*base = tmp;
	++index;
	/* Low 3 bits of a memory BAR encode its type; 0x4 = 64-bit BAR. */
	if ((tmp & 0x7) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
#if BITS_PER_LONG > 32
		pci_read_config_dword(pdev, PCI_BAR_OFFSET(index), &tmp);
		*base |= (((u_long)tmp) << 32);
#endif
		++index;
	}
	return index;
#undef PCI_BAR_OFFSET
}

/* This lock protects only the memory allocation/free.  */
spinlock_t sym53c8xx_lock = SPIN_LOCK_UNLOCKED;

static struct scsi_transport_template *sym2_transport_template = NULL;

/*
 *  Wrappers to the generic memory allocator.
 *  Each wrapper takes sym53c8xx_lock around the corresponding
 *  *_unlocked pool primitive so the pool can be shared between
 *  interrupt and process context.
 */

/* Allocate a zeroed chunk of `size' bytes; `name' tags it for debugging. */
void *sym_calloc(int size, char *name)
{
	unsigned long flags;
	void *m;
	spin_lock_irqsave(&sym53c8xx_lock, flags);
	m = sym_calloc_unlocked(size, name);
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
	return m;
}

/* Free a chunk previously obtained from sym_calloc(). */
void sym_mfree(void *m, int size, char *name)
{
	unsigned long flags;
	spin_lock_irqsave(&sym53c8xx_lock, flags);
	sym_mfree_unlocked(m, size, name);
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
}

/* Allocate zeroed DMA-able memory from the pool identified by dev_dmat. */
void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name)
{
	unsigned long flags;
	void *m;
	spin_lock_irqsave(&sym53c8xx_lock, flags);
	m = __sym_calloc_dma_unlocked(dev_dmat, size, name);
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
	return m;
}

/* Free DMA-able memory back to the pool identified by dev_dmat. */
void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name)
{
	unsigned long flags;
	spin_lock_irqsave(&sym53c8xx_lock, flags);
	__sym_mfree_dma_unlocked(dev_dmat, m, size, name);
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
}

/* Translate a DMA-pool virtual address to its bus address. */
m_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m)
{
	unsigned long flags;
	m_addr_t b;
	spin_lock_irqsave(&sym53c8xx_lock, flags);
	b = __vtobus_unlocked(dev_dmat, m);
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
	return b;
}

/*
 *  Driver host data structure.
 *  Stored in the Scsi_Host hostdata area; holds the controller softc.
 */
struct host_data {
	struct sym_hcb *ncb;
};

/*
 *  Used by the eh thread to wait for command completion.
 *  It is allocated on the eh thread stack.
 */
struct sym_eh_wait {
	struct semaphore sem;		/* completion rendez-vous */
	struct timer_list timer;	/* bounds the wait */
	void (*old_done)(struct scsi_cmnd *);	/* saved ->scsi_done */
	int to_do;			/* eh action being waited for */
	int timed_out;			/* set if the timer fired first */
};

/*
 *  Driver private area in the SCSI command structure.
 */
struct sym_ucmd {		/* Override the SCSI pointer structure */
	SYM_QUEHEAD link_cmdq;	/* Must stay at offset ZERO */
	dma_addr_t data_mapping;	/* single-buffer DMA handle */
	u_char	data_mapped;	/* 0 = none, 1 = single, 2 = sg list */
	struct sym_eh_wait *eh_wait;	/* eh thread rendez-vous, if any */
};

/* The private area lives inside cmd->SCp (the generic SCSI pointer). */
#define SYM_UCMD_PTR(cmd)  ((struct sym_ucmd *)(&(cmd)->SCp))
#define SYM_SCMD_PTR(ucmd) sym_que_entry(ucmd, struct scsi_cmnd, SCp)
#define SYM_SOFTC_PTR(cmd) (((struct host_data *)cmd->device->host->hostdata)->ncb)

/*
 *  Undo whatever DMA mapping (single buffer or sg list) was set up
 *  for `cmd', then mark the command as unmapped.
 */
static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	int dma_dir = cmd->sc_data_direction;

	switch(SYM_UCMD_PTR(cmd)->data_mapped) {
	case 2:		/* scatter/gather list */
		pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
		break;
	case 1:		/* single contiguous buffer */
		pci_unmap_single(pdev, SYM_UCMD_PTR(cmd)->data_mapping,
				 cmd->request_bufflen, dma_dir);
		break;
	}
	SYM_UCMD_PTR(cmd)->data_mapped = 0;
}

/*
 *  DMA-map the single request buffer of `cmd'.
 *  Returns the bus address, or 0 on mapping failure.
 */
static dma_addr_t __map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	dma_addr_t mapping;
	int dma_dir = cmd->sc_data_direction;

	mapping = pci_map_single(pdev, cmd->request_buffer,
				 cmd->request_bufflen, dma_dir);
	if (mapping) {
		SYM_UCMD_PTR(cmd)->data_mapped  = 1;
		SYM_UCMD_PTR(cmd)->data_mapping = mapping;
	}
	return mapping;
}

/*
 *  DMA-map the scatter/gather list of `cmd'.
 *  Returns the number of mapped entries (may be less than cmd->use_sg
 *  after merging), or <= 0 on failure.  Note that data_mapping is
 *  reused to remember the mapped entry count in the sg case.
 */
static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	int use_sg;
	int dma_dir = cmd->sc_data_direction;

	use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
	if (use_sg > 0) {
		SYM_UCMD_PTR(cmd)->data_mapped  = 2;
		SYM_UCMD_PTR(cmd)->data_mapping = use_sg;
	}
	return use_sg;
}

/*
 *  Make the DMA buffer(s) of `cmd' coherent for CPU access
 *  (needed before the CPU peeks at data the device wrote).
 */
static void __sync_scsi_data_for_cpu(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	int dma_dir = cmd->sc_data_direction;

	switch(SYM_UCMD_PTR(cmd)->data_mapped) {
	case 2:
		pci_dma_sync_sg_for_cpu(pdev, cmd->buffer, cmd->use_sg, dma_dir);
		break;
	case 1:
		pci_dma_sync_single_for_cpu(pdev, SYM_UCMD_PTR(cmd)->data_mapping,
					    cmd->request_bufflen, dma_dir);
		break;
	}
}

/*
 *  Hand the DMA buffer(s) of `cmd' back to the device
 *  (the counterpart of __sync_scsi_data_for_cpu).
 */
static void __sync_scsi_data_for_device(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	int dma_dir = cmd->sc_data_direction;

	switch(SYM_UCMD_PTR(cmd)->data_mapped) {
	case 2:
		pci_dma_sync_sg_for_device(pdev, cmd->buffer, cmd->use_sg, dma_dir);
		break;
	case 1:
		pci_dma_sync_single_for_device(pdev, SYM_UCMD_PTR(cmd)->data_mapping,
					       cmd->request_bufflen, dma_dir);
		break;
	}
}

/* Convenience wrappers: supply the PCI device from the softc. */
#define unmap_scsi_data(np, cmd)	\
		__unmap_scsi_data(np->s.device, cmd)
#define map_scsi_single_data(np, cmd)	\
		__map_scsi_single_data(np->s.device, cmd)
#define map_scsi_sg_data(np, cmd)	\
		__map_scsi_sg_data(np->s.device, cmd)
#define sync_scsi_data_for_cpu(np, cmd)		\
		__sync_scsi_data_for_cpu(np->s.device, cmd)
#define sync_scsi_data_for_device(np, cmd)		\
		__sync_scsi_data_for_device(np->s.device, cmd)

/*
 *  Complete a pending CAM CCB.
 *  Dequeues the command, releases its DMA mapping and calls the
 *  mid-layer completion routine.
 */
void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *ccb)
{
	sym_remque(&SYM_UCMD_PTR(ccb)->link_cmdq);
	unmap_scsi_data(np, ccb);
	ccb->scsi_done(ccb);
}

/* Same as sym_xpt_done(), but sets the CAM status first. */
void sym_xpt_done2(struct sym_hcb *np, struct scsi_cmnd *ccb, int cam_status)
{
	sym_set_cam_status(ccb, cam_status);
	sym_xpt_done(np, ccb);
}

/*
 *  Print something that identifies the IO.
 */
void sym_print_addr(struct sym_ccb *cp)
{
	struct scsi_cmnd *cmd = cp->cam_ccb;
	if (cmd)
		printf("%s:%d:%d:", sym_name(SYM_SOFTC_PTR(cmd)),
				cmd->device->id, cmd->device->lun);
}

/*
 *  Tell the SCSI layer about a BUS RESET.
 *  Arms the settle-time window during which command processing
 *  is suspended (settle_delay seconds from now).
 */
void sym_xpt_async_bus_reset(struct sym_hcb *np)
{
	printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np));
	np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ;
	np->s.settle_time_valid = 1;
	if (sym_verbose >= 2)
		printf_info("%s: command processing suspended for %d seconds\n",
			    sym_name(np), sym_driver_setup.settle_delay);
}

/*
 *  Tell the SCSI layer about a BUS DEVICE RESET message sent.
 */
void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target)
{
	printf_notice("%s: TARGET %d has been reset.\n", sym_name(np), target);
}

/*
 *  Tell the SCSI layer about the new transfer parameters.
 */
void sym_xpt_async_nego_wide(struct sym_hcb *np, int target)
{
	/* Only chatty verbosity levels announce negotiation results. */
	if (sym_verbose < 3)
		return;
	sym_announce_transfer_rate(np, target);
}

/*
 *  Choose the more appropriate CAM status if 
 *  the IO encountered an extended error.
 */
static int sym_xerr_cam_status(int cam_status, int x_status)
{
	if (x_status) {
		if	(x_status & XE_PARITY_ERR)
			cam_status = DID_PARITY;
		else if	(x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
			cam_status = DID_ERROR;
		else if	(x_status & XE_BAD_PHASE)
			cam_status = DID_ERROR;
		else
			cam_status = DID_ERROR;
	}
	return cam_status;
}

/*
 *  Build CAM result for a failed or auto-sensed IO.
 *  Fills in csio->resid and csio->result from the CCB state;
 *  when auto-sense ran (HF_SENSE) the saved (sv_*) status of the
 *  original command is reported and the sense data is copied back.
 */
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
{
	struct scsi_cmnd *csio = cp->cam_ccb;
	u_int cam_status, scsi_status, drv_status;

	drv_status  = 0;
	cam_status  = DID_OK;
	scsi_status = cp->ssss_status;

	if (cp->host_flags & HF_SENSE) {
		/* Report the status/resid of the original (sensed) command. */
		scsi_status = cp->sv_scsi_status;
		resid = cp->sv_resid;
		if (sym_verbose && cp->sv_xerr_status)
			sym_print_xerr(cp, cp->sv_xerr_status);
		if (cp->host_status == HS_COMPLETE &&
		    cp->ssss_status == S_GOOD &&
		    cp->xerr_status == 0) {
			cam_status = sym_xerr_cam_status(DID_OK,
							 cp->sv_xerr_status);
			drv_status = DRIVER_SENSE;
			/*
			 *  Bounce back the sense data to user.
			 */
			bzero(&csio->sense_buffer, sizeof(csio->sense_buffer));
			memcpy(csio->sense_buffer, cp->sns_bbuf,
			      min(sizeof(csio->sense_buffer),
				  (size_t)SYM_SNS_BBUF_LEN));
#if 0
			/*
			 *  If the device reports a UNIT ATTENTION condition 
			 *  due to a RESET condition, we should consider all 
			 *  disconnect CCBs for this unit as aborted.
			 */
			if (1) {
				u_char *p;
				p  = (u_char *) csio->sense_data;
				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
					sym_clear_tasks(np, DID_ABORT,
							cp->target,cp->lun, -1);
			}
#endif
		} else {
			/*
			 * Error return from our internal request sense.  This
			 * is bad: we must clear the contingent allegiance
			 * condition otherwise the device will always return
			 * BUSY.  Use a big stick.
			 */
			sym_reset_scsi_target(np, csio->device->id);
			cam_status = DID_ERROR;
		}
	} else if (cp->host_status == HS_COMPLETE) 	/* Bad SCSI status */
		cam_status = DID_OK;
	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
		cam_status = DID_NO_CONNECT;
	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
		cam_status = DID_ERROR;
	else {						/* Extended error */
		if (sym_verbose) {
			PRINT_ADDR(cp);
			printf ("COMMAND FAILED (%x %x %x).\n",
				cp->host_status, cp->ssss_status,
				cp->xerr_status);
		}
		/*
		 *  Set the most appropriate value for CAM status.
		 */
		cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
	}
	csio->resid = resid;
	csio->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
}

/*
 *  Called on successful INQUIRY response: peek at the returned data
 *  and update per-target transfer settings if it changed anything.
 *  Only single-buffer (non-sg) commands are examined; the buffer is
 *  synced for CPU access before reading and back to the device after.
 */
void sym_sniff_inquiry(struct sym_hcb *np, struct scsi_cmnd *cmd, int resid)
{
	int retv;

	if (!cmd || cmd->use_sg)
		return;

	sync_scsi_data_for_cpu(np, cmd);
	retv = __sym_sniff_inquiry(np, cmd->device->id, cmd->device->lun,
				   (u_char *) cmd->request_buffer,
				   cmd->request_bufflen - resid);
	sync_scsi_data_for_device(np, cmd);
	if (retv < 0)
		return;
	else if (retv)
		sym_update_trans_settings(np, &np->target[cmd->device->id]);
}

/*
 *  Build the scatter/gather array for an I/O.
 */

/*
 *  Build the (single-entry) data descriptor for a command that has no
 *  sg list.  The entry is placed at the END of the phys.data array
 *  (the script walks the table backward from the last slot).
 *  Returns the number of segments (0 or 1), or -2 on mapping failure.
 */
static int sym_scatter_no_sglist(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
{
	struct sym_tblmove *data = &cp->phys.data[SYM_CONF_MAX_SG-1];
	int segment;

	cp->data_len = cmd->request_bufflen;

	if (cmd->request_bufflen) {
		dma_addr_t baddr = map_scsi_single_data(np, cmd);
		if (baddr) {
			sym_build_sge(np, data, baddr, cmd->request_bufflen);
			segment = 1;
		} else {
			segment = -2;	/* DMA mapping failed */
		}
	} else {
		segment = 0;		/* no data phase at all */
	}

	return segment;
}

/*
 *  Build the scatter/gather table for `cmd' into cp->phys.data and
 *  accumulate the total data length in cp->data_len.
 *  Returns the number of segments, -1 if the (mapped) sg list is too
 *  long for the controller table, or -2 on DMA mapping failure.
 */
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
{
	int segment;
	int use_sg = (int) cmd->use_sg;

	cp->data_len = 0;

	if (!use_sg)
		segment = sym_scatter_no_sglist(np, cp, cmd);
	else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
		struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
		struct sym_tblmove *data;

		if (use_sg > SYM_CONF_MAX_SG) {
			unmap_scsi_data(np, cmd);
			return -1;
		}

		/* Entries are packed at the end of the table. */
		data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];

		for (segment = 0; segment < use_sg; segment++) {
			dma_addr_t baddr = sg_dma_address(&scatter[segment]);
			unsigned int len = sg_dma_len(&scatter[segment]);

			sym_build_sge(np, &data[segment], baddr, len);
			cp->data_len += len;
		}
	} else {
		segment = -2;		/* pci_map_sg() failed */
	}

	return segment;
}

/*
 *  Queue a SCSI command.
 *  NOTE(review): this function is truncated in this chunk — the scraped
 *  page breaks off inside its body; the remainder is on the next page.
 */
static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *ccb)
{
/*	struct scsi_device        *device    = ccb->device; */
	struct sym_tcb *tp;
	struct sym_lcb *lp;
	struct sym_ccb *cp;
	int	order;

	/*

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?