
megaraid_mbox.c

linux-2.6.15.6 | C | Page 1 of 5
			"megaraid: out of memory, %s %d\n", __FUNCTION__,			__LINE__));		goto out_free_common_mbox;	}	memset(adapter->ibuf, 0, MBOX_IBUF_SIZE);	// Allocate memory for our SCSI Command Blocks and their associated	// memory	/*	 * Allocate memory for the base list of scb. Later allocate memory for	 * CCBs and embedded components of each CCB and point the pointers in	 * scb to the allocated components	 * NOTE: The code to allocate SCB will be duplicated in all the LLD	 * since the calling routine does not yet know the number of available	 * commands.	 */	adapter->kscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_SCSI_CMDS,			GFP_KERNEL);	if (adapter->kscb_list == NULL) {		con_log(CL_ANN, (KERN_WARNING			"megaraid: out of memory, %s %d\n", __FUNCTION__,			__LINE__));		goto out_free_ibuf;	}	memset(adapter->kscb_list, 0, sizeof(scb_t) * MBOX_MAX_SCSI_CMDS);	// memory allocation for our command packets	if (megaraid_mbox_setup_dma_pools(adapter) != 0) {		con_log(CL_ANN, (KERN_WARNING			"megaraid: out of memory, %s %d\n", __FUNCTION__,			__LINE__));		goto out_free_scb_list;	}	// Adjust the scb pointers and link in the free pool	epthru_pci_blk	= raid_dev->epthru_pool;	sg_pci_blk	= raid_dev->sg_pool;	mbox_pci_blk	= raid_dev->mbox_pool;	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {		scb			= adapter->kscb_list + i;		ccb			= raid_dev->ccb_list + i;		ccb->mbox	= (mbox_t *)(mbox_pci_blk[i].vaddr + 16);		ccb->raw_mbox	= (uint8_t *)ccb->mbox;		ccb->mbox64	= (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);		ccb->mbox_dma_h	= (unsigned long)mbox_pci_blk[i].dma_addr + 16;		// make sure the mailbox is aligned properly		if (ccb->mbox_dma_h & 0x0F) {			con_log(CL_ANN, (KERN_CRIT				"megaraid mbox: not aligned on 16-bytes\n"));			goto out_teardown_dma_pools;		}		ccb->epthru		= (mraid_epassthru_t *)						epthru_pci_blk[i].vaddr;		ccb->epthru_dma_h	= epthru_pci_blk[i].dma_addr;		ccb->pthru		= (mraid_passthru_t *)ccb->epthru;		ccb->pthru_dma_h	= ccb->epthru_dma_h;		ccb->sgl64		= (mbox_sgl64 *)sg_pci_blk[i].vaddr;		ccb->sgl_dma_h		= sg_pci_blk[i].dma_addr;		ccb->sgl32		= (mbox_sgl32 *)ccb->sgl64;		scb->ccb		= (caddr_t)ccb;		scb->gp			= 0;		scb->sno		= i;	// command index		scb->scp		= NULL;		scb->state		= SCB_FREE;		scb->dma_direction	= PCI_DMA_NONE;		scb->dma_type		= MRAID_DMA_NONE;		scb->dev_channel	= -1;		scb->dev_target		= -1;		// put scb in the free pool		list_add_tail(&scb->list, &adapter->kscb_pool);	}	return 0;out_teardown_dma_pools:	megaraid_mbox_teardown_dma_pools(adapter);out_free_scb_list:	kfree(adapter->kscb_list);out_free_ibuf:	pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,		adapter->ibuf_dma_h);out_free_common_mbox:	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);	return -1;}/** * megaraid_free_cmd_packets - free memory * @param adapter	: soft state of the raid controller * * Release memory resources allocated for commands */static voidmegaraid_free_cmd_packets(adapter_t *adapter){	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);	megaraid_mbox_teardown_dma_pools(adapter);	kfree(adapter->kscb_list);	pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,		(void *)adapter->ibuf, adapter->ibuf_dma_h);	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);	return;}/** * megaraid_mbox_setup_dma_pools - setup dma pool for command packets * @param adapter	: HBA soft state * * setup the dma pools for mailbox, passthru and extended passthru structures, * and scatter-gather lists */static 
/**
 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
 * @param adapter	: HBA soft state
 *
 * setup the dma pools for mailbox, passthru and extended passthru structures,
 * and scatter-gather lists
 */
static int
megaraid_mbox_setup_dma_pools(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct mraid_pci_blk	*epthru_pci_blk;
	struct mraid_pci_blk	*sg_pci_blk;
	struct mraid_pci_blk	*mbox_pci_blk;
	int			i;

	// Allocate memory for 16-bytes aligned mailboxes
	raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
						adapter->pdev,
						sizeof(mbox64_t) + 16,
						16, 0);

	if (raid_dev->mbox_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	mbox_pci_blk = raid_dev->mbox_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		mbox_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->mbox_pool_handle,
						GFP_KERNEL,
						&mbox_pci_blk[i].dma_addr);

		if (!mbox_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	/*
	 * Allocate memory for each embedded passthru structure pointer
	 * Request for a 128 bytes aligned structure for each passthru command
	 * structure
	 * Since passthru and extended passthru commands are exclusive, they
	 * share common memory pool. Passthru structures piggyback on memory
	 * allocated to extended passthru since passthru is smaller of the two
	 */
	raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
			adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);

	if (raid_dev->epthru_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	epthru_pci_blk = raid_dev->epthru_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		epthru_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->epthru_pool_handle,
						GFP_KERNEL,
						&epthru_pci_blk[i].dma_addr);

		if (!epthru_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	// Allocate memory for each scatter-gather list. Request for 512 bytes
	// alignment for each sg list
	raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
					adapter->pdev,
					sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
					512, 0);

	if (raid_dev->sg_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	sg_pci_blk = raid_dev->sg_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		sg_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->sg_pool_handle,
						GFP_KERNEL,
						&sg_pci_blk[i].dma_addr);

		if (!sg_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	return 0;

fail_setup_dma_pool:
	megaraid_mbox_teardown_dma_pools(adapter);
	return -1;
}


/**
 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
 * @param adapter	: HBA soft state
 *
 * teardown the dma pool for mailbox, passthru and extended passthru
 * structures, and scatter-gather lists
 */
static void
megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct mraid_pci_blk	*epthru_pci_blk;
	struct mraid_pci_blk	*sg_pci_blk;
	struct mraid_pci_blk	*mbox_pci_blk;
	int			i;

	sg_pci_blk = raid_dev->sg_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
			sg_pci_blk[i].dma_addr);
	}
	if (raid_dev->sg_pool_handle)
		pci_pool_destroy(raid_dev->sg_pool_handle);

	epthru_pci_blk = raid_dev->epthru_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->epthru_pool_handle,
			epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
	}
	if (raid_dev->epthru_pool_handle)
		pci_pool_destroy(raid_dev->epthru_pool_handle);

	mbox_pci_blk = raid_dev->mbox_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->mbox_pool_handle,
			mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
	}
	if (raid_dev->mbox_pool_handle)
		pci_pool_destroy(raid_dev->mbox_pool_handle);

	return;
}
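
/*
 * Editor's note -- layout sketch inferred from the code above, not part
 * of the original driver.  Each block from "megaraid mbox pool" is
 * sizeof(mbox64_t) + 16 bytes, 16-byte aligned, and the allocation
 * routine earlier carves it up as:
 *
 *	vaddr + 8  : mbox64_t  (64-bit address extension header)
 *	vaddr + 16 : mbox_t    (legacy mailbox, on the 16-byte boundary
 *	                        the firmware requires)
 *
 * The extra 16 bytes make room for the offset mbox_t inside the block,
 * and the (ccb->mbox_dma_h & 0x0F) check verifies that the pool really
 * honored the requested alignment before the mailbox is used.
 */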
/**
 * megaraid_alloc_scb - detach and return a scb from the free list
 * @adapter	: controller's soft state
 *
 * return the scb from the head of the free list. NULL if there are none
 * available
 **/
static inline scb_t *
megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
{
	struct list_head	*head = &adapter->kscb_pool;
	scb_t			*scb = NULL;
	unsigned long		flags;

	// detach scb from free pool
	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);

	if (list_empty(head)) {
		spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
		return NULL;
	}

	scb = list_entry(head->next, scb_t, list);
	list_del_init(&scb->list);

	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);

	scb->state	= SCB_ACTIVE;
	scb->scp	= scp;
	scb->dma_type	= MRAID_DMA_NONE;

	return scb;
}


/**
 * megaraid_dealloc_scb - return the scb to the free pool
 * @adapter	: controller's soft state
 * @scb		: scb to be freed
 *
 * return the scb back to the free list of scbs. The caller must 'flush' the
 * SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc.
 * NOTE NOTE: Make sure the scb is not on any list before calling this
 * routine.
 **/
static inline void
megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
{
	unsigned long		flags;

	// put scb in the free pool
	scb->state	= SCB_FREE;
	scb->scp	= NULL;
	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);

	list_add(&scb->list, &adapter->kscb_pool);

	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);

	return;
}


/**
 * megaraid_mbox_mksgl - make the scatter-gather list
 * @adapter	- controller's soft state
 * @scb		- scsi control block
 *
 * prepare the scatter-gather list
 */
static inline int
megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
{
	struct scatterlist	*sgl;
	mbox_ccb_t		*ccb;
	struct page		*page;
	unsigned long		offset;
	struct scsi_cmnd	*scp;
	int			sgcnt;
	int			i;

	scp	= scb->scp;
	ccb	= (mbox_ccb_t *)scb->ccb;

	// no mapping required if no data to be transferred
	if (!scp->request_buffer || !scp->request_bufflen)
		return 0;

	if (!scp->use_sg) {	/* scatter-gather list not used */

		page = virt_to_page(scp->request_buffer);

		offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK);

		ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset,
						scp->request_bufflen,
						scb->dma_direction);
		scb->dma_type = MRAID_DMA_WBUF;

		/*
		 * We need to handle special 64-bit commands that need a
		 * minimum of 1 SG
		 */
		sgcnt = 1;
		ccb->sgl64[0].address	= ccb->buf_dma_h;
		ccb->sgl64[0].length	= scp->request_bufflen;

		return sgcnt;
	}

	sgl = (struct scatterlist *)scp->request_buffer;

	// The number of sg elements returned must not exceed our limit
	sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg,
			scb->dma_direction);

	if (sgcnt > adapter->sglen) {
		con_log(CL_ANN, (KERN_CRIT
			"megaraid critical: too many sg elements:%d\n",
			sgcnt));
		BUG();
	}

	scb->dma_type = MRAID_DMA_WSG;

	for (i = 0; i < sgcnt; i++, sgl++) {
		ccb->sgl64[i].address	= sg_dma_address(sgl);
		ccb->sgl64[i].length	= sg_dma_len(sgl);
	}

	// Return count of SG nodes
	return sgcnt;
}
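
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * In the 2.6.15-era PCI DMA API used by megaraid_mbox_mksgl() above,
 * every pci_map_page()/pci_map_sg() must later be balanced by the
 * matching unmap, which is why scb->dma_type records which flavor of
 * mapping was created:
 *
 *	if (scb->dma_type == MRAID_DMA_WSG)
 *		pci_unmap_sg(adapter->pdev,
 *			(struct scatterlist *)scp->request_buffer,
 *			scp->use_sg, scb->dma_direction);
 *	else if (scb->dma_type == MRAID_DMA_WBUF)
 *		pci_unmap_page(adapter->pdev, ccb->buf_dma_h,
 *			scp->request_bufflen, scb->dma_direction);
 *
 * The driver performs this cleanup in its command-completion path,
 * which is not shown on this page of the listing.
 */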
/**
 * mbox_post_cmd - issue a mailbox command
 * @adapter	- controller's soft state
 * @scb		- command to be issued
 *
 * post the command to the controller if mailbox is available.
 */
static inline int
mbox_post_cmd(adapter_t *adapter, scb_t *scb)
{
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
	mbox64_t	*mbox64;
	mbox_t		*mbox;
	mbox_ccb_t	*ccb;
	unsigned long	flags;
	unsigned int	i = 0;

	ccb	= (mbox_ccb_t *)scb->ccb;
	mbox	= raid_dev->mbox;
	mbox64	= raid_dev->mbox64;

	/*
	 * Check for busy mailbox. If it is, return failure - the caller
	 * should retry later.
	 */
	spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);

	if (unlikely(mbox->busy)) {
		do {
			udelay(1);
			i++;
			rmb();
		} while (mbox->busy && (i < max_mbox_busy_wait));

		if (mbox->busy) {

			spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);

			return -1;
		}
	}

	// Copy this command's mailbox data into "adapter's" mailbox
	memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
	mbox->cmdid = scb->sno;

	adapter->outstanding_cmds++;

	if (scb->dma_direction == PCI_DMA_TODEVICE) {
		if (!scb->scp->use_sg) {	// sg list not used
			pci_dma_sync_single_for_device(adapter->pdev,
					ccb->buf_dma_h,
					scb->scp->request_bufflen,
					PCI_DMA_TODEVICE);
		}
		else {
			pci_dma_sync_sg_for_device(adapter->pdev,
				scb->scp->request_buffer,
				scb->scp->use_sg, PCI_DMA_TODEVICE);
		}
	}

	mbox->busy	= 1;	// Set busy
	mbox->poll	= 0;
	mbox->ack	= 0;

	wmb();

	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);

	spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);

	return 0;
}


/**
 * megaraid_queue_command - generic queue entry point for all LLDs
 * @scp		: pointer to the scsi command to be executed
 * @done	: callback routine to be called after the cmd has been completed
 *
 * Queue entry point for mailbox based controllers.
 */
static int
megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
{
	adapter_t	*adapter;
	scb_t		*scb;
	int		if_busy;

	adapter		= SCP2ADAPTER(scp);
	scp->scsi_done	= done;
	scp->result	= 0;

	/*
	 * Allocate and build a SCB request
	 * if_busy flag will be set if megaraid_mbox_build_cmd() command could
	 * not allocate scb. We will return non-zero status in that case.
	 * NOTE: scb can be null even though certain commands completed
	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, it would
	 * return 0 in that case, and we would do the callback right away.
	 */
	if_busy	= 0;
	scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
	if (!scb) {	// command already completed
		done(scp);
		return 0;
	}

	megaraid_mbox_runpendq(adapter, scb);
	return if_busy;
}

/**
 * megaraid_mbox_build_cmd - transform the mid-layer scsi command to megaraid
 * firmware lingua
 * @adapter	- controller's soft state
 * @scp		- mid-layer scsi command pointer
 * @busy	- set if request could not be completed because of lack of
 *		resources
 *
