📄 megaraid_mbox.c
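For context: the allocation routines below index arrays of struct mraid_pci_blk, which pair each per-command DMA block's kernel virtual address with its bus address. A minimal sketch of that descriptor as declared in the companion header megaraid_mbox.h (reproduced here for reference; verify against the header in your tree):

struct mraid_pci_blk {
	caddr_t		vaddr;
	dma_addr_t	dma_addr;
};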
	return -1;
}


/**
 * megaraid_fini_mbox - undo controller initialization
 * @param adapter	: our soft state
 */
static void
megaraid_fini_mbox(adapter_t *adapter)
{
	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);

	// flush all caches
	megaraid_mbox_flush_cache(adapter);

	tasklet_kill(&adapter->dpc_h);

	megaraid_sysfs_free_resources(adapter);

	megaraid_free_cmd_packets(adapter);

	free_irq(adapter->irq, adapter);

	iounmap(raid_dev->baseaddr);

	pci_release_regions(adapter->pdev);

	kfree(raid_dev);

	return;
}


/**
 * megaraid_alloc_cmd_packets - allocate shared mailbox
 * @param adapter	: soft state of the raid controller
 *
 * Allocate and align the shared mailbox. This mailbox is used to issue
 * all the commands. For IO based controllers, the mailbox is also registered
 * with the FW. Allocate memory for all commands as well.
 * This is our big allocator.
 */
static int
megaraid_alloc_cmd_packets(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct pci_dev		*pdev;
	unsigned long		align;
	scb_t			*scb;
	mbox_ccb_t		*ccb;
	struct mraid_pci_blk	*epthru_pci_blk;
	struct mraid_pci_blk	*sg_pci_blk;
	struct mraid_pci_blk	*mbox_pci_blk;
	int			i;

	pdev = adapter->pdev;

	/*
	 * Setup the mailbox
	 * Allocate the common 16-byte aligned memory for the handshake
	 * mailbox.
	 */
	raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev,
			sizeof(mbox64_t), &raid_dev->una_mbox64_dma);

	if (!raid_dev->una_mbox64) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __FUNCTION__,
			__LINE__));
		return -1;
	}
	memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t));

	/*
	 * Align the mailbox at 16-byte boundary
	 */
	raid_dev->mbox	= &raid_dev->una_mbox64->mbox32;

	raid_dev->mbox	= (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) &
			(~0UL ^ 0xFUL));

	raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8);

	align = ((void *)raid_dev->mbox -
			((void *)&raid_dev->una_mbox64->mbox32));

	raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 +
			align;

	// Allocate memory for commands issued internally
	adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE,
				&adapter->ibuf_dma_h);
	if (!adapter->ibuf) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __FUNCTION__,
			__LINE__));

		goto out_free_common_mbox;
	}
	memset(adapter->ibuf, 0, MBOX_IBUF_SIZE);

	// Allocate memory for our SCSI Command Blocks and their associated
	// memory

	/*
	 * Allocate memory for the base list of scb. Later allocate memory for
	 * CCBs and embedded components of each CCB and point the pointers in
	 * scb to the allocated components
	 * NOTE: The code to allocate SCB will be duplicated in all the LLD
	 * since the calling routine does not yet know the number of available
	 * commands.
	 */
	adapter->kscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_SCSI_CMDS,
			GFP_KERNEL);

	if (adapter->kscb_list == NULL) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __FUNCTION__,
			__LINE__));
		goto out_free_ibuf;
	}
	memset(adapter->kscb_list, 0, sizeof(scb_t) * MBOX_MAX_SCSI_CMDS);

	// memory allocation for our command packets
	if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __FUNCTION__,
			__LINE__));
		goto out_free_scb_list;
	}

	// Adjust the scb pointers and link in the free pool
	epthru_pci_blk	= raid_dev->epthru_pool;
	sg_pci_blk	= raid_dev->sg_pool;
	mbox_pci_blk	= raid_dev->mbox_pool;

	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		scb		= adapter->kscb_list + i;
		ccb		= raid_dev->ccb_list + i;

		ccb->mbox	= (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
		ccb->raw_mbox	= (uint8_t *)ccb->mbox;
		ccb->mbox64	= (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
		ccb->mbox_dma_h	= (unsigned long)mbox_pci_blk[i].dma_addr + 16;

		// make sure the mailbox is aligned properly
		if (ccb->mbox_dma_h & 0x0F) {
			con_log(CL_ANN, (KERN_CRIT
				"megaraid mbox: not aligned on 16-bytes\n"));

			goto out_teardown_dma_pools;
		}

		ccb->epthru		= (mraid_epassthru_t *)
						epthru_pci_blk[i].vaddr;
		ccb->epthru_dma_h	= epthru_pci_blk[i].dma_addr;
		ccb->pthru		= (mraid_passthru_t *)ccb->epthru;
		ccb->pthru_dma_h	= ccb->epthru_dma_h;

		ccb->sgl64		= (mbox_sgl64 *)sg_pci_blk[i].vaddr;
		ccb->sgl_dma_h		= sg_pci_blk[i].dma_addr;
		ccb->sgl32		= (mbox_sgl32 *)ccb->sgl64;

		scb->ccb		= (caddr_t)ccb;
		scb->gp			= 0;

		scb->sno		= i;	// command index

		scb->scp		= NULL;
		scb->state		= SCB_FREE;
		scb->dma_direction	= PCI_DMA_NONE;
		scb->dma_type		= MRAID_DMA_NONE;
		scb->dev_channel	= -1;
		scb->dev_target		= -1;

		// put scb in the free pool
		list_add_tail(&scb->list, &adapter->kscb_pool);
	}

	return 0;

out_teardown_dma_pools:
	megaraid_mbox_teardown_dma_pools(adapter);
out_free_scb_list:
	kfree(adapter->kscb_list);
out_free_ibuf:
	pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
		adapter->ibuf_dma_h);
out_free_common_mbox:
	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);

	return -1;
}


/**
 * megaraid_free_cmd_packets - free memory
 * @param adapter	: soft state of the raid controller
 *
 * Release memory resources allocated for commands
 */
static void
megaraid_free_cmd_packets(adapter_t *adapter)
{
	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);

	megaraid_mbox_teardown_dma_pools(adapter);

	kfree(adapter->kscb_list);

	pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
		(void *)adapter->ibuf, adapter->ibuf_dma_h);

	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);

	return;
}


/**
 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
 * @param adapter	: HBA soft state
 *
 * setup the dma pools for mailbox, passthru and extended passthru structures,
 * and scatter-gather lists
 */
static int
megaraid_mbox_setup_dma_pools(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct mraid_pci_blk	*epthru_pci_blk;
	struct mraid_pci_blk	*sg_pci_blk;
	struct mraid_pci_blk	*mbox_pci_blk;
	int			i;

	// Allocate memory for 16-bytes aligned mailboxes
	raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
						adapter->pdev,
						sizeof(mbox64_t) + 16,
						16, 0);

	if (raid_dev->mbox_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	mbox_pci_blk = raid_dev->mbox_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		mbox_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->mbox_pool_handle,
						GFP_KERNEL,
						&mbox_pci_blk[i].dma_addr);
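		/*
		 * If this or any later pool allocation fails, the error path
		 * jumps to fail_setup_dma_pool below;
		 * megaraid_mbox_teardown_dma_pools() then frees each pool's
		 * entries up to the first NULL vaddr and destroys whichever
		 * pool handles were created, so a partially built setup is
		 * unwound cleanly.
		 */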
		if (!mbox_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	/*
	 * Allocate memory for each embedded passthru structure pointer.
	 * Request a 128-byte aligned structure for each passthru command
	 * structure.
	 * Since passthru and extended passthru commands are exclusive, they
	 * share a common memory pool. Passthru structures piggyback on memory
	 * allocated to extended passthru, since passthru is the smaller of
	 * the two.
	 */
	raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
						adapter->pdev,
						sizeof(mraid_epassthru_t),
						128, 0);

	if (raid_dev->epthru_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	epthru_pci_blk = raid_dev->epthru_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		epthru_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->epthru_pool_handle,
						GFP_KERNEL,
						&epthru_pci_blk[i].dma_addr);
		if (!epthru_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	// Allocate memory for each scatter-gather list. Request for 512 bytes
	// alignment for each sg list
	raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
					adapter->pdev,
					sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
					512, 0);

	if (raid_dev->sg_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	sg_pci_blk = raid_dev->sg_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		sg_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->sg_pool_handle,
						GFP_KERNEL,
						&sg_pci_blk[i].dma_addr);
		if (!sg_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	return 0;

fail_setup_dma_pool:
	megaraid_mbox_teardown_dma_pools(adapter);
	return -1;
}


/**
 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
 * @param adapter	: HBA soft state
 *
 * teardown the dma pool for mailbox, passthru and extended passthru
 * structures, and scatter-gather lists
 */
static void
megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct mraid_pci_blk	*epthru_pci_blk;
	struct mraid_pci_blk	*sg_pci_blk;
	struct mraid_pci_blk	*mbox_pci_blk;
	int			i;

	sg_pci_blk = raid_dev->sg_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
			sg_pci_blk[i].dma_addr);
	}
	if (raid_dev->sg_pool_handle)
		pci_pool_destroy(raid_dev->sg_pool_handle);

	epthru_pci_blk = raid_dev->epthru_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->epthru_pool_handle,
			epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
	}
	if (raid_dev->epthru_pool_handle)
		pci_pool_destroy(raid_dev->epthru_pool_handle);

	mbox_pci_blk = raid_dev->mbox_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->mbox_pool_handle,
			mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
	}
	if (raid_dev->mbox_pool_handle)
		pci_pool_destroy(raid_dev->mbox_pool_handle);

	return;
}


/**
 * megaraid_alloc_scb - detach and return a scb from the free list
 * @adapter	: controller's soft state
 *
 * return the scb from the head of the free list.
 * NULL if there are none available.
 **/
static inline scb_t *
megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
{
	struct list_head	*head = &adapter->kscb_pool;
	scb_t			*scb = NULL;
	unsigned long		flags;

	// detach scb from free pool
	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);

	if (list_empty(head)) {
		spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
		return NULL;
	}

	scb = list_entry(head->next, scb_t, list);
	list_del_init(&scb->list);

	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);

	scb->state	= SCB_ACTIVE;
	scb->scp	= scp;
	scb->dma_type	= MRAID_DMA_NONE;

	return scb;
}


/**
 * megaraid_dealloc_scb - return the scb to the free pool
 * @adapter	: controller's soft state
 * @scb		: scb to be freed
 *
 * return the scb back to the free list of scbs. The caller must 'flush' the
 * SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc.
 * NOTE NOTE: Make sure the scb is not on any list before calling this
 * routine.
 **/
static inline void
megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
{
	unsigned long		flags;

	// put scb in the free pool
	scb->state	= SCB_FREE;
	scb->scp	= NULL;

	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);

	list_add(&scb->list, &adapter->kscb_pool);

	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);

	return;
}


/**
 * megaraid_mbox_mksgl - make the scatter-gather list
 * @adapter	- controller's soft state
 * @scb		- scsi control block
 *
 * prepare the scatter-gather list
 */
static inline int
megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
{
	struct scatterlist	*sgl;
	mbox_ccb_t		*ccb;
	struct page		*page;
	unsigned long		offset;
	struct scsi_cmnd	*scp;
	int			sgcnt;
	int			i;

	scp	= scb->scp;
	ccb	= (mbox_ccb_t *)scb->ccb;

	// no mapping required if no data to be transferred
	if (!scp->request_buffer || !scp->request_bufflen)
		return 0;

	if (!scp->use_sg) {	/* scatter-gather list not used */

		page = virt_to_page(scp->request_buffer);

		offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK);

		ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset,
						scp->request_bufflen,
						scb->dma_direction);

		scb->dma_type = MRAID_DMA_WBUF;

		/*
		 * We need to handle special 64-bit commands that need a
		 * minimum of 1 SG
		 */
		sgcnt = 1;
		ccb->sgl64[0].address	= ccb->buf_dma_h;
		ccb->sgl64[0].length	= scp->request_bufflen;

		return sgcnt;
	}

	sgl = (struct scatterlist *)scp->request_buffer;

	// The number of sg elements returned must not exceed our limit
	sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg,
			scb->dma_direction);

	if (sgcnt > adapter->sglen) {
		con_log(CL_ANN, (KERN_CRIT
			"megaraid critical: too many sg elements:%d\n",
			sgcnt));
		BUG();
	}

	scb->dma_type = MRAID_DMA_WSG;

	for (i = 0; i < sgcnt; i++, sgl++) {
		ccb->sgl64[i].address	= sg_dma_address(sgl);
		ccb->sgl64[i].length	= sg_dma_len(sgl);
	}

	// Return count of SG nodes
	return sgcnt;
}


/**
 * mbox_post_cmd - issue a mailbox command
 * @adapter	- controller's soft state
 * @scb		- command to be issued
 *
 * post the command to the controller if mailbox is available.
 */
static inline int
mbox_post_cmd(adapter_t *adapter, scb_t *scb)
{
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
	mbox64_t	*mbox64;
	mbox_t		*mbox;
	mbox_ccb_t	*ccb;
	unsigned long	flags;
	unsigned int	i = 0;

	ccb	= (mbox_ccb_t *)scb->ccb;
	mbox	= raid_dev->mbox;
	mbox64	= raid_dev->mbox64;

	/*
	 * Check for busy mailbox. If it is, return failure - the caller
	 * should retry later.
	 */
	spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
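As an aside on the mailbox setup above: the 16-byte alignment that megaraid_alloc_cmd_packets applies to the handshake mailbox is the usual (addr + 15) & ~0xF round-up. A minimal, self-contained sketch with made-up example values (illustration only, not part of the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical unaligned address, standing in for &una_mbox64->mbox32 */
	uintptr_t raw = 0x1009;

	/* round up to the next 16-byte boundary, as the driver does */
	uintptr_t aligned = (raw + 15) & ~(uintptr_t)0xF;

	/* prints: raw=0x1009 aligned=0x1010 pad=7 bytes */
	printf("raw=%#lx aligned=%#lx pad=%lu bytes\n",
		(unsigned long)raw, (unsigned long)aligned,
		(unsigned long)(aligned - raw));

	return 0;
}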