📄 aic7xxx_linux.c
字号:
/* Forward declarations for the inline helpers defined below. */
static __inline void ahc_linux_run_complete_queue(struct ahc_softc *ahc,
						  struct ahc_cmd *acmd);
static __inline void ahc_linux_check_device_queue(struct ahc_softc *ahc,
						  struct ahc_linux_device *dev);
static __inline void ahc_linux_sniff_command(struct ahc_softc*,
					     Scsi_Cmnd*, struct scb*);
static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
				      struct ahc_dma_seg *sg,
				      bus_addr_t addr, bus_size_t len);

/*
 * Look up (and optionally allocate) the per-device state for the given
 * channel/target/lun triple.  Returns NULL when the device does not
 * exist and alloc is zero, or when allocation fails.
 */
static __inline struct ahc_linux_device*
ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target,
		     u_int lun, int alloc)
{
	struct ahc_linux_target *targ;
	struct ahc_linux_device *dev;
	u_int target_offset;

	/* Channel B targets occupy the second half of the targets array. */
	target_offset = target;
	if (channel != 0)
		target_offset += 8;
	targ = ahc->platform_data->targets[target_offset];
	if (targ == NULL) {
		if (alloc != 0) {
			targ = ahc_linux_alloc_target(ahc, channel, target);
			if (targ == NULL)
				return (NULL);
		} else
			return (NULL);
	}
	dev = targ->devices[lun];
	if (dev == NULL && alloc != 0)
		dev = ahc_linux_alloc_device(ahc, targ, lun);
	return (dev);
}

/*
 * Insert a completed command on the controller's completion queue,
 * keeping the queue sorted by command serial number.
 */
static __inline void
ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
{
	/*
	 * Typically, the complete queue has very few entries
	 * queued to it before the queue is emptied by
	 * ahc_linux_run_complete_queue, so sorting the entries
	 * by generation number should be inexpensive.
	 * We perform the sort so that commands that complete
	 * with an error are returned in the order originally
	 * queued to the controller so that any subsequent retries
	 * are performed in order.  The underlying ahc routines do
	 * not guarantee the order that aborted commands will be
	 * returned to us.
	 */
	struct ahc_completeq *completeq;
	struct ahc_cmd *list_cmd;
	struct ahc_cmd *acmd;

	/*
	 * If we want the request requeued, make sure there
	 * are sufficient retries.  In the old scsi error code,
	 * we used to be able to specify a result code that
	 * bypassed the retry count.  Now we must use this
	 * hack.
	 */
	if (cmd->result == (CAM_REQUEUE_REQ << 16))
		cmd->retries--;

	completeq = &ahc->platform_data->completeq;
	list_cmd = TAILQ_FIRST(completeq);
	acmd = (struct ahc_cmd *)cmd;
	/* Walk forward to the first entry with a larger serial number. */
	while (list_cmd != NULL
	    && acmd_scsi_cmd(list_cmd).serial_number
	     < acmd_scsi_cmd(acmd).serial_number)
		list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
	if (list_cmd != NULL)
		TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
	else
		TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
}

/*
 * Drain a chain of completed commands, invoking each command's
 * scsi_done callback while holding the done lock.
 */
static __inline void
ahc_linux_run_complete_queue(struct ahc_softc *ahc, struct ahc_cmd *acmd)
{
	u_long done_flags;

	ahc_done_lock(ahc, &done_flags);
	while (acmd != NULL) {
		Scsi_Cmnd *cmd;

		cmd = &acmd_scsi_cmd(acmd);
		/* Grab the next link before handing cmd back to the midlayer. */
		acmd = TAILQ_NEXT(acmd, acmd_links.tqe);
		cmd->host_scribble = NULL;
		cmd->scsi_done(cmd);
	}
	ahc_done_unlock(ahc, &done_flags);
}

/*
 * Thaw a device frozen until its queue emptied, then kick its queue
 * if there is work pending and the device can accept it.
 */
static __inline void
ahc_linux_check_device_queue(struct ahc_softc *ahc,
			     struct ahc_linux_device *dev)
{
	if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) != 0
	 && dev->active == 0) {
		dev->flags &= ~AHC_DEV_FREEZE_TIL_EMPTY;
		dev->qfrozen--;
	}

	/* Nothing queued, no openings, or still frozen: nothing to run. */
	if (TAILQ_FIRST(&dev->busyq) == NULL
	 || dev->openings == 0
	 || dev->qfrozen != 0)
		return;

	ahc_linux_run_device_queue(ahc, dev);
}

/*
 * Service every device on the controller's run queue until we run
 * out of devices, resources, or the controller is frozen.
 */
static __inline void
ahc_linux_run_device_queues(struct ahc_softc *ahc)
{
	struct ahc_linux_device *dev;

	while ((ahc->flags & AHC_RESOURCE_SHORTAGE) == 0
	    && ahc->platform_data->qfrozen == 0
	    && (dev = TAILQ_FIRST(&ahc->platform_data->device_runq)) != NULL) {
		TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
		dev->flags &= ~AHC_DEV_ON_RUN_LIST;
		ahc_linux_check_device_queue(ahc, dev);
	}
}

static __inline void
ahc_linux_sniff_command(struct ahc_softc *ahc, Scsi_Cmnd *cmd, struct scb *scb)
{
	/*
	 * Determine whether we care to filter
	 * information out of this command.  If so,
	 * pass it on to ahc_linux_filter_command() for more
	 * heavy weight processing.
	 */
	if (cmd->cmnd[0] == INQUIRY)
		ahc_linux_filter_command(ahc, cmd, scb);
}

/*
 * Tear down the DMA mappings established for an scb's data transfer.
 */
static __inline void
ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
{
	Scsi_Cmnd *cmd;

	cmd = scb->io_ctx;
	ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
	if (cmd->use_sg != 0) {
		struct scatterlist *sg;

		sg = (struct scatterlist *)cmd->request_buffer;
		pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
			     scsi_to_pci_dma_dir(cmd->sc_data_direction));
	} else if (cmd->request_bufflen != 0) {
		u_int32_t high_addr;

		/*
		 * The high byte(s) of the bus address were packed into
		 * the top bits of the segment length; reassemble the
		 * full dma address before unmapping.
		 */
		high_addr = ahc_le32toh(scb->sg_list[0].len)
			  & AHC_SG_HIGH_ADDR_MASK;
		pci_unmap_single(ahc->dev_softc,
				 ahc_le32toh(scb->sg_list[0].addr)
			       | (((dma_addr_t)high_addr) << 8),
				 cmd->request_bufflen,
				 scsi_to_pci_dma_dir(cmd->sc_data_direction));
	}
}

/*
 * Fill in one (or, when a 4GB boundary is crossed in 39-bit mode, two)
 * hardware S/G segments for the given address/length pair.  Returns the
 * number of segments consumed.
 */
static __inline int
ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
		  struct ahc_dma_seg *sg, bus_addr_t addr, bus_size_t len)
{
	int consumed;

	if ((scb->sg_count + 1) > AHC_NSEG)
		panic("Too few segs for dma mapping. "
		      "Increase AHC_NSEG\n");

	consumed = 1;
	sg->addr = ahc_htole32(addr & 0xFFFFFFFF);
	scb->platform_data->xfer_len += len;
	if (sizeof(bus_addr_t) > 4
	 && (ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
		/*
		 * Due to DAC restrictions, we can't
		 * cross a 4GB boundary.
		 */
		if ((addr ^ (addr + len - 1)) & ~0xFFFFFFFF) {
			struct ahc_dma_seg *next_sg;
			uint32_t next_len;

			printf("Crossed Seg\n");
			if ((scb->sg_count + 2) > AHC_NSEG)
				panic("Too few segs for dma mapping. "
				      "Increase AHC_NSEG\n");
			consumed++;
			next_sg = sg + 1;
			next_sg->addr = 0;
			/* First segment ends at the 4GB boundary... */
			next_len = 0x100000000 - (addr & 0xFFFFFFFF);
			len -= next_len;
			/* ...second carries the incremented high byte
			 * packed into the top bits of its length field. */
			next_len |= ((addr >> 8) + 0x1000000)
				  & 0x7F000000;
			next_sg->len = ahc_htole32(next_len);
		}
		/* Pack bits 32-38 of the address into the length field. */
		len |= (addr >> 8) & 0x7F000000;
	}
	sg->len = ahc_htole32(len);
	return (consumed);
}

/************************ Shutdown/halt/reboot hook ***************************/
#include <linux/notifier.h>
#include <linux/reboot.h>

/*
 * NOTE(review): ahc_linux_halt is referenced here before its definition
 * below; presumably a prototype appears earlier in the file -- confirm.
 */
static struct notifier_block ahc_linux_notifier = {
	ahc_linux_halt, NULL, 0
};

/*
 * Reboot notifier callback: quiesce every registered controller on
 * system shutdown or halt.
 */
static int
ahc_linux_halt(struct notifier_block *nb, u_long event, void *buf)
{
	struct ahc_softc *ahc;

	if (event == SYS_DOWN || event == SYS_HALT) {
		TAILQ_FOREACH(ahc, &ahc_tailq, links) {
			ahc_shutdown(ahc);
		}
	}
	return (NOTIFY_OK);
}

/******************************** Macros **************************************/
/* Compose the SCSIID register value (target id, our id, channel) for cmd. */
#define BUILD_SCSIID(ahc, cmd)						\
	((((cmd)->target << TID_SHIFT) & TID)				\
	| (((cmd)->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b)	\
	| (((cmd)->channel == 0) ? 0 : TWIN_CHNLB))

/******************************** Bus DMA *************************************/
/*
 * Create a minimal dma tag recording only the attributes the Linux
 * backend actually uses (alignment, boundary, maxsize).
 */
int
ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
		   bus_size_t alignment, bus_size_t boundary,
		   bus_addr_t lowaddr, bus_addr_t highaddr,
		   bus_dma_filter_t *filter, void *filterarg,
		   bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
{
	bus_dma_tag_t dmat;

	dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
	if (dmat == NULL)
		return (ENOMEM);

	/*
	 * Linux is very simplistic about DMA memory.  For now don't
	 * maintain all specification information.  Once Linux supplies
	 * better facilities for doing these operations, or the
	 * needs of this particular driver change, we might need to do
	 * more here.
	 */
	dmat->alignment = alignment;
	dmat->boundary = boundary;
	dmat->maxsize = maxsize;
	*ret_tag = dmat;
	return (0);
}

void
ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat)
{
	free(dmat, M_DEVBUF);
}

/*
 * Allocate "consistent" DMA memory for the controller.  On 2.3+
 * kernels this uses the PCI consistent allocator; on older kernels
 * plain malloc is assumed to be DMA-able.
 */
int
ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
		 int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t map;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
	map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
	if (map == NULL)
		return (ENOMEM);
	/*
	 * Although we can dma data above 4GB, our
	 * "consistent" memory is below 4GB for
	 * space efficiency reasons (only need a 4byte
	 * address).  For this reason, we have to reset
	 * our dma mask when doing allocations.
	 */
	if (ahc->dev_softc)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,3)
		pci_set_dma_mask(ahc->dev_softc, 0xFFFFFFFF);
#else
		ahc->dev_softc->dma_mask = 0xFFFFFFFF;
#endif
	*vaddr = pci_alloc_consistent(ahc->dev_softc,
				      dmat->maxsize, &map->bus_addr);
	/* Restore the full hardware dma mask after the allocation. */
	if (ahc->dev_softc)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,3)
		pci_set_dma_mask(ahc->dev_softc,
				 ahc->platform_data->hw_dma_mask);
#else
		ahc->dev_softc->dma_mask = ahc->platform_data->hw_dma_mask;
#endif
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) */
	/*
	 * At least in 2.2.14, malloc is a slab allocator so all
	 * allocations are aligned.  We assume for these kernel versions
	 * that all allocations will be below 4Gig, physically contiguous,
	 * and accessible via DMA by the controller.
	 */
	map = NULL; /* No additional information to store */
	*vaddr = malloc(dmat->maxsize, M_DEVBUF, M_NOWAIT);
#endif
	if (*vaddr == NULL)
		return (ENOMEM);
	*mapp = map;
	return(0);
}

void
ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
		void* vaddr, bus_dmamap_t map)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
	pci_free_consistent(ahc->dev_softc, dmat->maxsize,
			    vaddr, map->bus_addr);
#else
	free(vaddr, M_DEVBUF);
#endif
}

/*
 * "Load" a buffer previously allocated through this tag and report the
 * resulting single segment through the callback.
 */
int
ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
		void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
		void *cb_arg, int flags)
{
	/*
	 * Assume for now that this will only be used during
	 * initialization and not for per-transaction buffer mapping.
	 */
	bus_dma_segment_t stack_sg;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
	stack_sg.ds_addr = map->bus_addr;
#else
	stack_sg.ds_addr = VIRT_TO_BUS(buf);
#endif
	stack_sg.ds_len = dmat->maxsize;
	cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
	return (0);
}

void
ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/*
	 * The map may be NULL in our < 2.3.X implementation.
	 */
	if (map != NULL)
		free(map, M_DEVBUF);
}

int
ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Nothing to do */
	return (0);
}

/********************* Platform Dependent Functions ***************************/
/*
 * Comparison routine used to sort controllers into their probe order.
 * Returns <0, 0, or >0 in qsort-style fashion.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	int value;
	int rvalue;
	int lvalue;

	/*
	 * Under Linux, cards are ordered as follows:
	 *	1) VLB/EISA BIOS enabled devices sorted by BIOS address.
	 *	2) PCI devices with BIOS enabled sorted by bus/slot/func.
	 *	3) All remaining VLB/EISA devices sorted by ioport.
	 *	4) All remaining PCI devices sorted by bus/slot/func.
	 */
	value = (lahc->flags & AHC_BIOS_ENABLED)
	      - (rahc->flags & AHC_BIOS_ENABLED);
	if (value != 0)
		/* Controllers with BIOS enabled have a *higher* priority */
		return (-value);

	/*
	 * Same BIOS setting, now sort based on bus type.
	 * EISA and VL controllers sort together.  EISA/VL
	 * have higher priority than PCI.
	 */
	rvalue = (rahc->chip & AHC_BUS_MASK);
	if (rvalue == AHC_VL)
		rvalue = AHC_EISA;
	lvalue = (lahc->chip & AHC_BUS_MASK);
	if (lvalue == AHC_VL)
		lvalue = AHC_EISA;
	value = lvalue - rvalue;
	if (value != 0)
		return (value);

	/* Still equal.  Sort by BIOS address, ioport, or bus/slot/func. */
	switch (rvalue) {
	case AHC_PCI:
	{
		char primary_channel;

		if (aic7xxx_reverse_scan != 0)
			value = ahc_get_pci_bus(rahc->dev_softc)
			      - ahc_get_pci_bus(lahc->dev_softc);
		else
			value = ahc_get_pci_bus(lahc->dev_softc)
			      - ahc_get_pci_bus(rahc->dev_softc);
		if (value != 0)
			break;
		if (aic7xxx_reverse_scan != 0)
			value = ahc_get_pci_slot(rahc->dev_softc)
			      - ahc_get_pci_slot(lahc->dev_softc);
		else
			value = ahc_get_pci_slot(lahc->dev_softc)
			      - ahc_get_pci_slot(rahc->dev_softc);
		if (value != 0)
			break;
		/*
		 * On multi-function devices, the user can choose
		 * to have function 1 probed before function 0.
		 * Give whichever channel is the primary channel
		 * the lowest priority.
		 */
		primary_channel = (lahc->flags & AHC_PRIMARY_CHANNEL) + 'A';
		value = 1;
		if (lahc->channel == primary_channel)
			value = -1;
		break;
	}
	case AHC_EISA:
		if ((rahc->flags & AHC_BIOS_ENABLED) != 0) {
			value = lahc->platform_data->bios_address
			      - rahc->platform_data->bios_address;
		} else {
			value = lahc->bsh.ioport - rahc->bsh.ioport;
		}
		break;
	default:
		panic("ahc_softc_sort: invalid bus type");
	}
	return (value);
}

static void
ahc_linux_setup_tag_info(char *p, char *end)
{
	char *base;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -