📄 sata_nv.c
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        struct ata_device *dev;
        int rc;
        u8 rev;
        u8 check_maxtor = 0;
        unsigned char model_num[ATA_ID_PROD_LEN + 1];

        rc = ata_scsi_slave_config(sdev);
        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        dev = &ap->link.device[sdev->id];
        if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
                return rc;

        /* if MCP51 and Maxtor, then disable ncq */
        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
            pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
                check_maxtor = 1;

        /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
            pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
                pci_read_config_byte(pdev, 0x8, &rev);  /* PCI revision ID */
                if (rev <= 0xa2)
                        check_maxtor = 1;
        }

        if (!check_maxtor)
                return rc;

        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

        if (strncmp(model_num, "Maxtor", 6) == 0) {
                ata_scsi_change_queue_depth(sdev, 1);
                ata_dev_printk(dev, KERN_NOTICE,
                               "Disabling SWNCQ mode (depth %x)\n",
                               sdev->queue_depth);
        }

        return rc;
}

static int nv_swncq_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct nv_swncq_port_priv *pp;
        int rc;

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
                                      &pp->prd_dma, GFP_KERNEL);
        if (!pp->prd)
                return -ENOMEM;
        memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

        ap->private_data = pp;
        pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
        pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
        pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

        return 0;
}

static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
        if (qc->tf.protocol != ATA_PROT_NCQ) {
                ata_qc_prep(qc);
                return;
        }

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;

        nv_swncq_fill_sg(qc);
}

static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg;
        unsigned int idx;
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_prd *prd;

        WARN_ON(qc->__sg == NULL);
        WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

        prd = pp->prd + ATA_MAX_PRD * qc->tag;

        idx = 0;
        ata_for_each_sg(sg, qc) {
                u32 addr, offset;
                u32 sg_len, len;

                addr = (u32)sg_dma_address(sg);
                sg_len = sg_dma_len(sg);

                /* split so that no PRD entry crosses a 64 KiB boundary */
                while (sg_len) {
                        offset = addr & 0xffff;
                        len = sg_len;
                        if ((offset + sg_len) > 0x10000)
                                len = 0x10000 - offset;

                        prd[idx].addr = cpu_to_le32(addr);
                        prd[idx].flags_len = cpu_to_le32(len & 0xffff);

                        idx++;
                        sg_len -= len;
                        addr += len;
                }
        }

        if (idx)
                prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
                                          struct ata_queued_cmd *qc)
{
        struct nv_swncq_port_priv *pp = ap->private_data;

        if (qc == NULL)
                return 0;

        DPRINTK("Enter\n");

        writel((1 << qc->tag), pp->sactive_block);
        pp->last_issue_tag = qc->tag;
        pp->dhfis_bits &= ~(1 << qc->tag);
        pp->dmafis_bits &= ~(1 << qc->tag);
        pp->qc_active |= (0x1 << qc->tag);

        ap->ops->tf_load(ap, &qc->tf);   /* load tf registers */
        ap->ops->exec_command(ap, &qc->tf);

        DPRINTK("Issued tag %u\n", qc->tag);

        return 0;
}

static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct nv_swncq_port_priv *pp = ap->private_data;

        if (qc->tf.protocol != ATA_PROT_NCQ)
                return ata_qc_issue_prot(qc);

        DPRINTK("Enter\n");

        if (!pp->qc_active)
                nv_swncq_issue_atacmd(ap, qc);
        else
                nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */

        return 0;
}

static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
        u32 serror;
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ata_ehi_clear_desc(ehi);

        /* AHCI needs SError cleared; otherwise, it might lock up */
        sata_scr_read(&ap->link, SCR_ERROR, &serror);
        sata_scr_write(&ap->link, SCR_ERROR, serror);

        /* analyze @irq_stat */
        if (fis & NV_SWNCQ_IRQ_ADDED)
                ata_ehi_push_desc(ehi, "hot plug");
        else if (fis & NV_SWNCQ_IRQ_REMOVED)
                ata_ehi_push_desc(ehi, "hot unplug");

        ata_ehi_hotplugged(ehi);

        /* okay, let's hand over to EH */
        ehi->serror |= serror;

        ata_port_freeze(ap);
}

static int nv_swncq_sdbfis(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_eh_info *ehi = &ap->link.eh_info;
        u32 sactive;
        int nr_done = 0;
        u32 done_mask;
        int i;
        u8 host_stat;
        u8 lack_dhfis = 0;

        host_stat = ap->ops->bmdma_status(ap);
        if (unlikely(host_stat & ATA_DMA_ERR)) {
                /* error when transferring data to/from memory */
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
                ehi->err_mask |= AC_ERR_HOST_BUS;
                ehi->action |= ATA_EH_SOFTRESET;
                return -EINVAL;
        }

        ap->ops->irq_clear(ap);
        __ata_bmdma_stop(ap);

        sactive = readl(pp->sactive_block);
        done_mask = pp->qc_active ^ sactive;

        if (unlikely(done_mask & sactive)) {
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
                                  "(%08x->%08x)", pp->qc_active, sactive);
                ehi->err_mask |= AC_ERR_HSM;
                ehi->action |= ATA_EH_HARDRESET;
                return -EINVAL;
        }

        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                if (!(done_mask & (1 << i)))
                        continue;

                qc = ata_qc_from_tag(ap, i);
                if (qc) {
                        ata_qc_complete(qc);
                        pp->qc_active &= ~(1 << i);
                        pp->dhfis_bits &= ~(1 << i);
                        pp->dmafis_bits &= ~(1 << i);
                        pp->sdbfis_bits |= (1 << i);
                        nr_done++;
                }
        }

        if (!ap->qc_active) {
                DPRINTK("over\n");
                nv_swncq_pp_reinit(ap);
                return nr_done;
        }

        if (pp->qc_active & pp->dhfis_bits)
                return nr_done;

        if ((pp->ncq_flags & ncq_saw_backout) ||
            (pp->qc_active ^ pp->dhfis_bits))
                /* if the controller cannot get a device-to-host register FIS,
                 * the driver needs to reissue the new command.
                 */
                lack_dhfis = 1;

        DPRINTK("id 0x%x QC: qc_active 0x%x,"
                "SWNCQ:qc_active 0x%X defer_bits %X "
                "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
                ap->print_id, ap->qc_active, pp->qc_active,
                pp->defer_queue.defer_bits, pp->dhfis_bits,
                pp->dmafis_bits, pp->last_issue_tag);

        nv_swncq_fis_reinit(ap);

        if (lack_dhfis) {
                qc = ata_qc_from_tag(ap, pp->last_issue_tag);
                nv_swncq_issue_atacmd(ap, qc);
                return nr_done;
        }

        if (pp->defer_queue.defer_bits) {
                /* send deferral queue command */
                qc = nv_swncq_qc_from_dq(ap);
                WARN_ON(qc == NULL);
                nv_swncq_issue_atacmd(ap, qc);
        }

        return nr_done;
}

static inline u32 nv_swncq_tag(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        u32 tag;

        tag = readb(pp->tag_block) >> 2;
        return (tag & 0x1f);
}

static int nv_swncq_dmafis(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        unsigned int rw;
        u8 dmactl;
        u32 tag;
        struct nv_swncq_port_priv *pp = ap->private_data;

        __ata_bmdma_stop(ap);
        tag = nv_swncq_tag(ap);

        DPRINTK("dma setup tag 0x%x\n", tag);
        qc = ata_qc_from_tag(ap, tag);

        if (unlikely(!qc))
                return 0;

        rw = qc->tf.flags & ATA_TFLAG_WRITE;

        /* load PRD table addr. */
        iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
                  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

        /* specify data direction, triple-check start bit is clear */
        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        dmactl &= ~ATA_DMA_WR;
        if (!rw)
                dmactl |= ATA_DMA_WR;

        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

        return 1;
}

static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_queued_cmd *qc;
        struct ata_eh_info *ehi = &ap->link.eh_info;
        u32 serror;
        u8 ata_stat;
        int rc = 0;

        ata_stat = ap->ops->check_status(ap);
        nv_swncq_irq_clear(ap, fis);
        if (!fis)
                return;

        if (ap->pflags & ATA_PFLAG_FROZEN)
                return;

        if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
                nv_swncq_hotplug(ap, fis);
                return;
        }

        if (!pp->qc_active)
                return;

        if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
                return;
        ap->ops->scr_write(ap, SCR_ERROR, serror);

        if (ata_stat & ATA_ERR) {
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
                ehi->err_mask |= AC_ERR_DEV;
                ehi->serror |= serror;
                ehi->action |= ATA_EH_SOFTRESET;
                ata_port_freeze(ap);
                return;
        }

        if (fis & NV_SWNCQ_IRQ_BACKOUT) {
                /* If the IRQ is backout, the driver must issue
                 * the new command again some time later.
                 */
                pp->ncq_flags |= ncq_saw_backout;
        }

        if (fis & NV_SWNCQ_IRQ_SDBFIS) {
                pp->ncq_flags |= ncq_saw_sdb;
                DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
                        "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
                        ap->print_id, pp->qc_active, pp->dhfis_bits,
                        pp->dmafis_bits, readl(pp->sactive_block));
                rc = nv_swncq_sdbfis(ap);
                if (rc < 0)
                        goto irq_error;
        }

        if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
                /* The interrupt indicates the new command
                 * was transmitted correctly to the drive.
                 */
                pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
                pp->ncq_flags |= ncq_saw_d2h;
                if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
                        ata_ehi_push_desc(ehi, "illegal fis transaction");
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_HARDRESET;
                        goto irq_error;
                }

                if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
                    !(pp->ncq_flags & ncq_saw_dmas)) {
                        ata_stat = ap->ops->check_status(ap);
                        if (ata_stat & ATA_BUSY)
                                goto irq_exit;

                        if (pp->defer_queue.defer_bits) {
                                DPRINTK("send next command\n");
                                qc = nv_swncq_qc_from_dq(ap);
                                nv_swncq_issue_atacmd(ap, qc);
                        }
                }
        }

        if (fis & NV_SWNCQ_IRQ_DMASETUP) {
                /* program the dma controller with appropriate PRD buffers
                 * and start the DMA transfer for the requested command.
                 */
                pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
                pp->ncq_flags |= ncq_saw_dmas;
                rc = nv_swncq_dmafis(ap);
        }

irq_exit:
        return;
irq_error:
        ata_ehi_push_desc(ehi, "fis:0x%x", fis);
        ata_port_freeze(ap);
        return;
}

static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i;
        unsigned int handled = 0;
        unsigned long flags;
        u32 irq_stat;

        spin_lock_irqsave(&host->lock, flags);

        irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        if (ap->link.sactive) {
                                nv_swncq_host_interrupt(ap, (u16)irq_stat);
                                handled = 1;
                        } else {
                                if (irq_stat)   /* preserve hotplug bits */
                                        nv_swncq_irq_clear(ap, 0xfff0);

                                handled += nv_host_intr(ap, (u8)irq_stat);
                        }
                }
                irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
        }

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_RETVAL(handled);
}

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        const struct ata_port_info *ppi[] = { NULL, NULL };
        struct ata_host *host;
        struct nv_hos
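Below is a minimal, stand-alone sketch (plain user-space C, not kernel code) of the PRD splitting arithmetic used by nv_swncq_fill_sg() above: each DMA segment is cut so that no PRD entry crosses a 64 KiB boundary, and a full 0x10000-byte entry is encoded as 0 in the low 16 bits of flags_len. The real driver additionally marks the last entry with ATA_PRD_EOT; the segment address and length used here are made-up values for illustration.

#include <stdint.h>
#include <stdio.h>

struct prd_entry {
        uint32_t addr;
        uint32_t flags_len;     /* low 16 bits: byte count, 0 means 64 KiB */
};

/* Split one DMA segment into PRD entries; returns the new entry count. */
static unsigned int fill_prd(struct prd_entry *prd, unsigned int idx,
                             uint32_t addr, uint32_t sg_len)
{
        while (sg_len) {
                uint32_t offset = addr & 0xffff;
                uint32_t len = sg_len;

                /* clamp so the entry ends at the next 64 KiB boundary */
                if (offset + sg_len > 0x10000)
                        len = 0x10000 - offset;

                prd[idx].addr = addr;
                prd[idx].flags_len = len & 0xffff;
                idx++;

                sg_len -= len;
                addr += len;
        }
        return idx;
}

int main(void)
{
        struct prd_entry prd[8];
        /* hypothetical segment: starts 0x100 bytes before a 64 KiB boundary */
        unsigned int n = fill_prd(prd, 0, 0x1000ff00u, 0x20100u);
        unsigned int i;

        for (i = 0; i < n; i++)
                printf("entry %u: addr=0x%08x len=0x%x\n", i,
                       (unsigned int)prd[i].addr,
                       prd[i].flags_len ? (unsigned int)prd[i].flags_len
                                        : 0x10000);
        return 0;
}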
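A second stand-alone sketch, this time of the completion bookkeeping in nv_swncq_sdbfis(): tags the driver still tracks in qc_active but that the drive has cleared from SActive are complete (done_mask = qc_active ^ sactive), while a tag that shows up in SActive without ever being issued takes the "illegal transition" error path. The mask values are made-up examples.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t qc_active = 0x0000000f;  /* driver issued tags 0..3 */
        uint32_t sactive   = 0x0000000a;  /* drive still busy with tags 1, 3 */
        uint32_t done_mask = qc_active ^ sactive;
        int i;

        if (done_mask & sactive) {
                /* a tag appeared in SActive that was never issued:
                 * the driver treats this as an illegal transition */
                printf("illegal SWNCQ:qc_active transition (%08x->%08x)\n",
                       (unsigned int)qc_active, (unsigned int)sactive);
                return 1;
        }

        for (i = 0; i < 32; i++)
                if (done_mask & (1u << i))
                        printf("tag %d completed\n", i);  /* tags 0 and 2 */

        return 0;
}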