📄 sata_mv.c
字号:
/*
 * NOTE(review): chunk is truncated here — we are inside the error-handling
 * path of mv_err_intr(); the function header and earlier branches are not
 * visible in this view.
 */
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* if no specific cause was recognized, report a generic error */
	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	/* attach the error to the active command if there is one,
	 * otherwise to the port's EH info
	 */
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

/*
 * mv_intr_pio - handle a completion interrupt for a PIO (non-EDMA) command.
 *
 * Reads the ATA status register and completes the port's active qc with
 * the error mask derived from that status.  Called from interrupt context.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

/*
 * mv_intr_edma - drain the EDMA (DMA engine) response queue for one port.
 *
 * Walks the hardware CRPB response queue from the software read pointer up
 * to the hardware write pointer, completing one qc per consumed entry.
 * Any response with error bits set diverts into mv_err_intr() and stops
 * the walk.  The hardware out-pointer is only written back once, after the
 * loop, if at least one entry was consumed.  Called from interrupt context.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	/* HC 0 serves ports 0..MV_PORTS_PER_HC-1, HC 1 the next group */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* write-clear the bits we just read */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		/* NOTE(review): ap->private_data is read before the !ap
		 * check below — if ap can ever be NULL here this is a NULL
		 * dereference; verify against how host->ports[] is filled.
		 */
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			/* polled commands are handled elsewhere; skip */
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		/* dispatch on whether the port is running in EDMA mode */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

/*
 * mv_pci_error - handle a PCI-level error interrupt.
 *
 * Reads and clears the PCI IRQ cause register, dumps all registers for
 * debugging, then marks every online port with AC_ERR_HOST_BUS, requests
 * a hard reset, and freezes the port for error handling.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* write-clear the PCI error cause */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the cause once, on the first affected port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller that has pending bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

/*
 * mv5_phy_base - compute the PHY register base for a port on 50xx parts.
 *
 * Each hardport's PHY block sits at a 0x100-byte offset (hardport+1)
 * inside its host controller's register window.
 */
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

/*
 * mv5_scr_offset - map an SCR register index to its MMIO byte offset.
 *
 * Returns 0xffffffffU for any register other than STATUS/ERROR/CONTROL,
 * which the callers treat as "invalid".
 */
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

/*
 * mv5_scr_read - read an SCR register on 50xx parts.
 *
 * Returns 0 and stores the value in *val on success, -EINVAL for an
 * unsupported register index.
 */
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

/*
 * mv5_scr_write - write an SCR register on 50xx parts.
 *
 * Uses writelfl (write with read-back flush).  Returns 0 on success,
 * -EINVAL for an unsupported register index.
 */
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

/*
 * mv5_reset_bus - PCI bus reset for 50xx parts.
 *
 * On all but the earliest 5080 revision, set bit 0 of the expansion ROM
 * BAR control register before performing the common PCI bus reset.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

/* mv5_reset_flash - program the flash control register on 50xx parts. */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

/*
 * mv5_read_preamp - capture a port's PHY pre-emphasis/amplitude settings.
 *
 * Saves the relevant MV5_PHY_MODE bit fields into hpriv->signal[idx] so
 * they can be restored later by mv5_phy_errata().
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

/*
 * mv5_enable_leds - LED setup for 50xx parts.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0, which
	 * looks suspicious next to mv5_reset_bus()'s "|= (1 << 0)".  It may
	 * be intentional, but verify against the 50xx datasheet before
	 * touching it.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

/*
 * mv5_phy_errata - apply PHY workarounds and restore signal settings.
 *
 * For parts with the 50XXB0 erratum, tweak LT mode and PHY control first;
 * then rewrite the saved pre-emphasis/amplitude fields (captured earlier
 * by mv5_read_preamp) into MV5_PHY_MODE.
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* restore saved pre-emphasis / amplitude bits */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

/* ZERO() is redefined before each reset helper to target that helper's
 * register base (port_mmio / hc_mmio / mmio).
 */
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))

/*
 * mv5_reset_hc_port - reset one port's EDMA engine and registers (50xx).
 *
 * Disables EDMA, resets the channel, then zeroes the per-port EDMA
 * registers and programs default config/timeout values.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))

/*
 * mv5_reset_one_hc - reset one host controller's shared registers (50xx).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

/*
 * mv5_reset_hc - reset every port of every host controller (50xx).
 *
 * Always returns 0.
 */
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))

/*
 * mv_reset_pci_bus - common PCI bus reset.
 *
 * NOTE(review): chunk is truncated here — the remainder of this function
 * is not visible in this view.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -