sata_nv.c
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would
	   abort existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			       "NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
		       (qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append - 1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
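The registration of these per-flavor handlers is not part of this excerpt; in this libata generation they would be handed to ata_host_activate() at probe time. A minimal sketch, assuming the probe-time `host`, `pdev`, `rc` and the driver's `nv_sht` host template (all defined outside this excerpt):

	/* Hypothetical probe-time registration of the NF2 flavor's handler;
	 * ata_host_activate() requests the IRQ and registers the host. */
	rc = ata_host_activate(host, pdev->irq, nv_nf2_interrupt,
			       IRQF_SHARED, &nv_sht);
	if (rc)
		return rc;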
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);

	ata_bmdma_freeze(ap);
}

static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);

	ata_bmdma_thaw(ap);
}

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(link, &dummy, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
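The freeze/thaw and error-handler callbacks above plug into each chip flavor's ata_port_operations table, which lives elsewhere in this file. A minimal sketch of how the NF2 flavor would reference them, assuming the 2.6.2x-era libata field names (the table name and the omitted entries are hypothetical here):

	/* Abbreviated, hypothetical excerpt of an NF2-flavor ops table;
	 * only the callbacks shown in this listing are filled in. */
	static const struct ata_port_operations nv_nf2_ops_sketch = {
		.freeze		= nv_nf2_freeze,
		.thaw		= nv_nf2_thaw,
		.error_handler	= nv_error_handler,
		.scr_read	= nv_scr_read,
		.scr_write	= nv_scr_write,
		/* ... remaining taskfile/BMDMA hooks omitted ... */
	};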
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) &&
				     i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags,
						cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}

static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}
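The two defer-queue helpers above depend on ATA_MAX_QUEUE being a power of two: head and tail are free-running counters that are only masked when indexing the array, so `tail - head` is always the exact fill level. A standalone sketch of the same scheme (user-space C, names hypothetical, purely for illustration):

	#include <stdio.h>

	#define QUEUE_SIZE 32			/* must be a power of two */

	struct ring {
		unsigned int head, tail;	/* free-running, never wrapped by hand */
		unsigned char slot[QUEUE_SIZE];
	};

	static void ring_push(struct ring *r, unsigned char v)
	{
		/* caller guarantees (tail - head) < QUEUE_SIZE,
		 * just as the WARN_ON above does */
		r->slot[r->tail++ & (QUEUE_SIZE - 1)] = v;
	}

	static int ring_pop(struct ring *r)
	{
		if (r->head == r->tail)		/* empty queue */
			return -1;
		return r->slot[r->head++ & (QUEUE_SIZE - 1)];
	}

	int main(void)
	{
		struct ring r = { 0, 0, { 0 } };
		int i;

		ring_push(&r, 7);
		ring_push(&r, 3);
		for (i = 0; i < 3; i++)
			printf("%d\n", ring_pop(&r));	/* prints 7, 3, then -1 */
		return 0;
	}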
static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}

static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}

static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}

static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}

static void nv_swncq_ncq_stop(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	unsigned int i;
	u32 sactive;
	u32 done_mask;

	ata_port_printk(ap, KERN_ERR,
			"EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
			ap->qc_active, ap->link.sactive);
	ata_port_printk(ap, KERN_ERR,
		"SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n"
		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
			ap->ops->check_status(ap),
			ioread8(ap->ioaddr.error_addr));

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		u8 err = 0;
		if (pp->qc_active & (1 << i))
			err = 0;	/* tag still tracked by the driver */
		else if (done_mask & (1 << i))
			err = 1;	/* hardware reports a tag we don't track */
		else
			continue;

		ata_port_printk(ap, KERN_ERR,
				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
				(pp->dhfis_bits >> i) & 0x1,
				(pp->dmafis_bits >> i) & 0x1,
				(pp->sdbfis_bits >> i) & 0x1,
				(sactive >> i) & 0x1,
				(err ? "error! tag doesn't exist" : " "));
	}

	nv_swncq_pp_reinit(ap);
	ap->ops->irq_clear(ap);
	__ata_bmdma_stop(ap);
	nv_swncq_irq_clear(ap, 0xffff);
}

static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}

static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
#endif

static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
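nv_swncq_host_init() only applies to MCP55-class parts with software NCQ enabled; the call site sits in the PCI probe path, which is not part of this excerpt. A minimal sketch of that gate, assuming the SWNCQ board type and the driver's swncq module-parameter flag (both names hypothetical as far as this excerpt shows):

	/* Hypothetical probe-path call site: only MCP55-class boards with
	 * the swncq module parameter set get SWNCQ host setup. */
	if (type == SWNCQ && swncq_enabled)
		nv_swncq_host_init(host);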