/*
 * sata_nv.c - NVIDIA nForce SATA driver (excerpt).
 *
 * NOTE(review): this file is a fragment extracted from a code-sharing
 * website; the top of the file (license header, #includes, register
 * definitions, and the head of the ADMA interrupt handler) is missing.
 */
/* NOTE(review): tail of the ADMA interrupt handler; its head (the loop
 * over ports that fills `handled` and `notifier_clears[]`) lies before
 * this excerpt. */
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to
		   NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

/* Acknowledge all pending interrupt sources on one port: the ADMA
 * status/notifier registers and the legacy BMDMA status byte. */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status (write-1-to-clear: write back what we read) */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/* clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}

/* Post-internal-command hook: only run the BMDMA cleanup when the port
 * is in register (non-ADMA) mode, where the legacy engine was used. */
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

/*
 * Per-port init: allocate the private data and the coherent DMA block
 * holding the CPB array plus the s/g (APRD) tables, program the CPB
 * base into the controller, then reset the ADMA channel into register
 * mode with interrupts enabled.  Returns 0 or -ENOMEM / ata_port_start()
 * error.  Allocations are devm-/dmam-managed, so there is no explicit
 * free path here.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* this port's slice of the ADMA register window */
	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	/* one 32-bit notifier-clear register per port */
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	/* split the 64-bit DMA address across the low/high registers;
	   the double 16-bit shift avoids UB when dma_addr_t is 32-bit */
	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse the channel-reset bit; reads flush the posted writes so
	   the 1us hold time is real */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

/* Per-port teardown: zero the control register, which disables the
 * channel (memory is devm-managed and freed by the core). */
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
/* Suspend: drop back to register mode, then quiesce the ADMA channel. */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

/* Resume: reprogram the CPB base and replay the same channel-reset /
 * register-mode bring-up sequence used in nv_adma_port_start(). */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

/* Point the taskfile register addresses of one port at its ADMA
 * register window (registers are spaced 4 bytes apart in ADMA mode). */
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

/* Host-level init: enable ADMA for both ports in PCI config space and
 * set up each port's register addresses.  Always returns 0. */
static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}

/* Fill one ADMA PRD (s/g descriptor) from scatterlist entry `sg` at
 * position `idx`.  idx 4 is the last inline APRD slot in the CPB; it
 * gets neither END nor CONT because the chain continues via next_aprd
 * in the external table. */
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

/* Build the s/g list for a command: the first 5 entries live inline in
 * the CPB, the rest spill into this tag's slice of the external APRD
 * table, linked via cpb->next_aprd. */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx - 5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma +
					NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

/* Decide whether a command must be issued through legacy register mode
 * (returns 1) rather than the ADMA engine (returns 0). */
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands, where a result taskfile
	   is not required. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING) ||
	    (qc->flags & ATA_QCFLAG_RESULT_TF))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

/* Prepare the CPB for a command, or fall back to the generic prep when
 * register mode is required.  The wmb()s order the DMA-visible CPB
 * writes so the controller never sees CPB_VALID on a half-built CPB. */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	/* invalidate the CPB before rewriting it */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

/* Issue a prepared command: register mode goes through the generic
 * issue path; ADMA mode appends the tag to the controller's queue. */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

/* Shared interrupt handler for the "generic" flavor: poll every enabled
 * port for a completed non-polling command; clear stale status if none
 * is pending. */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/* Dispatch a per-port slice of the controller irq status byte to each
 * port's handler; caller holds host->lock. */
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		/* expose the next port's bits */
		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

/* nForce2/3 interrupt: status byte lives in the SCR window of port 0. */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

/* CK804 interrupt: status byte lives in the MMIO BAR. */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

/* SATA control-register read; out-of-range registers read as all-ones. */
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

/* SATA control-register write; out-of-range registers are ignored. */
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

/* Freeze (mask) this port's interrupts in the nf2 enable register. */
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

/* Thaw: clear any latched status for this port, then unmask it. */
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

/* CK804 variant of freeze: same masking, different register location. */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

/* CK804 variant of thaw: clear latched status, then unmask. */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

/* Hardreset that deliberately discards the device class. */
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

/* Standard BMDMA error handling with the non-classifying hardreset. */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

/* ADMA-aware error handler.
 * NOTE(review): this function is truncated by the excerpt — the body
 * continues past the last visible line. */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->active_tag) || ap->sactive) {
			/* dump the engine state for diagnosis */
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
/*
 * NOTE(review): the excerpt ends here, mid-way through
 * nv_adma_error_handler(); the remainder of the file (rest of the error
 * handler, probe/remove, module boilerplate) is not included.  The text
 * that followed in the scrape was website UI chrome (keyboard-shortcut
 * help), not source code, and has been replaced by this marker.
 */