📄 sata_nv.c
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.dump_sanity_check	= ata_scsi_dump_sanity_check,
	.dump_quiesce		= ata_scsi_dump_quiesce,
	.dump_poll		= ata_scsi_dump_poll,
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= nv_qc_issue,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_port_start,
	.port_stop		= nv_port_stop,
	.host_stop		= nv_host_stop,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= nv_qc_issue,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_port_start,
	.port_stop		= nv_port_stop,
	.host_stop		= nv_host_stop,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= nv_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_port_start,
	.port_stop		= nv_port_stop,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_mmio_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
						unsigned int port_no)
{
	mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
	return mmio;
}

static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
{
	return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
}

static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
{
	return (ap->host->mmio_base + NV_ADMA_GEN);
}

static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
{
	return (nv_adma_gen_block(ap) +
		NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
}

static void nv_adma_register_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding,
		   see libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	} else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
	}
	cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	return idx;
}

static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	int complete = 0, have_err = 0;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (flags & NV_CPB_RESP_DONE) {
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		complete = 1;
	}
	if (flags & NV_CPB_RESP_ATA_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CMD_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CPB_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (complete || force_err) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		if (likely(qc)) {
			u8 ata_status = 0;
			/* Only use the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ)
				ata_status = readb(nv_adma_ctl_block(ap) +
						   (ATA_REG_STATUS * 4));

			if (have_err || force_err)
				ata_status |= ATA_ERR;

			qc->err_mask |= ac_err_mask(ata_status);

			DPRINTK("Completing qc from tag %d with err_mask %u\n",
				cpb_num, qc->err_mask);
			ata_qc_complete(qc);
		}
	}
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	int handled;

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	handled = ata_host_intr(ap, qc);
	if (unlikely(!handled)) {
		/* spurious, clear it */
		ata_check_status(ap);
	}

	return 1;
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance,
				     struct pt_regs *pt_regs)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = nv_adma_ctl_block(ap);
			u16 status;
			u32 gen_ctl;
			int have_global_err = 0;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing
			   before we start looking at any of the CPB statuses, so that
			   any CPB completions after this point in the handler will raise
			   another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			/* freeze if hotplugged */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG))) {
				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
				ata_port_freeze(ap);
				handled++;
				continue;
			}

			if (status & NV_ADMA_STAT_TIMEOUT) {
				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if (status & NV_ADMA_STAT_CPBERR) {
				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
				/** Check CPBs for completed commands */

				if (ata_tag_valid(ap->active_tag))
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
						(notifier_error & (1 << ap->active_tag)));
				else {
					int pos;
					u32 active = ap->sactive;
					while ((pos = ffs(active))) {
						pos--;
						nv_adma_check_cpb(ap, pos, have_global_err ||
							(notifier_error & (1 << pos)));
						active &= ~(1 << pos);
					}
				}
			}

			handled++; /* irq handled if we got here */
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		writel(notifier_clears[0],
			nv_adma_notifier_clear_block(host->ports[0]));
		writel(notifier_clears[1],
			nv_adma_notifier_clear_block(host->ports[1]));
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)