📄 sata_nv.c
static const struct ata_port_operations nv_generic_ops = {
    .port_disable       = ata_port_disable,
    .tf_load            = ata_tf_load,
    .tf_read            = ata_tf_read,
    .exec_command       = ata_exec_command,
    .check_status       = ata_check_status,
    .dev_select         = ata_std_dev_select,
    .bmdma_setup        = ata_bmdma_setup,
    .bmdma_start        = ata_bmdma_start,
    .bmdma_stop         = ata_bmdma_stop,
    .bmdma_status       = ata_bmdma_status,
    .qc_prep            = ata_qc_prep,
    .qc_issue           = nv_qc_issue,
    .freeze             = ata_bmdma_freeze,
    .thaw               = ata_bmdma_thaw,
    .error_handler      = nv_error_handler,
    .post_internal_cmd  = ata_bmdma_post_internal_cmd,
    .data_xfer          = ata_data_xfer,
    .irq_handler        = nv_generic_interrupt,
    .irq_clear          = ata_bmdma_irq_clear,
    .irq_on             = ata_irq_on,
    .irq_ack            = ata_irq_ack,
    .scr_read           = nv_scr_read,
    .scr_write          = nv_scr_write,
    .port_start         = nv_port_start,
    .port_stop          = nv_port_stop,
    .host_stop          = nv_host_stop,
};

static const struct ata_port_operations nv_nf2_ops = {
    .port_disable       = ata_port_disable,
    .tf_load            = ata_tf_load,
    .tf_read            = ata_tf_read,
    .exec_command       = ata_exec_command,
    .check_status       = ata_check_status,
    .dev_select         = ata_std_dev_select,
    .bmdma_setup        = ata_bmdma_setup,
    .bmdma_start        = ata_bmdma_start,
    .bmdma_stop         = ata_bmdma_stop,
    .bmdma_status       = ata_bmdma_status,
    .qc_prep            = ata_qc_prep,
    .qc_issue           = nv_qc_issue,
    .freeze             = nv_nf2_freeze,
    .thaw               = nv_nf2_thaw,
    .error_handler      = nv_error_handler,
    .post_internal_cmd  = ata_bmdma_post_internal_cmd,
    .data_xfer          = ata_data_xfer,
    .irq_handler        = nv_nf2_interrupt,
    .irq_clear          = ata_bmdma_irq_clear,
    .irq_on             = ata_irq_on,
    .irq_ack            = ata_irq_ack,
    .scr_read           = nv_scr_read,
    .scr_write          = nv_scr_write,
    .port_start         = nv_port_start,
    .port_stop          = nv_port_stop,
    .host_stop          = nv_host_stop,
};

static const struct ata_port_operations nv_ck804_ops = {
    .port_disable       = ata_port_disable,
    .tf_load            = ata_tf_load,
    .tf_read            = ata_tf_read,
    .exec_command       = ata_exec_command,
    .check_status       = ata_check_status,
    .dev_select         = ata_std_dev_select,
    .bmdma_setup        = ata_bmdma_setup,
    .bmdma_start        = ata_bmdma_start,
    .bmdma_stop         = ata_bmdma_stop,
    .bmdma_status       = ata_bmdma_status,
    .qc_prep            = ata_qc_prep,
    .qc_issue           = nv_qc_issue,
    .freeze             = nv_ck804_freeze,
    .thaw               = nv_ck804_thaw,
    .error_handler      = nv_error_handler,
    .post_internal_cmd  = ata_bmdma_post_internal_cmd,
    .data_xfer          = ata_data_xfer,
    .irq_handler        = nv_ck804_interrupt,
    .irq_clear          = ata_bmdma_irq_clear,
    .irq_on             = ata_irq_on,
    .irq_ack            = ata_irq_ack,
    .scr_read           = nv_scr_read,
    .scr_write          = nv_scr_write,
    .port_start         = nv_port_start,
    .port_stop          = nv_port_stop,
    .host_stop          = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
    .port_disable       = ata_port_disable,
    .tf_load            = ata_tf_load,
    .tf_read            = nv_adma_tf_read,
    .check_atapi_dma    = nv_adma_check_atapi_dma,
    .exec_command       = ata_exec_command,
    .check_status       = ata_check_status,
    .dev_select         = ata_std_dev_select,
    .bmdma_setup        = ata_bmdma_setup,
    .bmdma_start        = ata_bmdma_start,
    .bmdma_stop         = ata_bmdma_stop,
    .bmdma_status       = ata_bmdma_status,
    .qc_prep            = nv_adma_qc_prep,
    .qc_issue           = nv_adma_qc_issue,
    .freeze             = nv_ck804_freeze,
    .thaw               = nv_ck804_thaw,
    .error_handler      = nv_adma_error_handler,
    .post_internal_cmd  = nv_adma_post_internal_cmd,
    .data_xfer          = ata_data_xfer,
    .irq_handler        = nv_adma_interrupt,
    .irq_clear          = nv_adma_irq_clear,
    .irq_on             = ata_irq_on,
    .irq_ack            = ata_irq_ack,
    .scr_read           = nv_scr_read,
    .scr_write          = nv_scr_write,
    .port_start         = nv_adma_port_start,
    .port_stop          = nv_adma_port_stop,
#ifdef CONFIG_PM
    .port_suspend       = nv_adma_port_suspend,
    .port_resume        = nv_adma_port_resume,
#endif
    .host_stop          = nv_adma_host_stop,
};
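/*
 * Per-flavor port information.  Each entry below pairs one of the
 * ata_port_operations tables above with the transfer-mode masks and
 * port flags for that controller generation; the probe code selects
 * an entry by board type.
 */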
static struct ata_port_info nv_port_info[] = {
    /* generic */
    {
        .sht            = &nv_sht,
        .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                          ATA_FLAG_HRST_TO_RESUME,
        .pio_mask       = NV_PIO_MASK,
        .mwdma_mask     = NV_MWDMA_MASK,
        .udma_mask      = NV_UDMA_MASK,
        .port_ops       = &nv_generic_ops,
    },
    /* nforce2/3 */
    {
        .sht            = &nv_sht,
        .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                          ATA_FLAG_HRST_TO_RESUME,
        .pio_mask       = NV_PIO_MASK,
        .mwdma_mask     = NV_MWDMA_MASK,
        .udma_mask      = NV_UDMA_MASK,
        .port_ops       = &nv_nf2_ops,
    },
    /* ck804 */
    {
        .sht            = &nv_sht,
        .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                          ATA_FLAG_HRST_TO_RESUME,
        .pio_mask       = NV_PIO_MASK,
        .mwdma_mask     = NV_MWDMA_MASK,
        .udma_mask      = NV_UDMA_MASK,
        .port_ops       = &nv_ck804_ops,
    },
    /* ADMA */
    {
        .sht            = &nv_adma_sht,
        .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                          ATA_FLAG_HRST_TO_RESUME |
                          ATA_FLAG_MMIO | ATA_FLAG_NCQ,
        .pio_mask       = NV_PIO_MASK,
        .mwdma_mask     = NV_MWDMA_MASK,
        .udma_mask      = NV_UDMA_MASK,
        .port_ops       = &nv_adma_ops,
    },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static void nv_adma_register_mode(struct ata_port *ap)
{
    struct nv_adma_port_priv *pp = ap->private_data;
    void __iomem *mmio = pp->ctl_block;
    u16 tmp, status;
    int count = 0;

    if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
        return;

    status = readw(mmio + NV_ADMA_STAT);
    while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
        ndelay(50);
        status = readw(mmio + NV_ADMA_STAT);
        count++;
    }
    if (count == 20)
        ata_port_printk(ap, KERN_WARNING,
            "timeout waiting for ADMA IDLE, stat=0x%hx\n", status);

    tmp = readw(mmio + NV_ADMA_CTL);
    writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

    count = 0;
    status = readw(mmio + NV_ADMA_STAT);
    while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
        ndelay(50);
        status = readw(mmio + NV_ADMA_STAT);
        count++;
    }
    if (count == 20)
        ata_port_printk(ap, KERN_WARNING,
            "timeout waiting for ADMA LEGACY, stat=0x%hx\n", status);

    pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
    struct nv_adma_port_priv *pp = ap->private_data;
    void __iomem *mmio = pp->ctl_block;
    u16 tmp, status;
    int count = 0;

    if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
        return;

    WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

    tmp = readw(mmio + NV_ADMA_CTL);
    writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

    status = readw(mmio + NV_ADMA_STAT);
    while (((status & NV_ADMA_STAT_LEGACY) ||
            !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
        ndelay(50);
        status = readw(mmio + NV_ADMA_STAT);
        count++;
    }
    if (count == 20)
        ata_port_printk(ap, KERN_WARNING,
            "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
            status);

    pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
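/*
 * The two helpers above switch a port between ADMA and legacy register
 * mode by toggling NV_ADMA_CTL_GO and then polling NV_ADMA_STAT for the
 * IDLE/LEGACY bits.  The busy-wait is bounded: at most 20 polls with
 * ndelay(50) between them (about 1 us total) before warning and carrying
 * on, so a wedged controller cannot hang the caller.
 */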
static int nv_adma_slave_config(struct scsi_device *sdev)
{
    struct ata_port *ap = ata_shost_to_port(sdev->host);
    struct nv_adma_port_priv *pp = ap->private_data;
    struct pci_dev *pdev = to_pci_dev(ap->host->dev);
    u64 bounce_limit;
    unsigned long segment_boundary;
    unsigned short sg_tablesize;
    int rc;
    int adma_enable;
    u32 current_reg, new_reg, config_mask;

    rc = ata_scsi_slave_config(sdev);

    if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
        /* Not a proper libata device, ignore */
        return rc;

    if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
        /*
         * NVIDIA reports that ADMA mode does not support ATAPI commands,
         * so ATAPI commands are sent through the legacy interface.
         * The legacy interface, however, only supports 32-bit DMA, so
         * restrict the DMA parameters accordingly when an ATAPI device
         * is connected.
         */
        bounce_limit = ATA_DMA_MASK;
        segment_boundary = ATA_DMA_BOUNDARY;
        /* Subtract 1 since an extra entry may be needed for padding,
           see libata-scsi.c */
        sg_tablesize = LIBATA_MAX_PRD - 1;

        /* Since the legacy DMA engine is in use, we need to disable ADMA
           on the port. */
        adma_enable = 0;
        nv_adma_register_mode(ap);
    } else {
        bounce_limit = *ap->dev->dma_mask;
        segment_boundary = NV_ADMA_DMA_BOUNDARY;
        sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
        adma_enable = 1;
    }

    pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

    if (ap->port_no == 1)
        config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
    else
        config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

    if (adma_enable) {
        new_reg = current_reg | config_mask;
        pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
    } else {
        new_reg = current_reg & ~config_mask;
        pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
    }

    if (current_reg != new_reg)
        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

    blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
    blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
    blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
    ata_port_printk(ap, KERN_INFO,
        "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
        (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);

    return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
    struct nv_adma_port_priv *pp = qc->ap->private_data;

    return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}
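/*
 * A nonzero return from ->check_atapi_dma tells the libata core to fall
 * back to PIO, so ATAPI DMA is only permitted once the legacy-mode setup
 * performed in nv_adma_slave_config() has completed.
 *
 * The helpers below build ADMA Command Parameter Blocks (CPBs).  Each
 * 16-bit register word carries the ATA register number shifted into the
 * high byte and the byte value to write in the low byte, with the
 * WNB/CMDEND/IGN control flags OR'ed into the remaining high bits; the
 * sequence is padded with IGN words out to a fixed 12 entries.
 */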
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
    /* Since commands where a result TF is requested are not executed in
       ADMA mode, the only time this function will be called in ADMA mode
       will be if a command fails.  In this case we don't care about going
       into register mode with ADMA commands pending, as the commands will
       all shortly be aborted anyway. */
    nv_adma_register_mode(ap);
    ata_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
    unsigned int idx = 0;

    if (tf->flags & ATA_TFLAG_ISADDR) {
        if (tf->flags & ATA_TFLAG_LBA48) {
            cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
            cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
            cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
            cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
            cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
            cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
        } else
            cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
    }

    if (tf->flags & ATA_TFLAG_DEVICE)
        cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

    cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

    while (idx < 12)
        cpb[idx++] = cpu_to_le16(IGN);

    return idx;
}

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
    struct nv_adma_port_priv *pp = ap->private_data;
    u8 flags = pp->cpb[cpb_num].resp_flags;

    VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

    if (unlikely((force_err ||
                  flags & (NV_CPB_RESP_ATA_ERR |
                           NV_CPB_RESP_CMD_ERR |
                           NV_CPB_RESP_CPB_ERR)))) {
        struct ata_eh_info *ehi = &ap->eh_info;
        int freeze = 0;

        ata_ehi_clear_desc(ehi);
        ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
        if (flags & NV_CPB_RESP_ATA_ERR) {
            ata_ehi_push_desc(ehi, ": ATA error");
            ehi->err_mask |= AC_ERR_DEV;
        } else if (flags & NV_CPB_RESP_CMD_ERR) {
            ata_ehi_push_desc(ehi, ": CMD error");
            ehi->err_mask |= AC_ERR_DEV;
        } else if (flags & NV_CPB_RESP_CPB_ERR) {
            ata_ehi_push_desc(ehi, ": CPB error");
            ehi->err_mask |= AC_ERR_SYSTEM;
            freeze = 1;
        } else {
            /* notifier error, but no error in CPB flags? */
            ehi->err_mask |= AC_ERR_OTHER;
            freeze = 1;
        }
        /* Kill all commands. EH will determine what actually failed. */
        if (freeze)
            ata_port_freeze(ap);
        else
            ata_port_abort(ap);
        return 1;
    }

    if (likely(flags & NV_CPB_RESP_DONE)) {
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
        VPRINTK("CPB flags done, flags=0x%x\n", flags);
        if (likely(qc)) {
            DPRINTK("Completing qc from tag %d\n", cpb_num);
            ata_qc_complete(qc);
        } else {
            struct ata_eh_info *ehi = &ap->eh_info;
            /* Notifier bits set without a command may indicate the drive
               is misbehaving. Raise host state machine violation on this
               condition. */
            ata_port_printk(ap, KERN_ERR,
                            "notifier for tag %d with no command?\n",
                            cpb_num);
            ehi->err_mask |= AC_ERR_HSM;
            ehi->action |= ATA_EH_SOFTRESET;
            ata_port_freeze(ap);
            return 1;
        }
    }
    return 0;
}
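/*
 * Interrupt handling.  nv_host_intr() services a port that is running in
 * legacy register mode; nv_adma_interrupt() is the shared handler that,
 * for each port, either delegates to nv_host_intr() or walks the per-tag
 * completion bits reported through the ADMA notifier registers and hands
 * each finished CPB to nv_adma_check_cpb().
 */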
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
    struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

    /* freeze if hotplugged */
    if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
        ata_port_freeze(ap);
        return 1;
    }

    /* bail out if not our interrupt */
    if (!(irq_stat & NV_INT_DEV))
        return 0;

    /* DEV interrupt w/ no active qc? */
    if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
        ata_check_status(ap);
        return 1;
    }

    /* handle interrupt */
    return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
    struct ata_host *host = dev_instance;
    int i, handled = 0;
    u32 notifier_clears[2];

    spin_lock(&host->lock);

    for (i = 0; i < host->n_ports; i++) {
        struct ata_port *ap = host->ports[i];
        notifier_clears[i] = 0;

        if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
            struct nv_adma_port_priv *pp = ap->private_data;
            void __iomem *mmio = pp->ctl_block;
            u16 status;
            u32 gen_ctl;
            u32 notifier, notifier_error;

            /* if in ATA register mode, use standard ata interrupt handler */
            if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                    >> (NV_INT_PORT_SHIFT * i);
                if (ata_tag_valid(ap->active_tag))
                    /* NV_INT_DEV indication seems unreliable at times
                       at least in ADMA mode. Force it on always when a
                       command is active, to prevent losing interrupts. */
                    irq_stat |= NV_INT_DEV;
                handled += nv_host_intr(ap, irq_stat);
                continue;
            }

            notifier = readl(mmio + NV_ADMA_NOTIFIER);
            notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
            notifier_clears[i] = notifier | notifier_error;

            gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

            if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                !notifier_error)
                /* Nothing to do */
                continue;

            status = readw(mmio + NV_ADMA_STAT);

            /* Clear status. Ensure the controller sees the clearing before
               we start looking at any of the CPB statuses, so that any CPB
               completions after this point in the handler will raise
               another interrupt. */
            writew(status, mmio + NV_ADMA_STAT);
            readw(mmio + NV_ADMA_STAT); /* flush posted write */
            rmb();

            handled++; /* irq handled if we got here */

            /* freeze if hotplugged or controller error */
            if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                   NV_ADMA_STAT_HOTUNPLUG |
                                   NV_ADMA_STAT_TIMEOUT |
                                   NV_ADMA_STAT_SERROR))) {
                struct ata_eh_info *ehi = &ap->eh_info;

                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
                if (status & NV_ADMA_STAT_TIMEOUT) {
                    ehi->err_mask |= AC_ERR_SYSTEM;
                    ata_ehi_push_desc(ehi, ": timeout");
                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                    ata_ehi_hotplugged(ehi);
                    ata_ehi_push_desc(ehi, ": hotplug");
                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                    ata_ehi_hotplugged(ehi);
                    ata_ehi_push_desc(ehi, ": hot unplug");
                } else if (status & NV_ADMA_STAT_SERROR) {
                    /* let libata analyze SError and figure out the cause */
                    ata_ehi_push_desc(ehi, ": SError");
                }
                ata_port_freeze(ap);
                continue;
            }

            if (status & (NV_ADMA_STAT_DONE |
                          NV_ADMA_STAT_CPBERR)) {
                u32 check_commands;
                int pos, error = 0;

                if (ata_tag_valid(ap->active_tag))
                    check_commands = 1 << ap->active_tag;
                else
                    check_commands = ap->sactive;

                /* check CPBs for completed commands */
                while ((pos = ffs(check_commands)) && !error) {
                    pos--;
                    error = nv_adma_check_cpb(ap, pos,
                                              notifier_error & (1 << pos));
                    check_commands &= ~(1 << pos);
                }
            }
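        }
    }

    /* Assumed closure -- the excerpt cuts off above.  The full driver also
       writes notifier_clears[] back to each port's notifier-clear registers
       at this point; here we only pair the spin_lock() taken on entry and
       report whether any port work was done. */
    spin_unlock(&host->lock);

    return IRQ_RETVAL(handled);
}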