
📄 sata_nv.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5

	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static const struct ata_port_operations nv_swncq_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,
	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
	},
	/* SWNCQ */
	{
		.sht		= &nv_swncq_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.irq_handler	= nv_swncq_interrupt,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
static int swncq_enabled;
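
/*
 * Mode-switch helpers: the ADMA engine runs either in full ADMA mode or
 * in legacy register mode. Each transition is requested via the GO bit
 * in NV_ADMA_CTL and then confirmed by polling NV_ADMA_STAT (up to 20
 * iterations, 50 ns apart); a timeout only warns, it does not fail.
 */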
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			 status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}
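
/*
 * ATAPI fallback: per NVIDIA, ADMA cannot carry ATAPI commands, so
 * nv_adma_slave_config() above drops the port back to the legacy engine
 * for ATAPI devices. check_atapi_dma() therefore refuses DMA until that
 * setup has completed, and tf_read() must first leave ADMA mode before
 * touching the taskfile registers. nv_adma_tf_to_cpb() encodes a
 * taskfile into the 16-bit (register << 8 | value) pairs of a Command
 * Parameter Block.
 */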
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
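
/*
 * Completion path: every ADMA command owns a CPB whose resp_flags are
 * inspected here. Error flags are translated into libata EH actions
 * (port freeze or abort); NV_CPB_RESP_DONE completes the queued command
 * for that tag.
 */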
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}
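
/*
 * Top-level interrupt handler for the ADMA flavour. Ports that have
 * been switched back to the legacy engine (ATAPI setup complete) are
 * serviced through nv_host_intr(); all other ports take the ADMA
 * notifier/status path that follows.
 */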
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
