
📄 sata_nv.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->link.active_tag))
                                        /** NV_INT_DEV indication seems unreliable at times
                                            at least in ADMA mode. Force it on always when a
                                            command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
                        rmb();

                        handled++; /* irq handled if we got here */

                        /* freeze if hotplugged or controller error */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                               NV_ADMA_STAT_HOTUNPLUG |
                                               NV_ADMA_STAT_TIMEOUT |
                                               NV_ADMA_STAT_SERROR))) {
                                struct ata_eh_info *ehi = &ap->link.eh_info;

                                ata_ehi_clear_desc(ehi);
                                __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                                if (status & NV_ADMA_STAT_TIMEOUT) {
                                        ehi->err_mask |= AC_ERR_SYSTEM;
                                        ata_ehi_push_desc(ehi, "timeout");
                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hotplug");
                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hot unplug");
                                } else if (status & NV_ADMA_STAT_SERROR) {
                                        /* let libata analyze SError and figure out the cause */
                                        ata_ehi_push_desc(ehi, "SError");
                                } else
                                        ata_ehi_push_desc(ehi, "unknown");
                                ata_port_freeze(ap);
                                continue;
                        }

                        if (status & (NV_ADMA_STAT_DONE |
                                      NV_ADMA_STAT_CPBERR)) {
                                u32 check_commands;
                                int pos, error = 0;

                                if (ata_tag_valid(ap->link.active_tag))
                                        check_commands = 1 << ap->link.active_tag;
                                else
                                        check_commands = ap->link.sactive;

                                /** Check CPBs for completed commands */
                                while ((pos = ffs(check_commands)) && !error) {
                                        pos--;
                                        error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                        check_commands &= ~(1 << pos);
                                }
                        }
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        u16 tmp;

        VPRINTK("ENTER\n");

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &ap->ioaddr;

        VPRINTK("ENTER\n");

        mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr        = mmio;
        ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr      =
        ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr     =
        ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr  =
        ioport->ctl_addr        = mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        unsigned int i;
        u32 tmp32;

        VPRINTK("ENTER\n");

        /* enable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                 NV_MCP_SATA_CFG_20_PORT1_EN |
                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        for (i = 0; i < host->n_ports; i++)
                nv_adma_setup_port(host->ports[i]);

        return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
                              struct scatterlist *sg,
                              int idx,
                              struct nv_adma_prd *aprd)
{
        u8 flags = 0;
        if (qc->tf.flags & ATA_TFLAG_WRITE)
                flags |= NV_APRD_WRITE;
        if (idx == qc->n_elem - 1)
                flags |= NV_APRD_END;
        else if (idx != 4)
                flags |= NV_APRD_CONT;

        aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
        aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
        aprd->flags = flags;
        aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        unsigned int idx;
        struct nv_adma_prd *aprd;
        struct scatterlist *sg;

        VPRINTK("ENTER\n");

        idx = 0;

        ata_for_each_sg(sg, qc) {
                aprd = (idx < 5) ? &cpb->aprd[idx] :
                               &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
                nv_adma_fill_aprd(qc, sg, idx, aprd);
                idx++;
        }
        if (idx > 5)
                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
        else
                cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        /* ADMA engine can only be used for non-ATAPI DMA commands,
           or interrupt-driven no-data commands. */
        if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
           (qc->tf.flags & ATA_TFLAG_POLLING))
                return 1;

        if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
           (qc->tf.protocol == ATA_PROT_NODATA))
                return 0;

        return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
                       NV_CPB_CTL_IEN;

        if (nv_adma_use_reg_mode(qc)) {
                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
                        (qc->flags & ATA_QCFLAG_DMAMAP));
                nv_adma_register_mode(qc->ap);
                ata_qc_prep(qc);
                return;
        }

        cpb->resp_flags = NV_CPB_RESP_DONE;
        wmb();
        cpb->ctl_flags = 0;
        wmb();

        cpb->len                = 3;
        cpb->tag                = qc->tag;
        cpb->next_cpb_idx       = 0;

        /* turn on NCQ flags for NCQ commands */
        if (qc->tf.protocol == ATA_PROT_NCQ)
                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

        VPRINTK("qc->flags = 0x%lx\n", qc->flags);

        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

        if (qc->flags & ATA_QCFLAG_DMAMAP) {
                nv_adma_fill_sg(qc, cpb);
                ctl_flags |= NV_CPB_CTL_APRD_VALID;
        } else
                memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
           until we are finished filling in all of the contents */
        wmb();
        cpb->ctl_flags = ctl_flags;
        wmb();
        cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

        VPRINTK("ENTER\n");
