
📄 sata_mv.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}
	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}
	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
