⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 sata_mv.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
	/* NOTE(review): this chunk begins mid-initializer — the enclosing
	 * scsi_host_template whose first fields lie above this view.
	 */
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

/* Port operations for the 50xx family: PIO taskfile access plus the
 * driver's own EDMA queue prep/issue; SCR access goes through the
 * 5xxx-specific bridged register helpers (mv5_scr_read/write).
 */
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

/* Port operations for the 60xx family: identical to mv5_ops except
 * for the SCR accessors (direct per-port registers via mv_scr_read/write).
 */
static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

/* Port operations for chips with the "IE" (enhanced) EDMA interface
 * (6042/7042): same as mv6_ops but queue entries are built by
 * mv_qc_prep_iie instead of mv_qc_prep.
 */
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

/* Per-chip-variant capabilities, indexed by the chip_* enum used as
 * driver_data in mv_pci_tbl below.  MV_FLAG_DUAL_HC marks chips with
 * two host controllers (8 ports instead of 4).
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

/* PCI IDs handled by this driver; driver_data selects the mv_port_info
 * entry above.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

/* Chip-family hardware init hooks: PHY errata workarounds, LED setup,
 * and the various reset paths.  One table per major family (50xx/60xx).
 */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
/*
 * Configure the widest DMA mask the device supports: try 64-bit first,
 * falling back to 32-bit for the consistent mask (and for both masks if
 * 64-bit addressing is unavailable).  Returns 0 on success or the
 * negative error from the failing pci_set_*_dma_mask() call.
 */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			/* 64-bit streaming DMA is fine, but fall back to a
			 * 32-bit consistent mask before giving up entirely.
			 */
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

/* writel() followed by a dummy readl() of the same register, forcing
 * the write past any PCI posting buffers before the caller proceeds.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

/* MMIO base of host controller @hc within the chip's register window */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

/* Which host controller a global port number belongs to */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

/* Port index within its host controller */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

/* MMIO base of the per-port register block: HC base, past the arbiter
 * registers, plus the hard-port offset.
 */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

/* Per-port MMIO base for an ata_port, via the host's primary BAR mapping */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

/* 2 host controllers on dual-HC chips, otherwise 1 */
static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/* No-op: interrupt causes are cleared elsewhere in this driver's
 * EDMA-based interrupt handling.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}

/* Program the EDMA request/response queue base addresses and in/out
 * pointers from the cached DMA addresses and indices in @pp.  The
 * XX42A0 errata path writes the full low 32 bits of the base into the
 * out/in pointer registers instead of just the index field.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	/* double shift avoids undefined behavior of >> 32 on 32-bit dma_addr_t */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256B aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	/* poll for up to ~100ms (1000 * 100us) for EDMA_EN to clear */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

/* Locked wrapper around __mv_stop_dma(): takes the host lock so the
 * EDMA state cache in port private data stays consistent.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space starting at @start,
 * four 32-bit words per line.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

/* Debug helper: hex-dump the first @bytes of PCI config space */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

/* Debug helper: dump PCI config, chip-global, HC and per-port register
 * blocks.  A negative @port means "all ports/HCs".
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

/* Map an SCR register index to its per-port MMIO offset.
 * NOTE(review): this function is truncated at the end of the visible
 * chunk — the remaining cases and return lie beyond this view.
 */
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -