
📄 sata_inic162x.c

📁 Linux kernel source code
💻 C
📖 Page 1 / 2
/*
 * sata_inic162x.c - Driver for Initio 162x SATA controllers
 *
 * Copyright 2006  SUSE Linux Products GmbH
 * Copyright 2006  Tejun Heo <teheo@novell.com>
 *
 * This file is released under GPL v2.
 *
 * This controller is eccentric and easily locks up if something isn't
 * right.  Documentation is available at initio's website but it only
 * documents registers (not programming model).
 *
 * - ATA disks work.
 * - Hotplug works.
 * - ATAPI read works but burning doesn't.  This thing is really
 *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
 *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
 *   my guest.
 * - Both STR and STD work.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define DRV_NAME	"sata_inic162x"
#define DRV_VERSION	"0.3"

enum {
	MMIO_BAR		= 5,

	NR_PORTS		= 2,

	HOST_CTL		= 0x7c,
	HOST_STAT		= 0x7e,
	HOST_IRQ_STAT		= 0xbc,
	HOST_IRQ_MASK		= 0xbe,

	PORT_SIZE		= 0x40,

	/* registers for ATA TF operation */
	PORT_TF			= 0x00,
	PORT_ALT_STAT		= 0x08,
	PORT_IRQ_STAT		= 0x09,
	PORT_IRQ_MASK		= 0x0a,
	PORT_PRD_CTL		= 0x0b,
	PORT_PRD_ADDR		= 0x0c,
	PORT_PRD_XFERLEN	= 0x10,

	/* IDMA register */
	PORT_IDMA_CTL		= 0x14,

	PORT_SCR		= 0x20,

	/* HOST_CTL bits */
	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
	HCTL_PWRDWN		= (1 << 13), /* power down PHYs */
	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
	HCTL_RPGSEL		= (1 << 15), /* register page select */

	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
				  HCTL_RPGSEL,

	/* HOST_IRQ_(STAT|MASK) bits */
	HIRQ_PORT0		= (1 << 0),
	HIRQ_PORT1		= (1 << 1),
	HIRQ_SOFT		= (1 << 14),
	HIRQ_GLOBAL		= (1 << 15), /* STAT only */

	/* PORT_IRQ_(STAT|MASK) bits */
	PIRQ_OFFLINE		= (1 << 0),  /* device unplugged */
	PIRQ_ONLINE		= (1 << 1),  /* device plugged */
	PIRQ_COMPLETE		= (1 << 2),  /* completion interrupt */
	PIRQ_FATAL		= (1 << 3),  /* fatal error */
	PIRQ_ATA		= (1 << 4),  /* ATA interrupt */
	PIRQ_REPLY		= (1 << 5),  /* reply FIFO not empty */
	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */

	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,

	PIRQ_MASK_DMA_READ	= PIRQ_REPLY | PIRQ_ATA,
	PIRQ_MASK_OTHER		= PIRQ_REPLY | PIRQ_COMPLETE,
	PIRQ_MASK_FREEZE	= 0xff,

	/* PORT_PRD_CTL bits */
	PRD_CTL_START		= (1 << 0),
	PRD_CTL_WR		= (1 << 3),
	PRD_CTL_DMAEN		= (1 << 7),  /* DMA enable */

	/* PORT_IDMA_CTL bits */
	IDMA_CTL_RST_ATA	= (1 << 2),  /* hardreset ATA bus */
	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */
};

struct inic_host_priv {
	u16	cached_hctl;
};

struct inic_port_priv {
	u8	dfl_prdctl;
	u8	cached_prdctl;
	u8	cached_pirq_mask;
};

static int inic_slave_config(struct scsi_device *sdev)
{
	/* This controller is braindamaged.  dma_boundary is 0xffff
	 * like others but it will lock up the whole machine HARD if
	 * 65536 byte PRD entry is fed.  Reduce maximum segment size.
	 */
	blk_queue_max_segment_size(sdev->request_queue, 65536 - 512);

	return ata_scsi_slave_config(sdev);
}

static struct scsi_host_template inic_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= inic_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const int scr_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 1,
	[SCR_CONTROL]	= 2,
};

static void __iomem *inic_port_base(struct ata_port *ap)
{
	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
}

static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;

	writeb(mask, port_base + PORT_IRQ_MASK);
	pp->cached_pirq_mask = mask;
}

static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
	struct inic_port_priv *pp = ap->private_data;

	if (pp->cached_pirq_mask != mask)
		__inic_set_pirq_mask(ap, mask);
}

static void inic_reset_port(void __iomem *port_base)
{
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	u16 ctl;

	ctl = readw(idma_ctl);
	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);

	/* mask IRQ and assert reset */
	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
	readw(idma_ctl); /* flush */

	/* give it some time */
	msleep(1);

	/* release reset */
	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);

	/* clear irq */
	writeb(0xff, port_base + PORT_IRQ_STAT);

	/* reenable ATA IRQ, turn off IDMA mode */
	writew(ctl, idma_ctl);
}

static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
{
	void __iomem *scr_addr = ap->ioaddr.scr_addr;
	void __iomem *addr;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return -EINVAL;

	addr = scr_addr + scr_map[sc_reg] * 4;
	*val = readl(scr_addr + scr_map[sc_reg] * 4);

	/* this controller has stuck DIAG.N, ignore it */
	if (sc_reg == SCR_ERROR)
		*val &= ~SERR_PHYRDY_CHG;
	return 0;
}

static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = ap->ioaddr.scr_addr;
	void __iomem *addr;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return -EINVAL;

	addr = scr_addr + scr_map[sc_reg] * 4;
	writel(val, scr_addr + scr_map[sc_reg] * 4);
	return 0;
}

/*
 * In TF mode, inic162x is very similar to SFF device.  TF registers
 * function the same.  DMA engine behaves similarly using the same PRD
 * format as BMDMA but different command register, interrupt and event
 * notification methods are used.  The following inic_bmdma_*()
 * functions do the impedance matching.
 */
static void inic_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);
	int rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* make sure device sees PRD table writes */
	wmb();

	/* load transfer length */
	writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);

	/* turn on DMA and specify data direction */
	pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
	if (!rw)
		pp->cached_prdctl |= PRD_CTL_WR;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

static void inic_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);

	/* start host DMA transaction */
	pp->cached_prdctl |= PRD_CTL_START;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
}

static void inic_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);

	/* stop DMA engine */
	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
}

static u8 inic_bmdma_status(struct ata_port *ap)
{
	/* event is already verified by the interrupt handler */
	return ATA_DMA_INTR;
}

static void inic_irq_clear(struct ata_port *ap)
{
	/* noop */
}

static void inic_host_intr(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 irq_stat;

	/* fetch and clear irq */
	irq_stat = readb(port_base + PORT_IRQ_STAT);
	writeb(irq_stat, port_base + PORT_IRQ_STAT);

	if (likely(!(irq_stat & PIRQ_ERR))) {
		struct ata_queued_cmd *qc =
			ata_qc_from_tag(ap, ap->link.active_tag);

		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			ata_chk_status(ap);	/* clear ATA interrupt */
			return;
		}

		if (likely(ata_host_intr(ap, qc)))
			return;

		ata_chk_status(ap);	/* clear ATA interrupt */
		ata_port_printk(ap, KERN_WARNING, "unhandled "
				"interrupt, irq_stat=%x\n", irq_stat);
		return;
	}

	/* error */
	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);

	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	} else
		ata_port_abort(ap);
}

static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	u16 host_irq_stat;
	int i, handled = 0;

	host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);
	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];

		if (!(host_irq_stat & (HIRQ_PORT0 << i)))
			continue;

		if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
			inic_host_intr(ap);
			handled++;
		} else {
			if (ata_ratelimit())
				dev_printk(KERN_ERR, host->dev, "interrupt "
					   "from disabled port %d (0x%x)\n",
					   i, host_irq_stat);
		}
	}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}

static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* ATA IRQ doesn't wait for DMA transfer completion and vice
	 * versa.  Mask IRQ selectively to detect command completion.
	 * Without it, ATA DMA read command can cause data corruption.
	 *
	 * Something similar might be needed for ATAPI writes.  I
	 * tried a lot of combinations but couldn't find the solution.
	 */
	if (qc->tf.protocol == ATA_PROT_DMA &&
	    !(qc->tf.flags & ATA_TFLAG_WRITE))
		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
	else
		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	/* Issuing a command to yet uninitialized port locks up the
	 * controller.  Most of the time, this happens for the first
	 * command after reset which are ATA and ATAPI IDENTIFYs.
	 * Fast fail if stat is 0x7f or 0xff for those commands.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
		u8 stat = ata_chk_status(ap);
		if (stat == 0x7f || stat == 0xff)
			return AC_ERR_HSM;
	}

	return ata_qc_issue_prot(qc);
}
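The listing ends here on page 1 of 2; the error handler, the ata_port_operations table, and the PCI probe/registration code that complete the driver are on the second page and are not reproduced above. Purely as a hedged orientation sketch (not the file's actual continuation), the callbacks defined above would typically be wired into libata roughly as follows, assuming the 2.6.2x-era struct ata_port_operations field names; inic_port_ops_sketch is a hypothetical name used only for illustration:

/*
 * Hedged sketch only: shows how the callbacks above would typically plug
 * into libata of this vintage.  Field names assume the 2.6.2x-era
 * struct ata_port_operations (declared in <linux/libata.h>, already
 * included above); the real table in the rest of sata_inic162x.c may
 * differ in members and ordering.
 */
static const struct ata_port_operations inic_port_ops_sketch = {
	.scr_read	= inic_scr_read,	/* SStatus/SError/SControl access */
	.scr_write	= inic_scr_write,

	.bmdma_setup	= inic_bmdma_setup,	/* BMDMA-style impedance matching */
	.bmdma_start	= inic_bmdma_start,
	.bmdma_stop	= inic_bmdma_stop,
	.bmdma_status	= inic_bmdma_status,

	.qc_issue	= inic_qc_issue,	/* per-command IRQ mask selection */
	.irq_clear	= inic_irq_clear,
};

inic_interrupt() itself is not a member of such a table; it would be registered as the shared PCI interrupt handler when the host is brought up, for example via ata_host_activate().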
