📄 pmac.c

📁 优龙2410 Linux 2.6.8 kernel source code
💻 C
📖 Page 1 of 4
        while (readl(&dma->status) & RUN)
                udelay(1);

        /* Build sglist */
        if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE)
                pmif->sg_nents = i = pmac_ide_raw_build_sglist(drive, rq);
        else
                pmif->sg_nents = i = pmac_ide_build_sglist(drive, rq);
        if (!i)
                return 0;

        /* Build DBDMA commands list */
        sg = pmif->sg_table;
        while (i && sg_dma_len(sg)) {
                u32 cur_addr;
                u32 cur_len;

                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) {
                        if (pmif->broken_dma_warn == 0) {
                                printk(KERN_WARNING "%s: DMA on non aligned address, "
                                       "switching to PIO on Ohare chipset\n", drive->name);
                                pmif->broken_dma_warn = 1;
                        }
                        goto use_pio_instead;
                }
                while (cur_len) {
                        unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

                        if (count++ >= MAX_DCMDS) {
                                printk(KERN_WARNING "%s: DMA table too small\n",
                                       drive->name);
                                goto use_pio_instead;
                        }
                        st_le16(&table->command, wr ? OUTPUT_MORE : INPUT_MORE);
                        st_le16(&table->req_count, tc);
                        st_le32(&table->phy_addr, cur_addr);
                        table->cmd_dep = 0;
                        table->xfer_status = 0;
                        table->res_count = 0;
                        cur_addr += tc;
                        cur_len -= tc;
                        ++table;
                }
                sg++;
                i--;
        }

        /* convert the last command to an input/output last command */
        if (count) {
                st_le16(&table[-1].command, wr ? OUTPUT_LAST : INPUT_LAST);
                /* add the stop command to the end of the list */
                memset(table, 0, sizeof(struct dbdma_cmd));
                st_le16(&table->command, DBDMA_STOP);
                mb();
                writel(pmif->dma_table_dma, &dma->cmdptr);
                return 1;
        }

        printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);

use_pio_instead:
        pci_unmap_sg(hwif->pci_dev,
                     pmif->sg_table,
                     pmif->sg_nents,
                     pmif->sg_dma_direction);
        hwif->sg_dma_active = 0;
        return 0; /* revert to PIO for this request */
}

/* Teardown mappings after DMA has completed. */
static void __pmac
pmac_ide_destroy_dmatable(ide_drive_t *drive)
{
        struct pci_dev *dev = HWIF(drive)->pci_dev;
        pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
        struct scatterlist *sg = pmif->sg_table;
        int nents = pmif->sg_nents;

        if (nents) {
                pci_unmap_sg(dev, sg, nents, pmif->sg_dma_direction);
                pmif->sg_nents = 0;
                HWIF(drive)->sg_dma_active = 0;
        }
}
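
/*
 * Note on the command list built above: each scatterlist segment is chopped
 * into chunks of at most 0xfe00 bytes, since req_count is a 16-bit field
 * (0xfe00 = 65024 bytes, a multiple of the 512-byte sector size).  A 200 KiB
 * contiguous segment, for example, becomes four descriptors: three of 0xfe00
 * bytes and one of 0x2600 bytes.  The last data descriptor is rewritten as
 * INPUT_LAST/OUTPUT_LAST, a DBDMA_STOP command terminates the chain so the
 * channel halts by itself, and the physical address of the list is loaded
 * into the channel's cmdptr register.
 */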

/*
 * Pick up best MDMA timing for the drive and apply it
 */
static int __pmac
pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
{
        ide_hwif_t *hwif = HWIF(drive);
        pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
        int drive_cycle_time;
        struct hd_driveid *id = drive->id;
        u32 *timings, *timings2;
        u32 timing_local[2];
        int ret;

        /* which drive is it ? */
        timings = &pmif->timings[drive->select.b.unit & 0x01];
        timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2];

        /* Check if the drive provides an explicit cycle time */
        if ((id->field_valid & 2) && (id->eide_dma_time))
                drive_cycle_time = id->eide_dma_time;
        else
                drive_cycle_time = 0;

        /* Copy timings to local image */
        timing_local[0] = *timings;
        timing_local[1] = *timings2;

        /* Calculate controller timings */
        ret = set_timings_mdma(drive, pmif->kind,
                               &timing_local[0],
                               &timing_local[1],
                               mode,
                               drive_cycle_time);
        if (ret)
                return 0;

        /* Set feature on drive */
        printk(KERN_INFO "%s: Enabling MultiWord DMA %d\n", drive->name, mode & 0xf);
        ret = pmac_ide_do_setfeature(drive, mode);
        if (ret) {
                printk(KERN_WARNING "%s: Failed !\n", drive->name);
                return 0;
        }

        /* Apply timings to controller */
        *timings = timing_local[0];
        *timings2 = timing_local[1];

        /* Set speed info in drive */
        drive->current_speed = mode;
        if (!drive->init_speed)
                drive->init_speed = mode;

        return 1;
}

/*
 * Pick up best UDMA timing for the drive and apply it
 */
static int __pmac
pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
{
        ide_hwif_t *hwif = HWIF(drive);
        pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
        u32 *timings, *timings2;
        u32 timing_local[2];
        int ret;

        /* which drive is it ? */
        timings = &pmif->timings[drive->select.b.unit & 0x01];
        timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2];

        /* Copy timings to local image */
        timing_local[0] = *timings;
        timing_local[1] = *timings2;

        /* Calculate timings for interface */
        if (pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6)
                ret = set_timings_udma_ata6(&timing_local[0],
                                            &timing_local[1],
                                            mode);
        else
                ret = set_timings_udma_ata4(&timing_local[0], mode);
        if (ret)
                return 0;

        /* Set feature on drive */
        printk(KERN_INFO "%s: Enabling Ultra DMA %d\n", drive->name, mode & 0x0f);
        ret = pmac_ide_do_setfeature(drive, mode);
        if (ret) {
                printk(KERN_WARNING "%s: Failed !\n", drive->name);
                return 0;
        }

        /* Apply timings to controller */
        *timings = timing_local[0];
        *timings2 = timing_local[1];

        /* Set speed info in drive */
        drive->current_speed = mode;
        if (!drive->init_speed)
                drive->init_speed = mode;

        return 1;
}

/*
 * Check what the best DMA timing setting is for the drive and
 * call the appropriate functions to apply it.
 */
static int __pmac
pmac_ide_dma_check(ide_drive_t *drive)
{
        struct hd_driveid *id = drive->id;
        ide_hwif_t *hwif = HWIF(drive);
        pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
        int enable = 1;
        int map;

        drive->using_dma = 0;

        if (drive->media == ide_floppy)
                enable = 0;
        if (((id->capability & 1) == 0) && !__ide_dma_good_drive(drive))
                enable = 0;
        if (__ide_dma_bad_drive(drive))
                enable = 0;

        if (enable) {
                short mode;

                map = XFER_MWDMA;
                if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6
                    || pmif->kind == controller_k2_ata6) {
                        map |= XFER_UDMA;
                        if (pmif->cable_80) {
                                map |= XFER_UDMA_66;
                                if (pmif->kind == controller_un_ata6 ||
                                    pmif->kind == controller_k2_ata6)
                                        map |= XFER_UDMA_100;
                        }
                }
                mode = ide_find_best_mode(drive, map);
                if (mode & XFER_UDMA)
                        drive->using_dma = pmac_ide_udma_enable(drive, mode);
                else if (mode & XFER_MWDMA)
                        drive->using_dma = pmac_ide_mdma_enable(drive, mode);
                hwif->OUTB(0, IDE_CONTROL_REG);
                /* Apply settings to controller */
                pmac_ide_do_update_timings(drive);
        }
        return 0;
}
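
/*
 * Note on pmac_ide_dma_check() above: 'map' is a capability mask built from
 * the controller kind and cable detection (MWDMA always; UDMA only on the
 * KeyLargo ATA-4 and the ATA-6 cells; anything faster than UDMA/33 only when
 * an 80-wire cable was detected, with UDMA/100 reserved for the ATA-6 cells).
 * ide_find_best_mode() intersects that mask with what the drive reports, and
 * the chosen mode is then programmed on both sides: a set transfer mode
 * request on the drive (pmac_ide_do_setfeature()) and the per-drive timing
 * image on the controller.
 */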

/*
 * Prepare a DMA transfer.  We build the DMA table, adjust the timings for
 * a read on KeyLargo ATA/66, and mark ourselves as waiting for DMA completion.
 */
static int __pmac
pmac_ide_dma_start(ide_drive_t *drive, int reading)
{
        ide_hwif_t *hwif = HWIF(drive);
        pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
        struct request *rq = HWGROUP(drive)->rq;
        u8 unit = (drive->select.b.unit & 0x01);
        u8 ata4;

        if (pmif == NULL)
                return 1;
        ata4 = (pmif->kind == controller_kl_ata4);

        if (!pmac_ide_build_dmatable(drive, rq))
                return 1;

        /* Apple adds 60ns to wrDataSetup on reads */
        if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
                writel(pmif->timings[unit] + (reading ? 0x00800000UL : 0),
                       (unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG));
                (void)readl((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG));
        }

        drive->waiting_for_dma = 1;

        return 0;
}

/*
 * Start a DMA READ command
 */
static int __pmac
pmac_ide_dma_read(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;
        u8 lba48 = (drive->addressing == 1) ? 1 : 0;
        task_ioreg_t command = WIN_NOP;

        if (pmac_ide_dma_start(drive, 1))
                return 1;

        if (drive->media != ide_disk)
                return 0;

        command = (lba48) ? WIN_READDMA_EXT : WIN_READDMA;
        if (drive->vdma)
                command = (lba48) ? WIN_READ_EXT : WIN_READ;
        if (rq->flags & REQ_DRIVE_TASKFILE) {
                ide_task_t *args = rq->special;
                command = args->tfRegister[IDE_COMMAND_OFFSET];
        }

        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, NULL);

        return pmac_ide_dma_begin(drive);
}

/*
 * Start a DMA WRITE command
 */
static int __pmac
pmac_ide_dma_write(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;
        u8 lba48 = (drive->addressing == 1) ? 1 : 0;
        task_ioreg_t command = WIN_NOP;

        if (pmac_ide_dma_start(drive, 0))
                return 1;

        if (drive->media != ide_disk)
                return 0;

        command = (lba48) ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;
        if (drive->vdma)
                command = (lba48) ? WIN_WRITE_EXT : WIN_WRITE;
        if (rq->flags & REQ_DRIVE_TASKFILE) {
                ide_task_t *args = rq->special;
                command = args->tfRegister[IDE_COMMAND_OFFSET];
        }

        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, NULL);

        return pmac_ide_dma_begin(drive);
}

/*
 * Kick the DMA controller into life after the DMA command has been issued
 * to the drive.
 */
static int __pmac
pmac_ide_dma_begin(ide_drive_t *drive)
{
        pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
        volatile struct dbdma_regs *dma;

        if (pmif == NULL)
                return 1;
        dma = pmif->dma_regs;

        writel((RUN << 16) | RUN, &dma->control);
        /* Make sure it gets to the controller right now */
        (void)readl(&dma->control);
        return 0;
}

/*
 * After a DMA transfer, make sure the controller is stopped
 */
static int __pmac
pmac_ide_dma_end(ide_drive_t *drive)
{
        pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
        volatile struct dbdma_regs *dma;
        u32 dstat;

        if (pmif == NULL)
                return 0;
        dma = pmif->dma_regs;

        drive->waiting_for_dma = 0;
        dstat = readl(&dma->status);
        writel(((RUN|WAKE|DEAD) << 16), &dma->control);
        pmac_ide_destroy_dmatable(drive);
        /* Verify good DMA status.  We don't check for ACTIVE being 0; we
         * should in theory, but with ATAPI devices doing buffer underruns,
         * that would cause us to disable DMA, which isn't what we want.
         */
        return (dstat & (RUN|DEAD)) != RUN;
}
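
/*
 * Note on the request path above: pmac_ide_dma_read()/pmac_ide_dma_write()
 * first call pmac_ide_dma_start() to build the DBDMA command list (and, on
 * KeyLargo ATA/66, apply the +60ns read adjustment to the timing register),
 * then issue the ATA read/write DMA command via ide_execute_command(), and
 * finally start the DBDMA channel in pmac_ide_dma_begin() by setting RUN
 * (the readl() after the writel() pushes the write out to the chip).
 * pmac_ide_dma_end() does the reverse: it clears RUN/WAKE/DEAD, unmaps the
 * scatterlist, and reports an error if the channel stopped with DEAD set or
 * without RUN set.
 */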

/*
 * Check that the interrupt we got was for us.  We can't always know this
 * for sure with those Apple interfaces (well, we could on the recent ones,
 * but that's not implemented yet); on the other hand, we don't have shared
 * interrupts, so it's not really a problem.
 */
static int __pmac
pmac_ide_dma_test_irq(ide_drive_t *drive)
{
        pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
        volatile struct dbdma_regs *dma;
        unsigned long status, timeout;

        if (pmif == NULL)
                return 0;
        dma = pmif->dma_regs;

        /* We have two things to deal with here:
         *
         * - The dbdma won't stop if the command was started
         * but completed with an error without transferring all
         * data.  This happens when bad blocks are met during
         * a multi-block transfer.
         *
         * - The dbdma fifo hasn't yet finished flushing to
         * system memory when the disk interrupt occurs.
         */

        /* If ACTIVE is cleared, the STOP command has passed and the
         * transfer is complete.
         */
        status = readl(&dma->status);
        if (!(status & ACTIVE))
                return 1;
        if (!drive->waiting_for_dma)
                printk(KERN_WARNING "ide%d, ide_dma_test_irq called while not waiting\n",
                       HWIF(drive)->index);

        /* If dbdma didn't execute the STOP command yet, the
         * active bit is still set.  We consider that we aren't
         * sharing interrupts (which is hopefully the case with
         * those controllers) and so we just try to flush the
         * channel for pending data in the fifo.
         */
        udelay(1);
        writel((FLUSH << 16) | FLUSH, &dma->control);
        timeout = 0;
        for (;;) {
                udelay(1);
                status = readl(&dma->status);
                if ((status & FLUSH) == 0)
                        break;
                if (++timeout > 100) {
                        printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n",
                               HWIF(drive)->index);
                        break;
                }
        }
        return 1;
}

static int __pmac
pmac_ide_dma_host_off(ide_drive_t *drive)
{
        return 0;
}

static int __pmac
pmac_ide_dma_host_on(ide_drive_t *drive)
{
        return 0;
}

static int __pmac
pmac_ide_dma_lostirq(ide_drive_t *drive)
{
        pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
        volatile struct dbdma_regs *dma;
        unsigned long status;

        if (pmif == NULL)
                return 0;
        dma = pmif->dma_regs;

        status = readl(&dma->status);
        printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
        return 0;
}
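
/*
 * Note on the helpers above: ide_dma_test_irq() treats a cleared ACTIVE bit
 * as proof that the STOP descriptor was reached and the transfer is over;
 * otherwise it sets FLUSH and polls (up to roughly 100us) for the bit to
 * clear, since the disk interrupt can arrive before the DBDMA engine has
 * pushed the last data out of its FIFO.  host_off/host_on are no-ops because
 * the DBDMA channel is started and stopped around each transfer anyway, and
 * lostirq only logs the channel status for debugging.
 */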

/*
 * Allocate the data structures needed for using DMA with an interface
 * and fill in the proper list of function pointers.
 */
static void __init
pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
{
        /* We won't need pci_dev if we switch to generic consistent
         * DMA routines ...
         */
        if (hwif->pci_dev == NULL)
                return;

        /*
         * Allocate space for the DBDMA commands.
         * The +2 is +1 for the stop command and +1 to allow for
         * aligning the start address to a multiple of 16 bytes.
         */
        pmif->dma_table_cpu = (struct dbdma_cmd *)pci_alloc_consistent(
                hwif->pci_dev,
                (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
                &pmif->dma_table_dma);
        if (pmif->dma_table_cpu == NULL) {
                printk(KERN_ERR "%s: unable to allocate DMA command list\n",
                       hwif->name);
                return;
        }

        pmif->sg_table = kmalloc(sizeof(struct scatterlist) * MAX_DCMDS,
                                 GFP_KERNEL);
        if (pmif->sg_table == NULL) {
                pci_free_consistent(hwif->pci_dev,
                                    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
                                    pmif->dma_table_cpu, pmif->dma_table_dma);
                return;
        }

        hwif->ide_dma_off_quietly = &__ide_dma_off_quietly;
        hwif->ide_dma_on = &__ide_dma_on;
        hwif->ide_dma_check = &pmac_ide_dma_check;
        hwif->ide_dma_read = &pmac_ide_dma_read;
        hwif->ide_dma_write = &pmac_ide_dma_write;
        hwif->ide_dma_begin = &pmac_ide_dma_begin;
        hwif->ide_dma_end = &pmac_ide_dma_end;
        hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
        hwif->ide_dma_host_off = &pmac_ide_dma_host_off;
        hwif->ide_dma_host_on = &pmac_ide_dma_host_on;
        hwif->ide_dma_verbose = &__ide_dma_verbose;
        hwif->ide_dma_timeout = &__ide_dma_timeout;
        hwif->ide_dma_lostirq = &pmac_ide_dma_lostirq;

#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC_AUTO
        if (!noautodma)
                hwif->autodma = 1;
#endif
        hwif->drives[0].autodma = hwif->autodma;
        hwif->drives[1].autodma = hwif->autodma;

        hwif->atapi_dma = 1;
        switch (pmif->kind) {
        case controller_un_ata6:
        case controller_k2_ata6:
                hwif->ultra_mask = pmif->cable_80 ? 0x3f : 0x07;
                hwif->mwdma_mask = 0x07;
                hwif->swdma_mask = 0x00;
                break;
        case controller_kl_ata4:
                hwif->ultra_mask = pmif->cable_80 ? 0x1f : 0x07;
                hwif->mwdma_mask = 0x07;
                hwif->swdma_mask = 0x00;
                break;
        default:
                hwif->ultra_mask = 0x00;
                hwif->mwdma_mask = 0x07;
                hwif->swdma_mask = 0x00;
                break;
        }
}

#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
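
The ultra_mask / mwdma_mask values assigned at the end of pmac_ide_setup_dma() are per-mode capability bitmaps: bit n set means the interface accepts UDMA (or MWDMA) mode n. The stand-alone sketch below is only an illustration of that encoding; the highest_mode() helper and the demo program are not part of pmac.c, they simply show which best mode each mask from the switch above allows (UDMA/100 on the ATA-6 cells with an 80-wire cable, UDMA/66 on KeyLargo ATA-4 with an 80-wire cable, UDMA/33 otherwise).

#include <stdio.h>

/* Return the index of the highest set bit, i.e. the best mode encoded in a
 * capability mask, or -1 if the mask is empty. */
static int highest_mode(unsigned int mask)
{
        int mode = -1;

        while (mask) {
                mode++;
                mask >>= 1;
        }
        return mode;
}

int main(void)
{
        /* Mask values taken from the switch at the end of pmac_ide_setup_dma(). */
        printf("ATA-6 cell, 80-wire cable:    UDMA up to mode %d\n", highest_mode(0x3f)); /* 5 = UDMA/100 */
        printf("KeyLargo ATA-4, 80-wire:      UDMA up to mode %d\n", highest_mode(0x1f)); /* 4 = UDMA/66  */
        printf("ATA-4/ATA-6 cell, 40-wire:    UDMA up to mode %d\n", highest_mode(0x07)); /* 2 = UDMA/33  */
        printf("mwdma_mask 0x07:              MWDMA up to mode %d\n", highest_mode(0x07)); /* 2 */
        return 0;
}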
