⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 cpqarray.c

📁 Linux块设备驱动源码
💻 C
📖 第 1 页 / 共 4 页
字号:
	/* Now do all the DMA Mappings */
	/* NOTE(review): this is the tail of do_ida_request(); the function
	 * opens before this chunk.  At this point `creq` is the current
	 * struct request, `c` the command block being built, and `seg` the
	 * number of scatter-gather entries produced above. */
	if (rq_data_dir(creq) == READ)
		dir = PCI_DMA_FROMDEVICE;
	else
		dir = PCI_DMA_TODEVICE;
	/* Map each scatter-gather segment for DMA and record the bus
	 * address/length pairs in the controller's command block. */
	for( i=0; i < seg; i++)
	{
		c->req.sg[i].size = tmp_sg[i].length;
		c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
						 tmp_sg[i].page,
						 tmp_sg[i].offset,
						 tmp_sg[i].length, dir);
	}
DBGPX(	printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
	c->req.hdr.sg_cnt = seg;
	c->req.hdr.blk_cnt = creq->nr_sectors;
	c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
	c->type = CMD_RWREQ;

	/* Put the request on the tail of the request queue */
	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	/* Try to pull another request off the block layer queue. */
	goto queue_next;

startio:
	start_io(h);
}

/*
 * start_io submits everything on a controller's request queue
 * and moves it to the completion queue.
 *
 * Interrupts had better be off if you're in here
 */
static void start_io(ctlr_info_t *h)
{
	cmdlist_t *c;

	while((c = h->reqQ) != NULL) {
		/* Can't do anything if we're busy */
		/* fifo_full() == 0 means the controller cannot accept
		 * another command right now; leave the rest queued. */
		if (h->access.fifo_full(h) == 0)
			return;

		/* Get the first entry from the request Q */
		removeQ(&h->reqQ, c);
		h->Qdepth--;

		/* Tell the controller to do our bidding */
		h->access.submit_command(h, c);

		/* Get onto the completion Q */
		addQ(&h->cmpQ, c);
	}
}

/*
 * Complete (end) every bio in the chain hanging off a finished request.
 * `ok` selects success (0) or -EIO as the per-bio completion status.
 */
static inline void complete_buffers(struct bio *bio, int ok)
{
	struct bio *xbh;

	while(bio) {
		int nr_sectors = bio_sectors(bio);

		/* Detach this bio before ending it; bio_endio() may free it. */
		xbh = bio->bi_next;
		bio->bi_next = NULL;

		blk_finished_io(nr_sectors);
		bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);

		bio = xbh;
	}
}

/*
 * Mark all buffers that cmd was responsible for
 */
/* Examines the controller return code, logs errors, unmaps the DMA
 * scatter-gather list set up in do_ida_request(), and completes the
 * originating block-layer request.  A non-zero `timeout` forces failure. */
static inline void complete_command(cmdlist_t *cmd, int timeout)
{
	int ok=1;
	int i, ddir;

	/* Warn about non-fatal errors only once per timer interval;
	 * ida_timer() periodically clears MISC_NONFATAL_WARN so the log
	 * is not flooded. */
	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
	   (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
		printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
				cmd->ctlr, cmd->hdr.unit);
		hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
	}
	if (cmd->req.hdr.rcode & RCODE_FATAL) {
		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
				cmd->ctlr, cmd->hdr.unit);
		ok = 0;
	}
	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
		printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
		ok = 0;
	}
	if (timeout) ok = 0;

	/* unmap the DMA mapping for all the scatter gather elements */
	if (cmd->req.hdr.cmd == IDA_READ)
		ddir = PCI_DMA_FROMDEVICE;
	else
		ddir = PCI_DMA_TODEVICE;
        for(i=0; i<cmd->req.hdr.sg_cnt; i++)
                pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
				cmd->req.sg[i].size, ddir);

	complete_buffers(cmd->rq->bio, ok);

        DBGPX(printk("Done with %p\n", cmd->rq););
	end_that_request_last(cmd->rq);
}

/*
 *  The controller will interrupt us upon completion of commands.
 *  Find the command on the completion queue, remove it, tell the OS and
 *  try to queue up more IO
 */
static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	ctlr_info_t *h = dev_id;
	cmdlist_t *c;
	unsigned long istat;
	unsigned long flags;
	__u32 a,a1;

	istat = h->access.intr_pending(h);
	/* Is this interrupt for us? */
	if (istat == 0)
		return IRQ_NONE;

	/*
	 * If there are completed commands in the completion queue,
	 * we had better do something about it.
	 */
	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
	if (istat & FIFO_NOT_EMPTY) {
		/* Each value popped from the controller is the bus address
		 * of a finished command; the low 2 bits are status/tag
		 * bits, masked off before matching against cmpQ entries. */
		while((a = h->access.command_completed(h))) {
			a1 = a; a &= ~3;
			if ((c = h->cmpQ) == NULL)
			{
				printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
				continue;
			}
			/* Walk the circular completion queue looking for the
			 * command whose bus address matches. */
			while(c->busaddr != a) {
				c = c->next;
				if (c == h->cmpQ)
					break;
			}
			/*
			 * If we've found the command, take it off the
			 * completion Q and free it
			 */
			if (c->busaddr == a) {
				removeQ(&h->cmpQ, c);
				/*  Check for invalid command.
				 *  Controller returns command error,
				 *  But rcode = 0.
				 */
				if((a1 & 0x03) && (c->req.hdr.rcode == 0))
				{
					c->req.hdr.rcode = RCODE_INVREQ;
				}
				if (c->type == CMD_RWREQ) {
					complete_command(c, 0);
					cmd_free(h, c, 1);
				} else if (c->type == CMD_IOCTL_PEND) {
					/* ida_ctlr_ioctl() polls for this
					 * state change to know its command
					 * finished. */
					c->type = CMD_IOCTL_DONE;
				}
				continue;
			}
		}
	}

	/*
	 * See if we can queue up some more IO
	 */
	do_ida_request(h->queue);
	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
	return IRQ_HANDLED;
}

/*
 * This timer was for timing out requests that haven't happened after
 * IDA_TIMEOUT.  That wasn't such a good idea.  This timer is used to
 * reset a flags structure so we don't flood the user with
 * "Non-Fatal error" messages.
 */
static void ida_timer(unsigned long tdata)
{
	ctlr_info_t *h = (ctlr_info_t*)tdata;

	/* Re-arm ourselves, then clear the rate-limit flags checked by
	 * complete_command(). */
	h->timer.expires = jiffies + IDA_TIMER;
	add_timer(&h->timer);
	h->misc_tflags = 0;
}

/*
 *  ida_ioctl does some miscellaneous stuff like reporting drive geometry,
 *  setting readahead and submitting commands from userspace to the controller.
 */
static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
{
	drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	int error;
	int diskinfo[4];
	struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
	ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
	ida_ioctl_t *my_io;

	switch(cmd) {
	case HDIO_GETGEO:
		if (drv->cylinders) {
			diskinfo[0] = drv->heads;
			diskinfo[1] = drv->sectors;
			diskinfo[2] = drv->cylinders;
		} else {
			/* No geometry recorded: fabricate the conventional
			 * 255 heads / 63 sectors layout from the block count. */
			diskinfo[0] = 0xff;
			diskinfo[1] = 0x3f;
			diskinfo[2] = drv->nr_blks / (0xff*0x3f);
		}
		put_user(diskinfo[0], &geo->heads);
		put_user(diskinfo[1], &geo->sectors);
		put_user(diskinfo[2], &geo->cylinders);
		put_user(get_start_sect(inode->i_bdev), &geo->start);
		return 0;
	case IDAGETDRVINFO:
		if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
			return -EFAULT;
		return 0;
	case IDAPASSTHRU:
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		/* Copy the user's command block into kernel memory, run it
		 * through ida_ctlr_ioctl(), and copy the result back. */
		my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
		if (!my_io)
			return -ENOMEM;
		error = -EFAULT;
		if (copy_from_user(my_io, io, sizeof(*my_io)))
			goto out_passthru;
		error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
		if (error)
			goto out_passthru;
		error = -EFAULT;
		if (copy_to_user(io, my_io, sizeof(*my_io)))
			goto out_passthru;
		error = 0;
out_passthru:
		kfree(my_io);
		return error;
	case IDAGETCTLRSIG:
		if (!arg) return -EINVAL;
		put_user(host->ctlr_sig, (int __user *)arg);
		return 0;
	case IDAREVALIDATEVOLS:
		/* Only valid on the whole-controller node (minor 0). */
		if (iminor(inode) != 0)
			return -ENXIO;
		return revalidate_allvol(host);
	case IDADRIVERVERSION:
		if (!arg) return -EINVAL;
		put_user(DRIVER_VERSION, (unsigned long __user *)arg);
		return 0;
	case IDAGETPCIINFO:
	{
		ida_pci_info_struct pciinfo;

		if (!arg) return -EINVAL;
		pciinfo.bus = host->pci_dev->bus->number;
		pciinfo.dev_fn = host->pci_dev->devfn;
		pciinfo.board_id = host->board_id;
		if(copy_to_user((void __user *) arg, &pciinfo,
			sizeof( ida_pci_info_struct)))
				return -EFAULT;
		return(0);
	}

	default:
		return -EINVAL;
	}
}

/*
 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
 * The command block (io) has already been copied to kernel space for us,
 * however, any elements in the sglist need to be copied to kernel space
 * or copied back to userspace.
 *
 * Only root may perform a controller passthru command, however I'm not doing
 * any serious sanity checking on the arguments.  Doing an IDA_WRITE_MEDIA and
 * putting a 64M buffer in the sglist is probably a *bad* idea.
 */
static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
{
	int ctlr = h->ctlr;
	cmdlist_t *c;
	void *p = NULL;
	unsigned long flags;
	int error;

	/* get_from_pool==0: allocate a fresh DMA-consistent command block
	 * (may sleep), rather than taking one from the pre-allocated pool. */
	if ((c = cmd_alloc(h, 0)) == NULL)
		return -ENOMEM;
	c->ctlr = ctlr;
	c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
	c->hdr.size = sizeof(rblk_t) >> 2;
	c->size += sizeof(rblk_t);

	c->req.hdr.cmd = io->cmd;
	c->req.hdr.blk = io->blk;
	c->req.hdr.blk_cnt = io->blk_cnt;
	c->type = CMD_IOCTL_PEND;

	/* Pre submit processing */
	switch(io->cmd) {
	case PASSTHRU_A:
		/* Bidirectional passthru: copy the user buffer in, and also
		 * map the ioctl block itself (its bus address goes in blk). */
		p = kmalloc(io->sg[0].size, GFP_KERNEL);
		if (!p)
		{
			error = -ENOMEM;
			cmd_free(h, c, 0);
			return(error);
		}
		if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
			kfree(p);
			cmd_free(h, c, 0);
			return -EFAULT;
		}
		c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
				sizeof(ida_ioctl_t),
				PCI_DMA_BIDIRECTIONAL);
		c->req.sg[0].size = io->sg[0].size;
		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
		break;
	case IDA_READ:
	case READ_FLASH_ROM:
	case SENSE_CONTROLLER_PERFORMANCE:
		/* Read-type commands: just allocate and map a kernel buffer;
		 * its contents are copied out after completion. */
		p = kmalloc(io->sg[0].size, GFP_KERNEL);
		if (!p)
		{
			error = -ENOMEM;
			cmd_free(h, c, 0);
			return(error);
		}
		c->req.sg[0].size = io->sg[0].size;
		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
		break;
	case IDA_WRITE:
	case IDA_WRITE_MEDIA:
	case DIAG_PASS_THRU:
	case COLLECT_BUFFER:
	case WRITE_FLASH_ROM:
		/* Write-type commands: copy the user buffer in before
		 * mapping it for the controller. */
		p = kmalloc(io->sg[0].size, GFP_KERNEL);
		if (!p)
		{
			error = -ENOMEM;
			cmd_free(h, c, 0);
			return(error);
		}
		if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
			kfree(p);
			cmd_free(h, c, 0);
			return -EFAULT;
		}
		c->req.sg[0].size = io->sg[0].size;
		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
		break;
	default:
		/* No data buffer: map the embedded command structure itself. */
		c->req.sg[0].size = sizeof(io->c);
		c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
	}

	/* Put the request on the tail of the request queue */
	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);

	/* Wait for completion */
	/* do_ida_intr() flips type to CMD_IOCTL_DONE when the controller
	 * finishes; until then yield the CPU. */
	while(c->type != CMD_IOCTL_DONE)
		schedule();

	/* Unmap the DMA  */
	pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
		PCI_DMA_BIDIRECTIONAL);

	/* Post submit processing */
	switch(io->cmd) {
	case PASSTHRU_A:
		pci_unmap_single(h->pci_dev, c->req.hdr.blk,
                                sizeof(ida_ioctl_t),
                                PCI_DMA_BIDIRECTIONAL);
		/* fall through: PASSTHRU_A also copies data back */
	case IDA_READ:
	case DIAG_PASS_THRU:
	case SENSE_CONTROLLER_PERFORMANCE:
	case READ_FLASH_ROM:
		if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
			kfree(p);
			return -EFAULT;
		}
		/* fall through and free p */
	case IDA_WRITE:
	case IDA_WRITE_MEDIA:
	case COLLECT_BUFFER:
	case WRITE_FLASH_ROM:
		kfree(p);
		break;
	default:;
		/* Nothing to do */
	}

	io->rcode = c->req.hdr.rcode;
	cmd_free(h, c, 0);
	return(0);
}

/*
 * Commands are pre-allocated in a large block.  Here we use a simple bitmap
 * scheme to suballocte them to the driver.  Operations that are not time
 * critical (and can wait for kmalloc and possibly sleep) can pass in NULL
 * as the first argument to get a new command.
 */
static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
{
	cmdlist_t * c;
	int i;
	dma_addr_t cmd_dhandle;

	if (!get_from_pool) {
		/* Sleeping path: grab a fresh DMA-consistent buffer. */
		c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
			sizeof(cmdlist_t), &cmd_dhandle);
		if(c==NULL)
			return NULL;
	} else {
		/* Atomic path: claim a free slot in the pre-allocated pool
		 * via the bitmap; retry if another CPU raced us to the bit. */
		do {
			i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
			if (i == NR_CMDS)
				return NULL;
		} while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
		c = h->cmd_pool + i;
		cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
		h->nr_allocs++;
	}

	memset(c, 0, sizeof(cmdlist_t));
	c->busaddr = cmd_dhandle;
	return c;
}

/* Counterpart of cmd_alloc(); body continues past this chunk. */
static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
{
	int i;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -