
📄 cciss.c

📁 Linux block device driver source code
💻 C
📖 Page 1 of 5
		// Fill in the scatter gather information
		if (iocommand.buf_size > 0 )
		{
			temp64.val = pci_map_single( host->pdev, buff,
					iocommand.buf_size,
					PCI_DMA_BIDIRECTIONAL);
			c->SG[0].Addr.lower = temp64.val32.lower;
			c->SG[0].Addr.upper = temp64.val32.upper;
			c->SG[0].Len = iocommand.buf_size;
			c->SG[0].Ext = 0;  // we are not chaining
		}
		c->waiting = &wait;

		/* Put the request on the tail of the request queue */
		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
		addQ(&host->reqQ, c);
		host->Qdepth++;
		start_io(host);
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

		wait_for_completion(&wait);

		/* unlock the buffers from DMA */
		temp64.val32.lower = c->SG[0].Addr.lower;
		temp64.val32.upper = c->SG[0].Addr.upper;
		pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);

		/* Copy the error information out */
		iocommand.error_info = *(c->err_info);
		if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
		{
			kfree(buff);
			cmd_free(host, c, 0);
			return( -EFAULT);
		}

		if (iocommand.Request.Type.Direction == XFER_READ)
		{
			/* Copy the data out of the buffer we created */
			if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
			{
				kfree(buff);
				cmd_free(host, c, 0);
				return -EFAULT;
			}
		}
		kfree(buff);
		cmd_free(host, c, 0);
		return(0);
	}
	case CCISS_BIG_PASSTHRU: {
		BIG_IOCTL_Command_struct *ioc;
		CommandList_struct *c;
		unsigned char **buff = NULL;
		int	*buff_size = NULL;
		u64bit	temp64;
		unsigned long flags;
		BYTE sg_used = 0;
		int status = 0;
		int i;
		DECLARE_COMPLETION(wait);
		__u32	left;
		__u32	sz;
		BYTE	__user *data_ptr;

		if (!arg)
			return -EINVAL;
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		ioc = (BIG_IOCTL_Command_struct *)
			kmalloc(sizeof(*ioc), GFP_KERNEL);
		if (!ioc) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (copy_from_user(ioc, argp, sizeof(*ioc))) {
			status = -EFAULT;
			goto cleanup1;
		}
		if ((ioc->buf_size < 1) &&
		    (ioc->Request.Type.Direction != XFER_NONE)) {
			status = -EINVAL;
			goto cleanup1;
		}
		/* Check kmalloc limits using all SGs */
		if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
			status = -EINVAL;
			goto cleanup1;
		}
		if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
			status = -EINVAL;
			goto cleanup1;
		}
		buff = (unsigned char **) kmalloc(MAXSGENTRIES *
				sizeof(char *), GFP_KERNEL);
		if (!buff) {
			status = -ENOMEM;
			goto cleanup1;
		}
		memset(buff, 0, MAXSGENTRIES);
		buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
					GFP_KERNEL);
		if (!buff_size) {
			status = -ENOMEM;
			goto cleanup1;
		}
		left = ioc->buf_size;
		data_ptr = ioc->buf;
		while (left) {
			sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
			buff_size[sg_used] = sz;
			buff[sg_used] = kmalloc(sz, GFP_KERNEL);
			if (buff[sg_used] == NULL) {
				status = -ENOMEM;
				goto cleanup1;
			}
			if (ioc->Request.Type.Direction == XFER_WRITE &&
			    copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -ENOMEM;
				goto cleanup1;
			} else {
				memset(buff[sg_used], 0, sz);
			}
			left -= sz;
			data_ptr += sz;
			sg_used++;
		}
		if ((c = cmd_alloc(host , 0)) == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		c->cmd_type = CMD_IOCTL_PEND;
		c->Header.ReplyQueue = 0;

		if( ioc->buf_size > 0) {
			c->Header.SGList = sg_used;
			c->Header.SGTotal= sg_used;
		} else {
			c->Header.SGList = 0;
			c->Header.SGTotal= 0;
		}
		c->Header.LUN = ioc->LUN_info;
		c->Header.Tag.lower = c->busaddr;

		c->Request = ioc->Request;
		if (ioc->buf_size > 0 ) {
			int i;
			for(i=0; i<sg_used; i++) {
				temp64.val = pci_map_single( host->pdev, buff[i],
					buff_size[i],
					PCI_DMA_BIDIRECTIONAL);
				c->SG[i].Addr.lower = temp64.val32.lower;
				c->SG[i].Addr.upper = temp64.val32.upper;
				c->SG[i].Len = buff_size[i];
				c->SG[i].Ext = 0;  /* we are not chaining */
			}
		}
		c->waiting = &wait;
		/* Put the request on the tail of the request queue */
		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
		addQ(&host->reqQ, c);
		host->Qdepth++;
		start_io(host);
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		wait_for_completion(&wait);
		/* unlock the buffers from DMA */
		for(i=0; i<sg_used; i++) {
			temp64.val32.lower = c->SG[i].Addr.lower;
			temp64.val32.upper = c->SG[i].Addr.upper;
			pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
				buff_size[i], PCI_DMA_BIDIRECTIONAL);
		}
		/* Copy the error information out */
		ioc->error_info = *(c->err_info);
		if (copy_to_user(argp, ioc, sizeof(*ioc))) {
			cmd_free(host, c, 0);
			status = -EFAULT;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction == XFER_READ) {
			/* Copy the data out of the buffer we created */
			BYTE __user *ptr = ioc->buf;
			for(i=0; i< sg_used; i++) {
				if (copy_to_user(ptr, buff[i], buff_size[i])) {
					cmd_free(host, c, 0);
					status = -EFAULT;
					goto cleanup1;
				}
				ptr += buff_size[i];
			}
		}
		cmd_free(host, c, 0);
		status = 0;
cleanup1:
		if (buff) {
			for(i=0; i<sg_used; i++)
				if(buff[i] != NULL)
					kfree(buff[i]);
			kfree(buff);
		}
		if (buff_size)
			kfree(buff_size);
		if (ioc)
			kfree(ioc);
		return(status);
	}
	default:
		return -ENOTTY;
	}
}

/*
 * revalidate_allvol is for online array config utilities.  After a
 * utility reconfigures the drives in the array, it can use this function
 * (through an ioctl) to make the driver zap any previous disk structs for
 * that controller and get new ones.
 *
 * Right now I'm using the getgeometry() function to do this, but this
 * function should probably be finer grained and allow you to revalidate one
 * particular logical volume (instead of all of them on a particular
 * controller).
 */
static int revalidate_allvol(ctlr_info_t *host)
{
	int ctlr = host->ctlr, i;
	unsigned long flags;

	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	if (host->usage_count > 1) {
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		printk(KERN_WARNING "cciss: Device busy for volume"
			" revalidation (usage=%d)\n", host->usage_count);
		return -EBUSY;
	}
	host->usage_count++;
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

	for(i=0; i< NWD; i++) {
		struct gendisk *disk = host->gendisk[i];
		if (disk->flags & GENHD_FL_UP)
			del_gendisk(disk);
	}

	/*
	 * Set the partition and block size structures for all volumes
	 * on this controller to zero.  We will reread all of this data
	 */
	memset(host->drv, 0, sizeof(drive_info_struct) * CISS_MAX_LUN);
	/*
	 * Tell the array controller not to give us any interrupts while
	 * we check the new geometry.  Then turn interrupts back on when
	 * we're done.
	 */
	host->access.set_intr_mask(host, CCISS_INTR_OFF);
	cciss_getgeometry(ctlr);
	host->access.set_intr_mask(host, CCISS_INTR_ON);

	/* Loop through each real device */
	for (i = 0; i < NWD; i++) {
		struct gendisk *disk = host->gendisk[i];
		drive_info_struct *drv = &(host->drv[i]);
		/* we must register the controller even if no disks exist */
		/* this is for the online array utilities */
		if (!drv->heads && i)
			continue;
		blk_queue_hardsect_size(drv->queue, drv->block_size);
		set_capacity(disk, drv->nr_blocks);
		add_disk(disk);
	}
	host->usage_count--;
	return 0;
}

/* This function will check the usage_count of the drive to be updated/added.
 * If the usage_count is zero then the drive information will be updated and
 * the disk will be re-registered with the kernel.  If not then it will be
 * left alone for the next reboot.  The exception to this is disk 0 which
 * will always be left registered with the kernel since it is also the
 * controller node.  Any changes to disk 0 will show up on the next
 * reboot.
 */
static void cciss_update_drive_info(int ctlr, int drv_index)
{
	ctlr_info_t *h = hba[ctlr];
	struct gendisk *disk;
	ReadCapdata_struct *size_buff = NULL;
	InquiryData_struct *inq_buff = NULL;
	unsigned int block_size;
	unsigned int total_size;
	unsigned long flags = 0;
	int ret = 0;

	/* if the disk already exists then deregister it before proceeding */
	if (h->drv[drv_index].raid_level != -1){
		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
		h->drv[drv_index].busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		ret = deregister_disk(h->gendisk[drv_index],
			&h->drv[drv_index], 0);
		h->drv[drv_index].busy_configuring = 0;
	}

	/* If the disk is in use return */
	if (ret)
		return;

	/* Get information about the disk and modify the driver structure */
	size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
	if (size_buff == NULL)
		goto mem_msg;
	inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL)
		goto mem_msg;
	cciss_read_capacity(ctlr, drv_index, size_buff, 1,
		&total_size, &block_size);
	cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
		inq_buff, &h->drv[drv_index]);

	++h->num_luns;
	disk = h->gendisk[drv_index];
	set_capacity(disk, h->drv[drv_index].nr_blocks);

	/* if it's the controller it's already added */
	if (drv_index){
		disk->queue = blk_init_queue(do_cciss_request, &h->lock);

		/* Set up queue information */
		disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
		blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);

		/* This is a hardware imposed limit. */
		blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);

		/* This is a limit in the driver and could be eliminated. */
		blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);

		blk_queue_max_sectors(disk->queue, 512);

		disk->queue->queuedata = hba[ctlr];

		blk_queue_hardsect_size(disk->queue,
			hba[ctlr]->drv[drv_index].block_size);

		h->drv[drv_index].queue = disk->queue;
		add_disk(disk);
	}

freeret:
	kfree(size_buff);
	kfree(inq_buff);
	return;
mem_msg:
	printk(KERN_ERR "cciss: out of memory\n");
	goto freeret;
}

/* This function will find the first index of the controller's drive array
 * that has a -1 for the raid_level and will return that index.  This is
 * where new drives will be added.  If the index to be returned is greater
 * than the highest_lun index for the controller then highest_lun is set
 * to this new index.  If there are no available indexes then -1 is returned.
 */
static int cciss_find_free_drive_index(int ctlr)
{
	int i;

	for (i=0; i < CISS_MAX_LUN; i++){
		if (hba[ctlr]->drv[i].raid_level == -1){
			if (i > hba[ctlr]->highest_lun)
				hba[ctlr]->highest_lun = i;
			return i;
		}
	}
	return -1;
}

/* This function will add and remove logical drives from the Logical
 * drive array of the controller and maintain persistence of ordering
 * so that mount points are preserved until the next reboot.  This allows
 * for the removal of logical drives in the middle of the drive array
 * without a re-ordering of those drives.
 * INPUT
 * h		= The controller to perform the operations on
 * del_disk	= The disk to remove if specified.  If the value given
 *		  is NULL then no disk is removed.
 */
static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
{
	int ctlr = h->ctlr;
	int num_luns;
	ReportLunData_struct *ld_buff = NULL;
	drive_info_struct *drv = NULL;
	int return_code;
	int listlength = 0;
	int i;
	int drv_found;
	int drv_index = 0;
	__u32 lunid = 0;
	unsigned long flags;

	/* Set busy_configuring flag for this operation */
	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
	if (h->num_luns >= CISS_MAX_LUN){
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return -EINVAL;
	}

	if (h->busy_configuring){
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return -EBUSY;
	}
	h->busy_configuring = 1;

	/* if del_disk is NULL then we are being called to add a new disk
	 * and update the logical drive table.  If it is not NULL then
	 * we will check if the disk is in use or not.
	 */
	if (del_disk != NULL){
		drv = get_drv(del_disk);
		drv->busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return_code = deregister_disk(del_disk, drv, 1);
		drv->busy_configuring = 0;
		h->busy_configuring = 0;
		return return_code;
	} else {
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
		if (ld_buff == NULL)
			goto mem_msg;

		return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
				sizeof(ReportLunData_struct), 0, 0, 0,
				TYPE_CMD);

		if (return_code == IO_OK){
			listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
			listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
			listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
			listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
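The CCISS_PASSTHRU and CCISS_BIG_PASSTHRU cases shown above are the ioctl path that online array utilities use to push raw CISS/SCSI CDBs at the controller: the driver copies the command from user space, DMA-maps the user-supplied buffer, queues the command, waits for completion, and copies the error information (and any read data) back. Below is a minimal userspace sketch of that flow; it is not part of cciss.c. The struct and constant names come from the driver's cciss_ioctl.h, but the /dev/cciss/c0d0 node, the INQUIRY CDB, and the zeroed LUN addressing are illustrative assumptions rather than a recipe.

/* Sketch: send a SCSI INQUIRY through the cciss pass-through ioctl.
 * Assumes /dev/cciss/c0d0 exists and the caller has the needed
 * privileges (the driver checks CAP_SYS_RAWIO). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cciss_ioctl.h>

int main(void)
{
	unsigned char inq[96];
	IOCTL_Command_struct cmd;
	int fd = open("/dev/cciss/c0d0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&cmd, 0, sizeof(cmd));
	memset(inq, 0, sizeof(inq));

	/* LUN_info is left zeroed here purely for brevity; a real utility
	 * fills in the logical-volume addressing of the target drive. */
	cmd.Request.CDBLen = 6;				/* 6-byte INQUIRY CDB */
	cmd.Request.Type.Type = TYPE_CMD;
	cmd.Request.Type.Attribute = ATTR_SIMPLE;
	cmd.Request.Type.Direction = XFER_READ;		/* data flows back to us */
	cmd.Request.CDB[0] = 0x12;			/* SCSI INQUIRY opcode */
	cmd.Request.CDB[4] = sizeof(inq);		/* allocation length */
	cmd.buf_size = sizeof(inq);
	cmd.buf = inq;					/* buffer the driver maps for DMA */

	if (ioctl(fd, CCISS_PASSTHRU, &cmd) < 0) {
		perror("CCISS_PASSTHRU");
		close(fd);
		return 1;
	}
	/* The driver copied the controller's error info back into cmd;
	 * CommandStatus 0 means the CDB completed cleanly. */
	printf("CommandStatus=%u, vendor/product: %.24s\n",
	       (unsigned)cmd.error_info.CommandStatus,
	       (const char *)&inq[8]);
	close(fd);
	return 0;
}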
