sg.c

来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 2,278 行 · 第 1/5 页

C
2,278
字号
		/*
		 * NOTE(review): this page of the listing starts mid-function.
		 * Everything down to the closing "}}" below is the tail of
		 * sg_ioctl() — the remaining switch (cmd_in) cases.  This
		 * first fragment is the SG_GET_REQUEST_TABLE path: snapshot
		 * the per-fd request list into a fixed-size sg_req_info_t
		 * array and copy it to user space.
		 */
		if (result)
			return result;
		else {
			sg_req_info_t rinfo[SG_MAX_QUEUE];
			Sg_request *srp;
			read_lock_irqsave(&sfp->rq_list_lock, iflags);
			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
			     ++val, srp = srp ? srp->nextrp : srp) {
				/* slots past the end of the list stay zeroed */
				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
				if (srp) {
					rinfo[val].req_state = srp->done + 1;
					rinfo[val].problem =
					    srp->header.masked_status & 
					    srp->header.host_status & 
					    srp->header.driver_status;
					/*
					 * Completed requests already store a
					 * duration in ms; for in-flight ones
					 * header.duration holds the start
					 * time in jiffies, so report elapsed
					 * time converted to ms.
					 */
					rinfo[val].duration =
					    srp->done ? srp->header.duration :
					    jiffies_to_msecs(
						jiffies - srp->header.duration);
					rinfo[val].orphan = srp->orphan;
					rinfo[val].sg_io_owned = srp->sg_io_owned;
					rinfo[val].pack_id = srp->header.pack_id;
					rinfo[val].usr_ptr = srp->header.usr_ptr;
				}
			}
			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			return (__copy_to_user(p, rinfo,
			        SZ_SG_REQ_INFO * SG_MAX_QUEUE) ? -EFAULT : 0);
		}
	case SG_EMULATED_HOST:
		if (sdp->detached)
			return -ENODEV;
		/* report the host template's "emulated" flag to user space */
		return put_user(sdp->device->host->hostt->emulated, ip);
	case SG_SCSI_RESET:
		if (sdp->detached)
			return -ENODEV;
		/*
		 * Non-blocking fds must not sleep: fail with -EBUSY while
		 * host error recovery is in progress; blocking fds wait in
		 * scsi_block_when_processing_errors() instead.
		 */
		if (filp->f_flags & O_NONBLOCK) {
			if (test_bit(SHOST_RECOVERY,
				     &sdp->device->host->shost_state))
				return -EBUSY;
		} else if (!scsi_block_when_processing_errors(sdp->device))
			return -EBUSY;
		result = get_user(val, ip);
		if (result)
			return result;
		if (SG_SCSI_RESET_NOTHING == val)
			return 0;
		/* map the sg reset request onto the mid-level reset codes */
		switch (val) {
		case SG_SCSI_RESET_DEVICE:
			val = SCSI_TRY_RESET_DEVICE;
			break;
		case SG_SCSI_RESET_BUS:
			val = SCSI_TRY_RESET_BUS;
			break;
		case SG_SCSI_RESET_HOST:
			val = SCSI_TRY_RESET_HOST;
			break;
		default:
			return -EINVAL;
		}
		/* resets require BOTH CAP_SYS_ADMIN and CAP_SYS_RAWIO */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return (scsi_reset_provider(sdp->device, val) ==
			SUCCESS) ? 0 : -EIO;
	case SCSI_IOCTL_SEND_COMMAND:
		if (sdp->detached)
			return -ENODEV;
		if (read_only) {
			/*
			 * O_RDONLY fd: peek at the first CDB byte and vet the
			 * opcode before letting the command through.
			 */
			unsigned char opcode = WRITE_6;
			Scsi_Ioctl_Command __user *siocp = p;

			if (copy_from_user(&opcode, siocp->data, 1))
				return -EFAULT;
			if (!sg_allow_access(opcode, sdp->device->type))
				return -EPERM;
		}
		return scsi_ioctl_send_command(sdp->device, p);
	case SG_SET_DEBUG:
		result = get_user(val, ip);
		if (result)
			return result;
		sdp->sgdebug = (char) val;
		return 0;
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_TRANSFORM:
		if (sdp->detached)
			return -ENODEV;
		/* informational ioctls: pass straight to the mid level */
		return scsi_ioctl(sdp->device, cmd_in, p);
	default:
		if (read_only)
			return -EPERM;	/* don't know so take safe approach */
		return scsi_ioctl(sdp->device, cmd_in, p);
	}
}

/*
 * poll(2) support.  Returns POLLIN|POLLRDNORM when at least one request
 * not owned by a blocking SG_IO has completed, POLLOUT|POLLWRNORM when
 * another command may be queued (command queuing off: only when nothing
 * is outstanding; on: while fewer than SG_MAX_QUEUE are outstanding),
 * and POLLHUP once the underlying device has been detached.
 */
static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
	unsigned int res = 0;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int count = 0;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
	    || sfp->closed)
		return POLLERR;
	poll_wait(filp, &sfp->read_wait, wait);
	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
		/* if any read waiting, flag it */
		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
			res = POLLIN | POLLRDNORM;
		++count;
	}
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	if (sdp->detached)
		res |= POLLHUP;
	else if (!sfp->cmd_q) {
		if (0 == count)
			res |= POLLOUT | POLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		res |= POLLOUT | POLLWRNORM;
	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
				   sdp->disk->disk_name, (int) res));
	return res;
}

/*
 * fasync(2) support: add or remove this fd on the per-fd async
 * notification queue (signalled from sg_cmd_done() via kill_fasync()).
 */
static int
sg_fasync(int fd, struct file *filp, int mode)
{
	int retval;
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
				   sdp->disk->disk_name, mode));
	retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
	/* fasync_helper() returns >0 on change; callers only need 0/-err */
	return (retval < 0) ? retval : 0;
}

/* Kernel virtual address of a scatterlist element, or NULL if the
   element (or its page) is absent. */
static inline unsigned char *
sg_scatg2virt(const struct scatterlist *sclp)
{
	return (sclp && sclp->page) ?
	    (unsigned char *) page_address(sclp->page) + sclp->offset : NULL;
}

/* When startFinish==1 increments page counts for pages other than the
   first of scatter gather elements obtained from __get_free_pages().
   When startFinish==0 decrements ... */
static void
sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
{
	void *page_ptr;
	struct page *page;
	int k, m;

	SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n", 
				   startFinish, rsv_schp->k_use_sg));
	/* N.B. correction _not_ applied to base page of each allocation */
	if (rsv_schp->k_use_sg) {	/* reserve buffer is a scatter gather list */
		struct scatterlist *sclp = rsv_schp->buffer;

		for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
			/* walk each element one PAGE_SIZE at a time, skipping
			   the base page (m starts at PAGE_SIZE) */
			for (m = PAGE_SIZE; m < sclp->length; m += PAGE_SIZE) {
				page_ptr = sg_scatg2virt(sclp) + m;
				page = virt_to_page(page_ptr);
				if (startFinish)
					get_page(page);
				else {
					if (page_count(page) > 0)
						__put_page(page);
				}
			}
		}
	} else {		/* reserve buffer is just a single allocation */
		for (m = PAGE_SIZE; m < rsv_schp->bufflen; m += PAGE_SIZE) {
			page_ptr = (unsigned char *) rsv_schp->buffer + m;
			page = virt_to_page(page_ptr);
			if (startFinish)
				get_page(page);
			else {
				if (page_count(page) > 0)
					__put_page(page);
			}
		}
	}
}

/*
 * nopage handler for mmap()ed reserve buffers: translate the faulting
 * address' offset within the mapping into the backing page of the
 * per-fd reserve buffer, take a reference on that page and return it.
 * Returns NOPAGE_SIGBUS for faults beyond the reserve buffer.
 */
static struct page *
sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
{
	Sg_fd *sfp;
	struct page *page = NOPAGE_SIGBUS;
	void *page_ptr = NULL;
	unsigned long offset;
	Sg_scatter_hold *rsv_schp;

	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
		return page;
	rsv_schp = &sfp->reserve;
	offset = addr - vma->vm_start;
	if (offset >= rsv_schp->bufflen)
		return page;
	SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
				   offset, rsv_schp->k_use_sg));
	if (rsv_schp->k_use_sg) {	/* reserve buffer is a scatter gather list */
		int k;
		unsigned long sa = vma->vm_start;
		unsigned long len;
		struct scatterlist *sclp = rsv_schp->buffer;

		/* walk the scatterlist until the element containing offset */
		for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
		     ++k, ++sclp) {
			len = vma->vm_end - sa;
			len = (len < sclp->length) ? len : sclp->length;
			if (offset < len) {
				page_ptr = sg_scatg2virt(sclp) + offset;
				page = virt_to_page(page_ptr);
				get_page(page);	/* increment page count */
				break;
			}
			sa += len;
			offset -= len;
		}
	} else {		/* reserve buffer is just a single allocation */
		page_ptr = (unsigned char *) rsv_schp->buffer + offset;
		page = virt_to_page(page_ptr);
		get_page(page);	/* increment page count */
	}
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}

static struct vm_operations_struct sg_mmap_vm_ops = {
	.nopage = sg_vma_nopage,
};

/*
 * mmap(2) support: map the per-fd reserve buffer.  Requires a zero file
 * offset, a length no larger than the reserve buffer, and page-aligned
 * backing memory.  On the first successful mmap of an fd the extra page
 * references are taken via sg_rb_correct4mmap().
 */
static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	Sg_fd *sfp;
	unsigned long req_sz = vma->vm_end - vma->vm_start;
	Sg_scatter_hold *rsv_schp;

	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
				   (void *) vma->vm_start, (int) req_sz));
	if (vma->vm_pgoff)
		return -EINVAL;	/* want no offset */
	rsv_schp = &sfp->reserve;
	if (req_sz > rsv_schp->bufflen)
		return -ENOMEM;	/* cannot map more than reserved buffer */
	if (rsv_schp->k_use_sg) {	/* reserve buffer is a scatter gather list */
		int k;
		unsigned long sa = vma->vm_start;
		unsigned long len;
		struct scatterlist *sclp = rsv_schp->buffer;

		for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
		     ++k, ++sclp) {
			if (0 != sclp->offset)
				return -EFAULT;	/* non page aligned memory ?? */
			len = vma->vm_end - sa;
			len = (len < sclp->length) ? len : sclp->length;
			sa += len;
		}
	} else {	/* reserve buffer is just a single allocation */
		if ((unsigned long) rsv_schp->buffer & (PAGE_SIZE - 1))
			return -EFAULT;	/* non page aligned memory ?? */
	}
	if (0 == sfp->mmap_called) {
		sg_rb_correct4mmap(rsv_schp, 1);	/* do only once per fd lifetime */
		sfp->mmap_called = 1;
	}
	vma->vm_flags |= (VM_RESERVED | VM_IO);
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;
	return 0;
}

/* This function is a "bottom half" handler that is called by the
 * mid level when a command is completed (or has failed). */
static void
sg_cmd_done(Scsi_Cmnd * SCpnt)
{
	Scsi_Request *SRpnt = NULL;
	Sg_device *sdp = NULL;
	Sg_fd *sfp;
	Sg_request *srp = NULL;

	if (SCpnt && (SRpnt = SCpnt->sc_request))
		srp = (Sg_request *) SRpnt->upper_private_data;
	if (NULL == srp) {
		printk(KERN_ERR "sg_cmd_done: NULL request\n");
		if (SRpnt)
			scsi_release_request(SRpnt);
		return;
	}
	sfp = srp->parentfp;
	if (sfp)
		sdp = sfp->parentdp;
	if ((NULL == sdp) || sdp->detached) {
		printk(KERN_INFO "sg_cmd_done: device detached\n");
		scsi_release_request(SRpnt);
		return;
	}

	/* First transfer ownership of data buffers to sg_device object. */
	srp->data.k_use_sg = SRpnt->sr_use_sg;
	srp->data.sglist_len = SRpnt->sr_sglist_len;
	srp->data.bufflen = SRpnt->sr_bufflen;
	srp->data.buffer = SRpnt->sr_buffer;
	/* now clear out request structure */
	SRpnt->sr_use_sg = 0;
	SRpnt->sr_sglist_len = 0;
	SRpnt->sr_bufflen = 0;
	SRpnt->sr_buffer = NULL;
	SRpnt->sr_underflow = 0;
	SRpnt->sr_request->rq_disk = NULL; /* "sg" _disowns_ request blk */

	srp->my_cmdp = NULL;

	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
		sdp->disk->disk_name, srp->header.pack_id, (int) SRpnt->sr_result));
	srp->header.resid = SCpnt->resid;
	/* N.B. unit of duration changes here from jiffies to millisecs */
	srp->header.duration =
	    jiffies_to_msecs(jiffies - srp->header.duration);
	if (0 != SRpnt->sr_result) {
		/* command failed: capture sense data and decomposed status */
		memcpy(srp->sense_b, SRpnt->sr_sense_buffer,
		       sizeof (srp->sense_b));
		srp->header.status = 0xff & SRpnt->sr_result;
		srp->header.masked_status = status_byte(SRpnt->sr_result);
		srp->header.msg_status = msg_byte(SRpnt->sr_result);
		srp->header.host_status = host_byte(SRpnt->sr_result);
		srp->header.driver_status = driver_byte(SRpnt->sr_result);
		if ((sdp->sgdebug > 0) &&
		    ((CHECK_CONDITION == srp->header.masked_status) ||
		     (COMMAND_TERMINATED == srp->header.masked_status)))
			print_req_sense("sg_cmd_done", SRpnt);

		/* Following if statement is a patch supplied by Eric Youngdale */
		if (driver_byte(SRpnt->sr_result) != 0
		    && (SRpnt->sr_sense_buffer[0] & 0x7f) == 0x70
		    && (SRpnt->sr_sense_buffer[2] & 0xf) == UNIT_ATTENTION
		    && sdp->device->removable) {
			/* Detected disc change. Set the bit - this may be used if */
			/* there are filesystems using this device. */
			sdp->device->changed = 1;
		}
	}
	/* Rely on write phase to clean out srp status values, so no "else" */

	scsi_release_request(SRpnt);
	SRpnt = NULL;
	if (sfp->closed) {	/* whoops this fd already released, cleanup */
		SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
		sg_finish_rem_req(srp);
		srp = NULL;
		if (NULL == sfp->headrp) {
			SCSI_LOG_TIMEOUT(1, printk("sg...bh: already closed, final cleanup\n"));
			if (0 == sg_remove_sfp(sdp, sfp)) {	/* device still present */
				scsi_device_put(sdp->device);
			}
			sfp = NULL;
		}
	} else if (srp && srp->orphan) {
		/* request outlived its issuer: keep or discard per fd policy */
		if (sfp->keep_orphan)
			srp->sg_io_owned = 0;
		else {
			sg_finish_rem_req(srp);
			srp = NULL;
		}
	}
	if (sfp && srp) {
		/* Now wake up any sg_read() that is waiting for this packet. */
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
		srp->done = 1;
		wake_up_interruptible(&sfp->read_wait);
	}
}

static struct file_operations sg_fops = {
	.owner = THIS_MODULE,
	.read = sg_read,
	.write = sg_write,
	.poll = sg_poll,
	.ioctl = sg_ioctl,
	.open = sg_open,
	.mmap = sg_mmap,
	.release = sg_release,
	.fasync = sg_fasync,
};

static struct class_simple * sg_sysfs_class;

static int sg_sysfs_valid = 0;

/*
 * Allocate an Sg_device for a newly attached SCSI device, growing the
 * global sg_dev_arr when it is full and picking the first free index as
 * the device's minor number.
 * NOTE(review): this listing page ends mid-function — sg_alloc()
 * continues on the next page.
 */
static int
sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
	Sg_device *sdp;
	unsigned long iflags;
	void *old_sg_dev_arr = NULL;
	int k, error;

	sdp = kmalloc(sizeof(Sg_device), GFP_KERNEL);
	if (!sdp) {
		printk(KERN_WARNING "kmalloc Sg_device failure\n");
		return -ENOMEM;
	}

	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	if (unlikely(sg_nr_dev >= sg_dev_max)) {	/* try to resize */
		Sg_device **tmp_da;
		int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;

		/* drop the lock for the allocation, then retake it */
		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
		tmp_da = kmalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
		if (unlikely(!tmp_da))
			goto expand_failed;

		write_lock_irqsave(&sg_dev_arr_lock, iflags);
		memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
		memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
		old_sg_dev_arr = sg_dev_arr;
		sg_dev_arr = tmp_da;
		sg_dev_max = tmp_dev_max;
	}

	/* first free slot becomes the new device's index/minor */
	for (k = 0; k < sg_dev_max; k++)
		if (!sg_dev_arr[k])
			break;
	if (unlikely(k >= SG_MAX_DEVS))
		goto overflow;

	memset(sdp, 0, sizeof(*sdp));
	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
	sprintf(disk->disk_name, "sg%d", k);
	disk->first_minor = k;
	sdp->disk = disk;
	sdp->device = scsidp;
	init_waitqueue_head(&sdp->o_excl_wait);
	sdp->sg_tablesize = scsidp->host ? scsidp->host->sg_tablesize : 0;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?