📄 sg.c
字号:
/* Tail of sg_ioctl()'s command switch (function head is above this chunk).
 * Conventions visible here: get_user/put_user move single ints to/from the
 * user pointer 'arg'; verify_area + __put_user/__copy_to_user are used for
 * multi-field copies; most cases return 0 on success or a negative errno. */
case SG_SET_TIMEOUT:
    result = get_user(val, (int *)arg);
    if (result) return result;
    if (val < 0) return -EIO;
    sfp->timeout = val;
    return 0;
case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
    return sfp->timeout; /* strange ..., for backward compatibility */
case SG_SET_FORCE_LOW_DMA:
    result = get_user(val, (int *)arg);
    if (result) return result;
    if (val) {
        sfp->low_dma = 1;
        /* NOTE(review): low_dma was assigned 1 on the previous line, so
         * (0 == sfp->low_dma) is always false and the reserve buffer is
         * never rebuilt here -- looks like dead code; confirm intent. */
        if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
            val = (int)sfp->reserve.bufflen;
            sg_remove_scat(&sfp->reserve);
            sg_build_reserve(sfp, val);
        }
    }
    else {
        if (sdp->detached) return -ENODEV;
        /* revert to whatever the host adapter requires */
        sfp->low_dma = sdp->device->host->unchecked_isa_dma;
    }
    return 0;
case SG_GET_LOW_DMA:
    return put_user((int)sfp->low_dma, (int *)arg);
case SG_GET_SCSI_ID:
    /* Fill a sg_scsi_id_t in user space, field by field. */
    result = verify_area(VERIFY_WRITE, (void *)arg, sizeof(sg_scsi_id_t));
    if (result) return result;
    else {
        sg_scsi_id_t * sg_idp = (sg_scsi_id_t *)arg;

        if (sdp->detached) return -ENODEV;
        __put_user((int)sdp->device->host->host_no, &sg_idp->host_no);
        __put_user((int)sdp->device->channel, &sg_idp->channel);
        __put_user((int)sdp->device->id, &sg_idp->scsi_id);
        __put_user((int)sdp->device->lun, &sg_idp->lun);
        __put_user((int)sdp->device->type, &sg_idp->scsi_type);
        __put_user((short)sdp->device->host->cmd_per_lun, &sg_idp->h_cmd_per_lun);
        __put_user((short)sdp->device->queue_depth, &sg_idp->d_queue_depth);
        __put_user(0, &sg_idp->unused[0]);
        __put_user(0, &sg_idp->unused[1]);
        return 0;
    }
case SG_SET_FORCE_PACK_ID:
    result = get_user(val, (int *)arg);
    if (result) return result;
    sfp->force_packid = val ? 1 : 0; /* stored as a flag, any non-zero -> 1 */
    return 0;
case SG_GET_PACK_ID:
    /* Report pack_id of the first completed, non-SG_IO request; -1 if none. */
    result = verify_area(VERIFY_WRITE, (void *) arg, sizeof(int));
    if (result) return result;
    read_lock_irqsave(&sfp->rq_list_lock, iflags);
    for (srp = sfp->headrp; srp; srp = srp->nextrp) {
        if ((1 == srp->done) && (! srp->sg_io_owned)) {
            read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
            __put_user(srp->header.pack_id, (int *)arg);
            return 0;
        }
    }
    read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
    __put_user(-1, (int *)arg);
    return 0;
case SG_GET_NUM_WAITING:
    /* Count completed requests not owned by a blocking SG_IO ioctl. */
    read_lock_irqsave(&sfp->rq_list_lock, iflags);
    for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
        if ((1 == srp->done) && (! srp->sg_io_owned))
            ++val;
    }
    read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
    return put_user(val, (int *)arg);
case SG_GET_SG_TABLESIZE:
    return put_user(sdp->sg_tablesize, (int *)arg);
case SG_SET_RESERVED_SIZE:
    result = get_user(val, (int *)arg);
    if (result) return result;
    if (val != sfp->reserve.bufflen) {
        /* cannot resize while the reserve buffer is in use or mmap'ed */
        if (sg_res_in_use(sfp) || sfp->mmap_called)
            return -EBUSY;
        sg_remove_scat(&sfp->reserve);
        sg_build_reserve(sfp, val);
    }
    return 0;
case SG_GET_RESERVED_SIZE:
    val = (int)sfp->reserve.bufflen;
    return put_user(val, (int *)arg);
case SG_SET_COMMAND_Q:
    result = get_user(val, (int *)arg);
    if (result) return result;
    sfp->cmd_q = val ? 1 : 0;
    return 0;
case SG_GET_COMMAND_Q:
    return put_user((int)sfp->cmd_q, (int *)arg);
case SG_SET_KEEP_ORPHAN:
    result = get_user(val, (int *)arg);
    if (result) return result;
    sfp->keep_orphan = val;
    return 0;
case SG_GET_KEEP_ORPHAN:
    return put_user((int)sfp->keep_orphan, (int *)arg);
case SG_NEXT_CMD_LEN:
    result = get_user(val, (int *)arg);
    if (result) return result;
    sfp->next_cmd_len = (val > 0) ? val : 0; /* negative lengths clamped to 0 */
    return 0;
case SG_GET_VERSION_NUM:
    return put_user(sg_version_num, (int *)arg);
case SG_GET_ACCESS_COUNT:
    val = (sdp->device ? sdp->device->access_count : 0);
    return put_user(val, (int *)arg);
case SG_GET_REQUEST_TABLE:
    /* Snapshot the per-fd request list into a fixed-size table for user
     * space. Unused slots are zeroed by the memset below. */
    result = verify_area(VERIFY_WRITE, (void *) arg, SZ_SG_REQ_INFO * SG_MAX_QUEUE);
    if (result) return result;
    else {
        sg_req_info_t rinfo[SG_MAX_QUEUE];
        Sg_request * srp; /* deliberately shadows the outer srp */

        read_lock_irqsave(&sfp->rq_list_lock, iflags);
        for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
             ++val, srp = srp ? srp->nextrp : srp) {
            memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
            if (srp) {
                rinfo[val].req_state = srp->done + 1;
                rinfo[val].problem = srp->header.masked_status &
                                     srp->header.host_status &
                                     srp->header.driver_status;
                /* done: report final duration; pending: elapsed time so far */
                rinfo[val].duration = srp->done ? srp->header.duration :
                                      sg_jif_to_ms(jiffies - srp->header.duration);
                rinfo[val].orphan = srp->orphan;
                rinfo[val].sg_io_owned = srp->sg_io_owned;
                rinfo[val].pack_id = srp->header.pack_id;
                rinfo[val].usr_ptr = srp->header.usr_ptr;
            }
        }
        read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
        __copy_to_user((void *)arg, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE);
        return 0;
    }
case SG_EMULATED_HOST:
    if (sdp->detached) return -ENODEV;
    return put_user(sdp->device->host->hostt->emulated, (int *)arg);
case SG_SCSI_RESET:
    if (sdp->detached) return -ENODEV;
    if (filp->f_flags & O_NONBLOCK) {
        if (sdp->device->host->in_recovery)
            return -EBUSY;
    }
    else if (! scsi_block_when_processing_errors(sdp->device))
        return -EBUSY;
    result = get_user(val, (int *)arg);
    if (result) return result;
    if (SG_SCSI_RESET_NOTHING == val)
        return 0;
#ifdef SCSI_TRY_RESET_DEVICE
    /* map sg reset request codes onto mid-level reset codes */
    switch (val) {
    case SG_SCSI_RESET_DEVICE:
        val = SCSI_TRY_RESET_DEVICE;
        break;
    case SG_SCSI_RESET_BUS:
        val = SCSI_TRY_RESET_BUS;
        break;
    case SG_SCSI_RESET_HOST:
        val = SCSI_TRY_RESET_HOST;
        break;
    default:
        return -EINVAL;
    }
    if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
        return -EACCES;
    return (scsi_reset_provider(sdp->device, val) == SUCCESS) ? 0 : -EIO;
#else
    SCSI_LOG_TIMEOUT(1, printk("sg_ioctl: SG_RESET_SCSI not supported\n"));
    result = -EINVAL;
    /* NOTE(review): no return/break here, so when SCSI_TRY_RESET_DEVICE is
     * not defined this falls through into SCSI_IOCTL_SEND_COMMAND and the
     * -EINVAL in 'result' is discarded -- confirm this is intentional. */
#endif
case SCSI_IOCTL_SEND_COMMAND:
    if (sdp->detached) return -ENODEV;
    if (read_only) {
        unsigned char opcode = WRITE_6;
        Scsi_Ioctl_Command * siocp = (void *)arg;

        /* NOTE(review): copy_from_user() return value ignored; on fault
         * 'opcode' keeps its WRITE_6 default and the access check below
         * runs against that stale value. */
        copy_from_user(&opcode, siocp->data, 1);
        if (! sg_allow_access(opcode, sdp->device->type))
            return -EACCES;
    }
    return scsi_ioctl_send_command(sdp->device, (void *)arg);
case SG_SET_DEBUG:
    result = get_user(val, (int *)arg);
    if (result) return result;
    sdp->sgdebug = (char)val;
    return 0;
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
case SCSI_IOCTL_PROBE_HOST:
case SG_GET_TRANSFORM:
    /* harmless informational ioctls: delegate to the SCSI mid-level */
    if (sdp->detached) return -ENODEV;
    return scsi_ioctl(sdp->device, cmd_in, (void *)arg);
default:
    if (read_only)
        return -EACCES; /* don't know so take safe approach */
    return scsi_ioctl(sdp->device, cmd_in, (void *)arg);
    }
}

/* Poll entry point for an sg file descriptor: readable when a request has
 * completed (and is not owned by a blocking SG_IO), writable while the
 * command queue can accept another request. */
static unsigned int sg_poll(struct file * filp, poll_table * wait)
{
    unsigned int res = 0;
    Sg_device * sdp;
    Sg_fd * sfp;
    Sg_request * srp;
    int count = 0;
    unsigned long iflags;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp))
        || sfp->closed)
        return POLLERR;
    poll_wait(filp, &sfp->read_wait, wait);
    read_lock_irqsave(&sfp->rq_list_lock, iflags);
    for (srp = sfp->headrp; srp; srp = srp->nextrp) {
        /* if any read waiting, flag it */
        if ((0 == res) && (1 == srp->done) && (! srp->sg_io_owned))
            res = POLLIN | POLLRDNORM;
        ++count; /* total outstanding requests, used for write readiness */
    }
    read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
    if (sdp->detached)
        res |= POLLHUP;
    else if (! sfp->cmd_q) {
        /* no command queuing: writable only when nothing is outstanding */
        if (0 == count)
            res |= POLLOUT | POLLWRNORM;
    }
    else if (count < SG_MAX_QUEUE)
        res |= POLLOUT | POLLWRNORM;
    SCSI_LOG_TIMEOUT(3, printk("sg_poll: dev=%d, res=0x%x\n",
                               MINOR(sdp->i_rdev), (int)res));
    return res;
}

/* Asynchronous notification (SIGIO) registration for an sg fd; wraps
 * fasync_helper and normalizes its non-negative return to 0. */
static int sg_fasync(int fd, struct file * filp, int mode)
{
    int retval;
    Sg_device * sdp;
    Sg_fd * sfp;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_fasync: dev=%d, mode=%d\n",
                               MINOR(sdp->i_rdev), mode));
    retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
    return (retval < 0) ?
retval : 0;
}

/* Walk every non-base page of the reserve buffer and raise (startFinish
 * non-zero) or drop (startFinish zero) its page reference count. Called
 * around mmap so the VM does not reclaim these pages while mapped. */
static void sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
{
    void * page_ptr;
    struct page * page;
    int k, m;

    SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, "
                               "scatg=%d\n", startFinish, rsv_schp->k_use_sg));
    /* N.B. correction _not_ applied to base page of each allocation */
    if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
        struct scatterlist * sclp = rsv_schp->buffer;

        for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
            for (m = PAGE_SIZE; m < sclp->length; m += PAGE_SIZE) {
                page_ptr = (unsigned char *)sclp->address + m;
                page = virt_to_page(page_ptr);
                if (startFinish)
                    get_page(page); /* increment page count */
                else { /* decrement page count */
                    if (page_count(page) > 0)
                        put_page_testzero(page);
                }
            }
        }
    }
    else { /* reserve buffer is just a single allocation */
        for (m = PAGE_SIZE; m < rsv_schp->bufflen; m += PAGE_SIZE) {
            page_ptr = (unsigned char *)rsv_schp->buffer + m;
            page = virt_to_page(page_ptr);
            if (startFinish)
                get_page(page); /* increment page count */
            else { /* decrement page count */
                if (page_count(page) > 0)
                    put_page_testzero(page);
            }
        }
    }
}

/* nopage handler for mappings of the reserve buffer: translate the faulting
 * address into the backing page (scatter-gather aware) and return it with
 * an extra reference, or NOPAGE_SIGBUS when the offset is out of range. */
static struct page * sg_vma_nopage(struct vm_area_struct *vma,
                                   unsigned long addr, int unused)
{
    Sg_fd * sfp;
    struct page * page = NOPAGE_SIGBUS;
    void * page_ptr = NULL;
    unsigned long offset;
    Sg_scatter_hold * rsv_schp;

    if ((NULL == vma) || (! (sfp = (Sg_fd *)vma->vm_private_data)))
        return page;
    rsv_schp = &sfp->reserve;
    offset = addr - vma->vm_start; /* byte offset of fault within mapping */
    if (offset >= rsv_schp->bufflen)
        return page;
    SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
                               offset, rsv_schp->k_use_sg));
    if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
        int k;
        unsigned long sa = vma->vm_start;
        unsigned long len;
        struct scatterlist * sclp = rsv_schp->buffer;

        /* walk the list, reducing 'offset' until it lands in an element */
        for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
             ++k, ++sclp) {
            len = vma->vm_end - sa;
            len = (len < sclp->length) ? len : sclp->length;
            if (offset < len) {
                page_ptr = (unsigned char *)sclp->address + offset;
                page = virt_to_page(page_ptr);
                get_page(page); /* increment page count */
                break;
            }
            sa += len;
            offset -= len;
        }
    }
    else { /* reserve buffer is just a single allocation */
        page_ptr = (unsigned char *)rsv_schp->buffer + offset;
        page = virt_to_page(page_ptr);
        get_page(page); /* increment page count */
    }
    return page;
}

static struct vm_operations_struct sg_mmap_vm_ops = {
    nopage : sg_vma_nopage, /* GNU designated-initializer syntax (2.4 era) */
};

/* mmap entry point: allow mapping of the per-fd reserve buffer only.
 * Rejects offsets, sizes beyond the reserve buffer, and non page aligned
 * backing memory; pins the buffer pages once per fd lifetime. */
static int sg_mmap(struct file * filp, struct vm_area_struct *vma)
{
    Sg_fd * sfp;
    unsigned long req_sz = vma->vm_end - vma->vm_start;
    Sg_scatter_hold * rsv_schp;

    if ((! filp) || (! vma) || (! (sfp = (Sg_fd *)filp->private_data)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
                               (void *)vma->vm_start, (int)req_sz));
    if (vma->vm_pgoff)
        return -EINVAL; /* want no offset */
    rsv_schp = &sfp->reserve;
    if (req_sz > rsv_schp->bufflen)
        return -ENOMEM; /* cannot map more than reserved buffer */
    if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
        int k;
        unsigned long sa = vma->vm_start;
        unsigned long len;
        struct scatterlist * sclp = rsv_schp->buffer;

        /* verify each scatter-gather element is page aligned */
        for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
             ++k, ++sclp) {
            if ((unsigned long)sclp->address & (PAGE_SIZE - 1))
                return -EFAULT; /* non page aligned memory ?? */
            len = vma->vm_end - sa;
            len = (len < sclp->length) ? len : sclp->length;
            sa += len;
        }
    }
    else { /* reserve buffer is just a single allocation */
        if ((unsigned long)rsv_schp->buffer & (PAGE_SIZE - 1))
            return -EFAULT; /* non page aligned memory ?? */
    }
    if (0 == sfp->mmap_called) {
        sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */
        sfp->mmap_called = 1;
    }
    vma->vm_flags |= (VM_RESERVED | VM_IO);
    vma->vm_private_data = sfp;
    vma->vm_ops = &sg_mmap_vm_ops;
    return 0;
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -