scsi_lib.c
来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 1,742 行 · 第 1/4 页
C
1,742 行
/*
 * NOTE(review): the lines below are the tail of a function whose head
 * lies outside this chunk (the busy-count decrement / re-plug pattern
 * matches the end of scsi_request_fn in this kernel series) — confirm
 * against the full file.
 */
	sdev->device_busy--;
	if(sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

/*
 * scsi_calculate_bounce_limit - compute the highest address the block
 * layer may hand this host without bouncing buffers.
 * @shost: host to compute the limit for
 *
 * ISA-DMA-only hosts are capped at BLK_BOUNCE_ISA.  Where PCI DMA is
 * physical and the host device advertises a dma_mask, that mask is the
 * limit.  Otherwise (virtual-DMA / IOMMU platforms) any address is fine.
 */
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;

	host_dev = scsi_get_device(shost);
	if (PCI_DMA_BUS_IS_PHYS && host_dev && host_dev->dma_mask)
		return *host_dev->dma_mask;

	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	return BLK_BOUNCE_ANY;
}

/*
 * scsi_alloc_queue - allocate and configure a block request queue for
 * a SCSI device.
 * @sdev: device the queue will feed
 *
 * Queue limits (segment counts, max sectors, bounce limit, DMA segment
 * boundary) are copied from the device's host.  Returns the new queue,
 * or NULL if blk_init_queue() fails.
 */
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	/* hosts that cannot merge adjacent segments opt out of clustering */
	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

/* scsi_free_queue - release a queue obtained from scsi_alloc_queue() */
void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *              commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *              commands from being queued to the device.
* * Arguments: shost - Host in question * * Returns: Nothing * * Lock status: No locks are assumed held. * * Notes: There is no timer nor any other means by which the requests * get unblocked other than the low-level driver calling * scsi_unblock_requests(). * * This is done as an API function so that changes to the * internals of the scsi mid-layer won't require wholesale * changes to drivers that use this feature. */void scsi_unblock_requests(struct Scsi_Host *shost){ shost->host_self_blocked = 0; scsi_run_host_queues(shost);}int __init scsi_init_queue(void){ int i; for (i = 0; i < SG_MEMPOOL_NR; i++) { struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; int size = sgp->size * sizeof(struct scatterlist); sgp->slab = kmem_cache_create(sgp->name, size, 0, SLAB_HWCACHE_ALIGN, NULL, NULL); if (!sgp->slab) { printk(KERN_ERR "SCSI: can't init sg slab %s\n", sgp->name); } sgp->pool = mempool_create(SG_MEMPOOL_SIZE, mempool_alloc_slab, mempool_free_slab, sgp->slab); if (!sgp->pool) { printk(KERN_ERR "SCSI: can't init sg mempool %s\n", sgp->name); } } return 0;}void scsi_exit_queue(void){ int i; for (i = 0; i < SG_MEMPOOL_NR; i++) { struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; mempool_destroy(sgp->pool); kmem_cache_destroy(sgp->slab); }}/** * __scsi_mode_sense - issue a mode sense, falling back from 10 to * six bytes if necessary. * @sreq: SCSI request to fill in with the MODE_SENSE * @dbd: set if mode sense will allow block descriptors to be returned * @modepage: mode page being requested * @buffer: request buffer (may not be smaller than eight bytes) * @len: length of request buffer. * @timeout: command timeout * @retries: number of retries before failing * @data: returns a structure abstracting the mode header data * * Returns zero if unsuccessful, or the header offset (either 4 * or 8 depending on whether a six or ten byte command was * issued) if successful. 
**/
int
__scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data) {
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

 retry:
	/* re-read on every pass: the fallback below may clear this flag */
	use_10_for_ms = sreq->sr_device->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;
		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;
		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	sreq->sr_cmd_len = 0;
	/* clear stale sense bytes so the checks after the command are valid */
	sreq->sr_sense_buffer[0] = 0;
	sreq->sr_sense_buffer[2] = 0;
	sreq->sr_data_direction = DMA_FROM_DEVICE;

	memset(buffer, 0, len);

	scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */
	if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
	    (driver_byte(sreq->sr_result) & DRIVER_SENSE) &&
	    sreq->sr_sense_buffer[2] == ILLEGAL_REQUEST &&
	    (sreq->sr_sense_buffer[4] & 0x40) == 0x40 &&
	    sreq->sr_sense_buffer[5] == 0 &&
	    sreq->sr_sense_buffer[6] == 0 ) {
		/* the target rejected MODE SENSE(10): remember that and
		 * reissue the request as a six-byte MODE SENSE */
		sreq->sr_device->use_10_for_ms = 0;
		goto retry;
	}

	if(scsi_status_is_good(sreq->sr_result)) {
		/* decode the (10- or 6-byte) mode parameter header */
		data->header_length = header_length;
		if(use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
	}

	return sreq->sr_result;
}

/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to
 *		six bytes if necessary.
 *	@sdev:	scsi device to send command to.
*	@dbd:	set if mode sense will disable block descriptors in the return
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data)
{
	/* convenience wrapper: allocates and releases the scsi_request
	 * around __scsi_mode_sense() on the caller's behalf */
	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	int ret;

	if (!sreq)
		return -1;

	ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
				timeout, retries, data);

	scsi_release_request(sreq);

	return ret;
}

/*
 * scsi_test_unit_ready - send TEST UNIT READY to @sdev.
 * @sdev:	device to probe
 * @timeout:	command timeout
 * @retries:	number of retries before failing
 *
 * Returns the command's sr_result (0 when the unit is ready), or
 * -ENOMEM if no request could be allocated.
 */
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	struct scsi_request *sreq;
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	if (!sreq)
		return -ENOMEM;

	sreq->sr_data_direction = DMA_NONE;
	scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);

	/* a UNIT ATTENTION from a removable device means the medium may
	 * have changed: record that and report the unit as ready */
	if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) &&
	    (sreq->sr_sense_buffer[2] & 0x0f) == UNIT_ATTENTION &&
	    sdev->removable) {
		sdev->changed = 1;
		sreq->sr_result = 0;
	}
	result = sreq->sr_result;
	scsi_release_request(sreq);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if unsuccessful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	/* each target state enumerates the source states it accepts */
	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.
This is the manually initialised start * state */ goto illegal; case SDEV_RUNNING: switch (oldstate) { case SDEV_CREATED: case SDEV_OFFLINE: case SDEV_QUIESCE: break; default: goto illegal; } break; case SDEV_QUIESCE: switch (oldstate) { case SDEV_RUNNING: case SDEV_OFFLINE: break; default: goto illegal; } break; case SDEV_OFFLINE: switch (oldstate) { case SDEV_CREATED: case SDEV_RUNNING: case SDEV_QUIESCE: break; default: goto illegal; } break; case SDEV_CANCEL: switch (oldstate) { case SDEV_CREATED: case SDEV_RUNNING: case SDEV_OFFLINE: break; default: goto illegal; } break; case SDEV_DEL: switch (oldstate) { case SDEV_CANCEL: break; default: goto illegal; } break; } sdev->sdev_state = state; return 0; illegal: dev_printk(KERN_ERR, &sdev->sdev_gendev, "Illegal state transition %s->%s\n", scsi_device_state_name(oldstate), scsi_device_state_name(state)); WARN_ON(1); return -EINVAL;}EXPORT_SYMBOL(scsi_device_set_state);/** * scsi_device_quiesce - Block user issued commands. * @sdev: scsi device to quiesce. * * This works by trying to transition to the SDEV_QUIESCE state * (which must be a legal transition). When the device is in this * state, only special requests will be accepted, all others will * be deferred. Since special requests may also be requeued requests, * a successful return doesn't guarantee the device will be * totally quiescent. * * Must be called with user context, may sleep. * * Returns zero if unsuccessful or an error if not. **/intscsi_device_quiesce(struct scsi_device *sdev){ int err = scsi_device_set_state(sdev, SDEV_QUIESCE); if (err) return err; scsi_run_queue(sdev->request_queue); while (sdev->device_busy) { schedule_timeout(HZ/5); scsi_run_queue(sdev->request_queue); } return 0;}EXPORT_SYMBOL(scsi_device_quiesce);/** * scsi_device_resume - Restart user issued commands to a quiesced device. * @sdev: scsi device to resume. * * Moves the device from quiesced back to running and restarts the * queues. 
*
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	/* Kick the request queue only if the transition back to
	 * SDEV_RUNNING was legal; otherwise leave everything alone. */
	if (scsi_device_set_state(sdev, SDEV_RUNNING) == 0)
		scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?