scsi_lib.c
From "Linux kernel source code" · C code · 2,521 lines total · page 1 of 5
C
2,521 lines
spin_unlock_irqrestore(shost->host_lock, flags);}	/* NOTE(review): tail of a function that begins on an earlier page of this file */

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
		!((shost->can_queue > 0) &&
		  (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
					  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		/*
		 * QUEUE_FLAG_REENTER guards against running a queue we
		 * are already inside of: only take the flag if it is not
		 * already set, and clear it again after the run.
		 */
		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_REENTER,
				      &sdev->request_queue->queue_flags)) {
			blk_run_queue(sdev->request_queue);
			clear_bit(QUEUE_FLAG_REENTER,
				  &sdev->request_queue->queue_flags);
		} else
			blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

/*
 * Release @cmd and kick its device's queue.  The sdev_gendev reference
 * keeps the device (and therefore its request_queue) alive across the
 * scsi_put_command()/scsi_run_queue() window.
 */
void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

/* Run the request queue of every device attached to @shost. */
void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retrys */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req, uptodate);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

/*
 * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
 * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
 */
#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048

/*
 * Map a segment count onto an index into the scsi_sg_pools[] array;
 * the buckets visible here cover 8/16/32/64/128-entry tables, gated on
 * SCSI_MAX_SG_SEGMENTS.  Counts outside every bucket are a bug.
 */
static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	switch (nents) {
	case 1 ... 8:
		index = 0;
		break;
	case 9 ... 16:
		index = 1;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 16)
	case 17 ... 32:
		index = 2;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 32)
	case 33 ... 64:
		index = 3;
		break;
#if (SCSI_MAX_SG_SEGMENTS > 64)
	case 65 ...
128:
		index = 4;
		break;
#endif
#endif
#endif
	default:
		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
		BUG();
	}

	return index;
}

/*
 * Allocate a scatterlist table large enough for cmd->use_sg entries,
 * built from (possibly chained) pieces drawn from the scsi_sg_pools
 * mempools.  Returns the head of the list, or NULL on allocation
 * failure (in which case every piece allocated so far is freed).
 */
struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl, *prev, *ret;
	unsigned int index;
	int this, left;

	BUG_ON(!cmd->use_sg);

	left = cmd->use_sg;
	ret = prev = NULL;

	do {
		this = left;
		if (this > SCSI_MAX_SG_SEGMENTS) {
			/* last entry of a max-sized piece is reserved for the chain link */
			this = SCSI_MAX_SG_SEGMENTS - 1;
			index = SG_MEMPOOL_NR - 1;
		} else
			index = scsi_sgtable_index(this);

		left -= this;

		sgp = scsi_sg_pools + index;

		sgl = mempool_alloc(sgp->pool, gfp_mask);
		if (unlikely(!sgl))
			goto enomem;

		sg_init_table(sgl, sgp->size);

		/*
		 * first loop through, set initial index and return value
		 */
		if (!ret)
			ret = sgl;

		/*
		 * chain previous sglist, if any. we know the previous
		 * sglist must be the biggest one, or we would not have
		 * ended up doing another loop.
		 */
		if (prev)
			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);

		/*
		 * if we have nothing left, mark the last segment as
		 * end-of-list
		 */
		if (!left)
			sg_mark_end(&sgl[this - 1]);

		/*
		 * don't allow subsequent mempool allocs to sleep, it would
		 * violate the mempool principle.
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prev = sgl;
	} while (left);

	/*
	 * ->use_sg may get modified after dma mapping has potentially
	 * shrunk the number of segments, so keep a copy of it for free.
	 */
	cmd->__use_sg = cmd->use_sg;
	return ret;
enomem:
	if (ret) {
		/*
		 * Free entries chained off ret. Since we were trying to
		 * allocate another sglist, we know that all entries are of
		 * the max size.
 */
		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
		prev = ret;
		ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
		while ((sgl = sg_chain_ptr(ret)) != NULL) {
			ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
			mempool_free(sgl, sgp->pool);
		}
		mempool_free(prev, sgp->pool);
	}
	return NULL;
}
EXPORT_SYMBOL(scsi_alloc_sgtable);

/*
 * Free the (possibly chained) scatterlist table attached to @cmd,
 * walking the chain built by scsi_alloc_sgtable() and returning each
 * piece to the mempool it came from.  Uses cmd->__use_sg, the copy of
 * the entry count saved at allocation time, since ->use_sg may have
 * been modified since.
 */
void scsi_free_sgtable(struct scsi_cmnd *cmd)
{
	struct scatterlist *sgl = cmd->request_buffer;
	struct scsi_host_sg_pool *sgp;

	/*
	 * if this is the biggest size sglist, check if we have
	 * chained parts we need to free
	 */
	if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
		unsigned short this, left;
		struct scatterlist *next;
		unsigned int index;

		left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
		next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
		while (left && next) {
			sgl = next;
			this = left;
			if (this > SCSI_MAX_SG_SEGMENTS) {
				this = SCSI_MAX_SG_SEGMENTS - 1;
				index = SG_MEMPOOL_NR - 1;
			} else
				index = scsi_sgtable_index(this);

			left -= this;

			sgp = scsi_sg_pools + index;

			if (left)
				next = sg_chain_ptr(&sgl[sgp->size - 1]);

			mempool_free(sgl, sgp->pool);
		}

		/*
		 * Restore original, will be freed below
		 */
		sgl = cmd->request_buffer;
		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
	} else
		sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);

	mempool_free(sgl, sgp->pool);
}
EXPORT_SYMBOL(scsi_free_sgtable);

/*
 * Function:	scsi_release_buffers()
 *
 * Purpose:	Completion processing for block device I/O requests.
 *
 * Arguments:	cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:	Nothing
 *
 * Notes:	In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->use_sg)
		scsi_free_sgtable(cmd);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
 */
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 *
 * NOTE(review): the remainder of this function continues past the end of
 * the visible page.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count = cmd->request_bufflen;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	scsi_release_buffers(cmd);

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		}
		req->data_len = cmd->resid;
	}

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
*/ SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " "%d bytes done.\n", req->nr_sectors, good_bytes)); SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg)); if (clear_errors) req->errors = 0; /* A number of bytes were successfully read. If there * are leftovers and there is some kind of error * (result != 0), retry the rest. */ if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL) return; /* good_bytes = 0, or (inclusive) there were leftovers and * result = 0, so scsi_end_request couldn't retry. */ if (sense_valid && !sense_deferred) { switch (sshdr.sense_key) { case UNIT_ATTENTION: if (cmd->device->removable) { /* Detected disc change. Set a bit * and quietly refuse further access. */ cmd->device->changed = 1; scsi_end_request(cmd, 0, this_count, 1); return; } else { /* Must have been a power glitch, or a * bus reset. Could not have been a * media change, so we just retry the * request and see what happens. */ scsi_requeue_command(q, cmd);
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?