scsi.c
                    *reqp = req->next;
                    wake_up(&wait_for_request);
                }
            } else {
                SCpnt->request.rq_status = RQ_SCSI_BUSY;
                SCpnt->request.sem = NULL;      /* And no one is waiting for this
                                                 * to complete */
            }
            atomic_inc(&SCpnt->host->host_active);
            SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
                                       SCpnt->target,
                                       atomic_read(&SCpnt->host->host_active)));
            break;
        }
    }

    SCpnt->use_sg = 0;          /* Reset the scatter-gather flag */
    SCpnt->old_use_sg = 0;
    SCpnt->transfersize = 0;    /* No default transfer size */
    SCpnt->cmd_len = 0;

    SCpnt->underflow = 0;       /* Do not flag underflow conditions */

    /* Since not everyone seems to set the device info correctly
     * before Scsi_Cmnd gets sent out to scsi_do_command, we do it here.
     * FIXME(eric) This doesn't make any sense.
     */
    SCpnt->channel = device->channel;
    SCpnt->lun = device->lun;
    SCpnt->target = device->id;
    SCpnt->state = SCSI_STATE_INITIALIZING;
    SCpnt->owner = SCSI_OWNER_HIGHLEVEL;

    return SCpnt;
}

/*
 * Function:    scsi_release_command
 *
 * Purpose:     Release a command block.
 *
 * Arguments:   SCpnt - command block we are releasing.
 *
 * Notes:       The command block can no longer be used by the caller once
 *              this function is called.  This is in effect the inverse
 *              of scsi_allocate_device/scsi_request_queueable.
 */
void
scsi_release_command(Scsi_Cmnd * SCpnt)
{
    SCpnt->request.rq_status = RQ_INACTIVE;
    SCpnt->state = SCSI_STATE_UNUSED;
    SCpnt->owner = SCSI_OWNER_NOBODY;
    atomic_dec(&SCpnt->host->host_active);
    SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
                               SCpnt->target,
                               atomic_read(&SCpnt->host->host_active),
                               SCpnt->host->host_failed));
    if( SCpnt->host->host_failed != 0 )
    {
        SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
                                          SCpnt->host->in_recovery,
                                          SCpnt->host->eh_active));
    }

    /*
     * If the host is having troubles, then look to see if this was the last
     * command that might have failed.  If so, wake up the error handler.
     */
    if( SCpnt->host->in_recovery
        && !SCpnt->host->eh_active
        && SCpnt->host->host_busy == SCpnt->host->host_failed )
    {
        SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
                                          atomic_read(&SCpnt->host->eh_wait->count)));
        up(SCpnt->host->eh_wait);
    }
}

/*
 * This is inline because we have stack problems if we recurse too deeply.
 */
inline int internal_cmnd (Scsi_Cmnd * SCpnt)
{
#ifdef DEBUG_DELAY
    unsigned long clock;
#endif
    struct Scsi_Host * host;
    int rtn = 0;
    unsigned long timeout;

#if DEBUG
    unsigned long *ret = 0;
#ifdef __mips__
    __asm__ __volatile__ ("move\t%0,$31":"=r"(ret));
#else
    ret = __builtin_return_address(0);
#endif
#endif

    host = SCpnt->host;

    /* Assign a unique nonzero serial_number. */
    if (++serial_number == 0) serial_number = 1;
    SCpnt->serial_number = serial_number;

    /*
     * We will wait MIN_RESET_DELAY clock ticks after the last reset so
     * we can avoid the drive not being ready.
     */
    timeout = host->last_reset + MIN_RESET_DELAY;

    if (host->resetting && time_before(jiffies, timeout)) {
        int ticks_remaining = timeout - jiffies;
        /*
         * NOTE: This may be executed from within an interrupt
         * handler!  This is bad, but for now, it'll do.  The irq
         * level of the interrupt handler has been masked out by the
         * platform dependent interrupt handling code already, so the
         * sti() here will not cause another call to the SCSI host's
         * interrupt handler (assuming there is one irq-level per
         * host).
         */
        spin_unlock_irq(&io_request_lock);
        while (--ticks_remaining >= 0) mdelay(1+999/HZ);
        host->resetting = 0;
        spin_lock_irq(&io_request_lock);
    }

    if( host->hostt->use_new_eh_code )
    {
        scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
    }
    else
    {
        scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_old_times_out);
    }

    /*
     * We will use a queued command if possible, otherwise we will emulate the
     * queuing and calling of completion function ourselves.
     */
    SCSI_LOG_MLQUEUE(3,printk("internal_cmnd (host = %d, channel = %d, target = %d, "
                              "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
                              SCpnt->host->host_no, SCpnt->channel, SCpnt->target,
                              SCpnt->cmnd, SCpnt->buffer, SCpnt->bufflen, SCpnt->done));

    SCpnt->state = SCSI_STATE_QUEUED;
    SCpnt->owner = SCSI_OWNER_LOWLEVEL;
    if (host->can_queue)
    {
        SCSI_LOG_MLQUEUE(3,printk("queuecommand : routine at %p\n",
                                  host->hostt->queuecommand));
        /*
         * Use the old error handling code if we haven't converted the driver
         * to use the new one yet.  Note - only the new queuecommand variant
         * passes a meaningful return value.
         */
        if( host->hostt->use_new_eh_code )
        {
            rtn = host->hostt->queuecommand (SCpnt, scsi_done);
            if( rtn != 0 )
            {
                scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
            }
        }
        else
        {
            host->hostt->queuecommand (SCpnt, scsi_old_done);
        }
    }
    else
    {
        int temp;

        SCSI_LOG_MLQUEUE(3,printk("command() : routine at %p\n",
                                  host->hostt->command));
        temp = host->hostt->command (SCpnt);
        SCpnt->result = temp;
#ifdef DEBUG_DELAY
        clock = jiffies + 4 * HZ;
        spin_unlock_irq(&io_request_lock);
        while (time_before(jiffies, clock)) barrier();
        spin_lock_irq(&io_request_lock);
        printk("done(host = %d, result = %04x) : routine at %p\n",
               host->host_no, temp, host->hostt->command);
#endif
        if( host->hostt->use_new_eh_code )
        {
            scsi_done(SCpnt);
        }
        else
        {
            scsi_old_done(SCpnt);
        }
    }

    SCSI_LOG_MLQUEUE(3,printk("leaving internal_cmnd()\n"));

    return rtn;
}

/*
 * scsi_do_cmd sends all the commands out to the low-level driver.  It
 * handles the specifics required for each low level driver - i.e. queued
 * or non-queued.  It also prevents conflicts when different high level
 * drivers go for the same host at the same time.
 */
void scsi_do_cmd (Scsi_Cmnd * SCpnt, const void *cmnd,
                  void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *),
                  int timeout, int retries)
{
    struct Scsi_Host * host = SCpnt->host;
    Scsi_Device * device = SCpnt->device;
    int mlqueue = 0;

    SCpnt->owner = SCSI_OWNER_MIDLEVEL;

    SCSI_LOG_MLQUEUE(4,
        {
            int i;
            int target = SCpnt->target;
            printk ("scsi_do_cmd (host = %d, channel = %d target = %d, "
                    "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
                    "retries = %d)\n"
                    "command : ", host->host_no, SCpnt->channel, target, buffer,
                    bufflen, done, timeout, retries);
            for (i = 0; i < 10; ++i)
                printk ("%02x ", ((unsigned char *) cmnd)[i]);
            printk("\n");
        });

    if (!host)
    {
        panic ("Invalid or not present host.\n");
    }

    /*
     * We must prevent reentrancy to the lowlevel host driver.  This prevents
     * it - we enter a loop until the host we want to talk to is not busy.
     * Race conditions are prevented, as interrupts are disabled in between the
     * time we check for the host being not busy, and the time we mark it busy
     * ourselves.
     */
    SCpnt->pid = scsi_pid++;

    while (SCSI_BLOCK((Scsi_Device *) NULL, host)) {
        if (in_interrupt()) {
            mlqueue = 1;
            break;
        }
        spin_unlock(&io_request_lock);          /* FIXME!!! */
        SCSI_SLEEP(&host->host_wait, SCSI_BLOCK((Scsi_Device *) NULL, host));
        spin_lock_irq(&io_request_lock);        /* FIXME!!! */
    }

    if (host->block) host_active = host;

    host->host_busy++;
    device->device_busy++;

    /*
     * Our own function scsi_done (which marks the host as not busy, disables
     * the timeout counter, etc) will be called by us or by the
     * scsi_hosts[host].queuecommand() function; scsi_done in turn needs to
     * call the completion function for the high level driver.
     */
    memcpy ((void *) SCpnt->data_cmnd, (const void *) cmnd, 12);
    SCpnt->reset_chain = NULL;
    SCpnt->serial_number = 0;
    SCpnt->serial_number_at_timeout = 0;
    SCpnt->bufflen = bufflen;
    SCpnt->buffer = buffer;
    SCpnt->flags = 0;
    SCpnt->retries = 0;
    SCpnt->allowed = retries;
    SCpnt->done = done;
    SCpnt->timeout_per_command = timeout;

    memcpy ((void *) SCpnt->cmnd, (const void *) cmnd, 12);
    /* Zero the sense buffer.  Some host adapters automatically request
     * sense on error.  0 is not a valid sense code.
     */
    memset ((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
    SCpnt->request_buffer = buffer;
    SCpnt->request_bufflen = bufflen;
    SCpnt->old_use_sg = SCpnt->use_sg;
    if (SCpnt->cmd_len == 0)
        SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
    SCpnt->old_cmd_len = SCpnt->cmd_len;

    /* Start the timer ticking.  */
    SCpnt->internal_timeout = NORMAL_TIMEOUT;
    SCpnt->abort_reason = 0;
    SCpnt->result = 0;

    if (mlqueue == 1 && host->hostt->use_new_eh_code) {
        /* Assign a unique nonzero serial_number. */
        if (++serial_number == 0) serial_number = 1;
        SCpnt->serial_number = serial_number;
        scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
    } else
        internal_cmnd (SCpnt);

    SCSI_LOG_MLQUEUE(3,printk ("Leaving scsi_do_cmd()\n"));
}

/* This function is the mid-level interrupt routine, which decides how
 * to handle error conditions.  Each invocation of this function must
 * do one and *only* one of the following:
 *
 *      1) Insert command in BH queue.
 *      2) Activate error handler for host.
 *
 * FIXME(eric) - I am concerned about stack overflow (still).  An interrupt could
 * come while we are processing the bottom queue, which would cause another command
 * to be stuffed onto the bottom queue, and it would in turn be processed as that
 * interrupt handler is returning.  Given a sufficiently steady rate of returning
 * commands, this could cause the stack to overflow.  I am not sure what is the most
 * appropriate solution here - we should probably keep a depth count, and not process
 * any commands while we still have a bottom handler active higher in the stack.
 *
 * There is currently code in the bottom half handler to monitor recursion in the
 * bottom handler and report if it ever happens.  If this becomes a problem, it won't
 * be hard to engineer something to deal with it so that only the outer layer ever
 * does any real processing.
 */
void
scsi_done (Scsi_Cmnd * SCpnt)
{
    /*
     * We don't have to worry about this one timing out any more.
     */
    scsi_delete_timer(SCpnt);

    /* Set the serial numbers back to zero */
    SCpnt->serial_number = 0;

    /*
     * First, see whether this command already timed out.  If so, we ignore
     * the response.  We treat it as if the command never finished.
     *
     * Since serial_number is now 0, the error handler could detect this
     * situation and avoid calling the low level driver abort routine.
     * (DB)
     */
    if( SCpnt->state == SCSI_STATE_TIMEOUT )
    {
        SCSI_LOG_MLCOMPLETE(1,printk("Ignoring completion of %p due to timeout status", SCpnt));
        return;
    }

    SCpnt->serial_number_at_timeout = 0;
    SCpnt->state = SCSI_STATE_BHQUEUE;
    SCpnt->owner = SCSI_OWNER_BH_HANDLER;
    SCpnt->bh_next = NULL;

    /*
     * Next, put this command in the BH queue.
     *
     * We need a spinlock here, or compare and exchange if we can reorder incoming
     * Scsi_Cmnds, as it happens pretty often that scsi_done is called multiple times
     * before the bh is serviced. -jj
     *
     * We already have the io_request_lock here, since we are called from the
     * interrupt handler or the error handler. (DB)
     */
    if (!scsi_bh_queue_head) {
        scsi_bh_queue_head = SCpnt;
        scsi_bh_queue_tail = SCpnt;
    } else {
        scsi_bh_queue_tail->bh_next = SCpnt;
        scsi_bh_queue_tail = SCpnt;
    }

    /*
     * Mark the bottom half handler to be run.
     */
    mark_bh(SCSI_BH);
}

/*
 * Procedure:   scsi_bottom_half_handler
 *
 * Purpose:     Called after we have finished processing interrupts, it
 *              performs post-interrupt handling for commands that may
 *              have completed.
 *
 * Notes:       This is called with all interrupts enabled.  This should reduce
 *              interrupt latency, stack depth, and reentrancy of the low-level
 *              drivers.
 *
 * The io_request_lock is required throughout this routine.  There was a subtle
 * race condition when scsi_done is called after a command has already
 * timed out but before the time out is processed by the error handler.
 * (DB)
 */
void scsi_bottom_half_handler(void)
{
    Scsi_Cmnd * SCpnt;
    Scsi_Cmnd * SCnext;
    unsigned long flags;

    spin_lock_irqsave(&io_request_lock, flags);

    while(1==1) {
        SCpnt = scsi_bh_queue_head;
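
Taken together, scsi_done() and the scsi_bottom_half_handler() excerpt above (which breaks off just as the handler starts reading scsi_bh_queue_head) form a producer/consumer pair around a small intrusive FIFO: completions are appended at scsi_bh_queue_tail while io_request_lock is held in interrupt or error-handler context, and the bottom half later works through the queue with interrupts enabled. The stand-alone user-space sketch below only illustrates that queue discipline and is not kernel code: struct cmd, the pthread mutex standing in for io_request_lock, and the names complete_cmd/bottom_half are invented for the example.

/* Illustrative sketch only -- not part of scsi.c.  A pthread mutex stands in
 * for io_request_lock, and struct cmd stands in for Scsi_Cmnd. */
#include <pthread.h>
#include <stdio.h>

struct cmd {
    int id;                      /* stand-in for the real command state     */
    struct cmd *bh_next;         /* intrusive link, like Scsi_Cmnd.bh_next  */
};

static struct cmd *bh_queue_head;     /* like scsi_bh_queue_head */
static struct cmd *bh_queue_tail;     /* like scsi_bh_queue_tail */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer side: what scsi_done() does once a command has completed.
 * Append at the tail so completions are handled in arrival order. */
static void complete_cmd(struct cmd *c)
{
    pthread_mutex_lock(&queue_lock);
    c->bh_next = NULL;
    if (!bh_queue_head) {
        bh_queue_head = c;
        bh_queue_tail = c;
    } else {
        bh_queue_tail->bh_next = c;
        bh_queue_tail = c;
    }
    pthread_mutex_unlock(&queue_lock);
    /* the real code now calls mark_bh(SCSI_BH) to schedule the drain */
}

/* Consumer side: what a bottom-half style handler does.  Detach the whole
 * list under the lock, then walk it; new completions can queue up meanwhile. */
static void bottom_half(void)
{
    struct cmd *c, *next;

    pthread_mutex_lock(&queue_lock);
    c = bh_queue_head;
    bh_queue_head = bh_queue_tail = NULL;
    pthread_mutex_unlock(&queue_lock);

    for (; c; c = next) {
        next = c->bh_next;            /* save the link before the entry is reused */
        printf("finishing command %d\n", c->id);
    }
}

int main(void)
{
    struct cmd a = { .id = 1 }, b = { .id = 2 };

    complete_cmd(&a);
    complete_cmd(&b);
    bottom_half();                    /* finishes command 1, then command 2 */
    return 0;
}

The detach-then-walk shape keeps the locked section short: any completions that arrive while earlier ones are being finished simply start a new list for the next pass, which is the situation the "scsi_done is called multiple times before the bh is serviced" comment above is guarding against.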