
📄 scsi.c

📁 Linux development combined with the S3C2410; it can be used to generate the zImage file required by the 2410
💻 C
📖 Page 1 of 5
				set_current_state(TASK_INTERRUPTIBLE);
			} else {
				set_current_state(TASK_UNINTERRUPTIBLE);
			}

			spin_unlock_irqrestore(&device_request_lock, flags);

			/*
			 * This should block until a device command block
			 * becomes available.
			 */
			schedule();

			spin_lock_irqsave(&device_request_lock, flags);

			remove_wait_queue(&device->scpnt_wait, &wait);
			/*
			 * FIXME - Isn't this redundant??  Someone
			 * else will have forced the state back to running.
			 */
			set_current_state(TASK_RUNNING);
			/*
			 * In the event that a signal has arrived that we need
			 * to consider, then simply return NULL.  Everyone
			 * that calls us should be prepared for this
			 * possibility, and pass the appropriate code back
			 * to the user.
			 */
			if( interruptable ) {
				if (signal_pending(current)) {
					spin_unlock_irqrestore(&device_request_lock, flags);
					return NULL;
				}
			}
		} else {
			spin_unlock_irqrestore(&device_request_lock, flags);
			return NULL;
		}
	}

	SCpnt->request.rq_status = RQ_SCSI_BUSY;
	SCpnt->request.waiting = NULL;	/* And no one is waiting for this
					 * to complete */
	atomic_inc(&SCpnt->host->host_active);
	atomic_inc(&SCpnt->device->device_active);

	SCpnt->buffer  = NULL;
	SCpnt->bufflen = 0;
	SCpnt->request_buffer = NULL;
	SCpnt->request_bufflen = 0;

	SCpnt->use_sg = 0;	/* Reset the scatter-gather flag */
	SCpnt->old_use_sg = 0;
	SCpnt->transfersize = 0;	/* No default transfer size */
	SCpnt->cmd_len = 0;

	SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
	SCpnt->sc_request = NULL;
	SCpnt->sc_magic = SCSI_CMND_MAGIC;

	SCpnt->result = 0;
	SCpnt->underflow = 0;	/* Do not flag underflow conditions */
	SCpnt->old_underflow = 0;
	SCpnt->resid = 0;
	SCpnt->state = SCSI_STATE_INITIALIZING;
	SCpnt->owner = SCSI_OWNER_HIGHLEVEL;

	spin_unlock_irqrestore(&device_request_lock, flags);

	SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
				   SCpnt->target,
				   atomic_read(&SCpnt->host->host_active)));
	return SCpnt;
}

inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
{
	unsigned long flags;
	Scsi_Device * SDpnt;

	spin_lock_irqsave(&device_request_lock, flags);

	SDpnt = SCpnt->device;

	SCpnt->request.rq_status = RQ_INACTIVE;
	SCpnt->state = SCSI_STATE_UNUSED;
	SCpnt->owner = SCSI_OWNER_NOBODY;
	atomic_dec(&SCpnt->host->host_active);
	atomic_dec(&SDpnt->device_active);

	SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
				   SCpnt->target,
				   atomic_read(&SCpnt->host->host_active),
				   SCpnt->host->host_failed));
	if (SCpnt->host->host_failed != 0) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
						  SCpnt->host->in_recovery,
						  SCpnt->host->eh_active));
	}

	/*
	 * If the host is having troubles, then look to see if this was the last
	 * command that might have failed.  If so, wake up the error handler.
	 */
	if (SCpnt->host->in_recovery
	    && !SCpnt->host->eh_active
	    && SCpnt->host->host_busy == SCpnt->host->host_failed) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
						  atomic_read(&SCpnt->host->eh_wait->count)));
		up(SCpnt->host->eh_wait);
	}

	spin_unlock_irqrestore(&device_request_lock, flags);

	/*
	 * Wake up anyone waiting for this device.  Do this after we
	 * have released the lock, as they will need it as soon as
	 * they wake up.
	 */
	wake_up(&SDpnt->scpnt_wait);
}

/*
 * Function:    scsi_release_command
 *
 * Purpose:     Release a command block.
 *
 * Arguments:   SCpnt - command block we are releasing.
 *
 * Notes:       The command block can no longer be used by the caller once
 *              this funciton is called.  This is in effect the inverse
 *              of scsi_allocate_device.  Note that we also must perform
 *              a couple of additional tasks.  We must first wake up any
 *              processes that might have blocked waiting for a command
 *              block, and secondly we must hit the queue handler function
 *              to make sure that the device is busy.  Note - there is an
 *              option to not do this - there were instances where we could
 *              recurse too deeply and blow the stack if this happened
 *              when we were indirectly called from the request function
 *              itself.
 *
 *              The idea is that a lot of the mid-level internals gunk
 *              gets hidden in this function.  Upper level drivers don't
 *              have any chickens to wave in the air to get things to
 *              work reliably.
 *
 *              This function is deprecated, and drivers should be
 *              rewritten to use Scsi_Request instead of Scsi_Cmnd.
 */
void scsi_release_command(Scsi_Cmnd * SCpnt)
{
	request_queue_t *q;
	Scsi_Device * SDpnt;

	SDpnt = SCpnt->device;

	__scsi_release_command(SCpnt);

	/*
	 * Finally, hit the queue request function to make sure that
	 * the device is actually busy if there are requests present.
	 * This won't block - if the device cannot take any more, life
	 * will go on.
	 */
	q = &SDpnt->request_queue;
	scsi_queue_next_request(q, NULL);
}

/*
 * Function:    scsi_dispatch_command
 *
 * Purpose:     Dispatch a command to the low-level driver.
 *
 * Arguments:   SCpnt - command block we are dispatching.
 *
 * Notes:
 */
int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
{
#ifdef DEBUG_DELAY
	unsigned long clock;
#endif
	struct Scsi_Host *host;
	int rtn = 0;
	unsigned long flags = 0;
	unsigned long timeout;

	ASSERT_LOCK(&io_request_lock, 0);

#if DEBUG
	unsigned long *ret = 0;
#ifdef __mips__
	__asm__ __volatile__("move\t%0,$31":"=r"(ret));
#else
	ret = __builtin_return_address(0);
#endif
#endif

	host = SCpnt->host;

	/* Assign a unique nonzero serial_number. */
	if (++serial_number == 0)
		serial_number = 1;
	SCpnt->serial_number = serial_number;
	SCpnt->pid = scsi_pid++;

	/*
	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
	 * we can avoid the drive not being ready.
	 */
	timeout = host->last_reset + MIN_RESET_DELAY;

	if (host->resetting && time_before(jiffies, timeout)) {
		int ticks_remaining = timeout - jiffies;
		/*
		 * NOTE: This may be executed from within an interrupt
		 * handler!  This is bad, but for now, it'll do.  The irq
		 * level of the interrupt handler has been masked out by the
		 * platform dependent interrupt handling code already, so the
		 * sti() here will not cause another call to the SCSI host's
		 * interrupt handler (assuming there is one irq-level per
		 * host).
		 */
		while (--ticks_remaining >= 0)
			mdelay(1 + 999 / HZ);
		host->resetting = 0;
	}

	if (host->hostt->use_new_eh_code) {
		scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
	} else {
		scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
			       scsi_old_times_out);
	}

	/*
	 * We will use a queued command if possible, otherwise we will emulate the
	 * queuing and calling of completion function ourselves.
	 */
	SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
				   "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
				   SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
				   SCpnt->buffer, SCpnt->bufflen, SCpnt->done));

	SCpnt->state = SCSI_STATE_QUEUED;
	SCpnt->owner = SCSI_OWNER_LOWLEVEL;
	if (host->can_queue) {
		SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
					   host->hostt->queuecommand));
		/*
		 * Use the old error handling code if we haven't converted the driver
		 * to use the new one yet.  Note - only the new queuecommand variant
		 * passes a meaningful return value.
		 */
		if (host->hostt->use_new_eh_code) {
			/*
			 * Before we queue this command, check if the command
			 * length exceeds what the host adapter can handle.
			 */
			if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
				spin_lock_irqsave(&io_request_lock, flags);
				rtn = host->hostt->queuecommand(SCpnt, scsi_done);
				spin_unlock_irqrestore(&io_request_lock, flags);
				if (rtn != 0) {
					scsi_delete_timer(SCpnt);
					scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
					SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));
				}
			} else {
				SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
				SCpnt->result = (DID_ABORT << 16);
				spin_lock_irqsave(&io_request_lock, flags);
				scsi_done(SCpnt);
				spin_unlock_irqrestore(&io_request_lock, flags);
				rtn = 1;
			}
		} else {
			/*
			 * Before we queue this command, check if the command
			 * length exceeds what the host adapter can handle.
			 */
			if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
				spin_lock_irqsave(&io_request_lock, flags);
				host->hostt->queuecommand(SCpnt, scsi_old_done);
				spin_unlock_irqrestore(&io_request_lock, flags);
			} else {
				SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
				SCpnt->result = (DID_ABORT << 16);
				spin_lock_irqsave(&io_request_lock, flags);
				scsi_old_done(SCpnt);
				spin_unlock_irqrestore(&io_request_lock, flags);
				rtn = 1;
			}
		}
	} else {
		int temp;

		SCSI_LOG_MLQUEUE(3, printk("command() :  routine at %p\n", host->hostt->command));
		spin_lock_irqsave(&io_request_lock, flags);
		temp = host->hostt->command(SCpnt);
		SCpnt->result = temp;
#ifdef DEBUG_DELAY
		spin_unlock_irqrestore(&io_request_lock, flags);
		clock = jiffies + 4 * HZ;
		while (time_before(jiffies, clock)) {
			barrier();
			cpu_relax();
		}
		printk("done(host = %d, result = %04x) : routine at %p\n",
		       host->host_no, temp, host->hostt->command);
		spin_lock_irqsave(&io_request_lock, flags);
#endif
		if (host->hostt->use_new_eh_code) {
			scsi_done(SCpnt);
		} else {
			scsi_old_done(SCpnt);
		}
		spin_unlock_irqrestore(&io_request_lock, flags);
	}
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
	return rtn;
}

devfs_handle_t scsi_devfs_handle;

/*
 * scsi_do_cmd sends all the commands out to the low-level driver.  It
 * handles the specifics required for each low level driver - ie queued
 * or non queued.  It also prevents conflicts when different high level
 * drivers go for the same host at the same time.
 */
void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd,
		    void *buffer, unsigned bufflen,
		    int timeout, int retries)
{
	DECLARE_COMPLETION(wait);
	request_queue_t *q = &SRpnt->sr_device->request_queue;

	SRpnt->sr_request.waiting = &wait;
	SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
	scsi_do_req (SRpnt, (void *) cmnd,
		     buffer, bufflen, scsi_wait_done, timeout, retries);
	generic_unplug_device(q);
	wait_for_completion(&wait);
	SRpnt->sr_request.waiting = NULL;
	if( SRpnt->sr_command != NULL )
	{
		scsi_release_command(SRpnt->sr_command);
		SRpnt->sr_command = NULL;
	}
}

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   SRpnt     - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: With the new queueing code, this is SMP-safe, and no locks
 *              need be held upon entry.   The old queueing code the lock was
 *              assumed to be held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       Prior to the new queue code, this function was not SMP-safe.
 *              Also, this function is now only used for queueing requests
 *              for things like ioctls and character device requests - this
 *              is because we essentially just inject a request into the
 *              queue for the device. Normal block device handling manipulates
 *              the queue directly.
 */
void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
		 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
		 int timeout, int retries)
{
	Scsi_Device * SDpnt = SRpnt->sr_device;
	struct Scsi_Host *host = SDpnt->host;

	ASSERT_LOCK(&io_request_lock, 0);

	SCSI_LOG_MLQUEUE(4,
			 {
			 int i;
			 int target = SDpnt->id;
			 int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
			 printk("scsi_do_req (host = %d, channel = %d target = %d, "
				"buffer =%p, bufflen = %d, done = %p, timeout = %d, "
				"retries = %d)\n"
				"command : ", host->host_no, SDpnt->channel, target, buffer,
				bufflen, done, timeout, retries);
			 for (i = 0; i < size; ++i)
				printk("%02x  ", ((unsigned char *) cmnd)[i]);
			 printk("\n");
			 });

	if (!host) {
		panic("Invalid or not present host.\n");
	}

	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	if( SRpnt->sr_command != NULL )
	{
		scsi_release_command(SRpnt->sr_command);
		SRpnt->sr_command = NULL;
	}

	/*
	 * We must prevent reentrancy to the lowlevel host driver.  This prevents
	 * it - we enter a loop until the host we want to talk to is not busy.
	 * Race conditions are prevented, as interrupts are disabled in between the
	 * time we check for the host being not busy, and the time we mark it busy
	 * ourselves.
	 */

	/*
	 * Our own function scsi_done (which marks the host as not busy, disables
	 * the timeout counter, etc) will be called by us or by the
	 * scsi_hosts[host].queuecommand() function needs to also call
	 * the completion function for the high level driver.
	 */
	memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
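The listing breaks off mid-call at the page boundary; it continues on page 2. For context, the scsi_do_req() comment above notes that this path is what upper-level code uses for ioctl- and character-device-style requests, via the blocking scsi_wait_req() wrapper shown earlier. The sketch below shows how a hypothetical caller might drive it. It is only an illustration: scsi_allocate_request(), scsi_release_request(), and the sr_data_direction / sr_result fields are assumed from the rest of the 2.4 midlayer and are not defined on this page, and the function name, timeout, and retry count are made up for the example.

/*
 * Hypothetical caller: issue a TEST UNIT READY through the Scsi_Request
 * path described above.  scsi_allocate_request()/scsi_release_request()
 * and the sr_* fields are assumed from elsewhere in the 2.4 midlayer;
 * they do not appear on this page.
 */
static int example_test_unit_ready(Scsi_Device *SDpnt)
{
	Scsi_Request *SRpnt;
	unsigned char cmd[6];
	int result;

	SRpnt = scsi_allocate_request(SDpnt);
	if (SRpnt == NULL)
		return -ENOMEM;

	memset(cmd, 0, sizeof(cmd));
	cmd[0] = TEST_UNIT_READY;

	/* TEST UNIT READY has no data phase. */
	SRpnt->sr_data_direction = SCSI_DATA_NONE;

	/* Blocks until scsi_wait_done() completes the request. */
	scsi_wait_req(SRpnt, (void *) cmd, NULL, 0,
		      30 * HZ /* illustrative timeout */,
		      3 /* retries */);

	result = SRpnt->sr_result;
	scsi_release_request(SRpnt);

	return result;
}

Because scsi_wait_req() sets sr_request.waiting and sleeps on a completion, a caller like this must run in process context; the request is simply injected into the device queue, as the scsi_do_req() notes describe.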
