
📄 iscsi_main.c

📁 This Linux source code is fairly comprehensive and essentially complete, written in C. Due to time constraints I have not tested it myself, but even as reference material it is very useful.
💻 C
📖 Page 1 of 5
/*
 * iSCSI driver for Linux
 * Copyright (C) 2001 Cisco Systems, Inc.
 * maintained by linux-iscsi@cisco.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * See the file COPYING included with this distribution for more details.
 *
 * $Id: iscsi.c,v 1.58 2002/02/20 20:15:58 smferris Exp $
 *
 */

/* there's got to be a better way to wait for child processes created by kernel_thread */
static int errno = 0;
#define __KERNEL_SYSCALLS__

#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/blk.h>
#include <linux/types.h>
#include <linux/stat.h>
#include <linux/config.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/timer.h>

#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) )
# include <asm/semaphore.h>
#else
# include <asm/spinlock.h>
#endif

#include <asm/uaccess.h>

#include <scsi/sg.h>
#include <sd.h>
#include <scsi.h>
#include <hosts.h>

#ifdef DEBUG
# define DEBUG_ERROR  1
# define DEBUG_TRACE  1
# define DEBUG_INIT   1
# define DEBUG_QUEUE  1
# define DEBUG_FLOW   1
# define DEBUG_ALLOC  1
# define DEBUG_EH     1
# define DEBUG_SMP    1
#else
# define DEBUG_ERROR  1
# define DEBUG_TRACE  0
# define DEBUG_INIT   0
# define DEBUG_QUEUE  0
# define DEBUG_FLOW   0
# define DEBUG_ALLOC  0
# define DEBUG_EH     0
# define DEBUG_SMP    0
#endif

#define TEST_ABORTS 0
#define ABORT_FREQUENCY 2000
#define ABORT_COUNT 4

/* requires TEST_ABORTS 1 */
#define TEST_DEVICE_RESETS 0
#define DEVICE_RESET_FREQUENCY 1
/* note: any count greater than 1 will cause scsi_unjam_host to eventually do a bus reset as well */
#define DEVICE_RESET_COUNT 3

/* requires TEST_DEVICE_RESETS 1 */
#define TEST_BUS_RESETS 0
#define BUS_RESET_FREQUENCY 1
#define BUS_RESET_COUNT 2

/* requires TEST_BUS_RESETS 1 */
#define TEST_HOST_RESETS 0

/* periodically fake unit attention sense data to test bugs in Linux */
#define FAKE_DEFERRED_ERRORS 0
#define FAKE_DEFERRED_ERROR_FREQUENCY 100

#include "iscsi-common.h"
#include "iscsi-protocol.h"
#include "iscsi-login.h"
#include "iscsi-ioctl.h"
#include "iscsi-trace.h"
#include "iscsi.h"
#include "version.h"

/*
 *  IMPORTANT NOTE: to prevent deadlock, when holding multiple locks,
 *  the following locking order must be followed at all times:
 *
 *  hba_list_lock           - access to collection of HBA instances
 *  session->task_lock      - access to a session's collections of tasks
 *  hba->free_task_lock     - for task alloc/free from the HBA's task pool
 *  io_request_lock         - mid-layer acquires before calling queuecommand, eh_*,
 *                            we must acquire before done() callback
 *  hba->session_lock       - access to an HBA's collection of sessions
 *  session->scsi_cmnd_lock - access to a session's list of Scsi_Cmnds
 *  iscsi_trace_lock        - for the (mostly unmaintained) tracing code
 *
 *
 *  The locking order is somewhat counter-intuitive.  The queue()
 *  function may get called by a bottom-half handler for the SCSI
 *  midlayer, which means it may be called after any interrupt occurs,
 *  while another kernel thread is suspended due to the interrupt.
 *  Since this may be one of our threads which is holding a spinlock,
 *  to prevent deadlocks the spinlocks used by the queue() function must
 *  be last in the locking order.  Also, the bottom-half handler must somehow
 *  be locally disabled when holding any lock that might be used by queue(),
 *  to prevent the lock holder being suspended by an interrupt, and then
 *  the queue() function called (which would deadlock).  While 2.4 kernels
 *  have a spin_lock_bh() function, we don't use it, because spin_unlock_bh()
 *  may immediately run bottom-halves, and the driver sometimes would have
 *  needed to call spin_unlock_bh() while interrupts were off and the
 *  io_request_lock was already held, which could cause deadlocks.  Instead,
 *  the driver always uses spin_lock_irqsave.
 *
 *  Also, since any interrupt may try to acquire the io_request_lock, we
 *  want the io_request_lock as late in the lock order as possible, since
 *  interrupts must be disabled when holding any lock that follows the
 *  io_request_lock in the locking order.  The locks needed in queue()
 *  follow the io_request_lock so that interrupts may call the queue()
 *  entry point.  The eh_*_handlers all release the io_request_lock, since
 *  they all may invoke the scheduler, and that can't be done with a spinlock
 *  held.  Likewise, since scheduling in an interrupt will panic the kernel,
 *  all of the eh_*_handlers may fail if called from interrupt context.
 *
 *  As of 1-2-2002, various threads may be in the following lock states
 *  (ignoring the trace_lock, since the tracing code is largely unmaintained):
 *
 *  queue: (interrupts off) io_request_lock
 *         (interrupts off) io_request_lock, hba->session_lock
 *         (interrupts off) io_request_lock, hba->session_lock, session->scsi_cmnd_lock
 *
 *  tx: none
 *      (an interrupt acquires) io_request_lock
 *      hba->free_task_lock,
 *      hba->free_task_lock, (an interrupt acquires) io_request_lock
 *      session->task_lock,
 *      session->task_lock, (an interrupt acquires) io_request_lock
 *      session->task_lock, (interrupts off) session->scsi_cmnd_lock
 *      (interrupts off) session->scsi_cmnd_lock
 *
 *  rx: none
 *      (an interrupt acquires) io_request_lock
 *      session->task_lock
 *      session->task_lock, (an interrupt acquires) io_request_lock
 *      session->task_lock, (interrupts off) io_request_lock
 *      hba->free_task_lock
 *      hba->free_task_lock, (an interrupt acquires) io_request_lock
 *      (interrupts off) session->scsi_cmnd_lock
 *      session->task_lock, (interrupts off) session->scsi_cmnd_lock
 *
 *  timer: none
 *         hba_list_lock
 *         hba_list_lock, (an interrupt acquires) io_request_lock
 *         hba_list_lock, (interrupts off) hba->session_lock
 *         hba_list_lock, (interrupts off) hba->session_lock, io_request_lock
 *
 *  ioctl: none
 *         (an interrupt acquires) io_request_lock
 *         hba_list_lock
 *         hba_list_lock, (an interrupt acquires) io_request_lock
 *         (interrupts off) hba->session_lock
 *         session->task_lock
 *         session->task_lock, (an interrupt acquires) io_request_lock
 *         session->task_lock, (interrupts off) session->scsi_cmnd_lock
 *         session->task_lock, (interrupts off) io_request_lock
 *         (interrupts off) session->scsi_cmnd_lock
 *
 *  eh_*_handler: (interrupts off) io_request_lock
 *                none
 *                (an interrupt acquires) io_request_lock
 *                (interrupts off) session->scsi_cmnd_lock
 *                session->task_lock
 *                session->task_lock, (an interrupt acquires) io_request_lock
 *
 *  This driver assumes the eh_*_handler functions can safely release
 *  the io_request_lock and locally enable interrupts, which is true
 *  on 2.4 kernels, but unclear on 2.2 kernels.
 *
 *  The eh_*_handler functions may fail if called from interrupt context,
 *  since they typically need to block and wait for a response from the
 *  target, and scheduling in interrupt context would panic the kernel.
 *
 *  The driver assumes that calling the following kernel primitives may invoke the
 *  scheduler and preempt the caller, and thus no spinlocks can be held when they
 *  are called, nor can interrupts or bottom-half handlers be disabled:
 *
 *  sock_sendmsg
 *  sock_recvmsg
 *  kmalloc
 *  schedule_timeout  (duh)
 *  kernel_thread
 *  waitpid
 *
 *  The following kernel primitives probably don't schedule, but the driver
 *  could handle it even if they did:
 *
 *  signal_pending
 *  get_ds
 *  get_fs
 *  set_fs
 *  fget
 *  fput
 *
 *  The driver assumes that calling the following kernel primitives WILL NOT invoke the
 *  scheduler, and thus cannot cause a preemption.  If this assumption is violated,
 *  the driver will break badly:
 *
 *  wake_up
 *  kill_proc
 *  printk
 *  kfree
 *
 *  The following driver functions may invoke the scheduler, and must not be
 *  called while holding any spinlock:
 *
 *  iscsi_sendmsg
 *  iscsi_recvmsg
 *  alloc_task
 *  cold_target_reset
 *  warm_target_reset
 */

MODULE_AUTHOR("Cisco Systems, Inc.");
MODULE_DESCRIPTION("iSCSI Driver");
MODULE_LICENSE("GPL");

#ifndef UINT32_MAX
# define UINT32_MAX 0xFFFFFFFFU
#endif

/* useful 2.4-ism */
#ifndef set_current_state
# define set_current_state(state_value) do { current->state = state_value; mb(); } while(0)
#endif

/* determine if a particular signal is pending or not */
# if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) )
#  define SIGNAL_IS_PENDING(SIG) sigismember(&current->pending.signal, (SIG))
# else
#  define SIGNAL_IS_PENDING(SIG) sigismember(&current->signal, (SIG))
# endif

#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) )
typedef unsigned long cpu_flags_t;
#else
typedef unsigned int cpu_flags_t;
#endif

/* we'd prefer to do all the locking ourselves, but the SCSI mid-layer
 * tends to call us with the io_request_lock held, and requires that we
 * get the lock before calling a SCSI command's done() callback.
 * This is supposed to be removed in lk 2.5, so make it conditional at compile-time.
 */
#define MIDLAYER_USES_IO_REQUEST_LOCK

#ifdef MIDLAYER_USES_IO_REQUEST_LOCK
/* for releasing the lock when we don't want it, but have it */
# define RELEASE_IO_REQUEST_LOCK   spin_unlock_irq(&io_request_lock)
# define REACQUIRE_IO_REQUEST_LOCK spin_lock_irq(&io_request_lock)
/* for getting the lock when we need it to call done(), but don't have it */
# define DECLARE_IO_REQUEST_FLAGS cpu_flags_t io_request_flags_
# define LOCK_IO_REQUEST_LOCK   spin_lock_irqsave(&io_request_lock, io_request_flags_);
# define UNLOCK_IO_REQUEST_LOCK spin_unlock_irqrestore(&io_request_lock, io_request_flags_);
#else
# define RELEASE_IO_REQUEST_LOCK
# define REACQUIRE_IO_REQUEST_LOCK
# define DECLARE_IO_REQUEST_FLAGS
# define LOCK_IO_REQUEST_LOCK
# define UNLOCK_IO_REQUEST_LOCK
#endif

/* we need to ensure the SCSI midlayer won't call the queuecommand()
 * entry point from a bottom-half handler while a thread holding locks
 * that queuecommand() will need to acquire is suspended by an interrupt.
 * we don't use spin_lock_bh() on 2.4 kernels, because spin_unlock_bh()
 * will run bottom-half handlers, which is bad if interrupts are turned off
 * and the io_request_lock is held, since the SCSI bottom-half handler will
 * try to acquire the io_request_lock again and deadlock.
 */
#define DECLARE_NOQUEUE_FLAGS cpu_flags_t noqueue_flags_
#define SPIN_LOCK_NOQUEUE(lock)   spin_lock_irqsave((lock), noqueue_flags_)
#define SPIN_UNLOCK_NOQUEUE(lock) spin_unlock_irqrestore((lock), noqueue_flags_)

/* Scsi_cmnd->result */
#define DRIVER_BYTE(byte)   ((byte) << 24)
#define HOST_BYTE(byte)     ((byte) << 16) /* HBA codes */
#define MSG_BYTE(byte)      ((byte) << 8)
#define STATUS_BYTE(byte)   ((byte))       /* SCSI status */

/* extract parts of the sense data from an (unsigned char *) to the beginning of sense data */
#define SENSE_KEY(sensebuf) ((sensebuf)[2] & 0x0F)
#define ASC(sensebuf)       ((sensebuf)[12])
#define ASCQ(sensebuf)      ((sensebuf)[13])

static int ctl_open(struct inode *inode, struct file *file);
static int ctl_close(struct inode *inode, struct file *file);
static int ctl_ioctl(struct inode *inode,
                     struct file *file,
                     unsigned int cmd,
                     unsigned long arg );

static int control_major;
static const char *control_name = "iscsictl";

#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) )
static struct file_operations control_fops = {
    owner: THIS_MODULE,
    ioctl: ctl_ioctl,    /* ioctl */
    open: ctl_open,      /* open */
    release: ctl_close,  /* release */
};
#else
static struct file_operations control_fops = {
    NULL,                   /* lseek */
    NULL,                   /* read */
    NULL,                   /* write */
    NULL,                   /* readdir */
    NULL,                   /* poll */
    ctl_ioctl,              /* ioctl */
    NULL,                   /* mmap */
    ctl_open,               /* open */
    NULL,                   /* flush */
    ctl_close,              /* release */
};
#endif

spinlock_t iscsi_hba_list_lock = SPIN_LOCK_UNLOCKED;
static iscsi_hba_t *iscsi_hba_list = NULL;

static unsigned int init_module_complete = 0;
static volatile int iscsi_timer_running = 0;
static volatile pid_t iscsi_timer_pid = 0;

volatile unsigned int iscsi_log_settings = LOG_SET(ISCSI_LOG_ERR);

#if DEBUG_TRACE
spinlock_t iscsi_trace_lock = SPIN_LOCK_UNLOCKED;
static iscsi_trace_entry_t trace_table[ISCSI_TRACE_COUNT];
static int trace_index=0;

# define ISCSI_TRACE(P_TYPE, P_CMND, P_TASK, P_DATA1, P_DATA2) \
           iscsi_fill_trace((P_TYPE), (P_CMND), (P_TASK), (P_DATA1), (P_DATA2))
#else
# define ISCSI_TRACE(P_TYPE, P_CMND, P_TASK, P_DATA1, P_DATA2)
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,3,27) )
/* note change modeled per linux2.4 drivers/scsi/ips.c */
struct proc_dir_entry proc_dir_iscsi = {
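The comments in this file state that the SCSI mid-layer requires io_request_lock to be held before a command's done() callback is invoked, and the LOCK_IO_REQUEST_LOCK / UNLOCK_IO_REQUEST_LOCK macros defined above exist for exactly that case. The fragment below is a minimal sketch of that pattern on a 2.4-era kernel; it is not part of the original file, and the helper name iscsi_complete_command(), the DID_OK result value, and the use of the Scsi_Cmnd scsi_done field are illustrative assumptions, not the driver's actual completion path.

/* Hypothetical sketch (not in the original file): acquire io_request_lock
 * with interrupts saved before invoking the mid-layer's done() callback,
 * as required by the locking comments above.
 */
static void iscsi_complete_command(Scsi_Cmnd *sc)
{
    DECLARE_IO_REQUEST_FLAGS;             /* cpu_flags_t io_request_flags_ */

    /* e.g. a successful command: host byte DID_OK, SCSI status GOOD */
    sc->result = HOST_BYTE(DID_OK) | STATUS_BYTE(0);

    LOCK_IO_REQUEST_LOCK;                 /* spin_lock_irqsave(&io_request_lock, ...) */
    sc->scsi_done(sc);                    /* mid-layer callback must be called with the lock held */
    UNLOCK_IO_REQUEST_LOCK;               /* spin_unlock_irqrestore(...) */
}

The point the file's comments make is the lock/unlock ordering around done(), not the particular result value; when MIDLAYER_USES_IO_REQUEST_LOCK is not defined, the same code compiles with the locking macros expanding to nothing.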
