📄 iscsi.c
字号:
/*
 * iSCSI driver for Linux
 * Copyright (C) 2001 Cisco Systems, Inc.
 * maintained by linux-iscsi@cisco.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * See the file COPYING included with this distribution for more details.
 *
 * $Id: iscsi.c,v 1.97 2002/10/16 20:53:39 smferris Exp $
 */

/* NOTE(review): duplicate #includes of <linux/config.h>, <linux/kernel.h>,
 * and <linux/errno.h> removed; each header is now included exactly once.
 */
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/blk.h>
#include <linux/types.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <net/sock.h>
#include <linux/socket.h>
#include <linux/unistd.h>
#include <linux/timer.h>

#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) )
# include <linux/init.h>
# define INIT_MODIFIER __init
# define EXIT_MODIFIER __exit
#else
# define INIT_MODIFIER
# define EXIT_MODIFIER
#endif

#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) )
# include <asm/semaphore.h>
#else
# include <asm/spinlock.h>
#endif

#include <asm/uaccess.h>
#include <scsi/sg.h>
#include <sd.h>
#include <scsi.h>
#include <hosts.h>

/* if set, do a TCP Abort when a session drops,
 * instead of (attempting) a graceful TCP Close
 */
#define TCP_ABORT_ON_DROP 0

/* Compile-time fault-injection and self-test knobs.
 * All default to 0 (disabled); enable individually for driver testing.
 */
#define TEST_ABORTS 0
#define ABORT_FREQUENCY 5000
#define ABORT_COUNT 6           /* requires TEST_ABORTS 1 */

#define TEST_LUN_RESETS 0
#define LUN_RESET_FREQUENCY 2
#define LUN_RESET_COUNT 4       /* requires TEST_LUN_RESETS 1 */

#define TEST_WARM_RESETS 0
#define WARM_RESET_FREQUENCY 2
#define WARM_RESET_COUNT 2      /* requires TEST_WARM_RESETS 1 */

#define TEST_COLD_RESETS 0

/* periodically stall reading data to test data arriving after aborts have started */
#define TEST_DELAYED_DATA 0

/* drop sessions during error recovery */
#define TEST_ERROR_RECOVERY_SESSION_DROP 0

/* periodically fake unit attention sense data to test bugs in Linux */
#define FAKE_DEFERRED_ERRORS 0
#define FAKE_DEFERRED_ERROR_FREQUENCY 100

/* fake sense indicating ILLEGAL_REQUEST for all REPORT_LUNS commands */
#define FAKE_NO_REPORT_LUNS 0

/* fake check conditions on the first 2 attempts for each probe command */
#define FAKE_PROBE_CHECK_CONDITIONS 0

/* fake underflows on the first 4 attempts for each probe command */
#define FAKE_PROBE_UNDERFLOW 0

#define FAKE_PDU_REJECTS 0

/* always fail error recovery in eh_strategy, to test devices going offline */
#define FAIL_ERROR_RECOVERY 0

#define MULTIPATH_SUPPORT 0

#include "iscsi-common.h"
#include "iscsi-protocol.h"
#include "iscsi-ioctl.h"
#include "iscsi-io.h"
#include "iscsi-login.h"
#include "iscsi-trace.h"
#include "iscsi.h"
#include "iscsi-session.h"
#include "iscsi-version.h"
#include "iscsi-probe.h"

/*
 * IMPORTANT NOTE: to prevent deadlock, when holding multiple locks,
 * the following locking order must be followed at all times:
 *
 * hba_list_lock           - access to collection of HBA instances
 * session->task_lock      - access to a session's collections of tasks
 * hba->free_task_lock     - for task alloc/free from the HBA's task pool
 * io_request_lock/host_lock - mid-layer acquires before calling queuecommand,
 *                           eh_*, we must acquire before done() callback
 * hba->session_lock       - access to an HBA's collection of sessions
 * session->scsi_cmnd_lock - access to a session's list of Scsi_Cmnds
 * iscsi_trace_lock        - for the tracing code
 *
 * The locking order is somewhat counter-intuitive.  The queue()
 * function may get called by a bottom-half handler for the SCSI
 * midlayer, which means it may be called after any interrupt occurs,
 * while another kernel thread is suspended due to the interrupt.
 * Since this may be one of our threads which is holding a spinlock,
 * to prevent deadlocks the spinlocks used by the queue() function must
 * be last in the locking order.  Also, the bottom-half handler must somehow
 * be locally disabled when holding any lock that might be used by queue(),
 * to prevent the lock holder being suspended by an interrupt, and then
 * the queue() function called (which would deadlock).  While 2.4 kernels
 * have a spin_lock_bh() function, we don't use it, because spin_unlock_bh()
 * may immediately run bottom-halves, and the driver sometimes would have
 * needed to call spin_unlock_bh() while interrupts were off and the
 * io_request_lock was already held, which could cause deadlocks.  Instead,
 * the driver always uses spin_lock_irqsave.
 *
 * Also, since any interrupt may try to acquire the io_request_lock, we
 * want the io_request_lock as late in the lock order as possible, since
 * interrupts must be disabled when holding any lock that follows the
 * io_request_lock in the locking order.  The locks needed in queue()
 * follow the io_request_lock so that interrupts may call the queue()
 * entry point.  The eh_*_handlers all release the io_request_lock, since
 * they all may invoke the scheduler, and that can't be done with a spinlock
 * held.  Likewise, since scheduling in an interrupt will panic the kernel,
 * all of the eh_*_handlers may fail if called from interrupt context.
 *
 * As of 1-2-2002, various threads may be in the following lock states
 * (ignoring the trace_lock, since the tracing code is largely unmaintained):
 *
 * queue: (interrupts off) io_request_lock
 *        (interrupts off) io_request_lock, hba->session_lock
 *        (interrupts off) io_request_lock, hba->session_lock, session->scsi_cmnd_lock
 *
 * tx: none
 *     (an interrupt acquires) io_request_lock
 *     hba->free_task_lock,
 *     hba->free_task_lock, (an interrupt acquires) io_request_lock
 *     session->task_lock,
 *     session->task_lock, (an interrupt acquires) io_request_lock
 *     session->task_lock, (interrupts off) session->scsi_cmnd_lock
 *     (interrupts off) session->scsi_cmnd_lock
 *
 * rx: none
 *     (an interrupt acquires) io_request_lock
 *     session->task_lock
 *     session->task_lock, (an interrupt acquires) io_request_lock
 *     session->task_lock, (interrupts off) io_request_lock
 *     hba->free_task_lock
 *     hba->free_task_lock, (an interrupt acquires) io_request_lock
 *     (interrupts off) session->scsi_cmnd_lock
 *     session->task_lock, (interrupts off) session->scsi_cmnd_lock
 *
 * timer: none
 *        hba_list_lock
 *        hba_list_lock, (an interrupt acquires) io_request_lock
 *        hba_list_lock, (interrupts off) hba->session_lock
 *
 * ioctl: none
 *        (an interrupt acquires) io_request_lock
 *        hba_list_lock
 *        hba_list_lock, (an interrupt acquires) io_request_lock
 *        (interrupts off) hba->session_lock
 *        session->task_lock
 *        session->task_lock, (an interrupt acquires) io_request_lock
 *        session->task_lock, (interrupts off) session->scsi_cmnd_lock
 *        session->task_lock, (interrupts off) io_request_lock
 *        (interrupts off) session->scsi_cmnd_lock
 *
 * eh_*_handler: (interrupts off) io_request_lock
 *               none
 *               (an interrupt acquires) io_request_lock
 *               (interrupts off) session->scsi_cmnd_lock
 *               session->task_lock
 *               session->task_lock, (an interrupt acquires) io_request_lock
 *
 * This driver assumes the eh_*_handler functions can safely release
 * the io_request_lock and locally enable interrupts, which is true
 * on 2.4 kernels, but unclear on 2.2 kernels.
 *
 * The eh_*_handler functions may fail if called from interrupt context,
 * since they typically need to block and wait for a response from the
 * target, and scheduling in interrupt context would panic the kernel.
 *
 * The driver assumes that calling the following kernel primitives may invoke the
 * scheduler and preempt the caller, and thus no spinlocks can be held when they
 * are called, nor can interrupts or bottom-half handlers be disabled:
 *
 *   sock_sendmsg
 *   sock_recvmsg
 *   kmalloc
 *   schedule_timeout (duh)
 *   kernel_thread
 *   waitpid
 *
 * The following kernel primitives probably don't schedule, but the driver
 * could handle it even if they did:
 *
 *   signal_pending
 *   get_ds
 *   get_fs
 *   set_fs
 *   fget
 *   fput
 *
 * The driver assumes that calling the following kernel primitives WILL NOT invoke the
 * scheduler, and thus cannot cause a preemption.  If this assumption is violated,
 * the driver will break badly:
 *
 *   wake_up
 *   kill_proc
 *   printk
 *   kfree
 *
 * The following driver functions may invoke the scheduler, and must not be
 * called while holding any spinlock:
 *
 *   iscsi_sendmsg
 *   iscsi_recvmsg
 *   alloc_task
 *   cold_reset_target
 *   warm_reset_target
 */

MODULE_AUTHOR("Cisco Systems, Inc.");
MODULE_DESCRIPTION("iSCSI driver");
MODULE_LICENSE("GPL");

/* Force tagged command queueing for all devices,
 * regardless of whether they say they support it
 */
static int force_tcq = 0;
MODULE_PARM(force_tcq, "i");
MODULE_PARM_DESC(force_tcq, "when non-zero, force tagged command queueing for all devices");

/* Queue depth for devices that don't support tagged command queueing.
 * The driver used to use ISCSI_CMDS_PER_LUN, which was probably a bug.
 * Default to 1 now, but let people who want to the old behavior set it higher.
 */
static int untagged_queue_depth = 1;
MODULE_PARM(untagged_queue_depth, "i");
MODULE_PARM_DESC(untagged_queue_depth, "queue depth to use for devices that don't support tagged command queueing");

/* when non-zero, deferred sense data in a command response is rewritten
 * as current sense before being passed up to the SCSI mid-layer
 */
static int translate_deferred_sense = 1;
MODULE_PARM(translate_deferred_sense, "i");
MODULE_PARM_DESC(translate_deferred_sense, "translate deferred sense data to current sense data in command responses");

#ifndef UINT32_MAX
# define UINT32_MAX 0xFFFFFFFFU
#endif

/* file_operations hooks for the /dev control device (defined later in this file) */
static int ctl_open(struct inode *inode, struct file *file);
static int ctl_close(struct inode *inode, struct file *file);
static int ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg );

/* character-device major number assigned at registration time */
static int control_major;
static const char *control_name = "iscsictl";

static struct file_operations control_fops = {
	owner:   THIS_MODULE,
	ioctl:   ctl_ioctl,  /* ioctl */
	open:    ctl_open,   /* open */
	release: ctl_close,  /* release */
};

/* protects iscsi_hba_list; first in the locking order documented above */
spinlock_t iscsi_hba_list_lock = SPIN_LOCK_UNLOCKED;
static iscsi_hba_t *iscsi_hba_list = NULL;

/* module/timer lifecycle flags, polled across threads */
static volatile unsigned long init_module_complete = 0;
static volatile unsigned long iscsi_timer_running = 0;
static volatile pid_t iscsi_timer_pid = 0;

volatile unsigned int iscsi_log_settings = LOG_SET(ISCSI_LOG_ERR);

/* use the otherwise unused Scsi_Pointer Status to record
 * the stage of error recovery each command is in.
*/# define CMND_ERROR_STAGE(c) (c)->SCp.Status# define ISCSI_ERROR_STAGE_UNKNOWN 0# define ISCSI_ERROR_STAGE_ABORT 1# define ISCSI_ERROR_STAGE_LUN_RESET 2# define ISCSI_ERROR_STAGE_WARM_RESET 3# define ISCSI_ERROR_STAGE_COLD_RESET 4# define ISCSI_ERROR_STAGE_OFFLINE 99# define ISCSI_ERROR_STAGE_RECOVERED 100#define is_digit(c) (((c) >= '0') && ((c) <= '9'))#define is_hex_lower(c) (((c) >= 'a') && ((c) <= 'f'))#define is_hex_upper(c) (((c) >= 'A') && ((c) <= 'F'))#define is_space(c) ((c) == ' ' || (c) == '\t' || (c) == '\n' || (c) == '\0')#if DEBUG_TRACEspinlock_t iscsi_trace_lock = SPIN_LOCK_UNLOCKED;static iscsi_trace_entry_t trace_table[ISCSI_TRACE_COUNT];static int trace_index=0;# define ISCSI_TRACE(P_TYPE, P_CMND, P_TASK, P_DATA1, P_DATA2) \ iscsi_fill_trace((P_TYPE), (P_CMND), (P_TASK), (P_DATA1), (P_DATA2))#else# define ISCSI_TRACE(P_TYPE, P_CMND, P_TASK, P_DATA1, P_DATA2) #endif#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,3,27) )/* note change modeled per linux2.4 drivers/scsi/ips.c */struct proc_dir_entry proc_dir_iscsi = {# ifdef PROC_SCSI_ISCSI PROC_SCSI_ISCSI,# else PROC_SCSI_NOT_PRESENT,# endif 5, "iscsi", S_IFDIR|S_IRUGO|S_IXUGO, 2};#endif/* become a daemon kernel thread. Some kernels provide this functionality * already, and some even do it correctly */void iscsi_daemonize(void){ struct task_struct *this_task = current; # if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,10) ) /* use the kernel's daemonize */ daemonize(); /* Reparent to init */ reparent_to_init(); /* increase priority like the md driver does for it's kernel threads */ this_task->policy = SCHED_NORMAL; set_user_nice(this_task, -20); wmb();# elif ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) ) /* use the kernel's daemonize */ daemonize(); /* We'd like to reparent to init, but don't have a function to do it, and
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -