📄 iucv.c
/*
 * IUCV base infrastructure.
 *
 * Copyright 2001, 2006 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s):
 *	Original source:
 *		Alan Altmark (Alan_Altmark@us.ibm.com)	Sept. 2000
 *		Xenia Tkatschow (xenia@us.ibm.com)
 *	2Gb awareness and general cleanup:
 *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 *	Rewritten for af_iucv:
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Documentation used:
 *	The original source
 *	CP Programming Service, IBM document # SC24-5760
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <net/iucv/iucv.h>
#include <asm/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/s390_ext.h>
#include <asm/s390_rdev.h>
#include <asm/smp.h>

/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80

static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
	return 0;
}

struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
};
EXPORT_SYMBOL(iucv_bus);

struct device *iucv_root;
EXPORT_SYMBOL(iucv_root);

static int iucv_available;

/* General IUCV interrupt structure */
struct iucv_irq_data {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 res2[8];
};

struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;

/*
 * Queue of interrupt buffers lock for delivery via the tasklet
 * (fast but can't call smp_call_function).
 */
static LIST_HEAD(iucv_task_queue);

/*
 * The tasklet for fast delivery of iucv interrupts.
 */
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn, 0);

/*
 * Queue of interrupt buffers for delivery via a work queue
 * (slower but can call smp_call_function).
 */
static LIST_HEAD(iucv_work_queue);

/*
 * The work element to deliver path pending interrupts.
 */
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);

/*
 * Spinlock protecting task and work queue.
 */
static DEFINE_SPINLOCK(iucv_queue_lock);
enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
};

/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: an array of iucv_path structures.
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_lock: spinlock protecting iucv_handler_list and iucv_pathid_table
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler. Needed for iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex and wait queue for iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;

/*
 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
 * iucv_path_quiesce and iucv_path_sever.
 */
struct iucv_cmd_control {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u8  iptarget[8];
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8  iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u32 ipbfadr1;
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u8  ipaudit[3];
	u8  res1[5];
	u32 res2;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_enable_cpu.
 */
struct iucv_cmd_set_mask {
	u8  ipmask;
	u8  res1[2];
	u8  iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block.
 */
static union iucv_param *iucv_param[NR_CPUS];

/**
 * iucv_call_b2f0
 * @code: identifier of IUCV call to CP.
 * @parm: pointer to a struct iucv_parm block
 *
 * Calls CP to execute IUCV commands.
 *
 * Returns the result of the CP IUCV call.
 */
static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = command;
	reg1 = virt_to_phys(parm);
	asm volatile(
		"	.long 0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
		:  "m" (*parm) : "cc");
	return (ccode == 1) ? parm->ctrl.iprcode : ccode;
}
/**
 * iucv_query_maxconn
 *
 * Determines the maximum number of connections that may be established.
 *
 * Returns the maximum number of connections or -EPERM if IUCV is not
 * available.
 */
static int iucv_query_maxconn(void)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	void *param;
	int ccode;

	param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA);
	if (!param)
		return -ENOMEM;
	reg0 = IUCV_QUERY;
	reg1 = (unsigned long) param;
	asm volatile (
		"	.long	0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
	if (ccode == 0)
		iucv_max_pathid = reg0;
	kfree(param);
	return ccode ? -EPERM : 0;
}

/**
 * iucv_allow_cpu
 * @data: unused
 *
 * Allow iucv interrupts on this cpu.
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow nonpriority message pending interrupts
	 *	0x40 - Flag to allow priority message pending interrupts
	 *	0x20 - Flag to allow nonpriority message completion interrupts
	 *	0x10 - Flag to allow priority message completion interrupts
	 *	0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpu_set(cpu, iucv_irq_cpumask);
}

/**
 * iucv_block_cpu
 * @data: unused
 *
 * Block iucv interrupts on this cpu.
 */
static void iucv_block_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Disable all iucv interrupts. */
	parm = iucv_param[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpu_clear(cpu, iucv_irq_cpumask);
}

/**
 * iucv_declare_cpu
 * @data: unused
 *
 * Declare an interrupt buffer on this cpu.
 */
static void iucv_declare_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;
	int rc;

	if (cpu_isset(cpu, iucv_buffer_cpumask))
		return;

	/* Declare interrupt buffer. */
	parm = iucv_param[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
	if (rc) {
		char *err = "Unknown";
		switch (rc) {
		case 0x03:
			err = "Directory error";
			break;
		case 0x0a:
			err = "Invalid length";
			break;
		case 0x13:
			err = "Buffer already exists";
			break;
		case 0x3e:
			err = "Buffer overlap";
			break;
		case 0x5c:
			err = "Paging or storage error";
			break;
		}
		printk(KERN_WARNING "iucv_register: iucv_declare_buffer "
		       "on cpu %i returned error 0x%02x (%s)\n",
		       cpu, rc, err);
		return;
	}

	/* Set indication that an iucv buffer exists for this cpu. */
	cpu_set(cpu, iucv_buffer_cpumask);

	if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask))
		/* Enable iucv interrupts on this cpu. */
		iucv_allow_cpu(NULL);
	else
		/* Disable iucv interrupts on this cpu. */
		iucv_block_cpu(NULL);
}

/**
 * iucv_retrieve_cpu
 * @data: unused
 *
 * Retrieve interrupt buffer on this cpu.
 */
static void iucv_retrieve_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	if (!cpu_isset(cpu, iucv_buffer_cpumask))
		return;

	/* Block iucv interrupts. */
	iucv_block_cpu(NULL);

	/* Retrieve interrupt buffer. */
	parm = iucv_param[cpu];
	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

	/* Clear indication that an iucv buffer exists for this cpu. */
	cpu_clear(cpu, iucv_buffer_cpumask);
}
/**
 * iucv_setmask_mp
 *
 * Allow iucv interrupts on all cpus.
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
		    !cpu_isset(cpu, iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 0, 1);
	preempt_enable();
}

/**
 * iucv_setmask_up
 *
 * Allow iucv interrupts on a single cpu.
 */
static void iucv_setmask_up(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all cpus but the first in iucv_irq_cpumask. */
	cpumask = iucv_irq_cpumask;
	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
	for_each_cpu_mask(cpu, cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
}

/**
 * iucv_enable
 *
 * This function makes iucv ready for use. It allocates the pathid
 * table, declares an iucv interrupt buffer and enables the iucv
 * interrupts. Called when the first user has registered an iucv
 * handler.
 */
static int iucv_enable(void)
{
	size_t alloc_size;
	int cpu, rc;

	rc = -ENOMEM;
	alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
	if (!iucv_path_table)
		goto out;
	/* Declare per cpu buffers. */
	rc = -EIO;
	preempt_disable();
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
	preempt_enable();
	if (cpus_empty(iucv_buffer_cpumask))
		/* No cpu could declare an iucv buffer. */
		goto out_path;
	return 0;

out_path:
	kfree(iucv_path_table);
out:
	return rc;
}

/**
 * iucv_disable
 *
 * This function shuts down iucv. It disables iucv interrupts, retrieves
 * the iucv interrupt buffer and frees the pathid table. Called after the
 * last user unregistered its iucv handler.
 */
static void iucv_disable(void)
{
	on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1);
	kfree(iucv_path_table);
}

static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
				     unsigned long action, void *hcpu)
{
	cpumask_t cpumask;
	long cpu = (long) hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_irq_data[cpu])
			return NOTIFY_BAD;
		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
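
The listing above ends mid-way through iucv_cpu_notify. For context, here is a minimal sketch of how a client of this infrastructure (in the style of af_iucv or netiucv) consumes the public API declared in <net/iucv/iucv.h>. It is not part of the original file: the handler, the init function, the peer z/VM user ID "LNXPEER " and the message limit of 16 are hypothetical illustration values.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>

/* Called by the base code above when the peer severs the path. */
static void example_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	iucv_path_sever(path, NULL);
	iucv_path_free(path);
}

static struct iucv_handler example_handler = {
	.path_severed	= example_path_severed,
};

static int __init example_init(void)
{
	struct iucv_path *path;
	u8 userid[8] = "LNXPEER ";	/* hypothetical peer user ID */
	int rc;

	/*
	 * The first registration triggers iucv_enable(): the path table is
	 * allocated and each online cpu declares its interrupt buffer.
	 */
	rc = iucv_register(&example_handler, 0);
	if (rc)
		return rc;

	path = iucv_path_alloc(16, 0, GFP_KERNEL);	/* msglim 16, no flags */
	if (!path) {
		iucv_unregister(&example_handler, 0);
		return -ENOMEM;
	}

	/* CONNECT to the peer; interrupts for this path are dispatched to
	 * example_handler's callbacks by the tasklet/work queue above. */
	rc = iucv_path_connect(path, &example_handler, userid, NULL, NULL, NULL);
	if (rc) {
		iucv_path_free(path);
		iucv_unregister(&example_handler, 0);
	}
	return rc;
}
module_init(example_init);
MODULE_LICENSE("GPL");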