📄 ioc4.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Silicon Graphics, Inc.  All Rights Reserved.
 */

/* This is the top level IOC4 device driver.  It does very little, farming
 * out actual tasks to the various slave IOC4 drivers (serial, keyboard/mouse,
 * and real-time interrupt).
 */

#include <linux/config.h>
#include <asm/sn/types.h>
#include <asm/sn/sgi.h>
#include <asm/sn/invent.h>
#include <asm/sn/iograph.h>
#include <asm/atomic.h>
#include <asm/sn/pci/pci_defs.h>
#include <asm/sn/pci/pciio.h>
#include <linux/pci.h>
#include <asm/sn/ioc4.h>
#include <asm/sn/pci/pci_bus_cvlink.h>

/* #define DEBUG_INTERRUPTS */

#define SUPPORT_ATOMICS

#ifdef SUPPORT_ATOMICS
/*
 * support routines for local atomic operations.
 */
static spinlock_t local_lock;

static inline unsigned int
atomicSetInt(atomic_t *a, unsigned int b)
{
        unsigned long s;
        unsigned int ret, new;

        spin_lock_irqsave(&local_lock, s);
        new = ret = atomic_read(a);
        new |= b;
        atomic_set(a, new);
        spin_unlock_irqrestore(&local_lock, s);

        return ret;
}

static unsigned int
atomicClearInt(atomic_t *a, unsigned int b)
{
        unsigned long s;
        unsigned int ret, new;

        spin_lock_irqsave(&local_lock, s);
        new = ret = atomic_read(a);
        new &= ~b;
        atomic_set(a, new);
        spin_unlock_irqrestore(&local_lock, s);

        return ret;
}

#else
#define atomicAddInt(a,b)       *(a) += ((unsigned int)(b))

static inline unsigned int
atomicSetInt(unsigned int *a, unsigned int b)
{
        unsigned int ret = *a;

        *a |= b;
        return ret;
}

#define atomicSetUint64(a,b)    *(a) |= ((unsigned long long)(b))

static inline unsigned int
atomicClearInt(unsigned int *a, unsigned int b)
{
        unsigned int ret = *a;

        *a &= ~b;
        return ret;
}

#define atomicClearUint64(a,b)  *(a) &= ~((unsigned long long)(b))
#endif  /* SUPPORT_ATOMICS */

/* pci device struct */
static const struct pci_device_id __devinitdata ioc4_s_id_table[] =
{
        { IOC4_VENDOR_ID_NUM, IOC4_DEVICE_ID_NUM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { 0, 0, 0, 0, 0, 0, 0 }
};

int __devinit ioc4_attach(struct pci_dev *, const struct pci_device_id *);

struct pci_driver ioc4_s_driver =
{
        name:           "IOC4 Serial",
        id_table:       ioc4_s_id_table,
        probe:          ioc4_attach,
};

int __init
ioc4_serial_detect(void)
{
        int rc;

        rc = pci_register_driver(&ioc4_s_driver);
        return 0;
}
module_init(ioc4_serial_detect);

/*
 * Some external functions we still need.
 */
extern int ioc4_serial_attach(vertex_hdl_t conn, void *mem);
extern cpuid_t cpuvertex_to_cpuid(vertex_hdl_t vhdl);

/*
 * per-IOC4 data structure
 */
typedef struct ioc4_soft_s {
        vertex_hdl_t        is_ioc4_vhdl;
        vertex_hdl_t        is_conn_vhdl;
        struct pci_dev     *is_pci_dev;
        ioc4_mem_t         *is_ioc4_mem;

        /* Each interrupt type has an entry in the array */
        struct ioc4_intr_type {
                /*
                 * Each in-use entry in this array contains at least
                 * one nonzero bit in sd_bits; no two entries in this
                 * array have overlapping sd_bits values.
                 */
#define MAX_IOC4_INTR_ENTS      (8 * sizeof(ioc4reg_t))
                struct ioc4_intr_info {
                        ioc4reg_t            sd_bits;
                        ioc4_intr_func_f    *sd_intr;
                        intr_arg_t           sd_info;
                        vertex_hdl_t         sd_vhdl;
                        struct ioc4_soft_s  *sd_soft;
                } is_intr_info[MAX_IOC4_INTR_ENTS];

                /* Number of entries active in the above array */
                atomic_t        is_num_intrs;
                atomic_t        is_intr_bits_busy;      /* Bits assigned */
                atomic_t        is_intr_ents_free;      /* Free active entries mask */
        } is_intr_type[ioc4_num_intr_types];

        /* is_ir_lock must be held while
         * modifying sio_ie values, so
         * we can be sure that sio_ie is
         * not changing when we read it
         * along with sio_ir.
         */
        spinlock_t      is_ir_lock;     /* SIO_IE[SC] mod lock */
} ioc4_soft_t;

#define ioc4_soft_set(v,i)      hwgraph_fastinfo_set((v), (arbitrary_info_t)(i))
#define ioc4_soft_get(v)        ((ioc4_soft_t *)hwgraph_fastinfo_get(v))

/* =====================================================================
 *    Function Table of Contents
 */

/* The IOC4 hardware provides no atomic way to determine if interrupts
 * are pending since two reads are required to do so.  The handler must
 * read the SIO_IR and the SIO_IES, and take the logical and of the
 * two.  When this value is zero, all interrupts have been serviced and
 * the handler may return.
 *
 * This has the unfortunate "hole" that, if some other CPU or
 * some other thread or some higher level interrupt manages to
 * modify SIO_IE between our reads of SIO_IR and SIO_IE, we may
 * think we have observed SIO_IR&SIO_IE==0 when in fact this
 * condition never really occurred.
 *
 * To solve this, we use a simple spinlock that must be held
 * whenever modifying SIO_IE; holding this lock while observing
 * both SIO_IR and SIO_IE guarantees that we do not falsely
 * conclude that no enabled interrupts are pending.
 */

void
ioc4_write_ireg(void *ioc4_soft, ioc4reg_t val, int which, ioc4_intr_type_t type)
{
        ioc4_mem_t *mem = ((ioc4_soft_t *) ioc4_soft)->is_ioc4_mem;
        spinlock_t *lp = &((ioc4_soft_t *) ioc4_soft)->is_ir_lock;
        unsigned long s;

        spin_lock_irqsave(lp, s);

        switch (type) {
        case ioc4_sio_intr_type:
                switch (which) {
                case IOC4_W_IES:
                        mem->sio_ies_ro = val;
                        break;

                case IOC4_W_IEC:
                        mem->sio_iec_ro = val;
                        break;
                }
                break;

        case ioc4_other_intr_type:
                switch (which) {
                case IOC4_W_IES:
                        mem->other_ies_ro = val;
                        break;

                case IOC4_W_IEC:
                        mem->other_iec_ro = val;
                        break;
                }
                break;

        case ioc4_num_intr_types:
                break;
        }
        spin_unlock_irqrestore(lp, s);
}

static inline ioc4reg_t
ioc4_pending_intrs(ioc4_soft_t *ioc4_soft, ioc4_intr_type_t type)
{
        ioc4_mem_t *mem = ioc4_soft->is_ioc4_mem;
        spinlock_t *lp = &ioc4_soft->is_ir_lock;
        unsigned long s;
        ioc4reg_t intrs = (ioc4reg_t) 0;

        ASSERT((type == ioc4_sio_intr_type) || (type == ioc4_other_intr_type));

        spin_lock_irqsave(lp, s);

        switch (type) {
        case ioc4_sio_intr_type:
                intrs = mem->sio_ir & mem->sio_ies_ro;
                break;

        case ioc4_other_intr_type:
                intrs = mem->other_ir & mem->other_ies_ro;

                /* Don't process any ATA interrupts, leave them for the ATA driver */
                intrs &= ~(IOC4_OTHER_IR_ATA_INT | IOC4_OTHER_IR_ATA_MEMERR);
                break;

        case ioc4_num_intr_types:
                break;
        }
        spin_unlock_irqrestore(lp, s);

        return intrs;
}

int __devinit
ioc4_attach(struct pci_dev *pci_handle, const struct pci_device_id *pci_id)
{
        ioc4_mem_t *mem;
        /*REFERENCED*/
        graph_error_t rc;
        vertex_hdl_t ioc4_vhdl;
        ioc4_soft_t *soft;
        vertex_hdl_t conn_vhdl = PCIDEV_VERTEX(pci_handle);
        int tmp;
        extern void ioc4_ss_connect_interrupt(int, void *, void *);
        extern void ioc4_intr(int, void *, struct pt_regs *);

        if (pci_enable_device(pci_handle)) {
                printk("ioc4_attach: Failed to enable device with pci_dev 0x%p... returning\n",
                       (void *)pci_handle);
                return (-1);
        }

        pci_set_master(pci_handle);
        snia_pciio_endian_set(pci_handle, PCIDMA_ENDIAN_LITTLE, PCIDMA_ENDIAN_BIG);

        /*
         * Get PIO mappings through our "primary"
         * connection point to the IOC4's CFG and
         * MEM spaces.
         */

        /*
         * Map in the ioc4 memory - we'll do config accesses thru the pci_????() interfaces.
         */
        mem = (ioc4_mem_t *)pci_resource_start(pci_handle, 0);
        if (!mem) {
                printk(KERN_ALERT "%p/" EDGE_LBL_IOC4 ": unable to get PIO mapping for my MEM space\n",
                       (void *)pci_handle);
                return -1;
        }

        if (!request_region((unsigned long)mem, sizeof(*mem), "sioc4_mem")) {
                printk(KERN_ALERT "%p/" EDGE_LBL_IOC4 ": unable to get request region for my MEM space\n",
                       (void *)pci_handle);
                return -1;
        }

        /*
         * Create the "ioc4" vertex which hangs off of
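The comment above ioc4_write_ireg() explains the locking rule this file relies on: SIO_IE may only change while is_ir_lock is held, and a pending-interrupt check must read SIO_IR and SIO_IE together under that same lock, otherwise a concurrent enable/disable can make SIO_IR & SIO_IE look empty when an enabled interrupt is actually pending. The fragment below is a minimal user-space sketch of that rule, not part of the driver: the names fake_ioc4, enable_bits, disable_bits, and pending_intrs are made up for illustration, and a pthread mutex stands in for the kernel spinlock.

/*
 * Illustrative only -- not part of ioc4.c.  A user-space model of the
 * is_ir_lock rule: the enable mask may only change under the lock, and
 * the "pending" test reads both registers under that same lock.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_ioc4 {                              /* hypothetical stand-in for ioc4_mem_t */
        uint32_t        sio_ir;                 /* interrupts currently raised */
        uint32_t        sio_ie;                 /* interrupts currently enabled */
        pthread_mutex_t ir_lock;                /* plays the role of is_ir_lock */
};

static void enable_bits(struct fake_ioc4 *c, uint32_t bits)
{
        pthread_mutex_lock(&c->ir_lock);        /* analogous to writing SIO_IES under the lock */
        c->sio_ie |= bits;
        pthread_mutex_unlock(&c->ir_lock);
}

static void disable_bits(struct fake_ioc4 *c, uint32_t bits)
{
        pthread_mutex_lock(&c->ir_lock);        /* analogous to writing SIO_IEC under the lock */
        c->sio_ie &= ~bits;
        pthread_mutex_unlock(&c->ir_lock);
}

static uint32_t pending_intrs(struct fake_ioc4 *c)
{
        uint32_t pending;

        pthread_mutex_lock(&c->ir_lock);        /* observe IR and IE as one consistent pair */
        pending = c->sio_ir & c->sio_ie;
        pthread_mutex_unlock(&c->ir_lock);
        return pending;
}

int main(void)
{
        static struct fake_ioc4 chip = {
                .sio_ir  = 0x5,                 /* bits 0 and 2 raised */
                .ir_lock = PTHREAD_MUTEX_INITIALIZER,
        };

        enable_bits(&chip, 0x4);
        printf("pending = 0x%x\n", (unsigned) pending_intrs(&chip));    /* 0x4 */
        disable_bits(&chip, 0x4);
        printf("pending = 0x%x\n", (unsigned) pending_intrs(&chip));    /* 0x0 */
        return 0;
}

Built with any C compiler plus -pthread, the two printf calls report 0x4 and then 0x0: the pending mask only ever reflects enable bits that were fully written, which is exactly the guarantee ioc4_pending_intrs() gets from taking is_ir_lock around its two register reads.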