📄 irq.c
字号:
/* NOTE(review): fragment of an interrupt handler whose definition begins
 * before this chunk -- kept verbatim.  It clears the per-cpu work slot,
 * dispatches to the floppy handler, idles the interrupt-clear register
 * and exits the irq context.
 */
	*(irq_work(cpu, irq)) = 0;
	bucket = get_ino_in_irqaction(action) + ivector_table;

	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);

	irq_exit(cpu, irq);
}
#endif

/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
          (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)

/* Patch the trap-table entry for vector interrupt level 'cpu_irq' so it
 * branches straight to 'handler' (branch + nop written in place), then
 * force the stores out and flush the modified instructions.
 * NOTE(review): the 0x820 base offset and 32-byte (<<5) entry stride
 * assume the TL0 trap-table layout -- confirm against the trap table
 * source before changing.
 */
static void install_fast_irq(unsigned int cpu_irq,
			     void (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	/* Order the stores, then flush so instruction fetch sees them. */
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}

/* Register a "fast" interrupt handler: the trap-table entry for this
 * vector is patched to branch directly into 'handler', bypassing the
 * normal dispatch.  Returns 0 on success or a negative errno.
 * NOTE(review): 'dev_id' is accepted but never stored (action->dev_id
 * is forced to NULL below) -- confirm callers do not rely on it.
 */
int request_fast_irq(unsigned int irq,
		     void (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		/* %i7 holds our caller's return address -- used only to
		 * identify who attempted the old-style registration.
		 */
		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if(!handler)
		return -EINVAL;

	/* PIL 0 (dummy) and PIL 14 (timer) cannot be taken over. */
	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	/* Fast irqs cannot coexist with an already-installed action. */
	action = *(bucket->pil + irq_action);
	if(action) {
		if(action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if(irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register yet already owned.\n");
		return -EBUSY;
	}

	/*
	 * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
	 * support smp intr affinity in this path.
	 */
	save_and_cli(flags);
	if(irqflags & SA_STATIC_ALLOC) {
		/* Use a static slot when requested; fall back to kmalloc
		 * (below) if the static pool is exhausted.
		 */
		if(static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	if(action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);
	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->handler = handler;
	action->flags = irqflags;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, 0);

	*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	restore_flags(flags);
#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

int probe_irq_off(unsigned long mask)
{
	return 0;
}

#ifdef CONFIG_SMP
/* Point the interrupt described by 'p' at logical cpu 'goal_cpu' by
 * rewriting the IMAP target-id field, then return the next cpu to use
 * (round-robin, wrapping when we run off the end of the logical map).
 */
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
	unsigned long imap = bucket->imap;
	unsigned int tid;

	/* Encode the target cpu id in the format this chip expects:
	 * Safari agent-id on cheetah/cheetah+, UPA tid otherwise, with
	 * a PROM translation step on starfire.
	 */
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		tid = __cpu_logical_map[goal_cpu] << 26;
		tid &= IMAP_AID_SAFARI;
	} else if (this_is_starfire == 0) {
		tid = __cpu_logical_map[goal_cpu] << 26;
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26);
		tid &= IMAP_TID_UPA;
	}
	upa_writel(tid | IMAP_VALID, imap);

	goal_cpu++;
	if(goal_cpu >= NR_CPUS ||
	   __cpu_logical_map[goal_cpu] == -1)
		goal_cpu = 0;

	return goal_cpu;
}

/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	save_and_cli(flags);
	cpu = 0;

	/*
	 * Skip the timer at [0], and very rare error/power intrs at [15].
	 * Also level [12], it causes problems on Ex000 systems.
	 */
	for(level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];
		if (level == 12) continue;
		while(p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}

	restore_flags(flags);
}
#endif

/* Saved PROM counter-timer state, restored by enable_prom_timer(). */
struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

/* Locate the PROM counter-timer node and record its mapped address in
 * prom_timers; leaves prom_timers NULL when the node or its "address"
 * property is absent.
 */
static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if(tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if(err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

/* Stop the PROM's timer (saving its limits for enable_prom_timer())
 * and swallow any interrupt packet it already posted.
 */
static void kill_prom_timer(void)
{
	if(!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(" mov 0x40, %%g2\n"
			     " ldxa [%%g0] %0, %%g1\n"
			     " ldxa [%%g2] %1, %%g1\n"
			     " stxa %%g0, [%%g0] %0\n"
			     " membar #Sync\n"
			     : /* no outputs */
			     : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
			     : "g1", "g2");
}

/* Restore the PROM timer limits saved by kill_prom_timer(). */
void enable_prom_timer(void)
{
	if(!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}

/* One-time IRQ bring-up: silence the PROM timer, clear the vector
 * tables and any pending softints, then (below) enable interrupts.
 */
void __init init_IRQ(void)
{
	static int called = 0;

	/* Guard against being called more than once. */
	if (called == 0) {
		called = 1;
		map_prom_timers();
		kill_prom_timer();
		memset(&ivector_table[0], 0, sizeof(ivector_table));
#ifndef CONFIG_SMP
		memset(&__up_workvec[0], 0, sizeof(__up_workvec));
#endif
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
 */
	/* Set PSTATE.IE to enable interrupt delivery. */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}

/* /proc/irq and its per-irq subdirectories. */
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS];

#ifdef CONFIG_SMP

#define HEX_DIGITS 16

/* Parse up to HEX_DIGITS hex characters from the user buffer into
 * *ret.  Parsing stops at the first non-hex character.  Returns 0 on
 * success, -EINVAL on empty input, -EFAULT on a bad user pointer.
 * NOTE(review): *ret is written even when parsing stops immediately
 * (value stays 0) -- callers see 0/"success" for garbage input.
 */
static unsigned int parse_hex_value (const char *buffer,
		unsigned long count, unsigned long *ret)
{
	unsigned char hexnum [HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 8 characters as a hex string, any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;

	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
			case '0' ... '9': c -= '0'; break;
			case 'a' ... 'f': c -= 'a'-10; break;
			case 'A' ... 'F': c -= 'A'-10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}

/* Translate a mask of hardware cpu ids into a mask of logical cpu
 * numbers (bit i set -> bit cpu_number_map(i) set).
 */
static unsigned long hw_to_logical(unsigned long mask)
{
	unsigned long new_mask = 0UL;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (mask & (1UL << i)) {
			int logical = cpu_number_map(i);

			new_mask |= (1UL << logical);
		}
	}

	return new_mask;
}

/* Inverse of hw_to_logical(): logical cpu mask -> hardware cpu mask. */
static unsigned long logical_to_hw(unsigned long mask)
{
	unsigned long new_mask = 0UL;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (mask & (1UL << i)) {
			int hw = cpu_logical_map(i);

			new_mask |= (1UL << hw);
		}
	}

	return new_mask;
}

/* /proc/irq/N/smp_affinity read handler: print the affinity mask in
 * hardware cpu-id terms; an empty mask is shown as all-ones.
 */
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irqaction *ap = bp->irq_info;
	unsigned long mask = get_smpaff_in_irqaction(ap);

	mask = logical_to_hw(mask);

	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%016lx\n", mask == 0 ? ~0UL : mask);
}

/* Record a new affinity mask (given in hardware cpu ids) on the irq's
 * action.  The actual migration happens lazily -- see comment below.
 */
static inline void set_intr_affinity(int irq, unsigned long hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;
	unsigned long aff = hw_to_logical(hw_aff);

	/*
	 * Users specify affinity in terms of cpu ids, which is what
	 * is displayed via /proc/cpuinfo. As soon as we do this,
	 * handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}

/* /proc/irq/N/smp_affinity write handler.
 * NOTE(review): the parse_hex_value() return code in 'err' is never
 * checked; a malformed write simply yields mask 0 and thus -EINVAL
 * via the online-cpu check below -- confirm this is intentional.
 */
static int irq_affinity_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	unsigned long new_value;

	err = parse_hex_value(buffer, count, &new_value);

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	new_value &= cpu_online_map;
	if (!new_value)
		return -EINVAL;

	set_intr_affinity(irq, new_value);

	return full_count;
}

#endif

#define MAX_NAMELEN 10

/* Create /proc/irq/<irq> (hex-named) and, on SMP non-starfire, its
 * smp_affinity control file.  A no-op if /proc/irq does not exist yet
 * or this irq's directory was already created.
 */
static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}

/* Create the top-level /proc/irq directory. */
void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -