adeos-ipipe-2.4.25-ppc-denx-1.0-03.patch
+#define ipipe_load_cpuid()		do { } while(0)
+#define ipipe_lock_cpu(flags)		local_irq_save_hw(flags)
+#define ipipe_unlock_cpu(flags)		local_irq_restore_hw(flags)
+#define ipipe_get_cpu(flags)		do { flags = 0; } while(0)
+#define ipipe_put_cpu(flags)		do { } while(0)
+#define ipipe_current_domain		(ipipe_percpu_domain[0])
+
+#endif	/* CONFIG_SMP */
+
+#define ipipe_virtual_irq_p(irq)	((irq) >= IPIPE_VIRQ_BASE && \
+					 (irq) < IPIPE_NR_IRQS)
+
+typedef void (*ipipe_irq_handler_t)(unsigned irq,
+				    void *cookie);
+
+typedef int (*ipipe_irq_ackfn_t)(unsigned irq);
+
+#define IPIPE_SAME_HANDLER	((ipipe_irq_handler_t)(-1))
+
+struct ipipe_domain {
+
+	struct list_head p_link;	/* Link in pipeline */
+
+	struct ipcpudata {
+		unsigned long status;
+		unsigned long irq_pending_hi;
+		unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
+		struct ipirqcnt {
+			unsigned long pending_hits;
+			unsigned long total_hits;
+		} irq_counters[IPIPE_NR_IRQS];
+	} ____cacheline_aligned_in_smp cpudata[IPIPE_NR_CPUS];
+
+	struct {
+		unsigned long control;
+		ipipe_irq_ackfn_t acknowledge;
+		ipipe_irq_handler_t handler;
+		void *cookie;
+	} ____cacheline_aligned irqs[IPIPE_NR_IRQS];
+
+	int (*evhand[IPIPE_NR_EVENTS])(unsigned event,
+				       struct ipipe_domain *from,
+				       void *data);	/* Event handlers. */
+	unsigned long long evself;	/* Self-monitored event bits. */
+
+#ifdef CONFIG_IPIPE_STATS
+	struct ipipe_stats {	/* All in timebase units. */
+		unsigned long long last_stall_date;
+		unsigned long last_stall_eip;
+		unsigned long max_stall_time;
+		unsigned long max_stall_eip;
+		struct ipipe_irq_stats {
+			unsigned long long last_receipt_date;
+			unsigned long max_delivery_time;
+		} irq_stats[IPIPE_NR_IRQS];
+	} ____cacheline_aligned_in_smp stats[IPIPE_NR_CPUS];
+#endif	/* CONFIG_IPIPE_STATS */
+	unsigned long flags;
+	unsigned domid;
+	const char *name;
+	int priority;
+	void *pdd;
+};
+
+struct ipipe_domain_attr {
+
+	unsigned domid;		/* Domain identifier -- Magic value set by caller */
+	const char *name;	/* Domain name -- Warning: won't be dup'ed! */
+	int priority;		/* Priority in interrupt pipeline */
+	void (*entry) (void);	/* Domain entry point */
+	void *pdd;		/* Per-domain (opaque) data pointer */
+};
+
+/* The following macros must be used hw interrupts off. */
+
+#define __ipipe_irq_cookie(ipd,irq)	(ipd)->irqs[irq].cookie
+#define __ipipe_irq_handler(ipd,irq)	(ipd)->irqs[irq].handler
+
+#define __ipipe_cpudata_irq_hits(ipd,cpuid,irq)	((ipd)->cpudata[cpuid].irq_counters[irq].total_hits)
+
+#define __ipipe_set_irq_bit(ipd,cpuid,irq) \
+do { \
+	if (!test_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) { \
+		__set_bit(irq & IPIPE_IRQ_IMASK,&(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+		__set_bit(irq >> IPIPE_IRQ_ISHIFT,&(ipd)->cpudata[cpuid].irq_pending_hi); \
+	} \
+} while(0)
+
+#define __ipipe_clear_pend(ipd,cpuid,irq) \
+do { \
+	__clear_bit(irq & IPIPE_IRQ_IMASK,&(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+	if ((ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT] == 0) \
+		__clear_bit(irq >> IPIPE_IRQ_ISHIFT,&(ipd)->cpudata[cpuid].irq_pending_hi); \
+} while(0)
+
+#define __ipipe_lock_irq(ipd,cpuid,irq) \
+do { \
+	if (!test_and_set_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) \
+		__ipipe_clear_pend(ipd,cpuid,irq); \
+} while(0)
+
+#define __ipipe_unlock_irq(ipd,irq) \
+do { \
+	int __cpuid, __nr_cpus = num_online_cpus(); \
+	if (test_and_clear_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) \
+		for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) \
+			if ((ipd)->cpudata[__cpuid].irq_counters[irq].pending_hits > 0) { /* We need atomic ops next. */ \
+				set_bit(irq & IPIPE_IRQ_IMASK,&(ipd)->cpudata[__cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+				set_bit(irq >> IPIPE_IRQ_ISHIFT,&(ipd)->cpudata[__cpuid].irq_pending_hi); \
+			} \
+} while(0)
+
+#define __ipipe_clear_irq(ipd,irq) \
+do { \
+	int __cpuid, __nr_cpus = num_online_cpus(); \
+	clear_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control); \
+	for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) { \
+		(ipd)->cpudata[__cpuid].irq_counters[irq].pending_hits = 0; \
+		__ipipe_clear_pend(ipd,__cpuid,irq); \
+	} \
+} while(0)
+
+#define spin_lock_hw(x)		spin_lock(x)
+#define spin_unlock_hw(x)	spin_unlock(x)
+#define spin_trylock_hw(x)	spin_trylock(x)
+#define write_lock_hw(x)	write_lock(x)
+#define write_unlock_hw(x)	write_unlock(x)
+#define write_trylock_hw(x)	write_trylock(x)
+#define read_lock_hw(x)		read_lock(x)
+#define read_unlock_hw(x)	read_unlock(x)
+
+typedef spinlock_t ipipe_spinlock_t;
+typedef rwlock_t ipipe_rwlock_t;
+#define IPIPE_SPIN_LOCK_UNLOCKED	SPIN_LOCK_UNLOCKED
+#define IPIPE_RW_LOCK_UNLOCKED		RW_LOCK_UNLOCKED
+
+#define spin_lock_irqsave_hw(x,flags) \
+do { \
+	local_irq_save_hw(flags); \
+	spin_lock_hw(x); \
+} while (0)
+
+#define spin_unlock_irqrestore_hw(x,flags) \
+do { \
+	spin_unlock_hw(x); \
+	local_irq_restore_hw(flags); \
+} while (0)
+
+#define spin_lock_irq_hw(x) \
+do { \
+	local_irq_disable_hw(); \
+	spin_lock_hw(x); \
+} while (0)
+
+#define spin_unlock_irq_hw(x) \
+do { \
+	spin_unlock_hw(x); \
+	local_irq_enable_hw(); \
+} while (0)
+
+#define read_lock_irqsave_hw(lock, flags) \
+do { \
+	local_irq_save_hw(flags); \
+	read_lock_hw(lock); \
+} while (0)
+
+#define read_unlock_irqrestore_hw(lock, flags) \
+do { \
+	read_unlock_hw(lock); \
+	local_irq_restore_hw(flags); \
+} while (0)
+
+#define write_lock_irqsave_hw(lock, flags) \
+do { \
+	local_irq_save_hw(flags); \
+	write_lock_hw(lock); \
+} while (0)
+
+#define write_unlock_irqrestore_hw(lock, flags) \
+do { \
+	write_unlock_hw(lock); \
+	local_irq_restore_hw(flags); \
+} while (0)
+
+extern struct ipipe_domain *ipipe_percpu_domain[], *ipipe_root_domain;
+
+extern unsigned __ipipe_printk_virq;
+
+extern unsigned long __ipipe_virtual_irq_map;
+
+extern struct list_head __ipipe_pipeline;
+
+extern ipipe_spinlock_t __ipipe_pipelock;
+
+extern int __ipipe_event_monitors[];
+
+/* Private interface */
+
+void ipipe_init(void);
+
+#ifdef CONFIG_PROC_FS
+void ipipe_init_proc(void);
+#else	/* !CONFIG_PROC_FS */
+#define ipipe_init_proc()	do { } while(0)
+#endif	/* CONFIG_PROC_FS */
+
+void __ipipe_init_stage(struct ipipe_domain *ipd);
+
+void __ipipe_cleanup_domain(struct ipipe_domain *ipd);
+
+void __ipipe_add_domain_proc(struct ipipe_domain *ipd);
+
+void __ipipe_remove_domain_proc(struct ipipe_domain *ipd);
+
+void __ipipe_flush_printk(unsigned irq, void *cookie);
+
+void __ipipe_stall_root(void);
+
+void __ipipe_unstall_root(void);
+
+unsigned long __ipipe_test_root(void);
+
+unsigned long __ipipe_test_and_stall_root(void);
+
+void FASTCALL(__ipipe_restore_root(unsigned long flags));
+
+int FASTCALL(__ipipe_schedule_irq(unsigned irq, struct list_head *head));
+
+int FASTCALL(__ipipe_dispatch_event(unsigned event, void *data));
+
+#define __ipipe_pipeline_head_p(ipd)	(&(ipd)->p_link == __ipipe_pipeline.next)
+
+#define __ipipe_event_pipelined_p(ev) \
+	(__ipipe_event_monitors[ev] > 0 || (ipipe_current_domain->evself & (1LL << ev)))
+
+#ifdef CONFIG_SMP
+
+cpumask_t __ipipe_set_irq_affinity(unsigned irq,
+				   cpumask_t cpumask);
+
+int FASTCALL(__ipipe_send_ipi(unsigned ipi,
+			      cpumask_t cpumask));
+
+#endif	/* CONFIG_SMP */
+
+/* Called with hw interrupts off. */
+static inline void __ipipe_switch_to(struct ipipe_domain *out,
+				     struct ipipe_domain *in, int cpuid)
+{
+	void ipipe_suspend_domain(void);
+
+	/*
+	 * "in" is guaranteed to be closer than "out" from the head of the
+	 * pipeline (and obviously different).
+	 */
+
+	ipipe_percpu_domain[cpuid] = in;
+
+	ipipe_suspend_domain();	/* Sync stage and propagate interrupts. */
+	ipipe_load_cpuid();	/* Processor might have changed. */
+
+	if (ipipe_percpu_domain[cpuid] == in)
+		/*
+		 * Otherwise, something has changed the current domain under
+		 * our feet recycling the register set; do not override.
+		 */
+		ipipe_percpu_domain[cpuid] = out;
+}
+
+static inline void ipipe_sigwake_notify(struct task_struct *p)
+{
+	if (__ipipe_event_pipelined_p(IPIPE_EVENT_SIGWAKE))
+		__ipipe_dispatch_event(IPIPE_EVENT_SIGWAKE,p);
+}
+
+static inline void ipipe_setsched_notify(struct task_struct *p)
+{
+	if (__ipipe_event_pipelined_p(IPIPE_EVENT_SETSCHED))
+		__ipipe_dispatch_event(IPIPE_EVENT_SETSCHED,p);
+}
+
+static inline void ipipe_exit_notify(struct task_struct *p)
+{
+	if (__ipipe_event_pipelined_p(IPIPE_EVENT_EXIT))
+		__ipipe_dispatch_event(IPIPE_EVENT_EXIT,p);
+}
+
+static inline int ipipe_trap_notify(int ex, struct pt_regs *regs)
+{
+	return __ipipe_event_pipelined_p(ex) ? __ipipe_dispatch_event(ex,regs) : 0;
+}
+
+#ifdef CONFIG_IPIPE_STATS
+
+#define ipipe_mark_domain_stall(ipd, cpuid) \
+do { \
+	__label__ here; \
+	struct ipipe_stats *ips; \
+here: \
+	ips = (ipd)->stats + cpuid; \
+	if (ips->last_stall_date == 0) { \
+		ipipe_read_tsc(ips->last_stall_date); \
+		ips->last_stall_eip = (unsigned long)&&here; \
+	} \
+} while(0)
+
+static inline void ipipe_mark_domain_unstall(struct ipipe_domain *ipd, int cpuid)
+{	/* Called w/ hw interrupts off. */
+	struct ipipe_stats *ips = ipd->stats + cpuid;
+	unsigned long long t, d;
+
+	if (ips->last_stall_date != 0) {
+		ipipe_read_tsc(t);
+		d = t - ips->last_stall_date;
+		if (d > ips->max_stall_time) {
+			ips->max_stall_time = d;
+			ips->max_stall_eip = ips->last_stall_eip;
+		}
+		ips->last_stall_date = 0;
+	}
+}
+
+static inline void ipipe_mark_irq_receipt(struct ipipe_domain *ipd, unsigned irq, int cpuid)
+{
+	struct ipipe_stats *ips = ipd->stats + cpuid;
+
+	if (ips->irq_stats[irq].last_receipt_date == 0) {
+		ipipe_read_tsc(ips->irq_stats[irq].last_receipt_date);
+	}
+}
+
+static inline void ipipe_mark_irq_delivery(struct ipipe_domain *ipd, unsigned irq, int cpuid)
+{	/* Called w/ hw interrupts off. */
+	struct ipipe_stats *ips = ipd->stats + cpuid;
+	unsigned long long t, d;
+
+	if (ips->irq_stats[irq].last_receipt_date != 0) {
+		ipipe_read_tsc(t);
+		d = t - ips->irq_stats[irq].last_receipt_date;
+		ips->irq_stats[irq].last_receipt_date = 0;
+		if (d > ips->irq_stats[irq].max_delivery_time)
+			ips->irq_stats[irq].max_delivery_time = d;
+	}
+}
+
+static inline void ipipe_reset_stats (void)
+{
+	int cpu, irq;
+	for_each_online_cpu(cpu) {
+		ipipe_root_domain->stats[cpu].last_stall_date = 0LL;
+		for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
+			ipipe_root_domain->stats[cpu].irq_stats[irq].last_receipt_date = 0LL;
+	}
+}
+
+#else	/* !CONFIG_IPIPE_STATS */
+
+#define ipipe_mark_domain_stall(ipd,cpuid)	do { } while(0)
+#define ipipe_mark_domain_unstall(ipd,cpuid)	do { } while(0)
+#define ipipe_mark_irq_receipt(ipd,irq,cpuid)	do { } while(0)
+#define ipipe_mark_irq_delivery(ipd,irq,cpuid)	do { } while(0)
+#define ipipe_reset_stats()			do { } while(0)
+
+#endif	/* CONFIG_IPIPE_STATS */
+
+/* Public interface */
+
+int ipipe_register_domain(struct ipipe_domain *ipd,
+			  struct ipipe_domain_attr *attr);
+
+int ipipe_unregister_domain(struct ipipe_domain *ipd);
+
+void ipipe_suspend_domain(void);
+
+int ipipe_virtualize_irq(struct ipipe_domain *ipd,
+			 unsigned irq,
+			 ipipe_irq_handler_t handler,
+			 void *cookie,
+			 ipipe_irq_ackfn_t acknowledge,
+			 unsigned modemask);
+
+static inline int ipipe_share_irq(unsigned irq,
+				  ipipe_irq_ackfn_t acknowledge)
+{
+	return ipipe_virtualize_irq(ipipe_current_domain,
+				    irq,
+				    IPIPE_SAME_HANDLER,
+				    NULL,
+				    acknowledge,
+				    IPIPE_SHARED_MASK | IPIPE_HANDLE_MASK |
+				    IPIPE_PASS_MASK);
+}
+
+int ipipe_control_irq(unsigned irq,
+		      unsigned clrmask,
+		      unsigned setmask);
+
+unsigned ipipe_alloc_virq(void);
+
+int ipipe_free_virq(unsigned virq);
+
+int FASTCALL(ipipe_trigger_irq(unsigned irq));
+
+static inline int ipipe_propagate_irq(unsigned irq)
+{
+
+	return __ipipe_schedule_irq(irq, ipipe_current_domain->p_link.next);
+}
+
+static inline int ipipe_schedule_irq(unsigned irq)
+{
+
+	return __ipipe_schedule_irq(irq, &ipipe_current_domain->p_link);
+}
+
+static inline void ipipe_stall_pipeline_from(struct ipipe_domain *ipd)
+{
+	ipipe_declare_cpuid;
+#ifdef CONFIG_SMP
+	unsigned long flags;
+
+	ipipe_lock_cpu(flags);	/* Care for migration. */
+
+	__set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
+	ipipe_mark_domain_stall(ipd, cpuid);
+
+	if (!__ipipe_pipeline_head_p(ipd))
+		ipipe_unlock_cpu(flags);
+#else	/* CONFIG_SMP */
+	set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
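For orientation, the declarations near the end of this hunk (struct ipipe_domain_attr, ipipe_register_domain(), ipipe_virtualize_irq(), ipipe_alloc_virq(), ipipe_trigger_irq()) form the public interface a client domain built on this patch would call. The sketch below is not part of the patch; it only illustrates the intended calling sequence. All my_* names, the magic domid, the priority value and the include path are invented for the example, and IPIPE_HANDLE_MASK / IPIPE_PASS_MASK are assumed to be defined earlier in the same header, since ipipe_share_irq() above already uses them.

/* Hypothetical client module -- illustration only, not part of the patch. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ipipe.h>	/* header added by this patch; path assumed */

static struct ipipe_domain my_domain;	/* storage for the new pipeline stage */
static unsigned my_virq;		/* virtual IRQ allocated at entry */

/* ipipe_irq_handler_t: runs when the pipeline delivers the IRQ to my_domain. */
static void my_virq_handler(unsigned irq, void *cookie)
{
	/* domain-specific work goes here */
}

/* Domain entry point, invoked once the domain has been registered. */
static void my_domain_entry(void)
{
	my_virq = ipipe_alloc_virq();	/* reserve a free virtual IRQ */

	/* Handle the virq in this domain, then let it pass down the pipeline. */
	ipipe_virtualize_irq(ipipe_current_domain,
			     my_virq,
			     my_virq_handler,
			     NULL,	/* cookie */
			     NULL,	/* no acknowledge handler for a virq */
			     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
}

static int __init my_module_init(void)
{
	struct ipipe_domain_attr attr;

	attr.domid = 0x44544d31;	/* made-up magic identifier */
	attr.name = "MyDomain";		/* not dup'ed: must outlive the domain */
	attr.priority = 100;		/* assumed to rank above the root domain */
	attr.entry = my_domain_entry;
	attr.pdd = NULL;

	return ipipe_register_domain(&my_domain, &attr);
}

static void __exit my_module_exit(void)
{
	ipipe_free_virq(my_virq);
	ipipe_unregister_domain(&my_domain);
}

module_init(my_module_init);
module_exit(my_module_exit);

From any domain, ipipe_trigger_irq(my_virq) would then post the virtual interrupt into the pipeline, and data shared with the root domain would be guarded with the _hw spinlock wrappers declared above (spin_lock_irqsave_hw() and friends), since those mask interrupts at the hardware level rather than only virtually.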