xpc.h
				variable), xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static inline void
xpc_request_partition_disengage(struct xpc_partition *part)
{
	unsigned long irq_flags;
	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
			(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's AMO */
	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
						(1UL << sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IPIs and AMOs to it until the heartbeat times out.
	 */
	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
				variable), xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static inline void
xpc_cancel_partition_disengage_request(struct xpc_partition *part)
{
	unsigned long irq_flags;
	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
			(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's AMO */
	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
						~(1UL << sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IPIs and AMOs to it until the heartbeat times out.
	 */
	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
				variable), xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static inline u64
xpc_partition_engaged(u64 partid_mask)
{
	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;

	/* return our partition's AMO variable ANDed with partid_mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
								partid_mask);
}

static inline u64
xpc_partition_disengage_requested(u64 partid_mask)
{
	AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;

	/* return our partition's AMO variable ANDed with partid_mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
								partid_mask);
}

static inline void
xpc_clear_partition_engaged(u64 partid_mask)
{
	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;

	/* clear bit(s) based on partid_mask in our partition's AMO */
	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
								~partid_mask);
}

static inline void
xpc_clear_partition_disengage_request(u64 partid_mask)
{
	AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;

	/* clear bit(s) based on partid_mask in our partition's AMO */
	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
								~partid_mask);
}
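/*
 * Illustrative sketch (not part of the original header): how the engaged /
 * disengage-request accessors above compose. Each remote partition owns one
 * bit (1UL << partid) in our partition's two AMO variables, so a caller can
 * test both states with a single-bit mask. The helper name is hypothetical.
 */
static inline int
xpc_example_partition_still_engaged(int partid)
{
	u64 partid_mask = 1UL << partid;

	/* engaged, and not yet asked by the remote side to disengage */
	return (xpc_partition_engaged(partid_mask) != 0 &&
		xpc_partition_disengage_requested(partid_mask) == 0);
}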
/*
 * The following set of macros and inlines are used for the sending and
 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
 * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
 * the other that is associated with channel activity (SGI_XPC_NOTIFY).
 */

static inline u64
xpc_IPI_receive(AMO_t *amo)
{
	return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
}

static inline enum xpc_retval
xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
{
	int ret = 0;
	unsigned long irq_flags;

	local_irq_save(irq_flags);

	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
	sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IPIs and AMOs to it until the heartbeat times out.
	 */
	ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
				xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	return ((ret == 0) ? xpcSuccess : xpcPioReadError);
}

/*
 * IPIs associated with SGI_XPC_ACTIVATE IRQ.
 */

/*
 * Flag the appropriate AMO variable and send an IPI to the specified node.
 */
static inline void
xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
			int to_phys_cpuid)
{
	int w_index = XPC_NASID_W_INDEX(from_nasid);
	int b_index = XPC_NASID_B_INDEX(from_nasid);
	AMO_t *amos = (AMO_t *) __va(amos_page_pa +
			(XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));

	(void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
				to_phys_cpuid, SGI_XPC_ACTIVATE);
}

static inline void
xpc_IPI_send_activate(struct xpc_vars *vars)
{
	xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
				vars->act_nasid, vars->act_phys_cpuid);
}

static inline void
xpc_IPI_send_activated(struct xpc_partition *part)
{
	xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
			part->remote_act_nasid, part->remote_act_phys_cpuid);
}

static inline void
xpc_IPI_send_reactivate(struct xpc_partition *part)
{
	xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
				xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
}

static inline void
xpc_IPI_send_disengage(struct xpc_partition *part)
{
	xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
			part->remote_act_nasid, part->remote_act_phys_cpuid);
}

/*
 * IPIs associated with SGI_XPC_NOTIFY IRQ.
 */

/*
 * Send an IPI to the remote partition that is associated with the
 * specified channel.
 */
#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
		xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)

static inline void
xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
			unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xpc_retval ret;

	if (likely(part->act_state != XPC_P_DEACTIVATING)) {
		ret = xpc_IPI_send(part->remote_IPI_amo_va,
					(u64) ipi_flag << (ch->number * 8),
					part->remote_IPI_nasid,
					part->remote_IPI_phys_cpuid,
					SGI_XPC_NOTIFY);
		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
			ipi_flag_string, ch->partid, ch->number, ret);
		if (unlikely(ret != xpcSuccess)) {
			if (irq_flags != NULL) {
				spin_unlock_irqrestore(&ch->lock, *irq_flags);
			}
			XPC_DEACTIVATE_PARTITION(part, ret);
			if (irq_flags != NULL) {
				spin_lock_irqsave(&ch->lock, *irq_flags);
			}
		}
	}
}
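/*
 * Illustrative sketch (not part of the original header): the intended
 * pairing of xpc_IPI_send() and xpc_IPI_receive(). Senders atomically OR a
 * flag bit into the target AMO before firing the IPI; the receiver drains
 * the AMO with an atomic fetch-and-clear, so flags from senders that raced
 * against one another are never lost. The handler name is hypothetical.
 */
static inline void
xpc_example_drain_activate_AMO(AMO_t *amo)
{
	u64 pending = xpc_IPI_receive(amo);	/* atomically read and zero */

	while (pending != 0) {
		int bit = __ffs(pending);	/* lowest set flag bit */

		pending &= ~(1UL << bit);
		/* ... dispatch work for the sender that set this bit ... */
	}
}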
/*
 * Make it look like the remote partition, which is associated with the
 * specified channel, sent us an IPI. This faked IPI will be handled
 * by xpc_dropped_IPI_check().
 */
#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
		xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)

static inline void
xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
				char *ipi_flag_string)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];

	FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
			FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
	dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
		ipi_flag_string, ch->partid, ch->number);
}

/*
 * The sending and receiving of IPIs includes the setting of an AMO variable
 * to indicate the reason the IPI was sent. The 64-bit variable is divided
 * up into eight bytes, ordered from right to left. Byte zero pertains to
 * channel 0, byte one to channel 1, and so on. Each byte is described by
 * the following IPI flags.
 */

#define	XPC_IPI_CLOSEREQUEST	0x01
#define	XPC_IPI_CLOSEREPLY	0x02
#define	XPC_IPI_OPENREQUEST	0x04
#define	XPC_IPI_OPENREPLY	0x08
#define	XPC_IPI_MSGREQUEST	0x10

/* given an AMO variable and a channel#, get its associated IPI flags */
#define XPC_GET_IPI_FLAGS(_amo, _c)	((u8) (((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f)	(_amo) |= ((u64) (_f) << ((_c) * 8))

#define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)	      ((_amo) & 0x1010101010101010)

static inline void
xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->reason = ch->reason;

	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
}

static inline void
xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
{
	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
}

static inline void
xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->msg_size = ch->msg_size;
	args->local_nentries = ch->local_nentries;

	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
}

static inline void
xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->remote_nentries = ch->remote_nentries;
	args->local_nentries = ch->local_nentries;
	args->local_msgqueue_pa = __pa(ch->local_msgqueue);

	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
}

static inline void
xpc_IPI_send_msgrequest(struct xpc_channel *ch)
{
	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
}

static inline void
xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
{
	XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
}
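/*
 * Illustrative sketch (not part of the original header): how the per-channel
 * IPI flag bytes pack into the 64-bit AMO value. Setting OPENREQUEST (0x04)
 * on channel 2 ORs the flag into byte 2, yielding 0x040000; the GET macro
 * recovers it, and the ANY_* masks test one flag class across all eight
 * channels at once. The function name is hypothetical.
 */
static inline void
xpc_example_ipi_flag_packing(void)
{
	u64 amo = 0;

	XPC_SET_IPI_FLAGS(amo, 2, XPC_IPI_OPENREQUEST);	/* amo == 0x040000 */

	BUG_ON(XPC_GET_IPI_FLAGS(amo, 2) != XPC_IPI_OPENREQUEST);
	BUG_ON(!XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(amo));	/* 0x0f bits set */
	BUG_ON(XPC_ANY_MSG_IPI_FLAGS_SET(amo));		/* 0x10 bit not set */
}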
/*
 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
 * pages are located in the lowest granule. The lowest granule uses 4k pages
 * for cached references and an alternate TLB handler to never provide a
 * cacheable mapping for the entire region. This will prevent speculative
 * reading of cached copies of our lines from being issued which will cause
 * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
 * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an
 * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
 * activation and 2 AMO variables for partition deactivation.
 */
static inline AMO_t *
xpc_IPI_init(int index)
{
	AMO_t *amo = xpc_vars->amos_page + index;

	(void) xpc_IPI_receive(amo);	/* clear AMO variable */
	return amo;
}

static inline enum xpc_retval
xpc_map_bte_errors(bte_result_t error)
{
	switch (error) {
	case BTE_SUCCESS:	return xpcSuccess;
	case BTEFAIL_DIR:	return xpcBteDirectoryError;
	case BTEFAIL_POISON:	return xpcBtePoisonError;
	case BTEFAIL_WERR:	return xpcBteWriteError;
	case BTEFAIL_ACCESS:	return xpcBteAccessError;
	case BTEFAIL_PWERR:	return xpcBtePWriteError;
	case BTEFAIL_PRERR:	return xpcBtePReadError;
	case BTEFAIL_TOUT:	return xpcBteTimeOutError;
	case BTEFAIL_XTERR:	return xpcBteXtalkError;
	case BTEFAIL_NOTAVAIL:	return xpcBteNotAvailable;
	default:		return xpcBteUnmappedError;
	}
}

static inline void *
xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kmalloc will give us cacheline aligned memory by default */
	*base = kmalloc(size, flags);
	if (*base == NULL) {
		return NULL;
	}
	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
		return *base;
	}
	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kmalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL) {
		return NULL;
	}
	return (void *) L1_CACHE_ALIGN((u64) *base);
}

/*
 * Check to see if there is any channel activity to/from the specified
 * partition.
 */
static inline void
xpc_check_for_channel_activity(struct xpc_partition *part)
{
	u64 IPI_amo;
	unsigned long irq_flags;

	IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
	if (IPI_amo == 0) {
		return;
	}

	spin_lock_irqsave(&part->IPI_lock, irq_flags);
	part->local_IPI_amo |= IPI_amo;
	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

	dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
		XPC_PARTID(part), IPI_amo);

	xpc_wakeup_channel_mgr(part);
}

#endif /* _ASM_IA64_SN_XPC_H */
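/*
 * Illustrative usage sketch for xpc_kmalloc_cacheline_aligned() (not part of
 * the original header; kept inside a comment so nothing is defined outside
 * the include guard above). The function returns an L1-cacheline-aligned
 * pointer while *base receives the raw kmalloc() pointer, which is the one
 * that must eventually be handed to kfree(). The size and flags shown are
 * hypothetical:
 *
 *	void *base;
 *	void *buf = xpc_kmalloc_cacheline_aligned(1024, GFP_KERNEL, &base);
 *
 *	if (buf != NULL) {
 *		(... use the aligned buffer buf ...)
 *		kfree(base);	(free via the raw pointer, not buf)
 *	}
 */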