xpc.h
	u16 number;			/* channel # */

	u16 msg_size;			/* sizeof each msg entry */
	u16 local_nentries;		/* #of msg entries in local msg queue */
	u16 remote_nentries;		/* #of msg entries in remote msg queue */

	void *local_msgqueue_base;	/* base address of kmalloc'd space */
	struct xpc_msg *local_msgqueue;	/* local message queue */
	void *remote_msgqueue_base;	/* base address of kmalloc'd space */
	struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
					/* local message queue */
	u64 remote_msgqueue_pa;		/* phys addr of remote partition's */
					/* local message queue */

	atomic_t references;		/* #of external references to queues */

	atomic_t n_on_msg_allocate_wq;	/* #on msg allocation wait queue */
	wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */

	/* queue of msg senders who want to be notified when msg received */

	atomic_t n_to_notify;		/* #of msg senders to notify */
	struct xpc_notify *notify_queue;/* notify queue for messages sent */

	xpc_channel_func func;		/* user's channel function */
	void *key;			/* pointer to user's key */

	struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
	struct semaphore teardown_sema;    /* wait for teardown completion */

	struct xpc_openclose_args *local_openclose_args; /* args passed on */
					/* opening or closing of channel */

	/* various flavors of local and remote Get/Put values */

	struct xpc_gp *local_GP;	/* local Get/Put values */
	struct xpc_gp remote_GP;	/* remote Get/Put values */
	struct xpc_gp w_local_GP;	/* working local Get/Put values */
	struct xpc_gp w_remote_GP;	/* working remote Get/Put values */
	s64 next_msg_to_pull;		/* Put value of next msg to pull */

	/* kthread management related fields */

// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
// >>> dependent on activity over the last interval of time
	atomic_t kthreads_assigned;	/* #of kthreads assigned to channel */
	u32 kthreads_assigned_limit;	/* limit on #of kthreads assigned */
	atomic_t kthreads_idle;		/* #of kthreads idle waiting for work */
	u32 kthreads_idle_limit;	/* limit on #of kthreads idle */
	atomic_t kthreads_active;	/* #of kthreads actively working */
	// >>> following field is temporary
	u32 kthreads_created;		/* total #of kthreads created */

	wait_queue_head_t idle_wq;	/* idle kthread wait queue */

} ____cacheline_aligned;


/* struct xpc_channel flags */

#define	XPC_C_WASCONNECTED	0x00000001 /* channel was connected */

#define	XPC_C_ROPENREPLY	0x00000002 /* remote open channel reply */
#define	XPC_C_OPENREPLY		0x00000004 /* local open channel reply */
#define	XPC_C_ROPENREQUEST	0x00000008 /* remote open channel request */
#define	XPC_C_OPENREQUEST	0x00000010 /* local open channel request */

#define	XPC_C_SETUP		0x00000020 /* channel's msgqueues are alloc'd */
#define	XPC_C_CONNECTCALLOUT	0x00000040 /* channel connected callout made */
#define	XPC_C_CONNECTED		0x00000080 /* local channel is connected */
#define	XPC_C_CONNECTING	0x00000100 /* channel is being connected */

#define	XPC_C_RCLOSEREPLY	0x00000200 /* remote close channel reply */
#define	XPC_C_CLOSEREPLY	0x00000400 /* local close channel reply */
#define	XPC_C_RCLOSEREQUEST	0x00000800 /* remote close channel request */
#define	XPC_C_CLOSEREQUEST	0x00001000 /* local close channel request */

#define	XPC_C_DISCONNECTED	0x00002000 /* channel is disconnected */
#define	XPC_C_DISCONNECTING	0x00004000 /* channel is being disconnected */
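/*
 * Illustrative sketch, NOT part of xpc.h: the various Get/Put values above
 * describe a circular message queue.  Assuming struct xpc_gp (defined earlier
 * in this header) carries monotonically increasing s64 'get' and 'put'
 * counters, the number of messages the remote side has made available but the
 * local side has not yet pulled could be estimated as below.  The helper name
 * is hypothetical.
 */
static inline s64
xpc_example_nmsgs_to_pull(struct xpc_channel *ch)
{
	/* working copies are snapshots, so both counts come from one moment */
	return ch->w_remote_GP.put - ch->w_local_GP.get;
}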
/*
 * Manages channels on a partition basis.  There is one of these structures
 * for each partition (a partition will never utilize the structure that
 * represents itself).
 */
struct xpc_partition {

	/* XPC HB infrastructure */

	u64 remote_rp_pa;		/* phys addr of partition's rsvd pg */
	u64 remote_vars_pa;		/* phys addr of partition's vars */
	u64 remote_vars_part_pa;	/* phys addr of partition's vars part */
	u64 last_heartbeat;		/* HB at last read */
	u64 remote_amos_page_pa;	/* phys addr of partition's amos page */
	int remote_act_nasid;		/* active part's act/deact nasid */
	int remote_act_phys_cpuid;	/* active part's act/deact phys cpuid */
	u32 act_IRQ_rcvd;		/* IRQs since activation */
	spinlock_t act_lock;		/* protect updating of act_state */
	u8 act_state;			/* from XPC HB viewpoint */
	enum xpc_retval reason;		/* reason partition is deactivating */
	int reason_line;		/* line# deactivation initiated from */
	int reactivate_nasid;		/* nasid in partition to reactivate */


	/* XPC infrastructure referencing and teardown control */

	volatile u8 setup_state;	/* infrastructure setup state */
	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
	atomic_t references;		/* #of references to infrastructure */


	/*
	 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
	 * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
	 * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
	 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
	 */


	u8 nchannels;			/* #of defined channels supported */
	atomic_t nchannels_active;	/* #of channels that are not DISCONNECTED */

	struct xpc_channel *channels;	/* array of channel structures */

	void *local_GPs_base;		/* base address of kmalloc'd space */
	struct xpc_gp *local_GPs;	/* local Get/Put values */
	void *remote_GPs_base;		/* base address of kmalloc'd space */
	struct xpc_gp *remote_GPs;	/* copy of remote partition's local Get/Put */
					/* values */
	u64 remote_GPs_pa;		/* phys address of remote partition's local */
					/* Get/Put values */


	/* fields used to pass args when opening or closing a channel */

	void *local_openclose_args_base;  /* base address of kmalloc'd space */
	struct xpc_openclose_args *local_openclose_args;  /* local's args */
	void *remote_openclose_args_base; /* base address of kmalloc'd space */
	struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
							  /* args */
	u64 remote_openclose_args_pa;	  /* phys addr of remote's args */


	/* IPI sending, receiving and handling related fields */

	int remote_IPI_nasid;		/* nasid of where to send IPIs */
	int remote_IPI_phys_cpuid;	/* phys CPU ID of where to send IPIs */
	AMO_t *remote_IPI_amo_va;	/* address of remote IPI AMO_t structure */

	AMO_t *local_IPI_amo_va;	/* address of IPI AMO_t structure */
	u64 local_IPI_amo;		/* IPI amo flags yet to be handled */
	char IPI_owner[8];		/* IPI owner's name */
	struct timer_list dropped_IPI_timer; /* dropped IPI timer */

	spinlock_t IPI_lock;		/* IPI handler lock */


	/* channel manager related fields */

	atomic_t channel_mgr_requests;	/* #of requests to activate chan mgr */
	wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */

} ____cacheline_aligned;


/* struct xpc_partition act_state values (for XPC HB) */

#define	XPC_P_INACTIVE		0x00	/* partition is not active */
#define	XPC_P_ACTIVATION_REQ	0x01	/* created thread to activate */
#define	XPC_P_ACTIVATING	0x02	/* activation thread started */
#define	XPC_P_ACTIVE		0x03	/* xpc_partition_up() was called */
#define	XPC_P_DEACTIVATING	0x04	/* partition deactivation initiated */
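/*
 * Illustrative sketch, NOT part of xpc.h: act_state is only read or updated
 * while holding act_lock (see the field comments above).  A guarded state
 * transition might look roughly like the following; the helper name is
 * hypothetical and only shows the locking pattern.
 */
static inline void
xpc_example_mark_activating(struct xpc_partition *part)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->act_lock, irq_flags);
	if (part->act_state == XPC_P_ACTIVATION_REQ) {
		/* the activation thread has started running */
		part->act_state = XPC_P_ACTIVATING;
	}
	spin_unlock_irqrestore(&part->act_lock, irq_flags);
}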
#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
			xpc_deactivate_partition(__LINE__, (_p), (_reason))


/* struct xpc_partition setup_state values */

#define XPC_P_UNSET		0x00	/* infrastructure was never setup */
#define XPC_P_SETUP		0x01	/* infrastructure is setup */
#define XPC_P_WTEARDOWN		0x02	/* waiting to teardown infrastructure */
#define XPC_P_TORNDOWN		0x03	/* infrastructure is torndown */


/*
 * struct xpc_partition IPI_timer #of seconds to wait before checking for
 * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
 * after the IPI was received.
 */
#define XPC_P_DROPPED_IPI_WAIT	(0.25 * HZ)


#define XPC_PARTID(_p)	((partid_t) ((_p) - &xpc_partitions[0]))



/* found in xp_main.c */
extern struct xpc_registration xpc_registrations[];


/* >>> found in xpc_main.c only */
extern struct device *xpc_part;
extern struct device *xpc_chan;
extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
extern void xpc_dropped_IPI_check(struct xpc_partition *);
extern void xpc_activate_kthreads(struct xpc_channel *, int);
extern void xpc_create_kthreads(struct xpc_channel *, int);
extern void xpc_disconnect_wait(int);


/* found in xpc_main.c and efi-xpc.c */
extern void xpc_activate_partition(struct xpc_partition *);


/* found in xpc_partition.c */
extern int xpc_exiting;
extern int xpc_hb_interval;
extern int xpc_hb_check_interval;
extern struct xpc_vars *xpc_vars;
extern struct xpc_rsvd_page *xpc_rsvd_page;
extern struct xpc_vars_part *xpc_vars_part;
extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
extern char xpc_remote_copy_buffer[];
extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
extern void xpc_allow_IPI_ops(void);
extern void xpc_restrict_IPI_ops(void);
extern int xpc_identify_act_IRQ_sender(void);
extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
extern void xpc_mark_partition_inactive(struct xpc_partition *);
extern void xpc_discovery(void);
extern void xpc_check_remote_hb(void);
extern void xpc_deactivate_partition(const int, struct xpc_partition *,
						enum xpc_retval);
extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);


/* found in xpc_channel.c */
extern void xpc_initiate_connect(int);
extern void xpc_initiate_disconnect(int);
extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
						xpc_notify_func, void *);
extern void xpc_initiate_received(partid_t, int, void *);
extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
extern void xpc_process_channel_activity(struct xpc_partition *);
extern void xpc_connected_callout(struct xpc_channel *);
extern void xpc_deliver_msg(struct xpc_channel *);
extern void xpc_disconnect_channel(const int, struct xpc_channel *,
					enum xpc_retval, unsigned long *);
extern void xpc_disconnected_callout(struct xpc_channel *);
extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval);
extern void xpc_teardown_infrastructure(struct xpc_partition *);



static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part)
{
	if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
		wake_up(&part->channel_mgr_wq);
	}
}
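/*
 * Illustrative sketch, NOT part of xpc.h: XPC_PARTID() recovers a partition
 * ID by pointer arithmetic against the xpc_partitions[] array declared above,
 * so it is only meaningful for pointers into that array.  The hypothetical
 * helper below walks the array (partition IDs start at 1, matching the
 * XP_MAX_PARTITIONS + 1 sizing) and pokes each channel manager that has its
 * infrastructure set up.
 */
static inline void
xpc_example_wakeup_all_channel_mgrs(void)
{
	partid_t partid;
	struct xpc_partition *part;

	for (partid = 1; partid <= XP_MAX_PARTITIONS; partid++) {
		part = &xpc_partitions[partid];
		DBUG_ON(XPC_PARTID(part) != partid);
		if (part->setup_state == XPC_P_SETUP) {
			xpc_wakeup_channel_mgr(part);
		}
	}
}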
/*
 * These next two inlines are used to keep us from tearing down a channel's
 * msg queues while a thread may be referencing them.
 */
static inline void
xpc_msgqueue_ref(struct xpc_channel *ch)
{
	atomic_inc(&ch->references);
}

static inline void
xpc_msgqueue_deref(struct xpc_channel *ch)
{
	s32 refs = atomic_dec_return(&ch->references);

	DBUG_ON(refs < 0);
	if (refs == 0) {
		xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
	}
}



#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
		xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)


/*
 * These two inlines are used to keep us from tearing down a partition's
 * setup infrastructure while a thread may be referencing it.
 */
static inline void
xpc_part_deref(struct xpc_partition *part)
{
	s32 refs = atomic_dec_return(&part->references);

	DBUG_ON(refs < 0);
	if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
		wake_up(&part->teardown_wq);
	}
}

static inline int
xpc_part_ref(struct xpc_partition *part)
{
	int setup;

	atomic_inc(&part->references);
	setup = (part->setup_state == XPC_P_SETUP);
	if (!setup) {
		xpc_part_deref(part);
	}
	return setup;
}
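/*
 * Illustrative sketch, NOT part of xpc.h: callers are expected to bracket any
 * use of a partition's infrastructure (and a channel's message queues) with
 * the ref/deref pairs above.  xpc_part_ref() returns 0 once teardown has been
 * requested, so its return value must be checked.  The function name and the
 * 'ch_number' parameter below are hypothetical.
 */
static inline void
xpc_example_touch_channel(partid_t partid, int ch_number)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;

	if (!xpc_part_ref(part)) {
		return;		/* infrastructure is (being) torn down */
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	/* ... safe to reference ch->local_msgqueue / ch->remote_msgqueue ... */

	xpc_msgqueue_deref(ch);
	xpc_part_deref(part);
}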