
📄 xpc_main.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_DEACTIVATING) {
		part->act_state = XPC_P_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
	part->act_state = XPC_P_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "bringing partition %d up\n", partid);

	daemonize("xpc%02d", partid);

	/*
	 * This thread needs to run at a realtime priority to prevent a
	 * significant performance degradation.
	 */
	ret = sched_setscheduler(current, SCHED_FIFO, &param);
	if (ret != 0) {
		dev_warn(xpc_part, "unable to set pid %d to a realtime "
			"priority, ret=%d\n", current->pid, ret);
	}

	/* allow this thread and its children to run on any CPU */
	set_cpus_allowed(current, CPU_MASK_ALL);

	/*
	 * Register the remote partition's AMOs with SAL so it can handle
	 * and cleanup errors within that address range should the remote
	 * partition go down. We don't unregister this range because it is
	 * difficult to tell when outstanding writes to the remote partition
	 * are finished and thus when it is safe to unregister. This should
	 * not result in wasted space in the SAL xp_addr_region table because
	 * we should get the same page for remote_amos_page_pa after module
	 * reloads and system reboots.
	 */
	if (sn_register_xp_addr_region(part->remote_amos_page_pa,
							PAGE_SIZE, 1) < 0) {
		dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
			"xp_addr region\n", partid);

		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	xpc_allow_hb(partid, xpc_vars);
	xpc_IPI_send_activated(part);

	/*
	 * xpc_partition_up() holds this thread and marks this partition as
	 * XPC_P_ACTIVE by calling xpc_hb_mark_active().
	 */
	(void) xpc_partition_up(part);

	xpc_disallow_hb(partid, xpc_vars);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpcReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_IPI_send_reactivate(part);
	}

	return 0;
}


void
xpc_activate_partition(struct xpc_partition *part)
{
	partid_t partid = XPC_PARTID(part);
	unsigned long irq_flags;
	pid_t pid;


	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_INACTIVE);

	part->act_state = XPC_P_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpcCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);

	if (unlikely(pid <= 0)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}


/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an AMO_t structure per partition to indicate
 * whether a partition has sent an IPI or not.  >>> If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *	irq - Interrupt ReQuest number. NOT USED.
 *
 *	dev_id - partid of IPI's potential sender.
 */
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
	partid_t partid = (partid_t) (u64) dev_id;
	struct xpc_partition *part = &xpc_partitions[partid];


	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);

	if (xpc_part_ref(part)) {
		xpc_check_for_channel_activity(part);

		xpc_part_deref(part);
	}
	return IRQ_HANDLED;
}


/*
 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
 * because the write to their associated IPI amo completed after the IRQ/IPI
 * was received.
 */
void
xpc_dropped_IPI_check(struct xpc_partition *part)
{
	if (xpc_part_ref(part)) {
		xpc_check_for_channel_activity(part);

		part->dropped_IPI_timer.expires = jiffies +
						XPC_P_DROPPED_IPI_WAIT;
		add_timer(&part->dropped_IPI_timer);
		xpc_part_deref(part);
	}
}


void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;


	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0) {
		return;
	}

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		// >>>should never be less than 0
		if (needed <= 0) {
			return;
		}
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}


/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	do {
		/* deliver messages to their intended recipients */

		while ((volatile s64) ch->w_local_GP.get <
				(volatile s64) ch->w_remote_GP.put &&
					!((volatile u32) ch->flags &
						XPC_C_DISCONNECTING)) {
			xpc_deliver_msg(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
						ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void) wait_event_interruptible_exclusive(ch->idle_wq,
				((volatile s64) ch->w_local_GP.get <
					(volatile s64) ch->w_remote_GP.put ||
				((volatile u32) ch->flags &
						XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
}


static int
xpc_daemonize_kthread(void *args)
{
	partid_t partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;


	daemonize("xpc%02dc%d", partid, ch_number);

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
			if (n_needed > 0 &&
					!(ch->flags & XPC_C_DISCONNECTING)) {
				xpc_activate_kthreads(ch, n_needed);
			}

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpcDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
		if (atomic_dec_return(&part->nchannels_engaged) == 0) {
			xpc_mark_partition_disengaged(part);
			xpc_IPI_send_disengage(part);
		}
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}


/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
			int ignore_disconnecting)
{
	unsigned long irq_flags;
	pid_t pid;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];


	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
			if (atomic_inc_return(&part->nchannels_engaged) == 1)
				xpc_mark_partition_engaged(part);
		}
		(void) xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
		if (pid < 0) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpcDisconnecting callout that this
			 * failed kernel_thread would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				xpc_mark_partition_disengaged(part);
				xpc_IPI_send_disengage(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
						ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
								&irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}

		ch->kthreads_created++;	// >>> temporary debug only!!!
	}
}


void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	partid_t partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;


	/* now wait for all callouts to the caller's function to cease */
	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part)) {
			continue;
		}

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_IPI_flags) {
			if (part->act_state != XPC_P_DEACTIVATING) {
				spin_lock(&part->IPI_lock);
				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
					ch->number, ch->delayed_IPI_flags);
				spin_unlock(&part->IPI_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_IPI_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr) {
			xpc_wakeup_channel_mgr(part);
		}

		xpc_part_deref(part);
	}
}


static void
xpc_do_exit(enum xpc_retval reason)
{
	partid_t partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_request_timeout = 0;


	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);
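For context, here is a minimal, self-contained sketch of the argument-packing trick that xpc_create_kthreads() and xpc_daemonize_kthread() rely on above: two 32-bit values (ch->partid and ch->number) are folded into a single u64 so that both can travel through kernel_thread()'s lone void * parameter. The macro bodies below are an assumption modeled on how XPC_PACK_ARGS(), XPC_UNPACK_ARG1() and XPC_UNPACK_ARG2() are used in the listing; the real definitions live in xpc.h and may differ in detail.

/*
 * Illustrative sketch only (not from xpc_main.c): pack two 32-bit values
 * into one 64-bit argument and recover them on the other side.
 */
#include <stdint.h>
#include <stdio.h>

/* assumed shape of the packing macros, modeled on their usage above */
#define PACK_ARGS(a1, a2) \
	((((uint64_t)(a1)) & 0xffffffffULL) | \
	 ((((uint64_t)(a2)) & 0xffffffffULL) << 32))
#define UNPACK_ARG1(args)	((uint32_t)(((uint64_t)(args)) & 0xffffffffULL))
#define UNPACK_ARG2(args)	((uint32_t)((((uint64_t)(args)) >> 32) & 0xffffffffULL))

int main(void)
{
	uint64_t args = PACK_ARGS(3, 7);	/* e.g. partid = 3, ch_number = 7 */

	/* the new kthread recovers both values from the single argument */
	printf("partid=%u ch_number=%u\n", UNPACK_ARG1(args), UNPACK_ARG2(args));
	return 0;
}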

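Similarly, a small hypothetical userspace model of the bookkeeping xpc_activate_kthreads() performs: wake at most as many kthreads as are idle on the channel, then create additional ones only up to kthreads_assigned_limit. The struct and helper names below are invented for illustration and are not part of XPC.

/*
 * Simplified model (hypothetical helper) of xpc_activate_kthreads()'s
 * arithmetic: wake idle kthreads first, then cap new kthread creation
 * at the channel's assigned-kthread limit.
 */
#include <stdio.h>

struct channel_counts {
	int kthreads_idle;		/* kthreads sleeping on idle_wq */
	int kthreads_assigned;		/* kthreads currently assigned */
	int kthreads_assigned_limit;	/* upper bound on assigned kthreads */
};

/* returns how many kthreads to create; *wakeup receives how many to wake */
static int kthreads_to_create(const struct channel_counts *c, int needed,
			      int *wakeup)
{
	*wakeup = (needed > c->kthreads_idle) ? c->kthreads_idle : needed;
	needed -= *wakeup;

	if (needed <= 0)
		return 0;

	/* never let assigned + newly created exceed the limit */
	if (needed + c->kthreads_assigned > c->kthreads_assigned_limit)
		needed = c->kthreads_assigned_limit - c->kthreads_assigned;

	return (needed > 0) ? needed : 0;
}

int main(void)
{
	struct channel_counts c = { .kthreads_idle = 2, .kthreads_assigned = 6,
				    .kthreads_assigned_limit = 8 };
	int wakeup;
	int create = kthreads_to_create(&c, 5, &wakeup);

	/* 5 needed: wake the 2 idle kthreads, create 2 more (limit 8 - 6 assigned) */
	printf("wakeup=%d create=%d\n", wakeup, create);
	return 0;
}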