processor_idle.c

From "Linux kernel source code" · C code · 1,790 lines total · page 1 of 4

C
1,790
字号
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry * @cx: cstate data */static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx){	if (cx->space_id == ACPI_CSTATE_FFH) {		/* Call into architectural FFH based C-state */		acpi_processor_ffh_cstate_enter(cx);	} else {		int unused;		/* IO port based C-state */		inb(cx->address);		/* Dummy wait op - must do something useless after P_LVL2 read		   because chipsets cannot guarantee that STPCLK# signal		   gets asserted in time to freeze execution properly. */		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);	}}/** * acpi_idle_enter_c1 - enters an ACPI C1 state-type * @dev: the target CPU * @state: the state data * * This is equivalent to the HALT instruction. */static int acpi_idle_enter_c1(struct cpuidle_device *dev,			      struct cpuidle_state *state){	struct acpi_processor *pr;	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);	pr = processors[smp_processor_id()];	if (unlikely(!pr))		return 0;	if (pr->flags.bm_check)		acpi_idle_update_bm_rld(pr, cx);	acpi_safe_halt();	cx->usage++;	return 0;}/** * acpi_idle_enter_simple - enters an ACPI state without BM handling * @dev: the target CPU * @state: the state data */static int acpi_idle_enter_simple(struct cpuidle_device *dev,				  struct cpuidle_state *state){	struct acpi_processor *pr;	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);	u32 t1, t2;	int sleep_ticks = 0;	pr = processors[smp_processor_id()];	if (unlikely(!pr))		return 0;	if (acpi_idle_suspend)		return(acpi_idle_enter_c1(dev, state));	local_irq_disable();	current_thread_info()->status &= ~TS_POLLING;	/*	 * TS_POLLING-cleared state must be visible before we test	 * NEED_RESCHED:	 */	smp_mb();	if (unlikely(need_resched())) {		current_thread_info()->status |= TS_POLLING;		local_irq_enable();		return 0;	}	/*	 * Must be done before busmaster disable as we might need to	 * access HPET !	 
*/	acpi_state_timer_broadcast(pr, cx, 1);	if (pr->flags.bm_check)		acpi_idle_update_bm_rld(pr, cx);	if (cx->type == ACPI_STATE_C3)		ACPI_FLUSH_CPU_CACHE();	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);	/* Tell the scheduler that we are going deep-idle: */	sched_clock_idle_sleep_event();	acpi_idle_do_entry(cx);	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)	/* TSC could halt in idle, so notify users */	mark_tsc_unstable("TSC halts in idle");;#endif	sleep_ticks = ticks_elapsed(t1, t2);	/* Tell the scheduler how much we idled: */	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);	local_irq_enable();	current_thread_info()->status |= TS_POLLING;	cx->usage++;	acpi_state_timer_broadcast(pr, cx, 0);	cx->time += sleep_ticks;	return ticks_elapsed_in_us(t1, t2);}static int c3_cpu_count;static DEFINE_SPINLOCK(c3_lock);/** * acpi_idle_enter_bm - enters C3 with proper BM handling * @dev: the target CPU * @state: the state data * * If BM is detected, the deepest non-C3 idle state is entered instead. 
*/static int acpi_idle_enter_bm(struct cpuidle_device *dev,			      struct cpuidle_state *state){	struct acpi_processor *pr;	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);	u32 t1, t2;	int sleep_ticks = 0;	pr = processors[smp_processor_id()];	if (unlikely(!pr))		return 0;	if (acpi_idle_suspend)		return(acpi_idle_enter_c1(dev, state));	if (acpi_idle_bm_check()) {		if (dev->safe_state) {			return dev->safe_state->enter(dev, dev->safe_state);		} else {			acpi_safe_halt();			return 0;		}	}	local_irq_disable();	current_thread_info()->status &= ~TS_POLLING;	/*	 * TS_POLLING-cleared state must be visible before we test	 * NEED_RESCHED:	 */	smp_mb();	if (unlikely(need_resched())) {		current_thread_info()->status |= TS_POLLING;		local_irq_enable();		return 0;	}	/* Tell the scheduler that we are going deep-idle: */	sched_clock_idle_sleep_event();	/*	 * Must be done before busmaster disable as we might need to	 * access HPET !	 */	acpi_state_timer_broadcast(pr, cx, 1);	acpi_idle_update_bm_rld(pr, cx);	/*	 * disable bus master	 * bm_check implies we need ARB_DIS	 * !bm_check implies we need cache flush	 * bm_control implies whether we can do ARB_DIS	 *	 * That leaves a case where bm_check is set and bm_control is	 * not set. In that case we cannot do much, we enter C3	 * without doing anything.	 
*/	if (pr->flags.bm_check && pr->flags.bm_control) {		spin_lock(&c3_lock);		c3_cpu_count++;		/* Disable bus master arbitration when all CPUs are in C3 */		if (c3_cpu_count == num_online_cpus())			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);		spin_unlock(&c3_lock);	} else if (!pr->flags.bm_check) {		ACPI_FLUSH_CPU_CACHE();	}	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);	acpi_idle_do_entry(cx);	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);	/* Re-enable bus master arbitration */	if (pr->flags.bm_check && pr->flags.bm_control) {		spin_lock(&c3_lock);		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);		c3_cpu_count--;		spin_unlock(&c3_lock);	}#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)	/* TSC could halt in idle, so notify users */	mark_tsc_unstable("TSC halts in idle");#endif	sleep_ticks = ticks_elapsed(t1, t2);	/* Tell the scheduler how much we idled: */	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);	local_irq_enable();	current_thread_info()->status |= TS_POLLING;	cx->usage++;	acpi_state_timer_broadcast(pr, cx, 0);	cx->time += sleep_ticks;	return ticks_elapsed_in_us(t1, t2);}struct cpuidle_driver acpi_idle_driver = {	.name =		"acpi_idle",	.owner =	THIS_MODULE,};/** * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE * @pr: the ACPI processor */static int acpi_processor_setup_cpuidle(struct acpi_processor *pr){	int i, count = 0;	struct acpi_processor_cx *cx;	struct cpuidle_state *state;	struct cpuidle_device *dev = &pr->power.dev;	if (!pr->flags.power_setup_done)		return -EINVAL;	if (pr->flags.power == 0) {		return -EINVAL;	}	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {		cx = &pr->power.states[i];		state = &dev->states[count];		if (!cx->valid)			continue;#ifdef CONFIG_HOTPLUG_CPU		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&		    !pr->flags.has_cst &&		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))			continue;#endif		cpuidle_set_statedata(state, cx);		snprintf(state->name, 
CPUIDLE_NAME_LEN, "C%d", i);		state->exit_latency = cx->latency;		state->target_residency = cx->latency * 6;		state->power_usage = cx->power;		state->flags = 0;		switch (cx->type) {			case ACPI_STATE_C1:			state->flags |= CPUIDLE_FLAG_SHALLOW;			state->enter = acpi_idle_enter_c1;			dev->safe_state = state;			break;			case ACPI_STATE_C2:			state->flags |= CPUIDLE_FLAG_BALANCED;			state->flags |= CPUIDLE_FLAG_TIME_VALID;			state->enter = acpi_idle_enter_simple;			dev->safe_state = state;			break;			case ACPI_STATE_C3:			state->flags |= CPUIDLE_FLAG_DEEP;			state->flags |= CPUIDLE_FLAG_TIME_VALID;			state->flags |= CPUIDLE_FLAG_CHECK_BM;			state->enter = pr->flags.bm_check ?					acpi_idle_enter_bm :					acpi_idle_enter_simple;			break;		}		count++;	}	dev->state_count = count;	if (!count)		return -EINVAL;	return 0;}int acpi_processor_cst_has_changed(struct acpi_processor *pr){	int ret;	if (!pr)		return -EINVAL;	if (nocst) {		return -ENODEV;	}	if (!pr->flags.power_setup_done)		return -ENODEV;	cpuidle_pause_and_lock();	cpuidle_disable_device(&pr->power.dev);	acpi_processor_get_power_info(pr);	acpi_processor_setup_cpuidle(pr);	ret = cpuidle_enable_device(&pr->power.dev);	cpuidle_resume_and_unlock();	return ret;}#endif /* CONFIG_CPU_IDLE */int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,			      struct acpi_device *device){	acpi_status status = 0;	static int first_run;	struct proc_dir_entry *entry = NULL;	unsigned int i;	if (!first_run) {		dmi_check_system(processor_power_dmi_table);		max_cstate = acpi_processor_cstate_check(max_cstate);		if (max_cstate < ACPI_C_STATES_MAX)			printk(KERN_NOTICE			       "ACPI: processor limited to max C-state %d\n",			       max_cstate);		first_run++;#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)		register_latency_notifier(&acpi_processor_latency_notifier);#endif	}	if (!pr)		return -EINVAL;	if (acpi_gbl_FADT.cst_control && !nocst) {		status =		    acpi_os_write_port(acpi_gbl_FADT.smi_command, 
acpi_gbl_FADT.cst_control, 8);		if (ACPI_FAILURE(status)) {			ACPI_EXCEPTION((AE_INFO, status,					"Notifying BIOS of _CST ability failed"));		}	}	acpi_processor_get_power_info(pr);	pr->flags.power_setup_done = 1;	/*	 * Install the idle handler if processor power management is supported.	 * Note that we use previously set idle handler will be used on	 * platforms that only support C1.	 */	if ((pr->flags.power) && (!boot_option_idle_override)) {#ifdef CONFIG_CPU_IDLE		acpi_processor_setup_cpuidle(pr);		pr->power.dev.cpu = pr->id;		if (cpuidle_register_device(&pr->power.dev))			return -EIO;#endif		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);		for (i = 1; i <= pr->power.count; i++)			if (pr->power.states[i].valid)				printk(" C%d[C%d]", i,				       pr->power.states[i].type);		printk(")\n");#ifndef CONFIG_CPU_IDLE		if (pr->id == 0) {			pm_idle_save = pm_idle;			pm_idle = acpi_processor_idle;		}#endif	}	/* 'power' [R] */	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,				  S_IRUGO, acpi_device_dir(device));	if (!entry)		return -EIO;	else {		entry->proc_fops = &acpi_processor_power_fops;		entry->data = acpi_driver_data(device);		entry->owner = THIS_MODULE;	}	return 0;}int acpi_processor_power_exit(struct acpi_processor *pr,			      struct acpi_device *device){#ifdef CONFIG_CPU_IDLE	if ((pr->flags.power) && (!boot_option_idle_override))		cpuidle_unregister_device(&pr->power.dev);#endif	pr->flags.power_setup_done = 0;	if (acpi_device_dir(device))		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,				  acpi_device_dir(device));#ifndef CONFIG_CPU_IDLE	/* Unregister the idle handler when processor #0 is removed. */	if (pr->id == 0) {		pm_idle = pm_idle_save;		/*		 * We are about to unload the current idle thread pm callback		 * (pm_idle), Wait for all processors to update cached/local		 * copies of pm_idle before proceeding.		 */		cpu_idle_wait();#ifdef CONFIG_SMP		unregister_latency_notifier(&acpi_processor_latency_notifier);#endif	}#endif	return 0;}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?