
cpu_idle.c

From: xen virtual machine source code package
Language: C
                    {
                        next_state = cx->promotion.state;
                        goto end;
                    }
                }
                else
                {
                    next_state = cx->promotion.state;
                    goto end;
                }
            }
        }
    }

    /*
     * Demotion?
     * ---------
     * Track the number of shorts (time asleep is less than time threshold)
     * and demote when the usage threshold is reached.
     */
    if ( cx->demotion.state )
    {
        if ( sleep_ticks < cx->demotion.threshold.ticks )
        {
            cx->demotion.count++;
            cx->promotion.count = 0;
            if ( cx->demotion.count >= cx->demotion.threshold.count )
            {
                next_state = cx->demotion.state;
                goto end;
            }
        }
    }

end:
    /*
     * Demote if current state exceeds max_cstate
     */
    if ( (power->state - power->states) > max_cstate )
    {
        if ( cx->demotion.state )
            next_state = cx->demotion.state;
    }

    /*
     * New Cx State?
     * -------------
     * If we're going to start using a new Cx state we must clean up
     * from the previous and prepare to use the new.
     */
    if ( next_state != power->state )
        acpi_processor_power_activate(power, next_state);
}
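/*
 * Worked example of the bookkeeping above, using the default policy
 * installed below (an illustrative scenario, with numbers taken from
 * acpi_processor_set_power_policy()): while in C2,
 * promotion.threshold.count is 4, so four consecutive sleeps longer
 * than C2's latency_ticks promote to C3 on the next idle entry;
 * a single sleep shorter than demotion.threshold.ticks (count
 * threshold 1) clears the promotion count and demotes to the next
 * lower valid state.
 */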
static int acpi_processor_set_power_policy(struct acpi_processor_power *power)
{
    unsigned int i;
    unsigned int state_is_set = 0;
    struct acpi_processor_cx *lower = NULL;
    struct acpi_processor_cx *higher = NULL;
    struct acpi_processor_cx *cx;

    if ( !power )
        return -EINVAL;

    /*
     * This function sets the default Cx state policy (OS idle handler).
     * Our scheme is to promote quickly to C2 but more conservatively
     * to C3.  We're favoring C2 for its characteristics of low latency
     * (quick response), good power savings, and ability to allow bus
     * mastering activity.  Note that the Cx state policy is completely
     * customizable and can be altered dynamically.
     */

    /* startup state */
    for ( i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++ )
    {
        cx = &power->states[i];
        if ( !cx->valid )
            continue;

        if ( !state_is_set )
            power->state = cx;
        state_is_set++;
        break;
    }

    if ( !state_is_set )
        return -ENODEV;

    /* demotion */
    for ( i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++ )
    {
        cx = &power->states[i];
        if ( !cx->valid )
            continue;

        if ( lower )
        {
            cx->demotion.state = lower;
            cx->demotion.threshold.ticks = cx->latency_ticks;
            cx->demotion.threshold.count = 1;
            if ( cx->type == ACPI_STATE_C3 )
                cx->demotion.threshold.bm = bm_history;
        }

        lower = cx;
    }

    /* promotion */
    for ( i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i-- )
    {
        cx = &power->states[i];
        if ( !cx->valid )
            continue;

        if ( higher )
        {
            cx->promotion.state = higher;
            cx->promotion.threshold.ticks = cx->latency_ticks;
            if ( cx->type >= ACPI_STATE_C2 )
                cx->promotion.threshold.count = 4;
            else
                cx->promotion.threshold.count = 10;
            if ( higher->type == ACPI_STATE_C3 )
                cx->promotion.threshold.bm = bm_history;
        }

        higher = cx;
    }

    return 0;
}

static int init_cx_pminfo(struct acpi_processor_power *acpi_power)
{
    memset(acpi_power, 0, sizeof(*acpi_power));

    acpi_power->states[ACPI_STATE_C1].type = ACPI_STATE_C1;

    acpi_power->states[ACPI_STATE_C0].valid = 1;
    acpi_power->states[ACPI_STATE_C1].valid = 1;

    acpi_power->count = 2;

    return 0;
}

#define CPUID_MWAIT_LEAF (5)
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK      (0x2)

#define MWAIT_ECX_INTERRUPT_BREAK       (0x1)

#define MWAIT_SUBSTATE_MASK (0xf)
#define MWAIT_SUBSTATE_SIZE (4)

static int acpi_processor_ffh_cstate_probe(xen_processor_cx_t *cx)
{
    struct cpuinfo_x86 *c = &current_cpu_data;
    unsigned int eax, ebx, ecx, edx;
    unsigned int edx_part;
    unsigned int cstate_type; /* C-state type and not ACPI C-state type */
    unsigned int num_cstate_subtype;

    if ( c->cpuid_level < CPUID_MWAIT_LEAF )
    {
        printk(XENLOG_INFO "MWAIT leaf not supported by cpuid\n");
        return -EFAULT;
    }

    cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
    printk(XENLOG_DEBUG "cpuid.MWAIT[.eax=%x, .ebx=%x, .ecx=%x, .edx=%x]\n",
           eax, ebx, ecx, edx);

    /* Check whether this particular cx_type (in CST) is supported or not */
    cstate_type = (cx->reg.address >> MWAIT_SUBSTATE_SIZE) + 1;
    edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
    num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

    if ( num_cstate_subtype < (cx->reg.address & MWAIT_SUBSTATE_MASK) )
        return -EFAULT;

    /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
    if ( !(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
         !(ecx & CPUID5_ECX_INTERRUPT_BREAK) )
        return -EFAULT;

    printk(XENLOG_INFO "Monitor-Mwait will be used to enter C-%d state\n",
           cx->type);
    return 0;
}
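/*
 * Illustration of the EDX decoding performed above, with a
 * hypothetical helper name (mwait_substates_for) that is not part of
 * this file: CPUID leaf 5 packs one 4-bit sub-state count per
 * hardware C-state into EDX, and a _CST reg.address of the form
 * ((hw C-state - 1) << 4 | sub-state) selects which field to inspect.
 */
static inline unsigned int mwait_substates_for(unsigned int edx,
                                               unsigned int address)
{
    unsigned int hw_type = (address >> MWAIT_SUBSTATE_SIZE) + 1;

    /* e.g. edx == 0x00000220, address == 0x10: (0x220 >> 8) & 0xf == 2 */
    return (edx >> (hw_type * MWAIT_SUBSTATE_SIZE)) & MWAIT_SUBSTATE_MASK;
}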
/*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
 * - When cache is not shared among all CPUs, we flush cache
 *   before entering C3.
 * - When cache is shared among all CPUs, we use bm_check
 *   mechanism as in UP case
 *
 * This routine is called only after all the CPUs are online
 */
static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags)
{
    struct cpuinfo_x86 *c = &current_cpu_data;

    flags->bm_check = 0;
    if ( num_online_cpus() == 1 )
        flags->bm_check = 1;
    else if ( c->x86_vendor == X86_VENDOR_INTEL )
    {
        /*
         * Today all CPUs that support C3 share cache.
         * TBD: This needs to look at the cache shared map, once the
         * multi-core detection patch makes it to the base.
         */
        flags->bm_check = 1;
    }
}

#define VENDOR_INTEL                   (1)
#define NATIVE_CSTATE_BEYOND_HALT      (2)
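/*
 * These values appear to follow the Intel fixed-function-hardware
 * _CST encoding (an inference from the names, not stated in this
 * file): the GAS bit_width field carries a vendor code (1 == Intel)
 * and bit_offset a class code (2 == native C-state entered via MWAIT
 * rather than HLT), while reg.address packs the hardware C-state and
 * sub-state that acpi_processor_ffh_cstate_probe() decodes.
 */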
static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx)
{
    static int bm_check_flag;

    switch ( cx->reg.space_id )
    {
    case ACPI_ADR_SPACE_SYSTEM_IO:
        if ( cx->reg.address == 0 )
            return -EINVAL;
        break;

    case ACPI_ADR_SPACE_FIXED_HARDWARE:
        if ( cx->type > ACPI_STATE_C1 )
        {
            if ( cx->reg.bit_width != VENDOR_INTEL ||
                 cx->reg.bit_offset != NATIVE_CSTATE_BEYOND_HALT )
                return -EINVAL;

            /* assume all logical CPUs have the same support for mwait */
            if ( acpi_processor_ffh_cstate_probe(cx) )
                return -EINVAL;
        }
        break;

    default:
        return -ENODEV;
    }

    if ( cx->type == ACPI_STATE_C3 )
    {
        /* We must be able to use HPET in place of LAPIC timers. */
        if ( hpet_broadcast_is_available() )
        {
            lapic_timer_off = hpet_broadcast_enter;
            lapic_timer_on = hpet_broadcast_exit;
        }
        else if ( pit_broadcast_is_available() )
        {
            lapic_timer_off = pit_broadcast_enter;
            lapic_timer_on = pit_broadcast_exit;
        }
        else
        {
            return -EINVAL;
        }

        /* All the logic here assumes flags.bm_check is the same across all CPUs */
        if ( !bm_check_flag )
        {
            /* Determine whether bm_check is needed based on CPU */
            acpi_processor_power_init_bm_check(&(power->flags));
            bm_check_flag = power->flags.bm_check;
        }
        else
        {
            power->flags.bm_check = bm_check_flag;
        }

        if ( power->flags.bm_check )
        {
            if ( !power->flags.bm_control )
            {
                if ( power->flags.has_cst != 1 )
                {
                    /* bus mastering control is necessary */
                    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 support requires BM control\n"));
                    return -EINVAL;
                }
                else
                {
                    /* Here we enter C3 without bus mastering */
                    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 support without BM control\n"));
                }
            }
        }
        else
        {
            /*
             * WBINVD should be set in fadt, for C3 state to be
             * supported when bm_check is not required.
             */
            if ( !(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD) )
            {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Cache invalidation should work properly"
                          " for C3 to be enabled on SMP systems\n"));
                return -EINVAL;
            }
            acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
        }
    }

    return 0;
}

static void set_cx(
    struct acpi_processor_power *acpi_power,
    xen_processor_cx_t *xen_cx)
{
    struct acpi_processor_cx *cx;

    if ( check_cx(acpi_power, xen_cx) != 0 )
        return;

    cx = &acpi_power->states[xen_cx->type];
    if ( !cx->valid )
        acpi_power->count++;

    cx->valid    = 1;
    cx->type     = xen_cx->type;
    cx->address  = xen_cx->reg.address;
    cx->space_id = xen_cx->reg.space_id;
    cx->latency  = xen_cx->latency;
    cx->power    = xen_cx->power;

    cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
}

int get_cpu_id(u8 acpi_id)
{
    int i;
    u8 apic_id;

    apic_id = x86_acpiid_to_apicid[acpi_id];
    if ( apic_id == 0xff )
        return -1;

    for ( i = 0; i < NR_CPUS; i++ )
    {
        if ( apic_id == x86_cpu_to_apicid[i] )
            return i;
    }

    return -1;
}

#ifdef DEBUG_PM_CX
static void print_cx_pminfo(uint32_t cpu, struct xen_processor_power *power)
{
    XEN_GUEST_HANDLE(xen_processor_cx_t) states;
    xen_processor_cx_t  state;
    XEN_GUEST_HANDLE(xen_processor_csd_t) csd;
    xen_processor_csd_t dp;
    uint32_t i;

    printk("cpu%d cx acpi info:\n", cpu);
    printk("\tcount = %d\n", power->count);
    printk("\tflags: bm_cntl[%d], bm_chk[%d], has_cst[%d],\n"
           "\t       pwr_setup_done[%d], bm_rld_set[%d]\n",
           power->flags.bm_control, power->flags.bm_check, power->flags.has_cst,
           power->flags.power_setup_done, power->flags.bm_rld_set);

    states = power->states;

    for ( i = 0; i < power->count; i++ )
    {
        if ( unlikely(copy_from_guest_offset(&state, states, i, 1)) )
            return;

        printk("\tstates[%d]:\n", i);
        printk("\t\treg.space_id = 0x%x\n", state.reg.space_id);
        printk("\t\treg.bit_width = 0x%x\n", state.reg.bit_width);
        printk("\t\treg.bit_offset = 0x%x\n", state.reg.bit_offset);
        printk("\t\treg.access_size = 0x%x\n", state.reg.access_size);
        printk("\t\treg.address = 0x%"PRIx64"\n", state.reg.address);
        printk("\t\ttype    = %d\n", state.type);
        printk("\t\tlatency = %d\n", state.latency);
        printk("\t\tpower   = %d\n", state.power);

        csd = state.dp;
        printk("\t\tdp(@0x%p)\n", csd.p);

        if ( csd.p != NULL )
        {
            if ( unlikely(copy_from_guest(&dp, csd, 1)) )
                return;
            printk("\t\t\tdomain = %d\n", dp.domain);
            printk("\t\t\tcoord_type   = %d\n", dp.coord_type);
            printk("\t\t\tnum = %d\n", dp.num);
        }
    }
}
#else
#define print_cx_pminfo(c, p)
#endif
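/*
 * A note on units, assuming the usual ACPI PM timer rate of
 * 3.579545 MHz (the US_TO_PM_TIMER_TICKS() definition is not shown
 * in this excerpt): set_cx() above converts a _CST latency in
 * microseconds into PM timer ticks, e.g. 100us becomes roughly 358
 * ticks, so latency_ticks compares directly with the sleep_ticks
 * measured by acpi_processor_idle().
 */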
long set_cx_pminfo(uint32_t cpu, struct xen_processor_power *power)
{
    XEN_GUEST_HANDLE(xen_processor_cx_t) states;
    xen_processor_cx_t xen_cx;
    struct acpi_processor_power *acpi_power;
    int cpu_id, i;

    if ( unlikely(!guest_handle_okay(power->states, power->count)) )
        return -EFAULT;

    print_cx_pminfo(cpu, power);

    /* map from acpi_id to cpu_id */
    cpu_id = get_cpu_id((u8)cpu);
    if ( cpu_id == -1 )
    {
        printk(XENLOG_ERR "no cpu_id for acpi_id %d\n", cpu);
        return -EFAULT;
    }

    acpi_power = &processor_powers[cpu_id];

    init_cx_pminfo(acpi_power);

    acpi_power->flags.bm_check = power->flags.bm_check;
    acpi_power->flags.bm_control = power->flags.bm_control;
    acpi_power->flags.has_cst = power->flags.has_cst;

    states = power->states;

    for ( i = 0; i < power->count; i++ )
    {
        if ( unlikely(copy_from_guest_offset(&xen_cx, states, i, 1)) )
            return -EFAULT;

        set_cx(acpi_power, &xen_cx);
    }

    /* FIXME: C-state dependency is not supported so far */

    /* initialize default policy */
    acpi_processor_set_power_policy(acpi_power);

    print_acpi_power(cpu_id, acpi_power);

    if ( cpu_id == 0 && pm_idle_save == NULL )
    {
        pm_idle_save = pm_idle;
        pm_idle = acpi_processor_idle;
    }

    return 0;
}

uint32_t pmstat_get_cx_nr(uint32_t cpuid)
{
    return processor_powers[cpuid].count;
}

int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat)
{
    struct acpi_processor_power *power = &processor_powers[cpuid];
    struct vcpu *v = idle_vcpu[cpuid];
    uint64_t usage;
    int i;

    stat->last = (power->state) ? power->state->type : 0;
    stat->nr = processor_powers[cpuid].count;
    stat->idle_time = v->runstate.time[RUNSTATE_running];
    if ( v->is_running )
        stat->idle_time += NOW() - v->runstate.state_entry_time;

    for ( i = 0; i < power->count; i++ )
    {
        usage = power->states[i].usage;
        if ( copy_to_guest_offset(stat->triggers, i, &usage, 1) )
            return -EFAULT;
    }
    for ( i = 0; i < power->count; i++ )
        if ( copy_to_guest_offset(stat->residencies, i,
                                  &power->states[i].time, 1) )
            return -EFAULT;

    return 0;
}

int pmstat_reset_cx_stat(uint32_t cpuid)
{
    return 0;
}
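/*
 * Hypothetical consumer-side helper (the name cx_avg_residency is
 * illustrative, not part of this file): given the triggers and
 * residencies arrays copied out by pmstat_get_cx_stat(), the mean
 * residency per entry of a C-state is the ratio of the two counters.
 */
static inline uint64_t cx_avg_residency(uint64_t residency, uint64_t triggers)
{
    /* Guard against a state that was never entered. */
    return triggers ? residency / triggers : 0;
}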
