irq.c

来自「xen虚拟机源代码安装包」· C语言 代码 · 共 764 行 · 第 1/2 页

C
764
字号
/******************************************************************************
 * arch/x86/irq.c
 *
 * Portions of this file are:
 *  Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/keyhandler.h>
#include <xen/compat.h>
#include <asm/current.h>
#include <xen/iommu.h>

/* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
int opt_noirqbalance = 0;
boolean_param("noirqbalance", opt_noirqbalance);

/* One descriptor per interrupt vector; protected by each desc's own lock. */
irq_desc_t irq_desc[NR_IRQS];

static void __do_IRQ_guest(int vector);

/* Placeholder handler for actions that need no work. */
void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }

/*
 * No-op hw_interrupt_type used for vectors with no controller attached.
 * ack_none() reports the spurious interrupt via ack_bad_irq().
 */
static void enable_none(unsigned int vector) { }
static unsigned int startup_none(unsigned int vector) { return 0; }
static void disable_none(unsigned int vector) { }
static void ack_none(unsigned int vector)
{
    ack_bad_irq(vector);
}

#define shutdown_none   disable_none
#define end_none        enable_none

struct hw_interrupt_type no_irq_type = {
    "none",
    startup_none,
    shutdown_none,
    enable_none,
    disable_none,
    ack_none,
    end_none
};

atomic_t irq_err_count;

/*
 * Main interrupt entry point: ack the vector, then either forward it to
 * guest domains (IRQ_GUEST) or run the Xen-internal action handler.
 * IRQ_PENDING/IRQ_INPROGRESS ensure only one CPU runs the handler at a
 * time while no instance of the interrupt is lost.
 */
asmlinkage void do_IRQ(struct cpu_user_regs *regs)
{
    unsigned int      vector = regs->entry_vector;
    irq_desc_t       *desc = &irq_desc[vector];
    struct irqaction *action;

    perfc_incr(irqs);

    spin_lock(&desc->lock);
    desc->handler->ack(vector);

    if ( likely(desc->status & IRQ_GUEST) )
    {
        __do_IRQ_guest(vector);
        spin_unlock(&desc->lock);
        return;
    }

    desc->status &= ~IRQ_REPLAY;
    desc->status |= IRQ_PENDING;

    /*
     * Since we set PENDING, if another processor is handling a different
     * instance of this same irq, the other processor will take care of it.
     */
    if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) )
        goto out;

    desc->status |= IRQ_INPROGRESS;

    action = desc->action;
    while ( desc->status & IRQ_PENDING )
    {
        desc->status &= ~IRQ_PENDING;
        irq_enter();
        /* Run the action with the lock dropped so it may be re-raised. */
        spin_unlock_irq(&desc->lock);
        action->handler(vector_to_irq(vector), action->dev_id, regs);
        spin_lock_irq(&desc->lock);
        irq_exit();
    }

    desc->status &= ~IRQ_INPROGRESS;

 out:
    desc->handler->end(vector);
    spin_unlock(&desc->lock);
}

/*
 * Allocate an irqaction for @handler and bind it to @irq.
 * Returns 0 on success, -EINVAL for a bad irq/handler, -ENOMEM on
 * allocation failure, or -EBUSY (from setup_irq) if already bound.
 * Note: @irqflags is currently unused; kept for interface compatibility.
 */
int request_irq(unsigned int irq,
        void (*handler)(int, void *, struct cpu_user_regs *),
        unsigned long irqflags, const char * devname, void *dev_id)
{
    struct irqaction * action;
    int retval;

    /*
     * Sanity-check: shared interrupts must pass in a real dev-ID,
     * otherwise we'll have trouble later trying to figure out
     * which interrupt is which (messes up the interrupt freeing
     * logic etc).
     */
    if (irq >= NR_IRQS)
        return -EINVAL;
    if (!handler)
        return -EINVAL;

    action = xmalloc(struct irqaction);
    if (!action)
        return -ENOMEM;

    action->handler = handler;
    action->name = devname;
    action->dev_id = dev_id;

    retval = setup_irq(irq, action);
    if (retval)
        xfree(action);

    return retval;
}

/*
 * Detach the action bound to @irq, shut the source down, and spin until
 * any in-flight invocation on another CPU has finished.
 * NOTE: the caller owns the irqaction; it is not freed here.
 */
void free_irq(unsigned int irq)
{
    unsigned int  vector = irq_to_vector(irq);
    irq_desc_t   *desc = &irq_desc[vector];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock,flags);
    desc->action  = NULL;
    desc->depth   = 1;
    desc->status |= IRQ_DISABLED;
    /*
     * Fix: was shutdown(irq). Every other desc->handler callback in this
     * file is passed the vector (cf. startup(vector) in setup_irq(),
     * ack(vector)/end(vector) in do_IRQ()), so passing the irq number
     * here would shut down the wrong line whenever irq != vector.
     */
    desc->handler->shutdown(vector);
    spin_unlock_irqrestore(&desc->lock,flags);

    /* Wait to make sure it's not being used on another CPU */
    do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
}

/*
 * Bind @new as the action for @irq and start the hardware up.
 * Returns 0 on success or -EBUSY if an action is already installed.
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
    unsigned int  vector = irq_to_vector(irq);
    irq_desc_t   *desc = &irq_desc[vector];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock,flags);

    if ( desc->action != NULL )
    {
        spin_unlock_irqrestore(&desc->lock,flags);
        return -EBUSY;
    }

    desc->action  = new;
    desc->depth   = 0;
    desc->status &= ~IRQ_DISABLED;
    desc->handler->startup(vector);

    spin_unlock_irqrestore(&desc->lock,flags);

    return 0;
}

/*
 * HANDLING OF GUEST-BOUND PHYSICAL IRQS
 */

#define IRQ_MAX_GUESTS 7
typedef struct {
    u8 nr_guests;            /* Number of domains sharing this line     */
    u8 in_flight;            /* Deliveries awaiting a guest EOI         */
    u8 shareable;            /* May multiple domains bind to this line? */
    u8 ack_type;
#define ACKTYPE_NONE   0     /* No final acknowledgement is required */
#define ACKTYPE_UNMASK 1     /* Unmask PIC hardware (from any CPU)   */
#define ACKTYPE_EOI    2     /* EOI on the CPU that was interrupted  */
    cpumask_t cpu_eoi_map;   /* CPUs that need to EOI this interrupt */
    struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;

/*
 * Stack of interrupts awaiting EOI on each CPU. These must be popped in
 * order, as only the current highest-priority pending irq can be EOIed.
 */
struct pending_eoi {
    u8 vector; /* Vector awaiting EOI */
    u8 ready;  /* Ready for EOI now?  */
};
static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
/* Stack pointer is stashed in the (otherwise unused) top slot's vector. */
#define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)

static struct timer irq_guest_eoi_timer[NR_IRQS];

/*
 * Timer callback: the guest(s) never EOIed an ACKTYPE_NONE line that was
 * masked in __do_IRQ_guest(); clear IRQ_INPROGRESS and re-enable it so
 * the line is not wedged forever.
 */
static void irq_guest_eoi_timer_fn(void *data)
{
    irq_desc_t *desc = data;
    unsigned vector = desc - irq_desc;
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);
    desc->status &= ~IRQ_INPROGRESS;
    desc->handler->enable(vector);
    spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * Deliver @vector to every domain bound to it. Caller (do_IRQ) holds
 * desc->lock and has already acked the interrupt controller.
 */
static void __do_IRQ_guest(int vector)
{
    irq_desc_t         *desc = &irq_desc[vector];
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct domain      *d;
    int                 i, sp, already_pending = 0;
    struct pending_eoi *peoi = this_cpu(pending_eoi);

    if ( unlikely(action->nr_guests == 0) )
    {
        /* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
        ASSERT(action->ack_type == ACKTYPE_EOI);
        ASSERT(desc->status & IRQ_DISABLED);
        desc->handler->end(vector);
        return;
    }

    if ( action->ack_type == ACKTYPE_EOI )
    {
        /* Push onto this CPU's pending-EOI stack; entries stay ordered. */
        sp = pending_eoi_sp(peoi);
        ASSERT((sp == 0) || (peoi[sp-1].vector < vector));
        ASSERT(sp < (NR_VECTORS-1));
        peoi[sp].vector = vector;
        peoi[sp].ready = 0;
        pending_eoi_sp(peoi) = sp+1;
        cpu_set(smp_processor_id(), action->cpu_eoi_map);
    }

    for ( i = 0; i < action->nr_guests; i++ )
    {
        unsigned int irq;
        d = action->guest[i];
        irq = domain_vector_to_irq(d, vector);
        /* Count a new in-flight delivery unless one is already pending. */
        if ( (action->ack_type != ACKTYPE_NONE) &&
             !test_and_set_bit(irq, d->pirq_mask) )
            action->in_flight++;
        if ( hvm_do_IRQ_dpci(d, irq) )
        {
            if ( action->ack_type == ACKTYPE_NONE )
            {
                already_pending += !!(desc->status & IRQ_INPROGRESS);
                desc->status |= IRQ_INPROGRESS; /* cleared during hvm eoi */
            }
        }
        else if ( send_guest_pirq(d, irq) &&
                  (action->ack_type == ACKTYPE_NONE) )
        {
            already_pending++;
        }
    }

    if ( already_pending == action->nr_guests )
    {
        /*
         * Every guest already had the event pending: mask the line and
         * set a 1ms safety timer to re-enable it if nobody EOIs.
         * NOTE(review): init_timer() is re-run on a timer that may still
         * be live despite the preceding stop_timer(); presumably safe in
         * this codebase, but one-time initialisation would be cleaner —
         * confirm against the timer API's contract.
         */
        desc->handler->disable(vector);
        stop_timer(&irq_guest_eoi_timer[vector]);
        init_timer(&irq_guest_eoi_timer[vector],
                   irq_guest_eoi_timer_fn, desc, smp_processor_id());
        set_timer(&irq_guest_eoi_timer[vector], NOW() + MILLISECS(1));
    }
}

/* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
static void flush_ready_eoi(void *unused)
{
    struct pending_eoi *peoi = this_cpu(pending_eoi);
    irq_desc_t         *desc;
    int                 vector, sp;

    /* Must run with interrupts off: the stack is per-CPU state. */
    ASSERT(!local_irq_is_enabled());

    sp = pending_eoi_sp(peoi);

    /* Pop in order; stop at the first entry not yet ready for EOI. */
    while ( (--sp >= 0) && peoi[sp].ready )
    {
        vector = peoi[sp].vector;
        desc = &irq_desc[vector];
        spin_lock(&desc->lock);
        desc->handler->end(vector);
        spin_unlock(&desc->lock);
    }

    pending_eoi_sp(peoi) = sp+1;
}

/*
 * If all in-flight deliveries of @desc's guest irq have been acked and
 * this CPU owes an EOI, mark the matching pending-EOI stack entry ready.
 * Caller holds desc->lock.
 */
static void __set_eoi_ready(irq_desc_t *desc)
{
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct pending_eoi *peoi = this_cpu(pending_eoi);
    int                 vector, sp;

    vector = desc - irq_desc;

    if ( !(desc->status & IRQ_GUEST) ||
         (action->in_flight != 0) ||
         !cpu_test_and_clear(smp_processor_id(), action->cpu_eoi_map) )
        return;

    sp = pending_eoi_sp(peoi);

    /* The entry must exist somewhere below the stack pointer. */
    do {
        ASSERT(sp > 0);
    } while ( peoi[--sp].vector != vector );
    ASSERT(!peoi[sp].ready);
    peoi[sp].ready = 1;
}

/* Mark specified IRQ as ready-for-EOI (if it really is) and attempt to EOI.
*/static void set_eoi_ready(void *data){    irq_desc_t *desc = data;    ASSERT(!local_irq_is_enabled());    spin_lock(&desc->lock);    __set_eoi_ready(desc);    spin_unlock(&desc->lock);    flush_ready_eoi(NULL);}static void __pirq_guest_eoi(struct domain *d, int irq){    irq_desc_t         *desc;    irq_guest_action_t *action;    cpumask_t           cpu_eoi_map;    int                 vector;    vector = domain_irq_to_vector(d, irq);    desc   = &irq_desc[vector];    action = (irq_guest_action_t *)desc->action;    spin_lock_irq(&desc->lock);    ASSERT(!test_bit(irq, d->pirq_mask) ||           (action->ack_type != ACKTYPE_NONE));    if ( unlikely(!test_and_clear_bit(irq, d->pirq_mask)) ||         unlikely(--action->in_flight != 0) )    {        spin_unlock_irq(&desc->lock);        return;    }    if ( action->ack_type == ACKTYPE_UNMASK )    {        ASSERT(cpus_empty(action->cpu_eoi_map));        desc->handler->end(vector);        spin_unlock_irq(&desc->lock);        return;    }    ASSERT(action->ack_type == ACKTYPE_EOI);            cpu_eoi_map = action->cpu_eoi_map;    if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )    {        __set_eoi_ready(desc);        spin_unlock(&desc->lock);        flush_ready_eoi(NULL);        local_irq_enable();    }    else    {        spin_unlock_irq(&desc->lock);    }

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?