/*
 * ipmi_si_intf.c -- IPMI system-interface (KCS/SMIC/BT) driver.
 * Source: Linux 2.6.8 kernel tree (vendor "YL2410" board package).
 * NOTE(review): this is page 1 of 4 of a 2,083-line file; definitions
 * cut off at either edge of this chunk continue on adjacent pages.
 */
/* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { printk(KERN_WARNING "ipmi_si: Could not enable interrupts" ", failed set, using polled mode.\n"); } smi_info->si_state = SI_NORMAL; break; } }}/* Called on timeouts and events. Timeouts should pass the elapsed time, interrupts should pass in zero. */static enum si_sm_result smi_event_handler(struct smi_info *smi_info, int time){ enum si_sm_result si_sm_result; restart: /* There used to be a loop here that waited a little while (around 25us) before giving up. That turned out to be pointless, the minimum delays I was seeing were in the 300us range, which is far too long to wait in an interrupt. So we just run until the state machine tells us something happened or it needs a delay. */ si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); time = 0; while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) { si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); } if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) { spin_lock(&smi_info->count_lock); smi_info->complete_transactions++; spin_unlock(&smi_info->count_lock); handle_transaction_done(smi_info); si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); } else if (si_sm_result == SI_SM_HOSED) { spin_lock(&smi_info->count_lock); smi_info->hosed_count++; spin_unlock(&smi_info->count_lock); if (smi_info->curr_msg != NULL) { /* If we were handling a user message, format a response to send to the upper layer to tell it about the error. */ return_hosed_msg(smi_info); } si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); smi_info->si_state = SI_NORMAL; } /* We prefer handling attn over new messages. */ if (si_sm_result == SI_SM_ATTN) { unsigned char msg[2]; spin_lock(&smi_info->count_lock); smi_info->attentions++; spin_unlock(&smi_info->count_lock); /* Got a attn, send down a get message flags to see what's causing it. 
It would be better to handle this in the upper layer, but due to the way interrupts work with the SMI, that's not really possible. */ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_MSG_FLAGS_CMD; smi_info->handlers->start_transaction( smi_info->si_sm, msg, 2); smi_info->si_state = SI_GETTING_FLAGS; goto restart; } /* If we are currently idle, try to start the next message. */ if (si_sm_result == SI_SM_IDLE) { spin_lock(&smi_info->count_lock); smi_info->idles++; spin_unlock(&smi_info->count_lock); si_sm_result = start_next_msg(smi_info); if (si_sm_result != SI_SM_IDLE) goto restart; } if ((si_sm_result == SI_SM_IDLE) && (atomic_read(&smi_info->req_events))) { /* We are idle and the upper layer requested that I fetch events, so do so. */ unsigned char msg[2]; spin_lock(&smi_info->count_lock); smi_info->flag_fetches++; spin_unlock(&smi_info->count_lock); atomic_set(&smi_info->req_events, 0); msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_MSG_FLAGS_CMD; smi_info->handlers->start_transaction( smi_info->si_sm, msg, 2); smi_info->si_state = SI_GETTING_FLAGS; goto restart; } return si_sm_result;}static void sender(void *send_info, struct ipmi_smi_msg *msg, int priority){ struct smi_info *smi_info = send_info; enum si_sm_result result; unsigned long flags;#ifdef DEBUG_TIMING struct timeval t;#endif spin_lock_irqsave(&(smi_info->msg_lock), flags);#ifdef DEBUG_TIMING do_gettimeofday(&t); printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);#endif if (smi_info->run_to_completion) { /* If we are running to completion, then throw it in the list and run transactions until everything is clear. Priority doesn't matter here. */ list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); /* We have to release the msg lock and claim the smi lock in this case, because of race conditions. 
*/ spin_unlock_irqrestore(&(smi_info->msg_lock), flags); spin_lock_irqsave(&(smi_info->si_lock), flags); result = smi_event_handler(smi_info, 0); while (result != SI_SM_IDLE) { udelay(SI_SHORT_TIMEOUT_USEC); result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC); } spin_unlock_irqrestore(&(smi_info->si_lock), flags); return; } else { if (priority > 0) { list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs)); } else { list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); } } spin_unlock_irqrestore(&(smi_info->msg_lock), flags); spin_lock_irqsave(&(smi_info->si_lock), flags); if ((smi_info->si_state == SI_NORMAL) && (smi_info->curr_msg == NULL)) { start_next_msg(smi_info); si_restart_short_timer(smi_info); } spin_unlock_irqrestore(&(smi_info->si_lock), flags);}static void set_run_to_completion(void *send_info, int i_run_to_completion){ struct smi_info *smi_info = send_info; enum si_sm_result result; unsigned long flags; spin_lock_irqsave(&(smi_info->si_lock), flags); smi_info->run_to_completion = i_run_to_completion; if (i_run_to_completion) { result = smi_event_handler(smi_info, 0); while (result != SI_SM_IDLE) { udelay(SI_SHORT_TIMEOUT_USEC); result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC); } } spin_unlock_irqrestore(&(smi_info->si_lock), flags);}static void poll(void *send_info){ struct smi_info *smi_info = send_info; smi_event_handler(smi_info, 0);}static void request_events(void *send_info){ struct smi_info *smi_info = send_info; atomic_set(&smi_info->req_events, 1);}static int initialized = 0;/* Must be called with interrupts off and with the si_lock held. */static void si_restart_short_timer(struct smi_info *smi_info){#if defined(CONFIG_HIGH_RES_TIMERS) unsigned long flags; unsigned long jiffies_now; if (del_timer(&(smi_info->si_timer))) { /* If we don't delete the timer, then it will go off immediately, anyway. So we only process if we actually delete the timer. */ /* We already have irqsave on, so no need for it here. 
*/ read_lock(&xtime_lock); jiffies_now = jiffies; smi_info->si_timer.expires = jiffies_now; smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now); add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC); add_timer(&(smi_info->si_timer)); spin_lock_irqsave(&smi_info->count_lock, flags); smi_info->timeout_restarts++; spin_unlock_irqrestore(&smi_info->count_lock, flags); }#endif}static void smi_timeout(unsigned long data){ struct smi_info *smi_info = (struct smi_info *) data; enum si_sm_result smi_result; unsigned long flags; unsigned long jiffies_now; unsigned long time_diff;#ifdef DEBUG_TIMING struct timeval t;#endif if (smi_info->stop_operation) { smi_info->timer_stopped = 1; return; } spin_lock_irqsave(&(smi_info->si_lock), flags);#ifdef DEBUG_TIMING do_gettimeofday(&t); printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);#endif jiffies_now = jiffies; time_diff = ((jiffies_now - smi_info->last_timeout_jiffies) * SI_USEC_PER_JIFFY); smi_result = smi_event_handler(smi_info, time_diff); spin_unlock_irqrestore(&(smi_info->si_lock), flags); smi_info->last_timeout_jiffies = jiffies_now; if ((smi_info->irq) && (! smi_info->interrupt_disabled)) { /* Running with interrupts, only do long timeouts. */ smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; spin_lock_irqsave(&smi_info->count_lock, flags); smi_info->long_timeouts++; spin_unlock_irqrestore(&smi_info->count_lock, flags); goto do_add_timer; } /* If the state machine asks for a short delay, then shorten the timer timeout. 
*/ if (smi_result == SI_SM_CALL_WITH_DELAY) { spin_lock_irqsave(&smi_info->count_lock, flags); smi_info->short_timeouts++; spin_unlock_irqrestore(&smi_info->count_lock, flags);#if defined(CONFIG_HIGH_RES_TIMERS) read_lock(&xtime_lock); smi_info->si_timer.expires = jiffies; smi_info->si_timer.sub_expires = get_arch_cycles(smi_info->si_timer.expires); read_unlock(&xtime_lock); add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);#else smi_info->si_timer.expires = jiffies + 1;#endif } else { spin_lock_irqsave(&smi_info->count_lock, flags); smi_info->long_timeouts++; spin_unlock_irqrestore(&smi_info->count_lock, flags); smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;#if defined(CONFIG_HIGH_RES_TIMERS) smi_info->si_timer.sub_expires = 0;#endif } do_add_timer: add_timer(&(smi_info->si_timer));}static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs){ struct smi_info *smi_info = data; unsigned long flags;#ifdef DEBUG_TIMING struct timeval t;#endif spin_lock_irqsave(&(smi_info->si_lock), flags); spin_lock(&smi_info->count_lock); smi_info->interrupts++; spin_unlock(&smi_info->count_lock); if (smi_info->stop_operation) goto out;#ifdef DEBUG_TIMING do_gettimeofday(&t); printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);#endif smi_event_handler(smi_info, 0); out: spin_unlock_irqrestore(&(smi_info->si_lock), flags); return IRQ_HANDLED;}static struct ipmi_smi_handlers handlers ={ .owner = THIS_MODULE, .sender = sender, .request_events = request_events, .set_run_to_completion = set_run_to_completion, .poll = poll,};/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses, a default IO port, and 1 ACPI/SPMI address. 
That sets SI_MAX_DRIVERS */#define SI_MAX_PARMS 4#define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)static struct smi_info *smi_infos[SI_MAX_DRIVERS] ={ NULL, NULL, NULL, NULL };#define DEVICE_NAME "ipmi_si"#define DEFAULT_KCS_IO_PORT 0xca2#define DEFAULT_SMIC_IO_PORT 0xca9#define DEFAULT_BT_IO_PORT 0xe4static int si_trydefaults = 1;static char *si_type[SI_MAX_PARMS] = { NULL, NULL, NULL, NULL };#define MAX_SI_TYPE_STR 30static char si_type_str[MAX_SI_TYPE_STR];static unsigned long addrs[SI_MAX_PARMS] = { 0, 0, 0, 0 };static int num_addrs = 0;static unsigned int ports[SI_MAX_PARMS] = { 0, 0, 0, 0 };static int num_ports = 0;static int irqs[SI_MAX_PARMS] = { 0, 0, 0, 0 };static int num_irqs = 0;module_param_named(trydefaults, si_trydefaults, bool, 0);MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the" " default scan of the KCS and SMIC interface at the standard" " address");module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);MODULE_PARM_DESC(type, "Defines the type of each interface, each" " interface separated by commas. The types are 'kcs'," " 'smic', and 'bt'. For example si_type=kcs,bt will set" " the first interface to kcs and the second to bt");module_param_array(addrs, long, num_addrs, 0);MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the" " addresses separated by commas. Only use if an interface" " is in memory. Otherwise, set it to zero or leave" " it blank.");module_param_array(ports, int, num_ports, 0);MODULE_PARM_DESC(ports, "Sets the port address of each interface, the" " addresses separated by commas. Only use if an interface" " is a port. Otherwise, set it to zero or leave" " it blank.");module_param_array(irqs, int, num_irqs, 0);MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the" " addresses separated by commas. Only use if an interface" " has an interrupt. 
Otherwise, set it to zero or leave" " it blank.");#define IPMI_MEM_ADDR_SPACE 1#define IPMI_IO_ADDR_SPACE 2#if defined(CONFIG_ACPI_INTERPETER) || defined(CONFIG_X86) || defined(CONFIG_PCI)static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr){ int i; for (i = 0; i < SI_MAX_PARMS; ++i) { /* Don't check our address. */ if (i == intf) continue; if (si_type[i] != NULL) { if ((addr_space == IPMI_MEM_ADDR_SPACE && base_addr == addrs[i]) || (addr_space == IPMI_IO_ADDR_SPACE && base_addr == ports[i])) return 0; } else break; } return 1;}#endifstatic int std_irq_setup(struct smi_info *info){ int rv; if (!info->irq) return 0; rv = request_irq(info->irq, si_irq_handler, SA_INTERRUPT, DEVICE_NAME, info); if (rv) { printk(KERN_WARNING "ipmi_si: %s unable to claim interrupt %d," " running polled\n", DEVICE_NAME, info->irq); info->irq = 0; } else { printk(" Using irq %d\n", info->irq); } return rv;}static void std_irq_cleanup(struct smi_info *info){ if (!info->irq) return; free_irq(info->irq, info);}static unsigned char port_inb(struct si_sm_io *io, unsigned int offset){ unsigned int *addr = io->info; return inb((*addr)+offset);}static void port_outb(struct si_sm_io *io, unsigned int offset, unsigned char b){ unsigned int *addr = io->info; outb(b, (*addr)+offset);}static int port_setup(struct smi_info *info){ unsigned int *addr = info->io.info; if (!addr || (!*addr)) return -ENODEV; if (request_region(*addr, info->io_size, DEVICE_NAME) == NULL) return -EIO; return 0;}static void port_cleanup(struct smi_info *info){ unsigned int *addr = info->io.info; if (addr && (*addr)) release_region (*addr, info->io_size); kfree(info);}static int try_init_port(int intf_num, struct smi_info **new_info){ struct smi_info *info; if (!ports[intf_num]) return -ENODEV; if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE, ports[intf_num])) return -ENODEV; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n"); return 
-ENOMEM; } memset(info, 0, sizeof(*info)); info->io_setup = port_setup; info->io_cleanup = port_cleanup; info->io.inputb = port_inb; info->io.outputb = port_outb; info->io.info = &(ports[intf_num]); info->io.addr = NULL; info->irq = 0; info->irq_setup = NULL; *new_info = info; if (si_type[intf_num] == NULL) si_type[intf_num] = "kcs";
/* (End of page 1/4 -- web-viewer shortcut-help chrome removed; the file
   continues, mid-function, on the next page.) */