ipmi_si_intf.c
                        /* Error clearing flags */
                        printk(KERN_WARNING
                               "ipmi_si: Error clearing flags: %2.2x\n",
                               msg[2]);
                }
                if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
                        start_enable_irq(smi_info);
                else
                        smi_info->si_state = SI_NORMAL;
                break;
        }

        case SI_GETTING_EVENTS:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the event flag. */
                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
                        handle_flags(smi_info);
                } else {
                        spin_lock(&smi_info->count_lock);
                        smi_info->events++;
                        spin_unlock(&smi_info->count_lock);

                        /* Do this before we deliver the message
                           because delivering the message releases the
                           lock and something else can mess with the
                           state. */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_GETTING_MESSAGES:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the msg flag. */
                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
                        handle_flags(smi_info);
                } else {
                        spin_lock(&smi_info->count_lock);
                        smi_info->incoming_messages++;
                        spin_unlock(&smi_info->count_lock);

                        /* Do this before we deliver the message
                           because delivering the message releases the
                           lock and something else can mess with the
                           state. */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS1:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not enable interrupts"
                               ", failed get, using polled mode.\n");
                        smi_info->si_state = SI_NORMAL;
                } else {
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
                        msg[2] = msg[3] | 1; /* enable msg queue int */
                        smi_info->handlers->start_transaction(
                                smi_info->si_sm, msg, 3);
                        smi_info->si_state = SI_ENABLE_INTERRUPTS2;
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS2:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not enable interrupts"
                               ", failed set, using polled mode.\n");
                }
                smi_info->si_state = SI_NORMAL;
                break;
        }
        }
}

/* Called on timeouts and events.  Timeouts should pass the elapsed
   time, interrupts should pass in zero. */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                                           int time)
{
        enum si_sm_result si_sm_result;

 restart:
        /* There used to be a loop here that waited a little while
           (around 25us) before giving up.  That turned out to be
           pointless, the minimum delays I was seeing were in the 300us
           range, which is far too long to wait in an interrupt.  So we
           just run until the state machine tells us something happened
           or it needs a delay. */
        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
        time = 0;
        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
        {
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }
        if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
        {
                spin_lock(&smi_info->count_lock);
                smi_info->complete_transactions++;
                spin_unlock(&smi_info->count_lock);

                handle_transaction_done(smi_info);
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }
        else if (si_sm_result == SI_SM_HOSED)
        {
                spin_lock(&smi_info->count_lock);
                smi_info->hosed_count++;
                spin_unlock(&smi_info->count_lock);

                /* Do this before return_hosed_msg, because that
                   releases the lock. */
                smi_info->si_state = SI_NORMAL;
                if (smi_info->curr_msg != NULL) {
                        /* If we were handling a user message, format
                           a response to send to the upper layer to
                           tell it about the error. */
                        return_hosed_msg(smi_info);
                }
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }

        /* We prefer handling attn over new messages. */
        if (si_sm_result == SI_SM_ATTN)
        {
                unsigned char msg[2];

                spin_lock(&smi_info->count_lock);
                smi_info->attentions++;
                spin_unlock(&smi_info->count_lock);

                /* Got an attn, send down a get message flags to see
                   what's causing it.  It would be better to handle
                   this in the upper layer, but due to the way
                   interrupts work with the SMI, that's not really
                   possible. */
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm, msg, 2);
                smi_info->si_state = SI_GETTING_FLAGS;
                goto restart;
        }

        /* If we are currently idle, try to start the next message. */
        if (si_sm_result == SI_SM_IDLE) {
                spin_lock(&smi_info->count_lock);
                smi_info->idles++;
                spin_unlock(&smi_info->count_lock);

                si_sm_result = start_next_msg(smi_info);
                if (si_sm_result != SI_SM_IDLE)
                        goto restart;
        }

        if ((si_sm_result == SI_SM_IDLE)
            && (atomic_read(&smi_info->req_events)))
        {
                /* We are idle and the upper layer requested that I
                   fetch events, so do so. */
                unsigned char msg[2];

                spin_lock(&smi_info->count_lock);
                smi_info->flag_fetches++;
                spin_unlock(&smi_info->count_lock);

                atomic_set(&smi_info->req_events, 0);
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm, msg, 2);
                smi_info->si_state = SI_GETTING_FLAGS;
                goto restart;
        }

        return si_sm_result;
}

static void sender(void *send_info,
                   struct ipmi_smi_msg *msg,
                   int priority)
{
        struct smi_info *smi_info = send_info;
        enum si_sm_result result;
        unsigned long flags;
#ifdef DEBUG_TIMING
        struct timeval t;
#endif

        spin_lock_irqsave(&(smi_info->msg_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif

        if (smi_info->run_to_completion) {
                /* If we are running to completion, then throw it in
                   the list and run transactions until everything is
                   clear.  Priority doesn't matter here. */
                list_add_tail(&(msg->link), &(smi_info->xmit_msgs));

                /* We have to release the msg lock and claim the smi
                   lock in this case, because of race conditions. */
                spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
                spin_lock_irqsave(&(smi_info->si_lock), flags);
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                return;
        } else {
                if (priority > 0) {
                        list_add_tail(&(msg->link),
                                      &(smi_info->hp_xmit_msgs));
                } else {
                        list_add_tail(&(msg->link),
                                      &(smi_info->xmit_msgs));
                }
        }
        spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

        spin_lock_irqsave(&(smi_info->si_lock), flags);
        if ((smi_info->si_state == SI_NORMAL)
            && (smi_info->curr_msg == NULL)) {
                start_next_msg(smi_info);
                si_restart_short_timer(smi_info);
        }
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

static void set_run_to_completion(void *send_info, int i_run_to_completion)
{
        struct smi_info *smi_info = send_info;
        enum si_sm_result result;
        unsigned long flags;

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        smi_info->run_to_completion = i_run_to_completion;
        if (i_run_to_completion) {
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
        }

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

static int ipmi_thread(void *data)
{
        struct smi_info *smi_info = data;
        unsigned long flags;
        enum si_sm_result smi_result;

        set_user_nice(current, 19);
        while (!kthread_should_stop()) {
                spin_lock_irqsave(&(smi_info->si_lock), flags);
                smi_result = smi_event_handler(smi_info, 0);
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
                        /* do nothing */
                }
                else if (smi_result == SI_SM_CALL_WITH_DELAY)
                        udelay(1);
                else
                        schedule_timeout_interruptible(1);
        }
        return 0;
}

static void poll(void *send_info)
{
        struct smi_info *smi_info = send_info;

        smi_event_handler(smi_info, 0);
}

static void request_events(void *send_info)
{
        struct smi_info *smi_info = send_info;

        atomic_set(&smi_info->req_events, 1);
}

static int initialized = 0;

/* Must be called with interrupts off and with the si_lock held. */
static void si_restart_short_timer(struct smi_info *smi_info)
{
#if defined(CONFIG_HIGH_RES_TIMERS)
        unsigned long flags;
        unsigned long jiffies_now;
        unsigned long seq;

        if (del_timer(&(smi_info->si_timer))) {
                /* If we don't delete the timer, then it will go off
                   immediately, anyway.  So we only process if we
                   actually delete the timer. */
                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        jiffies_now = jiffies;
                        smi_info->si_timer.expires = jiffies_now;
                        smi_info->si_timer.arch_cycle_expires
                                = get_arch_cycles(jiffies_now);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);

                add_timer(&(smi_info->si_timer));
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->timeout_restarts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
        }
#endif
}

static void smi_timeout(unsigned long data)
{
        struct smi_info *smi_info = (struct smi_info *) data;
        enum si_sm_result smi_result;
        unsigned long flags;
        unsigned long jiffies_now;
        long time_diff;
#ifdef DEBUG_TIMING
        struct timeval t;
#endif

        if (atomic_read(&smi_info->stop_operation))
                return;

        spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        jiffies_now = jiffies;
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
        smi_result = smi_event_handler(smi_info, time_diff);

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);

        smi_info->last_timeout_jiffies = jiffies_now;

        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                /* Running with interrupts, only do long timeouts. */
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                goto do_add_timer;
        }

        /* If the state machine asks for a short delay, then shorten
           the timer timeout. */
        if (smi_result == SI_SM_CALL_WITH_DELAY) {
#if defined(CONFIG_HIGH_RES_TIMERS)
                unsigned long seq;
#endif
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->short_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
#if defined(CONFIG_HIGH_RES_TIMERS)
                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        smi_info->si_timer.expires = jiffies;
                        smi_info->si_timer.arch_cycle_expires
                                = get_arch_cycles(smi_info->si_timer.expires);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
#else
                smi_info->si_timer.expires = jiffies + 1;
#endif
        } else {
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
#if defined(CONFIG_HIGH_RES_TIMERS)
                smi_info->si_timer.arch_cycle_expires = 0;
#endif
        }

 do_add_timer:
        add_timer(&(smi_info->si_timer));
}

static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
{
        struct smi_info *smi_info = data;
        unsigned long flags;
#ifdef DEBUG_TIMING
        struct timeval t;
#endif

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        spin_lock(&smi_info->count_lock);
        smi_info->interrupts++;
        spin_unlock(&smi_info->count_lock);

        if (atomic_read(&smi_info->stop_operation))
                goto out;

#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        smi_event_handler(smi_info, 0);
 out:
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
        return IRQ_HANDLED;
}

static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
{
        struct smi_info *smi_info = data;

        /* We need to clear the IRQ flag for the BT interface. */
        smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
                             IPMI_BT_INTMASK_CLEAR_IRQ_BIT
                             | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
        return si_irq_handler(irq, data, regs);
}
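/*
 * Not shown in this excerpt: the callbacks above (sender, request_events,
 * set_run_to_completion, poll) are handed to the IPMI message handler
 * through a struct ipmi_smi_handlers table.  The sketch below illustrates
 * how that registration typically looks in this era of the driver; the
 * field names follow the 2.6-series ipmi_smi.h, and the exact table in
 * the full file may differ.
 */
static struct ipmi_smi_handlers handlers =
{
        .owner                 = THIS_MODULE,
        .sender                = sender,
        .request_events        = request_events,
        .set_run_to_completion = set_run_to_completion,
        .poll                  = poll,
};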