
📄 ipmi_kcs_intf.c

📁 This Linux source code is fairly comprehensive and essentially complete, built with a C compiler. Due to time constraints I have not tested it myself, but it is still very useful as reference material.
💻 C
📖 Page 1 of 3
	case KCS_ENABLE_INTERRUPTS1:
	{
		unsigned char msg[4];

		/* We got the flags from the KCS, now handle them. */
		kcs_get_result(kcs_info->kcs_sm, msg, 4);
		if (msg[2] != 0) {
			printk(KERN_WARNING
			       "ipmi_kcs: Could not enable interrupts"
			       ", failed get, using polled mode.\n");
			kcs_info->kcs_state = KCS_NORMAL;
		} else {
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = msg[3] | 1; /* enable msg queue int */
			start_kcs_transaction(kcs_info->kcs_sm, msg, 3);
			kcs_info->kcs_state = KCS_ENABLE_INTERRUPTS2;
		}
		break;
	}

	case KCS_ENABLE_INTERRUPTS2:
	{
		unsigned char msg[4];

		/* We got the flags from the KCS, now handle them. */
		kcs_get_result(kcs_info->kcs_sm, msg, 4);
		if (msg[2] != 0) {
			printk(KERN_WARNING
			       "ipmi_kcs: Could not enable interrupts"
			       ", failed set, using polled mode.\n");
		}
		kcs_info->kcs_state = KCS_NORMAL;
		break;
	}
	}
}

/* Called on timeouts and events.  Timeouts should pass the elapsed
   time, interrupts should pass in zero. */
static enum kcs_result kcs_event_handler(struct kcs_info *kcs_info, int time)
{
	enum kcs_result kcs_result;

 restart:
	/* There used to be a loop here that waited a little while
	   (around 25us) before giving up.  That turned out to be
	   pointless, the minimum delays I was seeing were in the 300us
	   range, which is far too long to wait in an interrupt.  So
	   we just run until the state machine tells us something
	   happened or it needs a delay. */
	kcs_result = kcs_event(kcs_info->kcs_sm, time);
	time = 0;
	while (kcs_result == KCS_CALL_WITHOUT_DELAY)
	{
		kcs_result = kcs_event(kcs_info->kcs_sm, 0);
	}

	if (kcs_result == KCS_TRANSACTION_COMPLETE)
	{
		handle_transaction_done(kcs_info);
		kcs_result = kcs_event(kcs_info->kcs_sm, 0);
	}
	else if (kcs_result == KCS_SM_HOSED)
	{
		if (kcs_info->curr_msg != NULL) {
			/* If we were handling a user message, format
			   a response to send to the upper layer to
			   tell it about the error. */
			return_hosed_msg(kcs_info);
		}
		kcs_result = kcs_event(kcs_info->kcs_sm, 0);
		kcs_info->kcs_state = KCS_NORMAL;
	}

	/* We prefer handling attn over new messages. */
	if (kcs_result == KCS_ATTN)
	{
		unsigned char msg[2];

		/* Got a attn, send down a get message flags to see
		   what's causing it.  It would be better to handle
		   this in the upper layer, but due to the way
		   interrupts work with the KCS, that's not really
		   possible. */
		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg[1] = IPMI_GET_MSG_FLAGS_CMD;

		start_kcs_transaction(kcs_info->kcs_sm, msg, 2);
		kcs_info->kcs_state = KCS_GETTING_FLAGS;
		goto restart;
	}

	/* If we are currently idle, try to start the next message. */
	if (kcs_result == KCS_SM_IDLE) {
		kcs_result = start_next_msg(kcs_info);
		if (kcs_result != KCS_SM_IDLE)
			goto restart;
	}

	if ((kcs_result == KCS_SM_IDLE)
	    && (atomic_read(&kcs_info->req_events)))
	{
		/* We are idle and the upper layer requested that I fetch
		   events, so do so. */
		unsigned char msg[2];

		atomic_set(&kcs_info->req_events, 0);
		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg[1] = IPMI_GET_MSG_FLAGS_CMD;

		start_kcs_transaction(kcs_info->kcs_sm, msg, 2);
		kcs_info->kcs_state = KCS_GETTING_FLAGS;
		goto restart;
	}

	return kcs_result;
}

static void sender(void                *send_info,
		   struct ipmi_smi_msg *msg,
		   int                 priority)
{
	struct kcs_info *kcs_info = (struct kcs_info *) send_info;
	enum kcs_result result;
	unsigned long   flags;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	spin_lock_irqsave(&(kcs_info->msg_lock), flags);
#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif

	if (kcs_info->run_to_completion) {
		/* If we are running to completion, then throw it in
		   the list and run transactions until everything is
		   clear.  Priority doesn't matter here. */
		list_add_tail(&(msg->link), &(kcs_info->xmit_msgs));

		/* We have to release the msg lock and claim the kcs
		   lock in this case, because of race conditions. */
		spin_unlock_irqrestore(&(kcs_info->msg_lock), flags);

		spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
		result = kcs_event_handler(kcs_info, 0);
		while (result != KCS_SM_IDLE) {
			udelay(500);
			result = kcs_event_handler(kcs_info, 500);
		}
		spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
		return;
	} else {
		if (priority > 0) {
			list_add_tail(&(msg->link), &(kcs_info->hp_xmit_msgs));
		} else {
			list_add_tail(&(msg->link), &(kcs_info->xmit_msgs));
		}
	}
	spin_unlock_irqrestore(&(kcs_info->msg_lock), flags);

	spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
	if ((kcs_info->kcs_state == KCS_NORMAL)
	    && (kcs_info->curr_msg == NULL))
	{
		start_next_msg(kcs_info);
	}
	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
}

static void set_run_to_completion(void *send_info, int i_run_to_completion)
{
	struct kcs_info *kcs_info = (struct kcs_info *) send_info;
	enum kcs_result result;
	unsigned long   flags;

	spin_lock_irqsave(&(kcs_info->kcs_lock), flags);

	kcs_info->run_to_completion = i_run_to_completion;
	if (i_run_to_completion) {
		result = kcs_event_handler(kcs_info, 0);
		while (result != KCS_SM_IDLE) {
			udelay(500);
			result = kcs_event_handler(kcs_info, 500);
		}
	}

	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
}

static void request_events(void *send_info)
{
	struct kcs_info *kcs_info = (struct kcs_info *) send_info;

	atomic_set(&kcs_info->req_events, 1);
}

static int new_user(void *send_info)
{
	if (!try_inc_mod_count(THIS_MODULE))
		return -EBUSY;
	return 0;
}

static void user_left(void *send_info)
{
	MOD_DEC_USE_COUNT;
}

/* Call every 10 ms. */
#define KCS_TIMEOUT_TIME_USEC	10000
#define KCS_USEC_PER_JIFFY	(1000000/HZ)
#define KCS_TIMEOUT_JIFFIES	(KCS_TIMEOUT_TIME_USEC/KCS_USEC_PER_JIFFY)
#define KCS_SHORT_TIMEOUT_USEC  500 /* .5ms when the SM request a
                                       short timeout */

static int initialized = 0;

static void kcs_timeout(unsigned long data)
{
	struct kcs_info *kcs_info = (struct kcs_info *) data;
	enum kcs_result kcs_result;
	unsigned long   flags;
	unsigned long   jiffies_now;
	unsigned long   time_diff;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	if (kcs_info->stop_operation) {
		kcs_info->timer_stopped = 1;
		return;
	}

	spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	jiffies_now = jiffies;
	time_diff = ((jiffies_now - kcs_info->last_timeout_jiffies)
		     * KCS_USEC_PER_JIFFY);
	kcs_result = kcs_event_handler(kcs_info, time_diff);
	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);

	kcs_info->last_timeout_jiffies = jiffies_now;

	if ((kcs_info->irq) && (! kcs_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
		goto do_add_timer;
	}

	/* If the state machine asks for a short delay, then shorten
	   the timer timeout. */
#ifdef CONFIG_HIGH_RES_TIMERS
	if (kcs_result == KCS_CALL_WITH_DELAY) {
		kcs_info->kcs_timer.sub_expires
			+= usec_to_arch_cycles(KCS_SHORT_TIMEOUT_USEC);
		while (kcs_info->kcs_timer.sub_expires >= cycles_per_jiffies) {
			kcs_info->kcs_timer.expires++;
			kcs_info->kcs_timer.sub_expires -= cycles_per_jiffies;
		}
	} else {
		kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
	}
#else
	/* If requested, take the shortest delay possible */
	if (kcs_result == KCS_CALL_WITH_DELAY) {
		kcs_info->kcs_timer.expires = jiffies + 1;
	} else {
		kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
	}
#endif

 do_add_timer:
	add_timer(&(kcs_info->kcs_timer));
}

static void kcs_irq_handler(int irq, void *data, struct pt_regs *regs)
{
	struct kcs_info *kcs_info = (struct kcs_info *) data;
	unsigned long   flags;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
	if (kcs_info->stop_operation)
		goto out;

#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	kcs_event_handler(kcs_info, 0);
 out:
	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
}

static struct ipmi_smi_handlers handlers =
{
	sender:		       sender,
	request_events:        request_events,
	new_user:	       new_user,
	user_left:	       user_left,
	set_run_to_completion: set_run_to_completion
};

static unsigned char ipmi_kcs_dev_rev;
static unsigned char ipmi_kcs_fw_rev_major;
static unsigned char ipmi_kcs_fw_rev_minor;
static unsigned char ipmi_version_major;
static unsigned char ipmi_version_minor;

extern int kcs_dbg;
static int ipmi_kcs_detect_hardware(unsigned int port,
				    unsigned char *addr,
				    struct kcs_data *data)
{
	unsigned char   msg[2];
	unsigned char   resp[IPMI_MAX_MSG_LENGTH];
	unsigned long   resp_len;
	enum kcs_result kcs_result;

	/* It's impossible for the KCS status register to be all 1's,
	   (assuming a properly functioning, self-initialized BMC)
	   but that's what you get from reading a bogus address, so we
	   test that first. */
	if (port) {
		if (inb(port+1) == 0xff) return -ENODEV;
	} else {
		if (readb(addr+1) == 0xff) return -ENODEV;
	}

	/* Do a Get Device ID command, since it comes back with some
	   useful info. */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	start_kcs_transaction(data, msg, 2);

	kcs_result = kcs_event(data, 0);
	for (;;)
	{
		if (kcs_result == KCS_CALL_WITH_DELAY) {
			udelay(100);
			kcs_result = kcs_event(data, 100);
		}
		else if (kcs_result == KCS_CALL_WITHOUT_DELAY)
		{
			kcs_result = kcs_event(data, 0);
		}
		else
			break;
	}

	if (kcs_result == KCS_SM_HOSED) {
		/* We couldn't get the state machine to run, so whatever's at
		   the port is probably not an IPMI KCS interface. */
		return -ENODEV;
	}

	/* Otherwise, we got some data. */
	resp_len = kcs_get_result(data, resp, IPMI_MAX_MSG_LENGTH);
	if (resp_len < 6)
		/* That's odd, it should be longer. */
		return -EINVAL;

	if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0))
		/* That's odd, it shouldn't be able to fail. */
		return -EINVAL;

	ipmi_kcs_dev_rev = resp[4] & 0xf;
	ipmi_kcs_fw_rev_major = resp[5] & 0x7f;
	ipmi_kcs_fw_rev_minor = resp[6];
	ipmi_version_major = resp[7] & 0xf;
	ipmi_version_minor = resp[7] >> 4;

	return 0;
}

/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
   a default IO port, and 1 ACPI/SPMI address.  That sets KCS_MAX_DRIVERS */

#define KCS_MAX_PARMS 4
#define KCS_MAX_DRIVERS ((KCS_MAX_PARMS * 2) + 2)
static struct kcs_info *kcs_infos[KCS_MAX_DRIVERS] =
{ NULL, NULL, NULL, NULL };

#define DEVICE_NAME "ipmi_kcs"

#define DEFAULT_IO_PORT 0xca2

static int kcs_trydefaults = 1;
static unsigned long kcs_addrs[KCS_MAX_PARMS] = { 0, 0, 0, 0 };
static int kcs_ports[KCS_MAX_PARMS] = { 0, 0, 0, 0 };
static int kcs_irqs[KCS_MAX_PARMS] = { 0, 0, 0, 0 };

MODULE_PARM(kcs_trydefaults, "i");
MODULE_PARM(kcs_addrs, "1-4l");
MODULE_PARM(kcs_irqs, "1-4i");
MODULE_PARM(kcs_ports, "1-4i");

/* Returns 0 if initialized, or negative on an error. */
static int init_one_kcs(int kcs_port,
			int irq,
			unsigned long kcs_physaddr,
			struct kcs_info **kcs)
{
	int		rv;
	struct kcs_info *new_kcs;

	/* Did anything get passed in at all?  Both == zero disables the
	   driver. */
