
📄 ehca_irq.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
	return;
}

/* Hard-IRQ handler for the event queue (EQ): defer all work to the tasklet. */
irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
{
	struct ehca_shca *shca = (struct ehca_shca *)dev_id;

	tasklet_hi_schedule(&shca->eq.interrupt_task);

	return IRQ_HANDLED;
}

/* Dispatch a single EQ entry: completion events go to the matching CQ,
 * everything else is handed to parse_identifier(). */
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
	u64 eqe_value;
	u32 token;
	struct ehca_cq *cq;

	eqe_value = eqe->entry;
	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
		ehca_dbg(&shca->ib_device, "Got completion event");
		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
		read_lock(&ehca_cq_idr_lock);
		cq = idr_find(&ehca_cq_idr, token);
		if (cq)
			atomic_inc(&cq->nr_events);
		read_unlock(&ehca_cq_idr_lock);
		if (cq == NULL) {
			ehca_err(&shca->ib_device,
				 "Invalid eqe for non-existing cq token=%x",
				 token);
			return;
		}
		reset_eq_pending(cq);
		if (ehca_scaling_code)
			queue_comp_task(cq);
		else {
			comp_event_callback(cq);
			if (atomic_dec_and_test(&cq->nr_events))
				wake_up(&cq->wait_completion);
		}
	} else {
		ehca_dbg(&shca->ib_device, "Got non completion event");
		parse_identifier(shca, eqe_value);
	}
}

/* Drain the EQ: cache up to EHCA_EQE_CACHE_SIZE entries, re-arm the
 * interrupt, then run the completion handlers outside the EQ lock. */
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
	struct ehca_eq *eq = &shca->eq;
	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
	u64 eqe_value;
	unsigned long flags;
	int eqe_cnt, i;
	int eq_empty = 0;

	spin_lock_irqsave(&eq->irq_spinlock, flags);
	if (is_irq) {
		const int max_query_cnt = 100;
		int query_cnt = 0;
		int int_state = 1;
		do {
			int_state = hipz_h_query_int_state(
				shca->ipz_hca_handle, eq->ist);
			query_cnt++;
			iosync();
		} while (int_state && query_cnt < max_query_cnt);
		if (unlikely(query_cnt == max_query_cnt))
			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
				 int_state, query_cnt);
	}

	/* read out all eqes */
	eqe_cnt = 0;
	do {
		u32 token;
		eqe_cache[eqe_cnt].eqe =
			(struct ehca_eqe *)ehca_poll_eq(shca, eq);
		if (!eqe_cache[eqe_cnt].eqe)
			break;
		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
			read_lock(&ehca_cq_idr_lock);
			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
			if (eqe_cache[eqe_cnt].cq)
				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
			read_unlock(&ehca_cq_idr_lock);
			if (!eqe_cache[eqe_cnt].cq) {
				ehca_err(&shca->ib_device,
					 "Invalid eqe for non-existing cq "
					 "token=%x", token);
				continue;
			}
		} else
			eqe_cache[eqe_cnt].cq = NULL;
		eqe_cnt++;
	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
	if (!eqe_cnt) {
		if (is_irq)
			ehca_dbg(&shca->ib_device,
				 "No eqe found for irq event");
		goto unlock_irq_spinlock;
	} else if (!is_irq)
		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");

	/* enable irq for new packets */
	for (i = 0; i < eqe_cnt; i++) {
		if (eq->eqe_cache[i].cq)
			reset_eq_pending(eq->eqe_cache[i].cq);
	}

	/* check eq */
	spin_lock(&eq->spinlock);
	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
	spin_unlock(&eq->spinlock);

	/* call completion handler for cached eqes */
	for (i = 0; i < eqe_cnt; i++)
		if (eq->eqe_cache[i].cq) {
			if (ehca_scaling_code)
				queue_comp_task(eq->eqe_cache[i].cq);
			else {
				struct ehca_cq *cq = eq->eqe_cache[i].cq;
				comp_event_callback(cq);
				if (atomic_dec_and_test(&cq->nr_events))
					wake_up(&cq->wait_completion);
			}
		} else {
			ehca_dbg(&shca->ib_device, "Got non completion event");
			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
		}

	/* poll eq if not empty */
	if (eq_empty)
		goto unlock_irq_spinlock;
	do {
		struct ehca_eqe *eqe;
		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
		if (!eqe)
			break;
		process_eqe(shca, eqe);
	} while (1);

unlock_irq_spinlock:
	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}

/* Tasklet body: process the EQ in softirq context. */
void ehca_tasklet_eq(unsigned long data)
{
	ehca_process_eq((struct ehca_shca *)data, 1);
}

/* Round-robin pick of the next online CPU for completion work. */
static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
	int cpu;
	unsigned long flags;

	WARN_ON_ONCE(!in_interrupt());
	if (ehca_debug_level)
		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");

	spin_lock_irqsave(&pool->last_cpu_lock, flags);
	cpu = next_cpu(pool->last_cpu, cpu_online_map);
	if (cpu == NR_CPUS)
		cpu = first_cpu(cpu_online_map);
	pool->last_cpu = cpu;
	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);

	return cpu;
}

/* Add a CQ to the given per-CPU task's work list; wake the task only
 * when the CQ was not already queued. */
static void __queue_comp_task(struct ehca_cq *__cq,
			      struct ehca_cpu_comp_task *cct)
{
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);
	spin_lock(&__cq->task_lock);

	if (__cq->nr_callbacks == 0) {
		__cq->nr_callbacks++;
		list_add_tail(&__cq->entry, &cct->cq_list);
		cct->cq_jobs++;
		wake_up(&cct->wait_queue);
	} else
		__cq->nr_callbacks++;

	spin_unlock(&__cq->task_lock);
	spin_unlock_irqrestore(&cct->task_lock, flags);
}

/* Pick a CPU for the CQ; if that CPU is already busy, try once more. */
static void queue_comp_task(struct ehca_cq *__cq)
{
	int cpu_id;
	struct ehca_cpu_comp_task *cct;
	int cq_jobs;
	unsigned long flags;

	cpu_id = find_next_online_cpu(pool);
	BUG_ON(!cpu_online(cpu_id));

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
	BUG_ON(!cct);

	spin_lock_irqsave(&cct->task_lock, flags);
	cq_jobs = cct->cq_jobs;
	spin_unlock_irqrestore(&cct->task_lock, flags);

	if (cq_jobs > 0) {
		cpu_id = find_next_online_cpu(pool);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
		BUG_ON(!cct);
	}

	__queue_comp_task(__cq, cct);
}

/* Run completion callbacks for every CQ queued on this per-CPU task,
 * dropping the task lock around each callback. */
static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
	struct ehca_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);

	while (!list_empty(&cct->cq_list)) {
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
		spin_unlock_irqrestore(&cct->task_lock, flags);

		comp_event_callback(cq);
		if (atomic_dec_and_test(&cq->nr_events))
			wake_up(&cq->wait_completion);

		spin_lock_irqsave(&cct->task_lock, flags);
		spin_lock(&cq->task_lock);
		cq->nr_callbacks--;
		if (!cq->nr_callbacks) {
			list_del_init(cct->cq_list.next);
			cct->cq_jobs--;
		}
		spin_unlock(&cq->task_lock);
	}

	spin_unlock_irqrestore(&cct->task_lock, flags);
}

/* Kernel thread: sleep until work is queued, then drain the CQ list. */
static int comp_task(void *__cct)
{
	struct ehca_cpu_comp_task *cct = __cct;
	int cql_empty;
	DECLARE_WAITQUEUE(wait, current);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cct->wait_queue, &wait);

		spin_lock_irq(&cct->task_lock);
		cql_empty = list_empty(&cct->cq_list);
		spin_unlock_irq(&cct->task_lock);
		if (cql_empty)
			schedule();
		else
			__set_current_state(TASK_RUNNING);

		remove_wait_queue(&cct->wait_queue, &wait);

		spin_lock_irq(&cct->task_lock);
		cql_empty = list_empty(&cct->cq_list);
		spin_unlock_irq(&cct->task_lock);
		if (!cql_empty)
			run_comp_task(__cct);

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
					    int cpu)
{
	struct ehca_cpu_comp_task *cct;

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	spin_lock_init(&cct->task_lock);
	INIT_LIST_HEAD(&cct->cq_list);
	init_waitqueue_head(&cct->wait_queue);
	cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);

	return cct->task;
}

static void destroy_comp_task(struct ehca_comp_pool *pool,
			      int cpu)
{
	struct ehca_cpu_comp_task *cct;
	struct task_struct *task;
	unsigned long flags_cct;

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);

	spin_lock_irqsave(&cct->task_lock, flags_cct);

	task = cct->task;
	cct->task = NULL;
	cct->cq_jobs = 0;

	spin_unlock_irqrestore(&cct->task_lock, flags_cct);

	if (task)
		kthread_stop(task);
}

/* When a CPU goes away, move its pending CQs to the current CPU. */
static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
{
	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	LIST_HEAD(list);
	struct ehca_cq *cq;
	unsigned long flags_cct;

	spin_lock_irqsave(&cct->task_lock, flags_cct);

	list_splice_init(&cct->cq_list, &list);

	while (!list_empty(&list)) {
		/* walk the spliced-off local list; cct->cq_list is empty now */
		cq = list_entry(list.next, struct ehca_cq, entry);

		list_del(&cq->entry);
		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
						  smp_processor_id()));
	}

	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
}

/* CPU hotplug notifier: create, rebind, or tear down the per-CPU
 * completion task as CPUs come and go. */
static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
					unsigned long action,
					void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct ehca_cpu_comp_task *cct;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
		if (!create_comp_task(pool, cpu)) {
			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, any_online_cpu(cpu_online_map));
		destroy_comp_task(pool, cpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpu);
		wake_up_process(cct->task);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
		destroy_comp_task(pool, cpu);
		take_over_work(pool, cpu);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block comp_pool_callback_nb __cpuinitdata = {
	.notifier_call	= comp_pool_callback,
	.priority	= 0,
};

/* Set up the per-CPU completion-task pool (only with scaling enabled). */
int ehca_create_comp_pool(void)
{
	int cpu;
	struct task_struct *task;

	if (!ehca_scaling_code)
		return 0;

	pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
	if (pool == NULL)
		return -ENOMEM;

	spin_lock_init(&pool->last_cpu_lock);
	pool->last_cpu = any_online_cpu(cpu_online_map);

	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
	if (pool->cpu_comp_tasks == NULL) {
		kfree(pool);
		return -EINVAL;
	}

	for_each_online_cpu(cpu) {
		task = create_comp_task(pool, cpu);
		if (task) {
			kthread_bind(task, cpu);
			wake_up_process(task);
		}
	}

	register_hotcpu_notifier(&comp_pool_callback_nb);

	printk(KERN_INFO "eHCA scaling code enabled\n");

	return 0;
}

void ehca_destroy_comp_pool(void)
{
	int i;

	if (!ehca_scaling_code)
		return;

	unregister_hotcpu_notifier(&comp_pool_callback_nb);

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			destroy_comp_task(pool, i);
	}
	free_percpu(pool->cpu_comp_tasks);
	kfree(pool);
}
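How the pieces above connect: ehca_interrupt_eq() runs in hard-IRQ context and only schedules a tasklet; the deferred work happens in ehca_tasklet_eq(), which calls ehca_process_eq(shca, 1). The registration code is not on this page (the driver wires this up elsewhere, through the pSeries ibmebus layer). Below is a minimal sketch of the usual handler/tasklet pairing for this kernel era; the function name, irq argument, and IRQF_DISABLED flag are illustrative assumptions, not the driver's actual setup code.

	/* Hedged sketch, not part of ehca_irq.c: typical wiring of the
	 * hard-IRQ handler and tasklet defined above. */
	#include <linux/interrupt.h>

	static int example_setup_eq_irq(struct ehca_shca *shca, unsigned int irq)
	{
		/* Bind the tasklet to ehca_tasklet_eq(), which runs
		 * ehca_process_eq(shca, 1) in softirq context. */
		tasklet_init(&shca->eq.interrupt_task, ehca_tasklet_eq,
			     (unsigned long)shca);

		/* ehca_interrupt_eq() merely calls tasklet_hi_schedule()
		 * and returns IRQ_HANDLED. */
		return request_irq(irq, ehca_interrupt_eq, IRQF_DISABLED,
				   "ehca_eq", shca);
	}

This split keeps the time spent with interrupts disabled short: the hard-IRQ path does constant-time work, while EQ draining and completion callbacks run in softirq context or, with ehca_scaling_code set, on the per-CPU comp_task threads.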
