workqueue.c
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
void cancel_work_sync(struct work_struct *work)
{
	while (!try_to_grab_pending(work))
		cpu_relax();
	wait_on_work(work);
	work_clear_pending(work);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	while (!del_timer(&dwork->timer) &&
	       !try_to_grab_pending(&dwork->work))
		cpu_relax();
	wait_on_work(&dwork->work);
	work_clear_pending(&dwork->work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
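/*
 * Illustrative usage sketch (not part of workqueue.c): how a driver-style
 * teardown path might use the two cancellation helpers above. The my_dev
 * type, my_dev_remove() and the work items are hypothetical names.
 */
#if 0
struct my_dev {
	struct delayed_work poll_work;	/* rearms itself from its handler */
	struct work_struct  irq_work;	/* queued from an interrupt handler */
};

static void my_dev_remove(struct my_dev *dev)
{
	/*
	 * Stop the self-rearming poller: del_timer()/try_to_grab_pending()
	 * race until the work is ours, then wait for a running handler.
	 */
	cancel_rearming_delayed_work(&dev->poll_work);

	/*
	 * Cancel the plain work item; blocks if its callback is currently
	 * running, and copes with the work re-queueing itself.
	 */
	cancel_work_sync(&dev->irq_work);
}
#endif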
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
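/*
 * Illustrative usage sketch (not part of workqueue.c): deferring work to the
 * kernel-global "events" workqueue with schedule_work() and
 * schedule_delayed_work(). my_work, my_retry, my_handler() and my_init() are
 * hypothetical names.
 */
#if 0
static struct work_struct my_work;
static struct delayed_work my_retry;

static void my_handler(struct work_struct *work)
{
	/* runs in process context on one of the "events/N" keventd threads */
}

static int __init my_init(void)
{
	INIT_WORK(&my_work, my_handler);
	INIT_DELAYED_WORK(&my_retry, my_handler);

	schedule_work(&my_work);		/* run as soon as possible */
	schedule_delayed_work(&my_retry, HZ);	/* run about one second later */
	return 0;
}
#endif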
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 *	be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
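/*
 * Illustrative usage sketch (not part of workqueue.c): execute_in_process_context()
 * calls @fn directly when the caller already has process context and only
 * defers to keventd when called from interrupt context. my_obj, my_release()
 * and my_put() are hypothetical names; the execute_work storage must stay
 * valid until the callback has run.
 */
#if 0
struct my_obj {
	struct execute_work ew;
	/* ... */
};

static void my_release(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, ew.work);

	kfree(obj);	/* final teardown, guaranteed process context */
}

static void my_put(struct my_obj *obj)
{
	/* may be reached from hard-irq context */
	execute_in_process_context(my_release, &obj->ew);
}
#endif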
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD,
	 * workqueue_mutex protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	/*
	 * If the caller is CPU_DEAD a single flush_cpu_workqueue()
	 * is not enough, a concurrent flush_workqueue() can insert a
	 * barrier after us.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	while (flush_cpu_workqueue(cwq))
		;

	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}
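/*
 * Illustrative usage sketch (not part of workqueue.c): the lifecycle of a
 * dedicated workqueue built on __create_workqueue() via create_workqueue(),
 * the same helper init_workqueues() uses above for "events". my_wq, my_work,
 * my_work_fn(), my_init() and my_exit() are hypothetical names.
 */
#if 0
static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* runs on one of my_wq's per-cpu "mydrv/N" kernel threads */
}

static int __init my_init(void)
{
	my_wq = create_workqueue("mydrv");
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	cancel_work_sync(&my_work);	/* wait for a running callback */
	destroy_workqueue(my_wq);	/* pending work is flushed first */
}
#endif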