workqueues: implement flush_work()
Most users of flush_workqueue() can be changed to use cancel_work_sync(),
but sometimes we really need to wait for the completion, and cancelling is not
an option. schedule_on_each_cpu() is a good example.
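As an illustration of that kind of caller (a sketch only; my_wq, my_work and
my_func are hypothetical names, not the actual schedule_on_each_cpu() body):
the side effect of the callback is required, so cancelling would be wrong and
the caller must wait for it instead:

	/* sketch only: my_wq, my_work and my_func are hypothetical */
	static void my_func(struct work_struct *work)
	{
		/* required side effect; skipping it is not an option */
	}

	static DECLARE_WORK(my_work, my_func);

	queue_work(my_wq, &my_work);
	flush_work(&my_work);	/* returns after my_func() has completed */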
Add the new helper, flush_work(work), which waits for the completion of the
specific work_struct. More precisely, it "flushes" the result of the last
queue_work() which is visible to the caller.
For example, this code

	queue_work(wq, work);
	/* WINDOW */
	queue_work(wq, work);

	flush_work(work);
doesn't necessarily work "as expected". What can happen in the WINDOW above
is:

	- wq starts the execution of work->func()
	- the caller migrates to another CPU

Now, after the 2nd queue_work() this work is active on the previous CPU, and
at the same time it is queued on another. In this case flush_work(work) may
return before the first work->func() completes.
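This is why the caller is expected to guarantee that the work is not requeued
before flushing. A minimal sketch of the intended usage (my_stop and my_work
are hypothetical; my_func() is assumed to test my_stop before re-queueing
itself):

	/* sketch only: my_stop and my_work are hypothetical */
	atomic_set(&my_stop, 1);	/* stop my_func() from re-queueing */
	flush_work(&my_work);		/* waits for the last queue_work() we did */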
It is trivial to add another helper

	int flush_work_sync(struct work_struct *work)
	{
		return flush_work(work) || wait_on_work(work);
	}

which works "more correctly", but it has to iterate over all CPUs and thus
it is much slower than flush_work().
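The slow path is easy to see: wait_on_work() cannot know which CPU last ran
the work, so it has to check every cwq. A simplified sketch of that existing
logic in kernel/workqueue.c:

	/* simplified sketch of the per-CPU wait in wait_on_work() */
	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);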
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Max Krasnyansky <maxk@qualcomm.com>
Acked-by: Jarek Poplawski <jarkao2@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1a4d9b0aa0
commit db70089722

2 changed files with 48 additions and 0 deletions
include/linux/workqueue.h:

@@ -201,6 +201,8 @@ extern int keventd_up(void);
 extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
+extern int flush_work(struct work_struct *work);
+
 extern int cancel_work_sync(struct work_struct *work);
 
 /*

kernel/workqueue.c:

@@ -423,6 +423,52 @@ void flush_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+/**
+ * flush_work - block until a work_struct's callback has terminated
+ * @work: the work which is to be flushed
+ *
+ * It is expected that, prior to calling flush_work(), the caller has
+ * arranged for the work to not be requeued, otherwise it doesn't make
+ * sense to use this function.
+ */
+int flush_work(struct work_struct *work)
+{
+	struct cpu_workqueue_struct *cwq;
+	struct list_head *prev;
+	struct wq_barrier barr;
+
+	might_sleep();
+	cwq = get_wq_data(work);
+	if (!cwq)
+		return 0;
+
+	prev = NULL;
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&work->entry)) {
+		/*
+		 * See the comment near try_to_grab_pending()->smp_rmb().
+		 * If it was re-queued under us we are not going to wait.
+		 */
+		smp_rmb();
+		if (unlikely(cwq != get_wq_data(work)))
+			goto out;
+		prev = &work->entry;
+	} else {
+		if (cwq->current_work != work)
+			goto out;
+		prev = &cwq->worklist;
+	}
+	insert_wq_barrier(cwq, &barr, prev->next);
+out:
+	spin_unlock_irq(&cwq->lock);
+	if (!prev)
+		return 0;
+
+	wait_for_completion(&barr.done);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(flush_work);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
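For context, the wq_barrier that flush_work() sleeps on is the same
completion-based barrier that flush_workqueue() already uses; earlier in this
file it is defined roughly as:

	struct wq_barrier {
		struct work_struct	work;
		struct completion	done;
	};

	static void wq_barrier_func(struct work_struct *work)
	{
		struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

		complete(&barr->done);
	}

insert_wq_barrier() queues this barrier at prev->next, so
wait_for_completion(&barr.done) returns only after everything up to and
including the flushed work has finished executing.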