workqueue: replace use of system_wq with system_percpu_wq
Currently, if a user enqueues a work item using schedule_delayed_work(), the
workqueue used is "system_wq" (a per-CPU wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a CPU is not specified). The same applies to
schedule_work(), which uses system_wq, and queue_work(), which again uses
WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without
refactoring the API.

system_wq is a per-CPU workqueue, yet nothing in its name tells about that CPU
affinity constraint, which is very often not required by users. Make it clear
by adding a new system_percpu_wq.

queue_work(), queue_delayed_work() and mod_delayed_work() will now use the new
per-CPU wq; if a user still sticks to the old name, a warning will be printed
along with a redirect to the new wq.

This patch adds the new system_percpu_wq, except for the mm, fs and net
subsystems, which are handled in separate patches. The old wq will be kept for
a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
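For illustration only, a minimal module-style sketch of how the rename reads at
a call site; it is not part of this patch. The example_* names are hypothetical,
system_percpu_wq is the alias this series introduces for the per-CPU system
workqueue, and the unbound alternative referred to in the comment is the
system_dfl_wq mentioned in the header hunks below.

#include <linux/module.h>
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
	pr_info("example work item ran\n");
}

static DECLARE_WORK(example_work, example_fn);
static DECLARE_DELAYED_WORK(example_dwork, example_fn);

static int __init example_init(void)
{
	/*
	 * schedule_work() queues on the per-CPU system workqueue; with
	 * this series that workqueue is spelled system_percpu_wq, so the
	 * call below is equivalent to
	 * queue_work(system_percpu_wq, &example_work).
	 */
	schedule_work(&example_work);

	/*
	 * Naming the workqueue explicitly makes the per-CPU affinity
	 * visible at the call site; callers that do not need CPU
	 * locality would pick an unbound workqueue instead.
	 */
	queue_delayed_work(system_percpu_wq, &example_dwork, HZ);

	return 0;
}

static void __exit example_exit(void)
{
	cancel_work_sync(&example_work);
	cancel_delayed_work_sync(&example_dwork);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");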
parent f6cfa602d2
commit a2be943b46
2 changed files with 12 additions and 12 deletions
@@ -434,10 +434,10 @@ enum wq_consts {
  * short queue flush time. Don't queue works which can run for too
  * long.
  *
- * system_highpri_wq is similar to system_wq but for work items which
+ * system_highpri_wq is similar to system_percpu_wq but for work items which
  * require WQ_HIGHPRI.
  *
- * system_long_wq is similar to system_wq but may host long running
+ * system_long_wq is similar to system_percpu_wq but may host long running
  * works. Queue flushing might take relatively long.
  *
  * system_dfl_wq is unbound workqueue. Workers are not bound to
@@ -445,13 +445,13 @@ enum wq_consts {
  * executed immediately as long as max_active limit is not reached and
  * resources are available.
  *
- * system_freezable_wq is equivalent to system_wq except that it's
+ * system_freezable_wq is equivalent to system_percpu_wq except that it's
  * freezable.
  *
  * *_power_efficient_wq are inclined towards saving power and converted
  * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
  * they are same as their non-power-efficient counterparts - e.g.
- * system_power_efficient_wq is identical to system_wq if
+ * system_power_efficient_wq is identical to system_percpu_wq if
  * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
  *
  * system_bh[_highpri]_wq are convenience interface to softirq. BH work items
@@ -708,7 +708,7 @@ static inline bool mod_delayed_work(struct workqueue_struct *wq,
  */
 static inline bool schedule_work_on(int cpu, struct work_struct *work)
 {
-	return queue_work_on(cpu, system_wq, work);
+	return queue_work_on(cpu, system_percpu_wq, work);
 }
 
 /**
@@ -727,7 +727,7 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
  */
 static inline bool schedule_work(struct work_struct *work)
 {
-	return queue_work(system_wq, work);
+	return queue_work(system_percpu_wq, work);
 }
 
 /**
@@ -770,15 +770,15 @@ extern void __warn_flushing_systemwide_wq(void)
 #define flush_scheduled_work()                                  \
 ({                                                              \
 	__warn_flushing_systemwide_wq();                        \
-	__flush_workqueue(system_wq);                           \
+	__flush_workqueue(system_percpu_wq);                    \
 })
 
 #define flush_workqueue(wq)                                     \
 ({                                                              \
 	struct workqueue_struct *_wq = (wq);                    \
 	                                                        \
-	if ((__builtin_constant_p(_wq == system_wq) &&          \
-	     _wq == system_wq) ||                               \
+	if ((__builtin_constant_p(_wq == system_percpu_wq) &&   \
+	     _wq == system_percpu_wq) ||                        \
 	    (__builtin_constant_p(_wq == system_highpri_wq) &&  \
 	     _wq == system_highpri_wq) ||                       \
 	    (__builtin_constant_p(_wq == system_long_wq) &&     \
@@ -807,7 +807,7 @@ extern void __warn_flushing_systemwide_wq(void)
 static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
 					    unsigned long delay)
 {
-	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
+	return queue_delayed_work_on(cpu, system_percpu_wq, dwork, delay);
 }
 
 /**
@@ -821,7 +821,7 @@ static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
 static inline bool schedule_delayed_work(struct delayed_work *dwork,
 					 unsigned long delay)
 {
-	return queue_delayed_work(system_wq, dwork, delay);
+	return queue_delayed_work(system_percpu_wq, dwork, delay);
 }
 
 #ifndef CONFIG_SMP
@@ -7668,7 +7668,7 @@ static int wq_watchdog_param_set_thresh(const char *val,
 	if (ret)
 		return ret;
 
-	if (system_wq)
+	if (system_percpu_wq)
 		wq_watchdog_set_thresh(thresh);
 	else
 		wq_watchdog_thresh = thresh;