workqueue: Make @flags handling consistent across set_work_data() and friends

- set_work_data() takes a separate @flags argument but just ORs it into
  @data. This is more confusing than helpful. Just take @data.

- Use the name @flags consistently and add the parameter to
  set_work_pool_and_{keep|clear}_pending(). This will be used by the planned
  disable/enable support.

No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
commit bccdc1faaf
parent afe928c1dc
Author: Tejun Heo <tj@kernel.org>
Date:   2024-02-20 19:36:15 -10:00


diff --git a/kernel/workqueue.c b/kernel/workqueue.c
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -777,29 +777,28 @@ static int work_next_color(int color)
  * but stay off timer and worklist for arbitrarily long and nobody should
  * try to steal the PENDING bit.
  */
-static inline void set_work_data(struct work_struct *work, unsigned long data,
-				 unsigned long flags)
+static inline void set_work_data(struct work_struct *work, unsigned long data)
 {
 	WARN_ON_ONCE(!work_pending(work));
-	atomic_long_set(&work->data, data | flags | work_static(work));
+	atomic_long_set(&work->data, data | work_static(work));
 }
 
 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
-			 unsigned long extra_flags)
+			 unsigned long flags)
 {
-	set_work_data(work, (unsigned long)pwq,
-		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
+	set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING |
+		      WORK_STRUCT_PWQ | flags);
 }
 
 static void set_work_pool_and_keep_pending(struct work_struct *work,
-					   int pool_id)
+					   int pool_id, unsigned long flags)
 {
-	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
-		      WORK_STRUCT_PENDING);
+	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
+		      WORK_STRUCT_PENDING | flags);
 }
 
 static void set_work_pool_and_clear_pending(struct work_struct *work,
-					    int pool_id)
+					    int pool_id, unsigned long flags)
 {
 	/*
 	 * The following wmb is paired with the implied mb in
@@ -808,7 +807,8 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
 	 * owner.
 	 */
 	smp_wmb();
-	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
+		      flags);
 	/*
 	 * The following mb guarantees that previous clear of a PENDING bit
 	 * will not be reordered with any speculative LOADS or STORES from
@@ -909,7 +909,7 @@ static void mark_work_canceling(struct work_struct *work)
 	unsigned long pool_id = get_work_pool_id(work);
 
 	pool_id <<= WORK_OFFQ_POOL_SHIFT;
-	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
+	set_work_data(work, pool_id | WORK_STRUCT_PENDING | WORK_OFFQ_CANCELING);
 }
 
 static bool work_is_canceling(struct work_struct *work)
@@ -2127,7 +2127,7 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
 		 * this destroys work->data needed by the next step, stash it.
 		 */
 		work_data = *work_data_bits(work);
-		set_work_pool_and_keep_pending(work, pool->id);
+		set_work_pool_and_keep_pending(work, pool->id, 0);
 
 		/* must be the last step, see the function comment */
 		pwq_dec_nr_in_flight(pwq, work_data);
@@ -3205,7 +3205,7 @@ __acquires(&pool->lock)
 	 * PENDING and queued state changes happen together while IRQ is
 	 * disabled.
 	 */
-	set_work_pool_and_clear_pending(work, pool->id);
+	set_work_pool_and_clear_pending(work, pool->id, 0);
 
 	pwq->stats[PWQ_STAT_STARTED]++;
 	raw_spin_unlock_irq(&pool->lock);
@@ -4188,7 +4188,7 @@ static bool __cancel_work(struct work_struct *work, u32 cflags)
 	if (unlikely(ret < 0))
 		return false;
 
-	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
+	set_work_pool_and_clear_pending(work, get_work_pool_id(work), 0);
 	local_irq_restore(irq_flags);
 	return ret;
 }
@@ -4215,7 +4215,7 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 	 * with prepare_to_wait() above so that either waitqueue_active() is
 	 * visible here or !work_is_canceling() is visible there.
 	 */
-	set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE);
+	set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, 0);
 
 	if (waitqueue_active(&wq_cancel_waitq))
 		__wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);
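
To see the convention the diff settles on in isolation, here is a minimal
user-space sketch: callers OR the flag bits and the shifted pool ID into a
single word and hand set_work_data() one @data argument. The bit positions,
constants, and the C11 atomic type below are illustrative stand-ins, not the
kernel's actual definitions.

/* Illustrative bit layout only; not the kernel's real values. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define WORK_STRUCT_PENDING	(1UL << 0)
#define WORK_STRUCT_PWQ		(1UL << 1)
#define WORK_OFFQ_CANCELING	(1UL << 2)
#define WORK_OFFQ_POOL_SHIFT	3

struct work_struct {
	atomic_ulong data;	/* stand-in for the kernel's atomic_long_t */
};

/* Single @data argument; callers pre-OR whatever flag bits they need. */
static inline void set_work_data(struct work_struct *work, unsigned long data)
{
	atomic_store(&work->data, data);
}

/* @flags is passed straight through, as in the reworked helpers above. */
static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id, unsigned long flags)
{
	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
		      WORK_STRUCT_PENDING | flags);
}

int main(void)
{
	struct work_struct work = { 0 };

	/* Existing callers pass 0 for @flags, matching the diff. */
	set_work_pool_and_keep_pending(&work, 5, 0);

	unsigned long data = atomic_load(&work.data);
	assert(data & WORK_STRUCT_PENDING);
	assert((data >> WORK_OFFQ_POOL_SHIFT) == 5);
	printf("data=%#lx pool=%lu\n", data, data >> WORK_OFFQ_POOL_SHIFT);
	return 0;
}

The point of the restructuring is visible here: since set_work_data() no
longer ORs a second argument in, every bit that ends up in the word appears
explicitly at the call site, and the new @flags parameter on the
set_work_pool_and_{keep|clear}_pending() helpers gives the planned
disable/enable support a slot to inject extra off-queue bits.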