nfsd: eliminate cl_ra_cblist and NFSD4_CLIENT_CB_RECALL_ANY

deleg_reaper() walks the client_lru list and puts any suitable entries
onto a local "cblist" via the embedded cl_ra_cblist list_head. It then
walks those entries outside the spinlock and queues a callback for each
one.

None of the operations that deleg_reaper() performs outside
nn->client_lock can block, so just queue the workqueue jobs while still
holding nn->client_lock instead.
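The reason this is safe: queue_work() never sleeps, so it may be called
in atomic context. A minimal sketch of the idea, with hypothetical
demo_* names rather than the nfsd code:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_work_fn(struct work_struct *work)
{
	/* Runs later in process context, with demo_lock not held. */
}

static DECLARE_WORK(demo_work, demo_work_fn);

static void demo_kick(void)
{
	spin_lock(&demo_lock);
	/*
	 * queue_work() never sleeps: it atomically marks the work item
	 * pending and links it onto the workqueue, so calling it while
	 * holding a spinlock (or otherwise in atomic context) is fine.
	 */
	queue_work(system_wq, &demo_work);
	spin_unlock(&demo_lock);
}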

Also, the NFSD4_CLIENT_CB_RECALL_ANY and NFSD4_CALLBACK_RUNNING flags
serve an identical purpose now. Drop the NFSD4_CLIENT_CB_RECALL_ANY flag
and just use the one in the callback.
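With a single flag, "is a RECALL_ANY callback already queued or running
for this client?" collapses into one atomic test_and_set_bit() on the
callback's cb_flags word, which is cleared again once the callback
finishes. Roughly, as a sketch with hypothetical demo_* names:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_CALLBACK_RUNNING	0	/* bit number in cb_flags */

struct demo_callback {
	unsigned long cb_flags;
};

/* Returns true if the caller won the right to queue this callback. */
static bool demo_claim_callback(struct demo_callback *cb)
{
	/* Atomic: sets the bit and reports whether it was already set. */
	return !test_and_set_bit(DEMO_CALLBACK_RUNNING, &cb->cb_flags);
}

/* Called once the callback has finished, so it can be queued again. */
static void demo_callback_done(struct demo_callback *cb)
{
	clear_bit(DEMO_CALLBACK_RUNNING, &cb->cb_flags);
}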

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Jeff Layton 2025-02-20 11:47:14 -05:00 committed by Chuck Lever
parent 1054e8ffc5
commit 424dd3df1f
2 changed files with 3 additions and 15 deletions

fs/nfsd/nfs4state.c

@@ -3175,7 +3175,6 @@ nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
 {
        struct nfs4_client *clp = cb->cb_clp;

-       clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
        drop_client(clp);
 }

@@ -6881,7 +6880,6 @@ deleg_reaper(struct nfsd_net *nn)
 {
        struct list_head *pos, *next;
        struct nfs4_client *clp;
-       LIST_HEAD(cblist);

        spin_lock(&nn->client_lock);
        list_for_each_safe(pos, next, &nn->client_lru) {
@@ -6893,31 +6891,23 @@ deleg_reaper(struct nfsd_net *nn)
                        continue;
                if (atomic_read(&clp->cl_delegs_in_recall))
                        continue;
-               if (test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags))
+               if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &clp->cl_ra->ra_cb.cb_flags))
                        continue;
                if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5)
                        continue;
                if (clp->cl_cb_state != NFSD4_CB_UP)
                        continue;
-               list_add(&clp->cl_ra_cblist, &cblist);

                /* release in nfsd4_cb_recall_any_release */
                kref_get(&clp->cl_nfsdfs.cl_ref);
-               set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
                clp->cl_ra_time = ktime_get_boottime_seconds();
-       }
-       spin_unlock(&nn->client_lock);
-
-       while (!list_empty(&cblist)) {
-               clp = list_first_entry(&cblist, struct nfs4_client,
-                                       cl_ra_cblist);
-               list_del_init(&clp->cl_ra_cblist);
                clp->cl_ra->ra_keep = 0;
                clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) |
                                                BIT(RCA4_TYPE_MASK_WDATA_DLG);
                trace_nfsd_cb_recall_any(clp->cl_ra);
-               nfsd4_try_run_cb(&clp->cl_ra->ra_cb);
+               nfsd4_run_cb(&clp->cl_ra->ra_cb);
        }
+       spin_unlock(&nn->client_lock);
 }

 static void

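One detail worth noting in the hunk above: the call changes from
nfsd4_try_run_cb() to nfsd4_run_cb(). Assuming nfsd4_try_run_cb() is
essentially a test_and_set_bit(NFSD4_CALLBACK_RUNNING) guard in front of
nfsd4_run_cb() (the sketch below shows that assumption and is not a
verbatim copy of the helper), the guard would now always see the bit
already set, because deleg_reaper() claims it in its candidate check;
hence the unconditional nfsd4_run_cb() call.

/*
 * Assumed shape of the guard helper (illustrative sketch only; relies on
 * nfsd's internal struct nfsd4_callback, NFSD4_CALLBACK_RUNNING and
 * nfsd4_run_cb): queue the callback only if it is not already queued or
 * running.
 */
static inline void demo_try_run_cb(struct nfsd4_callback *cb)
{
	if (!test_and_set_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags))
		nfsd4_run_cb(cb);
}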
fs/nfsd/state.h

@@ -454,7 +454,6 @@ struct nfs4_client {
 #define NFSD4_CLIENT_UPCALL_LOCK       (5)     /* upcall serialization */
 #define NFSD4_CLIENT_CB_FLAG_MASK      (1 << NFSD4_CLIENT_CB_UPDATE | \
                                         1 << NFSD4_CLIENT_CB_KILL)
-#define NFSD4_CLIENT_CB_RECALL_ANY     (6)
        unsigned long                   cl_flags;

        struct workqueue_struct         *cl_callback_wq;
@@ -500,7 +499,6 @@ struct nfs4_client {

        struct nfsd4_cb_recall_any      *cl_ra;
        time64_t                        cl_ra_time;
-       struct list_head                cl_ra_cblist;
 };

 /* struct nfs4_client_reset