mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	Move the management of the client connection cache to the I/O thread rather than managing it from the namespace as an aggregate across all the local endpoints within the namespace. This will allow a load of locking to be got rid of in a future patch as only the I/O thread will be looking at this. The downside is that the total number of cached connections on the system can get higher because the limit is now per-local rather than per-netns. We can, however, keep the number of client conns in use across the entire netns and use that to reduce the expiration time of idle connections. Signed-off-by: David Howells <dhowells@redhat.com> cc: Marc Dionne <marc.dionne@auristor.com> cc: linux-afs@lists.infradead.org
		
			
				
	
	
		
			120 lines
		
	
	
	
		
			3.4 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			120 lines
		
	
	
	
		
			3.4 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
// SPDX-License-Identifier: GPL-2.0-or-later
 | 
						|
/* rxrpc network namespace handling.
 | 
						|
 *
 | 
						|
 * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
 | 
						|
 * Written by David Howells (dhowells@redhat.com)
 | 
						|
 */
 | 
						|
 | 
						|
#include <linux/proc_fs.h>
 | 
						|
#include "ar-internal.h"
 | 
						|
 | 
						|
/* Generic-netns array index for rxrpc's per-netns state (struct rxrpc_net);
 * registered via rxrpc_net_ops.id below — presumably assigned when the
 * pernet subsystem is registered.
 */
unsigned int rxrpc_net_id;
 | 
						|
 | 
						|
static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
 | 
						|
{
 | 
						|
	struct rxrpc_net *rxnet =
 | 
						|
		container_of(timer, struct rxrpc_net, service_conn_reap_timer);
 | 
						|
 | 
						|
	if (rxnet->live)
 | 
						|
		rxrpc_queue_work(&rxnet->service_conn_reaper);
 | 
						|
}
 | 
						|
 | 
						|
static void rxrpc_peer_keepalive_timeout(struct timer_list *timer)
 | 
						|
{
 | 
						|
	struct rxrpc_net *rxnet =
 | 
						|
		container_of(timer, struct rxrpc_net, peer_keepalive_timer);
 | 
						|
 | 
						|
	if (rxnet->live)
 | 
						|
		rxrpc_queue_work(&rxnet->peer_keepalive_work);
 | 
						|
}
 | 
						|
 | 
						|
/*
 | 
						|
 * Initialise a per-network namespace record.
 | 
						|
 */
 | 
						|
static __net_init int rxrpc_init_net(struct net *net)
 | 
						|
{
 | 
						|
	struct rxrpc_net *rxnet = rxrpc_net(net);
 | 
						|
	int ret, i;
 | 
						|
 | 
						|
	rxnet->live = true;
 | 
						|
	get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
 | 
						|
	rxnet->epoch |= RXRPC_RANDOM_EPOCH;
 | 
						|
 | 
						|
	INIT_LIST_HEAD(&rxnet->calls);
 | 
						|
	spin_lock_init(&rxnet->call_lock);
 | 
						|
	atomic_set(&rxnet->nr_calls, 1);
 | 
						|
 | 
						|
	atomic_set(&rxnet->nr_conns, 1);
 | 
						|
	INIT_LIST_HEAD(&rxnet->conn_proc_list);
 | 
						|
	INIT_LIST_HEAD(&rxnet->service_conns);
 | 
						|
	rwlock_init(&rxnet->conn_lock);
 | 
						|
	INIT_WORK(&rxnet->service_conn_reaper,
 | 
						|
		  rxrpc_service_connection_reaper);
 | 
						|
	timer_setup(&rxnet->service_conn_reap_timer,
 | 
						|
		    rxrpc_service_conn_reap_timeout, 0);
 | 
						|
 | 
						|
	atomic_set(&rxnet->nr_client_conns, 0);
 | 
						|
 | 
						|
	INIT_HLIST_HEAD(&rxnet->local_endpoints);
 | 
						|
	mutex_init(&rxnet->local_mutex);
 | 
						|
 | 
						|
	hash_init(rxnet->peer_hash);
 | 
						|
	spin_lock_init(&rxnet->peer_hash_lock);
 | 
						|
	for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
 | 
						|
		INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
 | 
						|
	INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
 | 
						|
	timer_setup(&rxnet->peer_keepalive_timer,
 | 
						|
		    rxrpc_peer_keepalive_timeout, 0);
 | 
						|
	INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
 | 
						|
	rxnet->peer_keepalive_base = ktime_get_seconds();
 | 
						|
 | 
						|
	ret = -ENOMEM;
 | 
						|
	rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
 | 
						|
	if (!rxnet->proc_net)
 | 
						|
		goto err_proc;
 | 
						|
 | 
						|
	proc_create_net("calls", 0444, rxnet->proc_net, &rxrpc_call_seq_ops,
 | 
						|
			sizeof(struct seq_net_private));
 | 
						|
	proc_create_net("conns", 0444, rxnet->proc_net,
 | 
						|
			&rxrpc_connection_seq_ops,
 | 
						|
			sizeof(struct seq_net_private));
 | 
						|
	proc_create_net("peers", 0444, rxnet->proc_net,
 | 
						|
			&rxrpc_peer_seq_ops,
 | 
						|
			sizeof(struct seq_net_private));
 | 
						|
	proc_create_net("locals", 0444, rxnet->proc_net,
 | 
						|
			&rxrpc_local_seq_ops,
 | 
						|
			sizeof(struct seq_net_private));
 | 
						|
	proc_create_net_single_write("stats", S_IFREG | 0644, rxnet->proc_net,
 | 
						|
				     rxrpc_stats_show, rxrpc_stats_clear, NULL);
 | 
						|
	return 0;
 | 
						|
 | 
						|
err_proc:
 | 
						|
	rxnet->live = false;
 | 
						|
	return ret;
 | 
						|
}
 | 
						|
 | 
						|
/*
 | 
						|
 * Clean up a per-network namespace record.
 | 
						|
 */
 | 
						|
static __net_exit void rxrpc_exit_net(struct net *net)
 | 
						|
{
 | 
						|
	struct rxrpc_net *rxnet = rxrpc_net(net);
 | 
						|
 | 
						|
	rxnet->live = false;
 | 
						|
	del_timer_sync(&rxnet->peer_keepalive_timer);
 | 
						|
	cancel_work_sync(&rxnet->peer_keepalive_work);
 | 
						|
	/* Remove the timer again as the worker may have restarted it. */
 | 
						|
	del_timer_sync(&rxnet->peer_keepalive_timer);
 | 
						|
	rxrpc_destroy_all_calls(rxnet);
 | 
						|
	rxrpc_destroy_all_connections(rxnet);
 | 
						|
	rxrpc_destroy_all_peers(rxnet);
 | 
						|
	rxrpc_destroy_all_locals(rxnet);
 | 
						|
	proc_remove(rxnet->proc_net);
 | 
						|
}
 | 
						|
 | 
						|
struct pernet_operations rxrpc_net_ops = {
 | 
						|
	.init	= rxrpc_init_net,
 | 
						|
	.exit	= rxrpc_exit_net,
 | 
						|
	.id	= &rxrpc_net_id,
 | 
						|
	.size	= sizeof(struct rxrpc_net),
 | 
						|
};
 |