mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 02:30:34 +02:00

	sock: ulimit on MSG_ZEROCOPY pages
Bound the number of pages that a user may pin.
Follow the lead of perf tools to maintain a per-user bound on memory
locked pages, as in commit 789f90fcf6 ("perf_counter: per user mlock gift").
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
			
			
This commit is contained in:
parent 4ab6c99d99
commit a91dbff551

3 changed files with 55 additions and 1 deletion
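
For context, a minimal userspace sketch of the behavior this change introduces: with the per-user bound in place, a MSG_ZEROCOPY send can fail with ENOBUFS once the sender's RLIMIT_MEMLOCK page budget is exhausted. The socket setup, constant fallbacks, and error handling below are illustrative assumptions, not part of this patch.

/* Illustrative only: exercise MSG_ZEROCOPY against RLIMIT_MEMLOCK. */
#include <errno.h>
#include <stdio.h>
#include <sys/resource.h>
#include <sys/socket.h>

/* Values from asm-generic/include/uapi headers; may differ on some archs. */
#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY	60		/* since Linux 4.14 */
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY	0x4000000
#endif

int send_zerocopy(int fd, const char *buf, size_t len)
{
	int one = 1;

	/* Opt in to zerocopy; without this, send() copies as usual. */
	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
		return -1;

	if (send(fd, buf, len, MSG_ZEROCOPY) < 0) {
		if (errno == ENOBUFS) {
			/* Pinned-page budget exhausted: either raise the
			 * soft RLIMIT_MEMLOCK (up to the hard limit) or
			 * fall back to a regular copying send().
			 */
			struct rlimit rl;

			getrlimit(RLIMIT_MEMLOCK, &rl);
			fprintf(stderr, "MEMLOCK soft limit: %llu bytes\n",
				(unsigned long long)rl.rlim_cur);
			return send(fd, buf, len, 0) < 0 ? -1 : 0;
		}
		return -1;
	}
	return 0;
}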
				
			
include/linux/sched/user.h
@@ -36,7 +36,8 @@ struct user_struct {
 	struct hlist_node uidhash_node;
 	kuid_t uid;
 
-#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \
+    defined(CONFIG_NET)
 	atomic_long_t locked_vm;
 #endif
 };

include/linux/skbuff.h
@@ -457,6 +457,11 @@ struct ubuf_info {
 		};
 	};
 	atomic_t refcnt;
+
+	struct mmpin {
+		struct user_struct *user;
+		unsigned int num_pg;
+	} mmp;
 };
 
 #define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

net/core/skbuff.c
@@ -897,6 +897,44 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
+static int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
+{
+	unsigned long max_pg, num_pg, new_pg, old_pg;
+	struct user_struct *user;
+
+	if (capable(CAP_IPC_LOCK) || !size)
+		return 0;
+
+	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
+	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	user = mmp->user ? : current_user();
+
+	do {
+		old_pg = atomic_long_read(&user->locked_vm);
+		new_pg = old_pg + num_pg;
+		if (new_pg > max_pg)
+			return -ENOBUFS;
+	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
+		 old_pg);
+
+	if (!mmp->user) {
+		mmp->user = get_uid(user);
+		mmp->num_pg = num_pg;
+	} else {
+		mmp->num_pg += num_pg;
+	}
+
+	return 0;
+}
+
+static void mm_unaccount_pinned_pages(struct mmpin *mmp)
+{
+	if (mmp->user) {
+		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
+		free_uid(mmp->user);
+	}
+}
+
 struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
 {
 	struct ubuf_info *uarg;
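
To make the accounting concrete: each request of size bytes is charged (size >> PAGE_SHIFT) + 2 pages, the +2 covering partial head and tail pages when the buffer is not page aligned, and the per-user locked_vm total must stay within RLIMIT_MEMLOCK >> PAGE_SHIFT. A small illustrative calculation, assuming 4 KiB pages and a 64 KiB soft MEMLOCK limit (both assumptions, not values set by this patch):

/* Illustrative userspace arithmetic mirroring mm_account_pinned_pages(). */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */

int main(void)
{
	unsigned long memlock = 64 * 1024;	/* assumed 64 KiB soft limit */
	unsigned long max_pg = memlock >> PAGE_SHIFT;		/* 16 pages */
	unsigned long size = 10000;		/* one zerocopy send of 10000 bytes */
	unsigned long num_pg = (size >> PAGE_SHIFT) + 2;	/* 2 + 2 = 4 pages, worst case */

	/* Four such sends fit within the 16-page bound; a fifth would push
	 * locked_vm past max_pg and the kernel would fail it with -ENOBUFS.
	 */
	printf("charged %lu of %lu pages per send\n", num_pg, max_pg);
	return 0;
}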
@@ -913,6 +951,12 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
 
 	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
 	uarg = (void *)skb->cb;
+	uarg->mmp.user = NULL;
+
+	if (mm_account_pinned_pages(&uarg->mmp, size)) {
+		kfree_skb(skb);
+		return NULL;
+	}
 
 	uarg->callback = sock_zerocopy_callback;
 	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;

@@ -956,6 +1000,8 @@ struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
 
 		next = (u32)atomic_read(&sk->sk_zckey);
 		if ((u32)(uarg->id + uarg->len) == next) {
+			if (mm_account_pinned_pages(&uarg->mmp, size))
+				return NULL;
 			uarg->len++;
 			uarg->bytelen = bytelen;
 			atomic_set(&sk->sk_zckey, ++next);

@@ -1038,6 +1084,8 @@ EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
 void sock_zerocopy_put(struct ubuf_info *uarg)
 {
 	if (uarg && atomic_dec_and_test(&uarg->refcnt)) {
+		mm_unaccount_pinned_pages(&uarg->mmp);
+
 		if (uarg->callback)
			uarg->callback(uarg, uarg->zerocopy);
 		else
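
Note that the pages stay charged until the final reference on the ubuf_info drops in sock_zerocopy_put(), which happens after the completion notification has been queued. A hedged sketch of how a sender reads that completion from the socket error queue (the cmsg level/type follow the MSG_ZEROCOPY documentation for TCP over IPv4; the details here are illustrative, not taken from this patch):

/* Illustrative: read one zerocopy completion from the socket error queue. */
#include <errno.h>
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_IP
#define SOL_IP			IPPROTO_IP
#endif
#ifndef SO_EE_ORIGIN_ZEROCOPY
#define SO_EE_ORIGIN_ZEROCOPY	5	/* since Linux 4.14 */
#endif

int read_zerocopy_completion(int fd)
{
	char control[100];
	struct msghdr msg = { .msg_control = control,
			      .msg_controllen = sizeof(control) };
	struct sock_extended_err *serr;
	struct cmsghdr *cm;

	/* MSG_ERRQUEUE never blocks; EAGAIN means no completion pending. */
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return errno == EAGAIN ? 0 : -1;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level != SOL_IP || cm->cmsg_type != IP_RECVERR)
			continue;

		serr = (struct sock_extended_err *)CMSG_DATA(cm);
		if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY)
			continue;

		/* ee_info..ee_data is the range of completed sends; once
		 * their ubuf_info is released, the pinned pages for those
		 * sends have been unaccounted from locked_vm again.
		 */
		printf("zerocopy sends %u..%u completed\n",
		       serr->ee_info, serr->ee_data);
	}
	return 0;
}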