Fix the warning below by calling ptr_ring_consume_bh(), which uses
spin_[un]lock_bh().
[  179.064300] ================================
[  179.069073] WARNING: inconsistent lock state
[  179.073846] 4.18.0-rc2+ #18 Not tainted
[  179.078133] --------------------------------
[  179.082907] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
[  179.089637] swapper/21/0 [HC0[0]:SC1[1]:HE1:SE0] takes:
[  179.095478] 00000000963d1995 (&(&r->consumer_lock)->rlock){+.?.}, at: __page_pool_empty_ring+0x61/0x100
[  179.105988] {SOFTIRQ-ON-W} state was registered at:
[  179.111443]   _raw_spin_lock+0x35/0x50
[  179.115634]   __page_pool_empty_ring+0x61/0x100
[  179.120699]   page_pool_destroy+0x32/0x50
[  179.125204]   mlx5e_free_rq+0x38/0xc0 [mlx5_core]
[  179.130471]   mlx5e_close_channel+0x20/0x120 [mlx5_core]
[  179.136418]   mlx5e_close_channels+0x26/0x40 [mlx5_core]
[  179.142364]   mlx5e_close_locked+0x44/0x50 [mlx5_core]
[  179.148509]   mlx5e_close+0x42/0x60 [mlx5_core]
[  179.153936]   __dev_close_many+0xb1/0x120
[  179.158749]   dev_close_many+0xa2/0x170
[  179.163364]   rollback_registered_many+0x148/0x460
[  179.169047]   rollback_registered+0x56/0x90
[  179.174043]   unregister_netdevice_queue+0x7e/0x100
[  179.179816]   unregister_netdev+0x18/0x20
[  179.184623]   mlx5e_remove+0x2a/0x50 [mlx5_core]
[  179.190107]   mlx5_remove_device+0xe5/0x110 [mlx5_core]
[  179.196274]   mlx5_unregister_interface+0x39/0x90 [mlx5_core]
[  179.203028]   cleanup+0x5/0xbfc [mlx5_core]
[  179.208031]   __x64_sys_delete_module+0x16b/0x240
[  179.213640]   do_syscall_64+0x5a/0x210
[  179.218151]   entry_SYSCALL_64_after_hwframe+0x49/0xbe
[  179.224218] irq event stamp: 334398
[  179.228438] hardirqs last  enabled at (334398): [<ffffffffa511d8b7>] rcu_process_callbacks+0x1c7/0x790
[  179.239178] hardirqs last disabled at (334397): [<ffffffffa511d872>] rcu_process_callbacks+0x182/0x790
[  179.249931] softirqs last  enabled at (334386): [<ffffffffa509732e>] irq_enter+0x5e/0x70
[  179.259306] softirqs last disabled at (334387): [<ffffffffa509741c>] irq_exit+0xdc/0xf0
[  179.268584]
[  179.268584] other info that might help us debug this:
[  179.276572]  Possible unsafe locking scenario:
[  179.276572]
[  179.283877]        CPU0
[  179.286954]        ----
[  179.290033]   lock(&(&r->consumer_lock)->rlock);
[  179.295546]   <Interrupt>
[  179.298830]     lock(&(&r->consumer_lock)->rlock);
[  179.304550]
[  179.304550]  *** DEADLOCK ***
Fixes: ff7d6b27f8 ("page_pool: refurbish version of page_pool code")
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
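
The change described above is small: page_pool_destroy() calls __page_pool_empty_ring() from process context, while the RCU callback __page_pool_destroy_rcu() reaches the same function (and the same consumer_lock) from softirq context, which is the inconsistency lockdep reports. A minimal sketch of the before/after in __page_pool_empty_ring() (illustrative only, not the verbatim hunk of this commit):

	/* Empty recycle ring */
-	while ((page = ptr_ring_consume(&pool->ring))) {
+	while ((page = ptr_ring_consume_bh(&pool->ring))) {

ptr_ring_consume_bh() takes r->consumer_lock with spin_lock_bh(), so the process-context consumer runs with softirqs disabled and the lock usage stays consistent with the softirq-side users.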
page_pool.c · 317 lines · 8.4 KiB · C
/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL is for allowing page used for DMA sending,
	 * which is the XDP_TX use-case.
	 */
	if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
	    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
		return -EINVAL;

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err = 0;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}
	return pool;
}
EXPORT_SYMBOL(page_pool_create);

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
			/* Fast-path */
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
		/* Slower-path: Alloc array empty, time to refill
		 *
		 * Open-coded bulk ptr_ring consumer.
		 *
		 * Discussion: the ring consumer lock is not really
		 * needed due to the softirq/NAPI protection, but
		 * later need the ability to reclaim pages on the
		 * ring. Thus, keeping the locks.
		 */
		spin_lock(&r->consumer_lock);
		while ((page = __ptr_ring_consume(r))) {
			if (pool->alloc.count == PP_ALLOC_CACHE_REFILL)
				break;
			pool->alloc.cache[pool->alloc.count++] = page;
		}
		spin_unlock(&r->consumer_lock);
		return page;
	}

	/* Slow-path: Get page from locked ring queue */
	page = ptr_ring_consume(&pool->ring);
	return page;
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	struct page *page;
	gfp_t gfp = _gfp;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* FUTURE development:
	 *
	 * Current slow-path essentially falls back to single page
	 * allocations, which doesn't improve performance.  This code
	 * need bulk allocation support from the page allocator code.
	 */

	/* Cache was empty, do real allocation */
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use page->private for DMA-addr
	 * This mapping is kept for lifetime of page, until leaving pool.
	 */
	dma = dma_map_page(pool->p.dev, page, 0,
			   (PAGE_SIZE << pool->p.order),
			   pool->p.dma_dir);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	set_page_private(page, dma); /* page->private = dma; */

skip_dma_map:
	/* When page just alloc'ed is should/must have refcnt 1. */
	return page;
}

/* For using page_pool replace: alloc_pages() API calls, but provide
 * synchronization guarantee for allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);

/* Cleanup page_pool state from page */
static void __page_pool_clean_page(struct page_pool *pool,
				   struct page *page)
{
	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		return;

	/* DMA unmap */
	dma_unmap_page(pool->p.dev, page_private(page),
		       PAGE_SIZE << pool->p.order, pool->p.dma_dir);
	set_page_private(page, 0);
}

/* Return a page to the page allocator, cleaning up our state */
static void __page_pool_return_page(struct page_pool *pool, struct page *page)
{
	__page_pool_clean_page(pool, page);
	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool __page_pool_recycle_into_ring(struct page_pool *pool,
				   struct page *page)
{
	int ret;
	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool __page_pool_recycle_direct(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

void __page_pool_put_page(struct page_pool *pool,
			  struct page *page, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but have fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 */
	if (likely(page_ref_count(page) == 1)) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (allow_direct && in_serving_softirq())
			if (__page_pool_recycle_direct(page, pool))
				return;

		if (!__page_pool_recycle_into_ring(pool, page)) {
			/* Cache full, fallback to free pages */
			__page_pool_return_page(pool, page);
		}
		return;
	}
	/* Fallback/non-XDP mode: API user have elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In-case page_pool maintains the DMA mapping, API user must
	 * call page_pool_put_page once.  In this elevated refcnt
	 * case, the DMA is unmapped/released, as driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	__page_pool_clean_page(pool, page);
	put_page(page);
}
EXPORT_SYMBOL(__page_pool_put_page);

static void __page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		__page_pool_return_page(pool, page);
	}
}

static void __page_pool_destroy_rcu(struct rcu_head *rcu)
{
	struct page_pool *pool;

	pool = container_of(rcu, struct page_pool, rcu);

	WARN(pool->alloc.count, "API usage violation");

	__page_pool_empty_ring(pool);
	ptr_ring_cleanup(&pool->ring, NULL);
	kfree(pool);
}

/* Cleanup and release resources */
void page_pool_destroy(struct page_pool *pool)
{
	struct page *page;

	/* Empty alloc cache, assume caller made sure this is
	 * no-longer in use, and page_pool_alloc_pages() cannot be
	 * call concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		__page_pool_return_page(pool, page);
	}

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	__page_pool_empty_ring(pool);

	/* An xdp_mem_allocator can still ref page_pool pointer */
	call_rcu(&pool->rcu, __page_pool_destroy_rcu);
}
EXPORT_SYMBOL(page_pool_destroy);
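
For context, a minimal usage sketch built only on the calls defined above (page_pool_create(), page_pool_alloc_pages(), __page_pool_put_page(), page_pool_destroy()). The page_pool_params field names are inferred from the pool->p.* accesses in this file, and the my_* driver helpers are hypothetical:

#include <net/page_pool.h>
#include <linux/dma-direction.h>

/* Hypothetical driver RX setup; error handling trimmed for brevity. */
static struct page_pool *my_rx_pool_create(struct device *dev, int nid)
{
	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_DMA_MAP,	/* pool keeps DMA addr in page->private */
		.order     = 0,			/* order-0 pages */
		.pool_size = 256,		/* sizes the recycle ptr_ring */
		.nid       = nid,		/* NUMA node for alloc_pages_node() */
		.dev       = dev,		/* device passed to dma_map_page() */
		.dma_dir   = DMA_FROM_DEVICE,	/* or DMA_BIDIRECTIONAL for XDP_TX */
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}

/* RX refill from the driver's NAPI poll (softirq context).  The DMA
 * address of a mapped page can be read back via page_private(page).
 */
static struct page *my_rx_alloc(struct page_pool *pool)
{
	return page_pool_alloc_pages(pool, GFP_ATOMIC);
}

/* Recycle a dropped frame (e.g. XDP_DROP).  allow_direct=true is only
 * safe from the RX softirq/NAPI context that owns pool->alloc.
 */
static void my_rx_recycle(struct page_pool *pool, struct page *page)
{
	__page_pool_put_page(pool, page, true);
}

/* Teardown, once the RX queue is quiesced and no allocations can race. */
static void my_rx_pool_destroy(struct page_pool *pool)
{
	page_pool_destroy(pool);
}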