Mirror of https://github.com/torvalds/linux.git
	mm: Rename SLAB_DESTROY_BY_RCU to SLAB_TYPESAFE_BY_RCU
A group of Linux kernel hackers reported chasing a bug that resulted from their assumption that SLAB_DESTROY_BY_RCU provided an existence guarantee, that is, that no block from such a slab would be reallocated during an RCU read-side critical section. Of course, that is not the case. Instead, SLAB_DESTROY_BY_RCU only prevents freeing of an entire slab of blocks. However, there is a phrase for this, namely "type safety". This commit therefore renames SLAB_DESTROY_BY_RCU to SLAB_TYPESAFE_BY_RCU in order to avoid future instances of this sort of confusion.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: <linux-mm@kvack.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
[ paulmck: Add comments mentioning the old name, as requested by Eric Dumazet, in order to help people familiar with the old name find the new one. ]
Acked-by: David Rientjes <rientjes@google.com>
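To make the renamed guarantee concrete, here is a minimal illustrative sketch (hypothetical "struct foo" cache and helpers; not part of this commit). SLAB_TYPESAFE_BY_RCU defers the freeing of slab pages, not objects: under rcu_read_lock() a pointer into such a cache always refers to memory of the right type, but the object may have been freed and handed out again, so a reader must take a reference and then revalidate:

#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/init.h>

struct foo {
	atomic_t refcnt;	/* zero while the block sits freed */
	int key;
};

static struct kmem_cache *foo_cache;

static int __init foo_cache_init(void)
{
	/* hypothetical cache; the flag defers freeing of whole slab pages */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_TYPESAFE_BY_RCU, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

/*
 * WRONG under this flag: assuming *f cannot be recycled while inside
 * rcu_read_lock().  RIGHT: take a reference, then revalidate identity.
 */
static bool foo_tryget(struct foo *f, int key)
{
	if (!atomic_inc_not_zero(&f->refcnt))
		return false;		/* raced with kmem_cache_free() */
	if (f->key != key) {
		atomic_dec(&f->refcnt);	/* simplistic "put" for this sketch */
		return false;		/* block was recycled for another key */
	}
	return true;
}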
This commit is contained in:

parent 4495c08e84
commit 5f0d5a3ae7

30 changed files with 57 additions and 54 deletions
@@ -17,7 +17,7 @@ rcu_dereference.txt
 rcubarrier.txt
 	- RCU and Unloadable Modules
 rculist_nulls.txt
-	- RCU list primitives for use with SLAB_DESTROY_BY_RCU
+	- RCU list primitives for use with SLAB_TYPESAFE_BY_RCU
 rcuref.txt
 	- Reference-count design for elements of lists/arrays protected by RCU
 rcu.txt
@@ -1,5 +1,5 @@
 Using hlist_nulls to protect read-mostly linked lists and
-objects using SLAB_DESTROY_BY_RCU allocations.
+objects using SLAB_TYPESAFE_BY_RCU allocations.
 
 Please read the basics in Documentation/RCU/listRCU.txt
 
@@ -7,7 +7,7 @@ Using special makers (called 'nulls') is a convenient way
 to solve following problem :
 
 A typical RCU linked list managing objects which are
-allocated with SLAB_DESTROY_BY_RCU kmem_cache can
+allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can
 use following algos :
 
 1) Lookup algo
@@ -96,7 +96,7 @@ unlock_chain(); // typically a spin_unlock()
 3) Remove algo
 --------------
 Nothing special here, we can use a standard RCU hlist deletion.
-But thanks to SLAB_DESTROY_BY_RCU, beware a deleted object can be reused
+But thanks to SLAB_TYPESAFE_BY_RCU, beware a deleted object can be reused
 very very fast (before the end of RCU grace period)
 
 if (put_last_reference_on(obj) {
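For reference, the lookup these hunks describe boils down to the following sketch (hypothetical "struct obj", table and obj_put() helper; it follows the algorithm given in rculist_nulls.txt). The nulls value ending each chain encodes the bucket number, so a reader migrated onto another chain mid-walk detects the mismatch and restarts:

#include <linux/rculist_nulls.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

struct obj {
	atomic_t refcnt;
	int key;
	struct hlist_nulls_node obj_node;
};

void obj_put(struct obj *o);	/* assumed refcount-drop helper */

static struct obj *obj_lookup(struct hlist_nulls_head *table,
			      unsigned int slot, int key)
{
	struct obj *o;
	struct hlist_nulls_node *node;

	rcu_read_lock();
begin:
	hlist_nulls_for_each_entry_rcu(o, node, &table[slot], obj_node) {
		if (o->key != key)
			continue;
		if (!atomic_inc_not_zero(&o->refcnt))
			goto begin;		/* raced with a free */
		if (o->key != key) {		/* object was recycled */
			obj_put(o);
			goto begin;
		}
		rcu_read_unlock();
		return o;
	}
	/* chain ended with a foreign nulls value: we were moved; restart */
	if (get_nulls_value(node) != slot)
		goto begin;
	rcu_read_unlock();
	return NULL;
}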
@@ -925,7 +925,8 @@ d.	Do you need RCU grace periods to complete even in the face
 
 e.	Is your workload too update-intensive for normal use of
 	RCU, but inappropriate for other synchronization mechanisms?
-	If so, consider SLAB_DESTROY_BY_RCU.  But please be careful!
+	If so, consider SLAB_TYPESAFE_BY_RCU (which was originally
+	named SLAB_DESTROY_BY_RCU).  But please be careful!
 
 f.	Do you need read-side critical sections that are respected
 	even though they are in the middle of the idle loop, during
@@ -4552,7 +4552,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 	dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
 					SLAB_HWCACHE_ALIGN |
 					SLAB_RECLAIM_ACCOUNT |
-					SLAB_DESTROY_BY_RCU);
+					SLAB_TYPESAFE_BY_RCU);
 	if (!dev_priv->requests)
 		goto err_vmas;
 
@@ -493,7 +493,7 @@ static inline struct drm_i915_gem_request *
 __i915_gem_active_get_rcu(const struct i915_gem_active *active)
 {
 	/* Performing a lockless retrieval of the active request is super
-	 * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
 	 * slab of request objects will not be freed whilst we hold the
 	 * RCU read lock. It does not guarantee that the request itself
 	 * will not be freed and then *reused*. Viz,
@@ -1071,7 +1071,7 @@ int ldlm_init(void)
 	ldlm_lock_slab = kmem_cache_create("ldlm_locks",
 					   sizeof(struct ldlm_lock), 0,
 					   SLAB_HWCACHE_ALIGN |
-					   SLAB_DESTROY_BY_RCU, NULL);
+					   SLAB_TYPESAFE_BY_RCU, NULL);
 	if (!ldlm_lock_slab) {
 		kmem_cache_destroy(ldlm_resource_slab);
 		return -ENOMEM;
@@ -2340,7 +2340,7 @@ static int jbd2_journal_init_journal_head_cache(void)
 	jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
 				sizeof(struct journal_head),
 				0,		/* offset */
-				SLAB_TEMPORARY | SLAB_DESTROY_BY_RCU,
+				SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU,
 				NULL);		/* ctor */
 	retval = 0;
 	if (!jbd2_journal_head_cache) {
@@ -38,7 +38,7 @@ void signalfd_cleanup(struct sighand_struct *sighand)
 	/*
 	 * The lockless check can race with remove_wait_queue() in progress,
 	 * but in this case its caller should run under rcu_read_lock() and
-	 * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
+	 * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
 	 */
 	if (likely(!waitqueue_active(wqh)))
 		return;
@@ -229,7 +229,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
  *
  * Function returns NULL if no refcount could be obtained, or the fence.
  * This function handles acquiring a reference to a fence that may be
- * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
  * so long as the caller is using RCU on the pointer to the fence.
  *
  * An alternative mechanism is to employ a seqlock to protect a bunch of
@@ -257,7 +257,7 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
 		 * have successfully acquire a reference to it. If it no
 		 * longer matches, we are holding a reference to some other
 		 * reallocated pointer. This is possible if the allocator
-		 * is using a freelist like SLAB_DESTROY_BY_RCU where the
+		 * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
 		 * fence remains valid for the RCU grace period, but it
 		 * may be reallocated. When using such allocators, we are
 		 * responsible for ensuring the reference we get is to
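The pattern this comment documents can be condensed as follows, a simplified sketch of what dma_fence_get_rcu_safe() does (not the exact upstream body; the caller is assumed to hold rcu_read_lock()). Taking a reference is not enough on a SLAB_TYPESAFE_BY_RCU cache; the pointer must be re-read to confirm the reference landed on the fence currently installed at *fencep:

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

static struct dma_fence *
fence_get_rcu_safe_sketch(struct dma_fence * __rcu *fencep)
{
	for (;;) {
		struct dma_fence *fence;

		fence = rcu_dereference(*fencep);
		if (!fence)
			return NULL;

		/* dma_fence_get_rcu() is kref_get_unless_zero() underneath */
		if (!dma_fence_get_rcu(fence))
			continue;	/* fence was freed under us; retry */

		/* still the pointer we dereferenced? if not, it was recycled */
		if (fence == rcu_access_pointer(*fencep))
			return fence;

		dma_fence_put(fence);
	}
}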
@@ -28,7 +28,7 @@
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
 /*
- * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
+ * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
  * This delays freeing the SLAB page by a grace period, it does _NOT_
  * delay object freeing. This means that if you do kmem_cache_free()
@@ -61,8 +61,10 @@
  *
  * rcu_read_lock before reading the address, then rcu_read_unlock after
  * taking the spinlock within the structure expected at that address.
+ *
+ * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
-#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
@@ -993,7 +993,7 @@ struct smc_hashinfo;
 struct module;
 
 /*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
+ * caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes
  * un-modified. Special care is taken when initializing object to zero.
  */
 static inline void sk_prot_clear_nulls(struct sock *sk, int size)
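Concretely, the "special care" when zeroing means skipping the nulls node's ->next word, since a concurrent reader may still follow it out of a freed-and-recycled sock. A sketch of that idiom, reproduced from memory of the upstream sk_prot_clear_nulls() helper (treat as illustrative rather than the exact body):

#include <net/sock.h>
#include <linux/string.h>

static inline void sk_prot_clear_nulls_sketch(struct sock *sk, int size)
{
	/* zero everything that precedes the nulls node's ->next ... */
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	/* ... and everything after it, leaving ->next itself untouched */
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}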
@@ -1313,7 +1313,7 @@ void __cleanup_sighand(struct sighand_struct *sighand)
 	if (atomic_dec_and_test(&sighand->count)) {
 		signalfd_cleanup(sighand);
 		/*
-		 * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
+		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
 		 * without an RCU grace period, see __lock_task_sighand().
 		 */
 		kmem_cache_free(sighand_cachep, sighand);
@@ -2144,7 +2144,7 @@ void __init proc_caches_init(void)
 {
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
 			SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
 	signal_cachep = kmem_cache_create("signal_cache",
 			sizeof(struct signal_struct), 0,
@@ -1237,7 +1237,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 		}
 		/*
 		 * This sighand can be already freed and even reused, but
-		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
+		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
 		 * initializes ->siglock: this slab can't go away, it has
 		 * the same object type, ->siglock can't be reinitialized.
 		 *
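Condensed, the pattern __lock_task_sighand() relies on looks like this sketch (simplified; the real function also handles irqsave and other details). Type safety means ->siglock is always a valid, ctor-initialized lock, so it is safe to take it first and only then check whether the object is still the task's sighand:

#include <linux/sched/signal.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static struct sighand_struct *lock_task_sighand_sketch(struct task_struct *tsk)
{
	struct sighand_struct *sighand;

	for (;;) {
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (!sighand) {
			rcu_read_unlock();
			return NULL;
		}
		/* safe even on a recycled object: the lock stays valid */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			return sighand;		/* returned with siglock held */
		}
		/* sighand was freed and recycled meanwhile; drop and retry */
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
	}
}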
@@ -413,7 +413,7 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 	*size += sizeof(struct kasan_alloc_meta);
 
 	/* Add free meta. */
-	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
+	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
 	    cache->object_size < sizeof(struct kasan_free_meta)) {
 		cache->kasan_info.free_meta_offset = *size;
 		*size += sizeof(struct kasan_free_meta);
@@ -561,7 +561,7 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
 
 	/* RCU slabs could be legally used after free within the RCU period */
-	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
 		return;
 
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
@@ -572,7 +572,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
 	s8 shadow_byte;
 
 	/* RCU slabs could be legally used after free within the RCU period */
-	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
 		return false;
 
 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
@@ -95,7 +95,7 @@ void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
 {
 	/* TODO: RCU freeing is unsupported for now; hide false positives. */
-	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
+	if (!s->ctor && !(s->flags & SLAB_TYPESAFE_BY_RCU))
 		kmemcheck_mark_freed(object, size);
 }
 
@@ -430,7 +430,7 @@ static void anon_vma_ctor(void *data)
 void __init anon_vma_init(void)
 {
 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
-			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
+			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
 			anon_vma_ctor);
 	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
 			SLAB_PANIC|SLAB_ACCOUNT);
@@ -481,7 +481,7 @@ struct anon_vma *page_get_anon_vma(struct page *page)
 	 * If this page is still mapped, then its anon_vma cannot have been
 	 * freed.  But if it has been unmapped, we have no security against the
 	 * anon_vma structure being freed and reused (for another anon_vma:
-	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
+	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
 	 * above cannot corrupt).
 	 */
 	if (!page_mapped(page)) {
@@ -1728,7 +1728,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page)
 
 	freelist = page->freelist;
 	slab_destroy_debugcheck(cachep, page);
-	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
+	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
 		call_rcu(&page->rcu_head, kmem_rcu_free);
 	else
 		kmem_freepages(cachep, page);
@@ -1924,7 +1924,7 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
 
 	cachep->num = 0;
 
-	if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
+	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
 		return false;
 
 	left = calculate_slab_order(cachep, size,
@@ -2030,7 +2030,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
 						2 * sizeof(unsigned long long)))
 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
-	if (!(flags & SLAB_DESTROY_BY_RCU))
+	if (!(flags & SLAB_TYPESAFE_BY_RCU))
 		flags |= SLAB_POISON;
 #endif
 #endif
@@ -126,7 +126,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
+			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
 
 #if defined(CONFIG_DEBUG_SLAB)
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -415,7 +415,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	 * back there or track user information then we can
 	 * only use the space before that information.
 	 */
-	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
 		return s->inuse;
 	/*
 	 * Else we can use all the padding etc for the allocation
@@ -39,7 +39,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  * Set of flags that will prevent slab merging
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
+		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
 		SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
@@ -500,7 +500,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 	struct kmem_cache *s, *s2;
 
 	/*
-	 * On destruction, SLAB_DESTROY_BY_RCU kmem_caches are put on the
+	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
 	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
 	 * through RCU and and the associated kmem_cache are dereferenced
 	 * while freeing the pages, so the kmem_caches should be freed only
@@ -537,7 +537,7 @@ static int shutdown_cache(struct kmem_cache *s)
 	memcg_unlink_cache(s);
 	list_del(&s->list);
 
-	if (s->flags & SLAB_DESTROY_BY_RCU) {
+	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
 		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
 		schedule_work(&slab_caches_to_rcu_destroy_work);
 	} else {
@@ -126,7 +126,7 @@ static inline void clear_slob_page_free(struct page *sp)
 
 /*
  * struct slob_rcu is inserted at the tail of allocated slob blocks, which
- * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
  * the block using call_rcu.
  */
 struct slob_rcu {
@@ -524,7 +524,7 @@ EXPORT_SYMBOL(ksize);
 
 int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	if (flags & SLAB_DESTROY_BY_RCU) {
+	if (flags & SLAB_TYPESAFE_BY_RCU) {
 		/* leave room for rcu footer at the end of object */
 		c->size += sizeof(struct slob_rcu);
 	}
@@ -598,7 +598,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
 	kmemleak_free_recursive(b, c->flags);
-	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
 		slob_rcu->size = c->size;
mm/slub.c (12 lines changed):
@@ -1687,7 +1687,7 @@ static void rcu_free_slab(struct rcu_head *h)
 
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
-	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
+	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
 		struct rcu_head *head;
 
 		if (need_reserve_slab_rcu) {
@@ -2963,7 +2963,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 	 * slab_free_freelist_hook() could have put the items into quarantine.
 	 * If so, no need to free them.
 	 */
-	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_DESTROY_BY_RCU))
+	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
 		return;
 	do_slab_free(s, page, head, tail, cnt, addr);
 }
@@ -3433,7 +3433,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * the slab may touch the object after free or before allocation
 	 * then we should never poison the object itself.
 	 */
-	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
+	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
 			!s->ctor)
 		s->flags |= __OBJECT_POISON;
 	else
@@ -3455,7 +3455,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 */
 	s->inuse = size;
 
-	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+	if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
 		s->ctor)) {
 		/*
 		 * Relocate free pointer after the object if it is not
@@ -3537,7 +3537,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
 	s->reserved = 0;
 
-	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+	if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
 		s->reserved = sizeof(struct rcu_head);
 
 	if (!calculate_sizes(s, -1))
@@ -5042,7 +5042,7 @@ SLAB_ATTR_RO(cache_dma);
 
 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
 }
 SLAB_ATTR_RO(destroy_by_rcu);
 
						 | 
				
			
			@ -950,7 +950,7 @@ static struct proto dccp_v4_prot = {
 | 
			
		|||
	.orphan_count		= &dccp_orphan_count,
 | 
			
		||||
	.max_header		= MAX_DCCP_HEADER,
 | 
			
		||||
	.obj_size		= sizeof(struct dccp_sock),
 | 
			
		||||
	.slab_flags		= SLAB_DESTROY_BY_RCU,
 | 
			
		||||
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
 | 
			
		||||
	.rsk_prot		= &dccp_request_sock_ops,
 | 
			
		||||
	.twsk_prot		= &dccp_timewait_sock_ops,
 | 
			
		||||
	.h.hashinfo		= &dccp_hashinfo,
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
@@ -1012,7 +1012,7 @@ static struct proto dccp_v6_prot = {
 	.orphan_count	   = &dccp_orphan_count,
 	.max_header	   = MAX_DCCP_HEADER,
 	.obj_size	   = sizeof(struct dccp6_sock),
-	.slab_flags	   = SLAB_DESTROY_BY_RCU,
+	.slab_flags	   = SLAB_TYPESAFE_BY_RCU,
 	.rsk_prot	   = &dccp6_request_sock_ops,
 	.twsk_prot	   = &dccp6_timewait_sock_ops,
 	.h.hashinfo	   = &dccp_hashinfo,
@@ -2398,7 +2398,7 @@ struct proto tcp_prot = {
 	.sysctl_rmem		= sysctl_tcp_rmem,
 	.max_header		= MAX_TCP_HEADER,
 	.obj_size		= sizeof(struct tcp_sock),
-	.slab_flags		= SLAB_DESTROY_BY_RCU,
+	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
 	.twsk_prot		= &tcp_timewait_sock_ops,
 	.rsk_prot		= &tcp_request_sock_ops,
 	.h.hashinfo		= &tcp_hashinfo,
@@ -1919,7 +1919,7 @@ struct proto tcpv6_prot = {
 	.sysctl_rmem		= sysctl_tcp_rmem,
 	.max_header		= MAX_TCP_HEADER,
 	.obj_size		= sizeof(struct tcp6_sock),
-	.slab_flags		= SLAB_DESTROY_BY_RCU,
+	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
 	.twsk_prot		= &tcp6_timewait_sock_ops,
 	.rsk_prot		= &tcp6_request_sock_ops,
 	.h.hashinfo		= &tcp_hashinfo,
@@ -142,7 +142,7 @@ static struct proto llc_proto = {
 	.name	  = "LLC",
 	.owner	  = THIS_MODULE,
 	.obj_size = sizeof(struct llc_sock),
-	.slab_flags = SLAB_DESTROY_BY_RCU,
+	.slab_flags = SLAB_TYPESAFE_BY_RCU,
 };
 
 /**
@@ -506,7 +506,7 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
 again:
 	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
 		if (llc_estab_match(sap, daddr, laddr, rc)) {
-			/* Extra checks required by SLAB_DESTROY_BY_RCU */
+			/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
 			if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
 				goto again;
 			if (unlikely(llc_sk(rc)->sap != sap ||
@@ -565,7 +565,7 @@ static struct sock *__llc_lookup_listener(struct llc_sap *sap,
 again:
 	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
 		if (llc_listener_match(sap, laddr, rc)) {
-			/* Extra checks required by SLAB_DESTROY_BY_RCU */
+			/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
 			if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
 				goto again;
 			if (unlikely(llc_sk(rc)->sap != sap ||
@@ -328,7 +328,7 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
 again:
 	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
 		if (llc_dgram_match(sap, laddr, rc)) {
-			/* Extra checks required by SLAB_DESTROY_BY_RCU */
+			/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
 			if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
 				goto again;
 			if (unlikely(llc_sk(rc)->sap != sap ||
@@ -914,7 +914,7 @@ static unsigned int early_drop_list(struct net *net,
 			continue;
 
 		/* kill only if still in same netns -- might have moved due to
-		 * SLAB_DESTROY_BY_RCU rules.
+		 * SLAB_TYPESAFE_BY_RCU rules.
 		 *
 		 * We steal the timer reference.  If that fails timer has
 		 * already fired or someone else deleted it. Just drop ref
@@ -1069,7 +1069,7 @@ __nf_conntrack_alloc(struct net *net,
 
 	/*
 	 * Do not use kmem_cache_zalloc(), as this cache uses
-	 * SLAB_DESTROY_BY_RCU.
+	 * SLAB_TYPESAFE_BY_RCU.
 	 */
 	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
 	if (ct == NULL)
@@ -1114,7 +1114,7 @@ void nf_conntrack_free(struct nf_conn *ct)
 	struct net *net = nf_ct_net(ct);
 
 	/* A freed object has refcnt == 0, that's
-	 * the golden rule for SLAB_DESTROY_BY_RCU
+	 * the golden rule for SLAB_TYPESAFE_BY_RCU
 	 */
 	NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
 
@@ -1878,7 +1878,7 @@ int nf_conntrack_init_start(void)
 	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
 						sizeof(struct nf_conn),
 						NFCT_INFOMASK + 1,
-						SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
+						SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
 	if (!nf_conntrack_cachep)
 		goto err_cachep;
 
@@ -101,7 +101,7 @@ struct proto smc_proto = {
 	.unhash		= smc_unhash_sk,
 	.obj_size	= sizeof(struct smc_sock),
 	.h.smc_hash	= &smc_v4_hashinfo,
-	.slab_flags	= SLAB_DESTROY_BY_RCU,
+	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
 };
 EXPORT_SYMBOL_GPL(smc_proto);
 