Mirror of https://github.com/torvalds/linux.git (synced 2025-10-28 23:36:27 +02:00)
slab fixes for 6.18-rc3

Merge tag 'slab-for-6.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fixes from Vlastimil Babka:

 - Two fixes for race conditions in obj_exts allocation (Hao Ge)

 - Fix for slab accounting imbalance due to deferred slab deactivation
   (Vlastimil Babka)

* tag 'slab-for-6.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  slab: Fix obj_ext mistakenly considered NULL due to race condition
  slab: fix slab accounting imbalance due to defer_deactivate_slab()
  slab: Avoid race on slab->obj_exts in alloc_slab_obj_exts
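The two obj_exts race fixes work by replacing plain stores to slab->obj_exts with cmpxchg(), so a failed allocation is only recorded when no other context has installed anything yet, and a lost race that left a valid vector behind counts as success. Below is a minimal standalone C11 sketch of that pattern; it is not the kernel code, and obj_exts, ALLOC_FAIL_SENTINEL, mark_failed_alloc and handle_alloc_failure are illustrative names only.

#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>

#define ALLOC_FAIL_SENTINEL ((uintptr_t)1)

/* 0 = no vector yet, sentinel = a recorded failure, anything else = vector pointer. */
static _Atomic uintptr_t obj_exts;

/* Returns true if we installed the failure marker, false if another thread beat us to the word. */
static bool mark_failed_alloc(void)
{
        uintptr_t expected = 0;

        return atomic_compare_exchange_strong(&obj_exts, &expected,
                                              ALLOC_FAIL_SENTINEL);
}

/*
 * Losing the race is not necessarily an error: a concurrent thread may have
 * installed a valid vector, in which case the caller can report success.
 */
static int handle_alloc_failure(void)
{
        if (!mark_failed_alloc() &&
            atomic_load(&obj_exts) != ALLOC_FAIL_SENTINEL)
                return 0;       /* someone else provided the vector */
        return -1;              /* genuine failure; the kernel returns -ENOMEM here */
}

int main(void)
{
        /* Simulate the failure path with no racing allocator: expect -1. */
        return handle_alloc_failure() == -1 ? 0 : 1;
}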
commit 66cd8e9cb8

1 changed file with 21 additions and 10 deletions
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2052,9 +2052,9 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
         }
 }
 
-static inline void mark_failed_objexts_alloc(struct slab *slab)
+static inline bool mark_failed_objexts_alloc(struct slab *slab)
 {
-        slab->obj_exts = OBJEXTS_ALLOC_FAIL;
+        return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0;
 }
 
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
@@ -2076,7 +2076,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
-static inline void mark_failed_objexts_alloc(struct slab *slab) {}
+static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
                         struct slabobj_ext *vec, unsigned int objects) {}
 
@@ -2124,8 +2124,14 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
                                    slab_nid(slab));
         }
         if (!vec) {
-                /* Mark vectors which failed to allocate */
-                mark_failed_objexts_alloc(slab);
+                /*
+                 * Try to mark vectors which failed to allocate.
+                 * If this operation fails, there may be a racing process
+                 * that has already completed the allocation.
+                 */
+                if (!mark_failed_objexts_alloc(slab) &&
+                    slab_obj_exts(slab))
+                        return 0;
 
                 return -ENOMEM;
         }
@@ -2136,6 +2142,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 #ifdef CONFIG_MEMCG
         new_exts |= MEMCG_DATA_OBJEXTS;
 #endif
+retry:
         old_exts = READ_ONCE(slab->obj_exts);
         handle_failed_objexts_alloc(old_exts, vec, objects);
         if (new_slab) {
@@ -2145,8 +2152,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
                  * be simply assigned.
                  */
                 slab->obj_exts = new_exts;
-        } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
-                   cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+        } else if (old_exts & ~OBJEXTS_FLAGS_MASK) {
                 /*
                  * If the slab is already in use, somebody can allocate and
                  * assign slabobj_exts in parallel. In this case the existing
@@ -2158,6 +2164,9 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
                 else
                         kfree(vec);
                 return 0;
+        } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+                /* Retry if a racing thread changed slab->obj_exts from under us. */
+                goto retry;
         }
 
         if (allow_spin)
@@ -3419,7 +3428,6 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
 
         if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
                 /* Unlucky, discard newly allocated slab */
-                slab->frozen = 1;
                 defer_deactivate_slab(slab, NULL);
                 return NULL;
         }
@@ -6468,9 +6476,12 @@ static void free_deferred_objects(struct irq_work *work)
                 struct slab *slab = container_of(pos, struct slab, llnode);
 
 #ifdef CONFIG_SLUB_TINY
-                discard_slab(slab->slab_cache, slab);
+                free_slab(slab->slab_cache, slab);
 #else
-                deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+                if (slab->frozen)
+                        deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+                else
+                        free_slab(slab->slab_cache, slab);
 #endif
         }
 }
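The retry: label added in alloc_slab_obj_exts() follows the usual lock-free publish pattern: if cmpxchg() finds that a racing thread changed slab->obj_exts in the meantime, the code re-reads the word and tries again rather than dropping the update. Below is a minimal standalone C11 sketch of such a publish-with-retry loop; it is illustrative only, and FLAGS_MASK, install_vector and obj_exts here are not the kernel's symbols.

#include <stdint.h>
#include <stdatomic.h>

#define FLAGS_MASK ((uintptr_t)0x3)     /* low bits reserved for flags, as in the kernel word */

static _Atomic uintptr_t obj_exts;

static void install_vector(uintptr_t new_vec)
{
        uintptr_t old = atomic_load(&obj_exts);

        for (;;) {
                if (old & ~FLAGS_MASK)
                        return; /* a racing thread already installed a vector; keep theirs */

                /* Preserve flag bits that may have been set since we last looked. */
                if (atomic_compare_exchange_strong(&obj_exts, &old,
                                                   new_vec | (old & FLAGS_MASK)))
                        return; /* published */

                /* CAS failed: 'old' now holds the current value; loop and retry,
                 * the C11 equivalent of the kernel's "goto retry". */
        }
}

int main(void)
{
        uintptr_t vec = (uintptr_t)0x1000;      /* stand-in for a freshly allocated vector */

        install_vector(vec);
        return (atomic_load(&obj_exts) & ~FLAGS_MASK) == vec ? 0 : 1;
}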