author | Linus Torvalds <torvalds@linux-foundation.org> | 2024-11-26 01:51:24 +0100
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-11-26 01:51:24 +0100
commit | e06635e26cd8144eee17e9f256e8fde8aed3ba4f (patch)
tree | 24bd832e3098ebf70999a1af6c16087ce4c5995e /mm/slab_common.c
parent | Merge tag 'mm-nonmm-stable-2024-11-24-02-05' of git://git.kernel.org/pub/scm/... (diff)
parent | slab: Fix too strict alignment check in create_cache() (diff)
Merge tag 'slab-for-6.13-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:
- Add new slab_strict_numa boot parameter to enforce per-object memory
policies on top of slab folio policies, for systems where saving the cost
of remote accesses is more important than minimizing slab allocation
overhead (Christoph Lameter)
- Fix for freeptr_offset alignment check being too strict for m68k
(Geert Uytterhoeven) - see the cache creation sketch below
- krealloc() fixes to avoid violating __GFP_ZERO guarantees when
slub_debug (redzone and object tracking) is enabled (Feng Tang) - see the
__GFP_ZERO usage sketch below
- Fix a memory leak in case sysfs registration fails for a slab cache,
and also no longer fail to create the cache in that case (Hyeonggon
Yoo)
- Fix handling of detected consistency problems (due to a buggy slab
user) with slub_debug enabled, so that it does not cause further list
corruption bugs (yuan.gao)
- Code cleanup and kerneldocs polishing (Zhen Lei, Vlastimil Babka)
* tag 'slab-for-6.13-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
slab: Fix too strict alignment check in create_cache()
mm/slab: Allow cache creation to proceed even if sysfs registration fails
mm/slub: Avoid list corruption when removing a slab from the full list
mm/slub, kunit: Add testcase for krealloc redzone and zeroing
mm/slub: Improve redzone check and zeroing for krealloc()
mm/slub: Consider kfence case for get_orig_size()
SLUB: Add support for per object memory policies
mm, slab: add kerneldocs for common SLAB_ flags
mm/slab: remove duplicate check in create_cache()
mm/slub: Move krealloc() and related code to slub.c
mm/kasan: Don't store metadata inside kmalloc object when slub_debug_orig_size is on
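The freeptr_offset fix only relaxes an alignment check; for context, the sketch below shows how a cache with a dedicated free pointer slot is set up. It is a minimal illustration, assuming the struct kmem_cache_args flavor of kmem_cache_create() and the freeptr_t type from <linux/slab.h>; the struct, field and cache names are made up, and only the use_freeptr_offset/freeptr_offset fields and the SLAB_TYPESAFE_BY_RCU requirement come from create_cache() in the diff further down.

```c
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>

/* Hypothetical RCU-freed object with a dedicated slot for the slab
 * freelist pointer, so the pointer never overlaps live object data. */
struct rcu_obj {
	struct rcu_head rcu;
	freeptr_t free_slot;	/* slab stores its freelist pointer here */
	char payload[64];
};

static struct kmem_cache *rcu_obj_cache;

static int __init rcu_obj_cache_init(void)
{
	struct kmem_cache_args args = {
		/* After the fix, the offset only has to be aligned to
		 * __alignof__(freeptr_t) rather than sizeof(freeptr_t),
		 * which was too strict on m68k. */
		.freeptr_offset = offsetof(struct rcu_obj, free_slot),
		.use_freeptr_offset = true,
	};

	/* A custom free pointer offset requires SLAB_TYPESAFE_BY_RCU. */
	rcu_obj_cache = kmem_cache_create("rcu_obj", sizeof(struct rcu_obj),
					  &args, SLAB_TYPESAFE_BY_RCU);
	return rcu_obj_cache ? 0 : -ENOMEM;
}
```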
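For the krealloc() changes, the caller-visible rule is spelled out in the kerneldoc removed from this file (it moves to slub.c): __GFP_ZERO is only honored if it is passed consistently, from the first allocation through every subsequent reallocation of the same object. A usage sketch, with hypothetical function and variable names:

```c
#include <linux/slab.h>

/* Grow a zero-initialized counter array; the newly exposed entries may be
 * assumed to be zero afterwards only because __GFP_ZERO is used both at
 * the original kcalloc() and at every krealloc_array() call. */
static int *grow_counters(int *counters, size_t new_n)
{
	int *tmp;

	tmp = krealloc_array(counters, new_n, sizeof(*counters),
			     GFP_KERNEL | __GFP_ZERO);
	if (!tmp)
		return NULL;	/* counters is untouched and still owned by the caller */

	return tmp;
}
```

krealloc_array() is used here rather than an open-coded multiplication so the size calculation cannot overflow.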
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r-- | mm/slab_common.c | 103
1 file changed, 14 insertions, 89 deletions
```diff
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a7174455db9f..a29457bef626 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -222,15 +222,12 @@ static struct kmem_cache *create_cache(const char *name,
 	struct kmem_cache *s;
 	int err;
 
-	if (WARN_ON(args->useroffset + args->usersize > object_size))
-		args->useroffset = args->usersize = 0;
-
 	/* If a custom freelist pointer is requested make sure it's sane. */
 	err = -EINVAL;
 	if (args->use_freeptr_offset &&
 	    (args->freeptr_offset >= object_size ||
 	     !(flags & SLAB_TYPESAFE_BY_RCU) ||
-	     !IS_ALIGNED(args->freeptr_offset, sizeof(freeptr_t))))
+	     !IS_ALIGNED(args->freeptr_offset, __alignof__(freeptr_t))))
 		goto out;
 
 	err = -ENOMEM;
@@ -257,11 +254,23 @@ out:
  * @object_size: The size of objects to be created in this cache.
  * @args: Additional arguments for the cache creation (see
  *        &struct kmem_cache_args).
- * @flags: See %SLAB_* flags for an explanation of individual @flags.
+ * @flags: See the desriptions of individual flags. The common ones are listed
+ *         in the description below.
  *
  * Not to be called directly, use the kmem_cache_create() wrapper with the same
  * parameters.
  *
+ * Commonly used @flags:
+ *
+ * &SLAB_ACCOUNT - Account allocations to memcg.
+ *
+ * &SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
+ *
+ * &SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
+ *
+ * &SLAB_TYPESAFE_BY_RCU - Slab page (not individual objects) freeing delayed
+ * by a grace period - see the full description before using.
+ *
  * Context: Cannot be called within a interrupt, but can be interrupted.
  *
  * Return: a pointer to the cache on success, NULL on failure.
@@ -1199,90 +1208,6 @@ module_init(slab_proc_init);
 #endif /* CONFIG_SLUB_DEBUG */
 
-static __always_inline __realloc_size(2) void *
-__do_krealloc(const void *p, size_t new_size, gfp_t flags)
-{
-	void *ret;
-	size_t ks;
-
-	/* Check for double-free before calling ksize. */
-	if (likely(!ZERO_OR_NULL_PTR(p))) {
-		if (!kasan_check_byte(p))
-			return NULL;
-		ks = ksize(p);
-	} else
-		ks = 0;
-
-	/* If the object still fits, repoison it precisely. */
-	if (ks >= new_size) {
-		/* Zero out spare memory. */
-		if (want_init_on_alloc(flags)) {
-			kasan_disable_current();
-			memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
-			kasan_enable_current();
-		}
-
-		p = kasan_krealloc((void *)p, new_size, flags);
-		return (void *)p;
-	}
-
-	ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
-	if (ret && p) {
-		/* Disable KASAN checks as the object's redzone is accessed. */
-		kasan_disable_current();
-		memcpy(ret, kasan_reset_tag(p), ks);
-		kasan_enable_current();
-	}
-
-	return ret;
-}
-
-/**
- * krealloc - reallocate memory. The contents will remain unchanged.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
- * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
- *
- * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
- * initial memory allocation, every subsequent call to this API for the same
- * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
- * __GFP_ZERO is not fully honored by this API.
- *
- * This is the case, since krealloc() only knows about the bucket size of an
- * allocation (but not the exact size it was allocated with) and hence
- * implements the following semantics for shrinking and growing buffers with
- * __GFP_ZERO.
- *
- *         new             bucket
- * 0       size            size
- * |--------|----------------|
- * |  keep  |      zero      |
- *
- * In any case, the contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes.
- *
- * Return: pointer to the allocated memory or %NULL in case of error
- */
-void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
-{
-	void *ret;
-
-	if (unlikely(!new_size)) {
-		kfree(p);
-		return ZERO_SIZE_PTR;
-	}
-
-	ret = __do_krealloc(p, new_size, flags);
-	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
-		kfree(p);
-
-	return ret;
-}
-EXPORT_SYMBOL(krealloc_noprof);
-
 /**
  * kfree_sensitive - Clear sensitive information in memory before freeing
  * @p: object to free memory of
```
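The kerneldoc hunk above only names the commonly used cache flags; as a rough illustration (the object type and cache name are hypothetical, only the flag meanings come from the new kerneldoc), a reclaimable, cache-line-aligned, memcg-accounted cache could be created like this:

```c
#include <linux/init.h>
#include <linux/slab.h>

struct inode_tag {			/* hypothetical object type */
	unsigned long ino;
	char name[48];
};

static struct kmem_cache *inode_tag_cache;

static int __init inode_tag_cache_init(void)
{
	/* SLAB_HWCACHE_ALIGN: align objects on cache line boundaries,
	 * SLAB_ACCOUNT: account allocations to the caller's memcg,
	 * SLAB_RECLAIM_ACCOUNT: count the slabs as reclaimable memory. */
	inode_tag_cache = kmem_cache_create("inode_tag",
					    sizeof(struct inode_tag), 0,
					    SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
					    SLAB_RECLAIM_ACCOUNT, NULL);
	return inode_tag_cache ? 0 : -ENOMEM;
}
```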