author	Guo Weikang <guoweikang.kernel@gmail.com>	2025-01-06 03:11:25 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2025-01-26 05:22:30 +0100
commit	b2aad24b53333f1904a55d97e3fde2246ef05bb6 (patch)
tree	36f84ba8c6325f8d16a83bcd97517d2aa2b0f774
parent	mm/fake-numa: allow later numa node hotplug (diff)
download	linux-b2aad24b53333f1904a55d97e3fde2246ef05bb6.tar.xz
	linux-b2aad24b53333f1904a55d97e3fde2246ef05bb6.zip
mm/memmap: prevent double scanning of memmap by kmemleak
kmemleak explicitly scans the mem_map through the valid struct page objects.
However, memmap_alloc() was also adding this memory to the gray object list,
causing it to be scanned twice.  Remove memmap_alloc() from the scan list and
add a comment to clarify the behavior.

Link: https://lore.kernel.org/lkml/CAOm6qn=FVeTpH54wGDFMHuCOeYtvoTx30ktnv9-w3Nh8RMofEA@mail.gmail.com/
Link: https://lkml.kernel.org/r/20250106021126.1678334-1-guoweikang.kernel@gmail.com
Signed-off-by: Guo Weikang <guoweikang.kernel@gmail.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
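For illustration, below is a small self-contained C model of the behaviour the changelog describes. The names (memblock_alloc_model(), kmemleak_register()) are hypothetical stand-ins, not the kernel's actual code: with MEMBLOCK_ALLOC_ACCESSIBLE the allocation is registered with kmemleak as a gray object and the memmap ends up scanned twice, while MEMBLOCK_ALLOC_NOLEAKTRACE skips that registration and leaves the memmap covered only by kmemleak's explicit struct page walk.

/*
 * Simplified, self-contained model of the behaviour described above
 * (illustrative names only; the real logic lives in mm/memblock.c and
 * mm/kmemleak.c).  Compiles and runs as an ordinary userspace program.
 */
#include <stdio.h>

#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_NOLEAKTRACE	1

/* Stand-in for kmemleak_alloc_phys(): adds a gray object to the scan list. */
static void kmemleak_register(const char *what)
{
	printf("kmemleak: %s added to the gray object list\n", what);
}

/* Model of the memblock allocation path: the block is registered with
 * kmemleak unless the caller passed MEMBLOCK_ALLOC_NOLEAKTRACE. */
static void memblock_alloc_model(const char *what, int end)
{
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		kmemleak_register(what);
	else
		printf("kmemleak: %s not registered (NOLEAKTRACE)\n", what);
}

int main(void)
{
	/* Before the patch: the memmap becomes a gray object and is also
	 * covered by kmemleak's explicit struct page walk -> scanned twice. */
	memblock_alloc_model("memmap (ACCESSIBLE)", MEMBLOCK_ALLOC_ACCESSIBLE);

	/* After the patch: no gray object; the memmap is still covered by
	 * the explicit struct page walk, so nothing is lost. */
	memblock_alloc_model("memmap (NOLEAKTRACE)", MEMBLOCK_ALLOC_NOLEAKTRACE);
	return 0;
}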
-rw-r--r--	include/linux/memblock.h	4
-rw-r--r--	mm/mm_init.c	8
-rw-r--r--	mm/sparse-vmemmap.c	5
3 files changed, 13 insertions, 4 deletions
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 673d5cae7c81..d48b56c1e558 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -378,6 +378,10 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
+/*
+ * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
+ * MEMBLOCK_ALLOC_ACCESSIBLE
+ */
#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
/* We are using top down, so it is safe to use 0 here */
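The comment added above says MEMBLOCK_ALLOC_NOLEAKTRACE implies MEMBLOCK_ALLOC_ACCESSIBLE. The reason is that both constants are sentinel values for the upper address bound of an allocation, and memblock clamps either of them to its current accessible limit before searching for a free range; NOLEAKTRACE only differs by skipping the kmemleak registration. The sketch below models that clamping with illustrative names and an assumed 1 GiB limit; it is not verbatim mm/memblock.c.

/*
 * Illustrative model of the "implies MEMBLOCK_ALLOC_ACCESSIBLE" remark:
 * both sentinels are remapped to the current accessible limit, so they
 * select the same address range.  Names and the limit are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

#define MEMBLOCK_ALLOC_ANYWHERE		(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_NOLEAKTRACE	1

static phys_addr_t current_limit = 0x40000000;	/* assumed 1 GiB limit */

static phys_addr_t clamp_end(phys_addr_t end)
{
	/* Both sentinels mean "anywhere below the accessible limit". */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE || end == MEMBLOCK_ALLOC_NOLEAKTRACE)
		return current_limit;
	return end;
}

int main(void)
{
	printf("ACCESSIBLE  -> end = %#llx\n",
	       (unsigned long long)clamp_end(MEMBLOCK_ALLOC_ACCESSIBLE));
	printf("NOLEAKTRACE -> end = %#llx\n",
	       (unsigned long long)clamp_end(MEMBLOCK_ALLOC_NOLEAKTRACE));
	printf("ANYWHERE    -> end = %#llx\n",
	       (unsigned long long)clamp_end(MEMBLOCK_ALLOC_ANYWHERE));
	return 0;
}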
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 24b68b425afb..2630cc30147e 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1585,13 +1585,17 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
{
void *ptr;
+ /*
+ * Kmemleak will explicitly scan mem_map by traversing all valid
+ * `struct page`, so memblock does not need to be added to the scan list.
+ */
if (exact_nid)
ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
- MEMBLOCK_ALLOC_ACCESSIBLE,
+ MEMBLOCK_ALLOC_NOLEAKTRACE,
nid);
else
ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
- MEMBLOCK_ALLOC_ACCESSIBLE,
+ MEMBLOCK_ALLOC_NOLEAKTRACE,
nid);
if (ptr && size > 0)
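The new comment relies on kmemleak's explicit mem_map coverage: kmemleak walks the valid pfns and scans each struct page object directly, which is why the backing memmap allocation no longer needs to be a gray object of its own. A toy, self-contained model of that walk follows; struct page, NR_PAGES and scan_block() here are stand-ins, not mm/kmemleak.c.

/*
 * Toy model of kmemleak's explicit memmap coverage: one scan per valid
 * struct page, independent of any gray object for the memmap array.
 */
#include <stdio.h>

struct page { unsigned long flags; };		/* stand-in for struct page */

#define NR_PAGES 4
static struct page memmap[NR_PAGES];		/* stand-in for the mem_map array */

static void scan_block(void *start, void *end)
{
	printf("kmemleak: scanning %p-%p\n", start, end);
}

int main(void)
{
	/* Equivalent of the pfn walk: scan every valid struct page object. */
	for (unsigned long pfn = 0; pfn < NR_PAGES; pfn++)
		scan_block(&memmap[pfn], &memmap[pfn] + 1);
	return 0;
}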
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cec67c5f37d8..3287ebadd167 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -31,6 +31,8 @@
#include <asm/dma.h>
#include <asm/pgalloc.h>
+#include "internal.h"
+
/*
* Allocate a block of memory to be used to back the virtual memory map
* or to back the page tables that are used to create the mapping.
@@ -42,8 +44,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
unsigned long align,
unsigned long goal)
{
- return memblock_alloc_try_nid_raw(size, align, goal,
- MEMBLOCK_ALLOC_ACCESSIBLE, node);
+ return memmap_alloc(size, align, goal, node, false);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)