kasan: refactor pcpu kasan vmalloc unpoison

A KASAN tag mismatch, possibly causing a kernel panic, can be observed
on systems with a tag-based KASAN mode enabled and with multiple NUMA
nodes. It was reported on arm64 and reproduced on x86. It can be
explained by the following points:

1. There can be more than one virtual memory chunk.
2. A chunk's base address has a tag.
3. The base address points at the first chunk and thus inherits
   the tag of the first chunk.
4. The subsequent chunks will be accessed with the tag from the
   first chunk.
5. Thus, the subsequent chunks need to have their tag set to
   match that of the first chunk.

Refactor the code by moving the per-area __kasan_unpoison_vmalloc()
calls into a new kasan_unpoison_vmap_areas() helper, in preparation for
the actual fix.
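
As an illustration of the points above, the minimal userspace sketch
below models the scenario (it is not part of this patch; the tag
layout, names and values are simplified assumptions, not the real
SW_TAGS implementation). A base address carrying only the first
chunk's tag trips the check on the second chunk, and retagging the
later chunk to the first chunk's tag resolves it:

  /*
   * Illustrative userspace model only, not kernel code: it mimics the
   * SW_TAGS idea (a tag in the pointer's top byte checked against a
   * per-region memory tag) with simplified, assumed helpers.
   */
  #include <stdint.h>
  #include <stdio.h>

  #define NR_CHUNKS 2
  #define TAG_SHIFT 56

  static uint8_t mem_tag[NR_CHUNKS];  /* tag each chunk was unpoisoned with */

  static uint64_t set_tag(uint64_t addr, uint8_t tag)
  {
          return (addr & ~(0xffULL << TAG_SHIFT)) | ((uint64_t)tag << TAG_SHIFT);
  }

  static void check_access(uint64_t ptr, int chunk)
  {
          unsigned int ptr_tag = (ptr >> TAG_SHIFT) & 0xff;

          if (ptr_tag != mem_tag[chunk])
                  printf("chunk %d: tag mismatch (pointer %#x, memory %#x)\n",
                         chunk, ptr_tag, (unsigned int)mem_tag[chunk]);
          else
                  printf("chunk %d: access ok\n", chunk);
  }

  int main(void)
  {
          /* Each chunk is unpoisoned separately and gets its own tag ... */
          mem_tag[0] = 0xab;
          mem_tag[1] = 0xcd;

          /* ... but the returned base address carries only chunk 0's tag. */
          uint64_t base = set_tag(0x1000, mem_tag[0]);

          check_access(base, 0);  /* ok */
          check_access(base, 1);  /* mismatch: chunk 1 kept its own tag */

          /* Point 5 above: give later chunks the first chunk's tag. */
          mem_tag[1] = mem_tag[0];
          check_access(base, 1);  /* ok now */
          return 0;
  }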

Link: https://lkml.kernel.org/r/eb61d93b907e262eefcaa130261a08bcb6c5ce51.1764874575.git.m.wieczorretman@pm.me
Fixes: 1d96320f8d ("kasan, vmalloc: add vmalloc tagging for SW_TAGS")
Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Jiayuan Chen <jiayuan.chen@linux.dev>
Cc: Kees Cook <kees@kernel.org>
Cc: Marco Elver <elver@google.com>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: <stable@vger.kernel.org>	[6.1+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 3 files changed, 33 insertions(+), 3 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -631,6 +631,16 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
 		__kasan_poison_vmalloc(start, size);
 }
 
+void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+				 kasan_vmalloc_flags_t flags);
+static __always_inline void
+kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+			  kasan_vmalloc_flags_t flags)
+{
+	if (kasan_enabled())
+		__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
+}
+
 #else /* CONFIG_KASAN_VMALLOC */
 
 static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -655,6 +665,11 @@ static inline void *kasan_unpoison_vmalloc(const void *start,
 static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
 { }
 
+static __always_inline void
+kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+			  kasan_vmalloc_flags_t flags)
+{ }
+
 #endif /* CONFIG_KASAN_VMALLOC */
 
 #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -28,6 +28,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <linux/vmalloc.h>
 
 #include "kasan.h"
 #include "../slab.h"
@@ -575,3 +576,19 @@ bool __kasan_check_byte(const void *address, unsigned long ip)
 	}
 	return true;
 }
+
+#ifdef CONFIG_KASAN_VMALLOC
+void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+				 kasan_vmalloc_flags_t flags)
+{
+	unsigned long size;
+	void *addr;
+	int area;
+
+	for (area = 0; area < nr_vms; area++) {
+		size = vms[area]->size;
+		addr = vms[area]->addr;
+		vms[area]->addr = __kasan_unpoison_vmalloc(addr, size, flags);
+	}
+}
+#endif

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -5027,9 +5027,7 @@ retry:
 	 * With hardware tag-based KASAN, marking is skipped for
 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
 	 */
-	for (area = 0; area < nr_vms; area++)
-		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
-				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
+	kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL);
 
 	kfree(vas);
 	return vms;