mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask and
always allocate memory using the hardcoded GFP_KERNEL flag. This makes
them inconsistent with vmalloc(), which was recently extended to support
GFP_NOFS and GFP_NOIO allocations.
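For context, a GFP_NOFS vmalloc user looks roughly like this (a hypothetical caller for illustration, not actual xfs code):

	#include <linux/vmalloc.h>

	/*
	 * Hypothetical example: a filesystem allocating a large buffer while
	 * holding fs locks. GFP_NOFS forbids the allocator from recursing
	 * into filesystem reclaim, which could otherwise deadlock on those
	 * same locks.
	 */
	static void *fs_alloc_big_buffer(unsigned long size)
	{
		return __vmalloc(size, GFP_NOFS);
	}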
Page table allocations performed during shadow population also ignore the
external gfp_mask. To preserve the intended semantics of GFP_NOFS and
GFP_NOIO, wrap the apply_to_page_range() calls in the appropriate
memalloc scope.
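The scoping pattern is roughly the following (a minimal sketch of the approach; the helper name is hypothetical and the pte callback's data argument is simplified relative to mm/kasan/shadow.c):

	#include <linux/mm.h>
	#include <linux/sched/mm.h>

	static int populate_shadow_range(unsigned long start, unsigned long end,
					 gfp_t gfp_mask)
	{
		unsigned int flags = 0;
		int ret;

		/*
		 * apply_to_page_range() allocates page tables internally with
		 * its own hardcoded flags, so the caller's GFP_NOFS/GFP_NOIO
		 * restriction has to be imposed via the task's memalloc scope.
		 */
		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
			flags = memalloc_nofs_save();	/* GFP_NOFS caller */
		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
			flags = memalloc_noio_save();	/* GFP_NOIO caller */

		ret = apply_to_page_range(&init_mm, start, end - start,
					  kasan_populate_vmalloc_pte, NULL);

		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
			memalloc_nofs_restore(flags);
		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
			memalloc_noio_restore(flags);

		return ret;
	}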
xfs calls vmalloc with GFP_NOFS, so this bug could lead to a deadlock.
There was a report of this here:
https://lkml.kernel.org/r/686ea951.050a0220.385921.0016.GAE@google.com
This patch:
- Extends kasan_populate_vmalloc() and helpers to take gfp_mask;
- Passes gfp_mask down to alloc_pages_bulk() and __get_free_page();
- Enforces GFP_NOFS/NOIO semantics with memalloc_*_save()/restore()
around apply_to_page_range();
- Updates vmalloc.c and percpu allocator call sites accordingly.
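On the allocation side, the caller's mask is forwarded instead of hardcoding GFP_KERNEL. A sketch of that shape (hypothetical helper name; per the summary above, the real helpers also use __get_free_page() and have fuller error handling):

	#include <linux/gfp.h>

	static int alloc_shadow_pages(struct page **pages,
				      unsigned long nr_pages, gfp_t gfp_mask)
	{
		unsigned long nr_populated;

		/*
		 * Batched path: alloc_pages_bulk() fills the NULL slots of
		 * pages[] (which must be zero-initialised) using the caller's
		 * gfp_mask and returns how many entries are now populated.
		 */
		nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);

		/* Fall back to single-page allocation for whatever remains. */
		while (nr_populated < nr_pages) {
			pages[nr_populated] = alloc_page(gfp_mask);
			if (!pages[nr_populated])
				return -ENOMEM;
			nr_populated++;
		}

		return 0;
	}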
Link: https://lkml.kernel.org/r/20250831121058.92971-1-urezki@gmail.com
Fixes: 451769ebb7 ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc")
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reported-by: syzbot+3470c9ffee63e4abafeb@syzkaller.appspotmail.com
Reviewed-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
committed by Andrew Morton
parent 04100f775c
commit 79357cd06d
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	if (unlikely(!vmap_initialized))
 		return ERR_PTR(-EBUSY);
 
+	/* Only reclaim behaviour flags are relevant. */
+	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
 	might_sleep();
 
 	/*
@@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	 */
 	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
 	if (!va) {
-		gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
-
 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
 		if (unlikely(!va))
 			return ERR_PTR(-ENOMEM);
@@ -2089,7 +2089,7 @@ retry:
 	BUG_ON(va->va_start < vstart);
 	BUG_ON(va->va_end > vend);
 
-	ret = kasan_populate_vmalloc(addr, size);
+	ret = kasan_populate_vmalloc(addr, size, gfp_mask);
 	if (ret) {
 		free_vmap_area(va);
 		return ERR_PTR(ret);
@@ -4826,7 +4826,7 @@ retry:
 
 	/* populate the kasan shadow space */
 	for (area = 0; area < nr_vms; area++) {
-		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
			goto err_free_shadow;
 	}
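For completeness, the signature change implied by the call sites above (the corresponding header update in include/linux/kasan.h is not part of this excerpt):

	/* Before: shadow population always allocated with GFP_KERNEL. */
	int kasan_populate_vmalloc(unsigned long addr, unsigned long size);

	/* After: callers pass their reclaim constraints down. */
	int kasan_populate_vmalloc(unsigned long addr, unsigned long size,
				   gfp_t gfp_mask);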