mirror of https://github.com/torvalds/linux.git
mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask and
always allocate memory using the hardcoded GFP_KERNEL flag. This makes
them inconsistent with vmalloc(), which was recently extended to support
GFP_NOFS and GFP_NOIO allocations.
Page table allocations performed during shadow population also ignore the
external gfp_mask. To preserve the intended semantics of GFP_NOFS and
GFP_NOIO, wrap the apply_to_page_range() calls into the appropriate
memalloc scope.
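Roughly, the scope selection follows the caller's mask (a condensed sketch of the pattern used in the shadow-population hunk below, not verbatim kernel code; the real code repeats the mask test instead of caching it in booleans):

  unsigned int flags;
  bool nofs = (gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO;  /* FS cleared, IO still allowed */
  bool noio = (gfp_mask & (__GFP_FS | __GFP_IO)) == 0;         /* both FS and IO cleared */

  if (nofs)
          flags = memalloc_nofs_save();
  else if (noio)
          flags = memalloc_noio_save();

  /* nested page-table allocations honour the scope instead of plain GFP_KERNEL */
  ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
                            kasan_populate_vmalloc_pte, &data);

  if (nofs)
          memalloc_nofs_restore(flags);
  else if (noio)
          memalloc_noio_restore(flags);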
xfs calls vmalloc() with GFP_NOFS, so this bug can lead to deadlock. There is a report here:
https://lkml.kernel.org/r/686ea951.050a0220.385921.0016.GAE@google.com
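For illustration only (a hypothetical caller, not code taken from xfs; "len" is made up): a filesystem path that must not recurse into the FS during reclaim allocates with GFP_NOFS, but before this change the KASAN shadow for that mapping was still populated with GFP_KERNEL, so reclaim could re-enter the filesystem:

  /* hypothetical NOFS caller; with this fix the shadow population honours GFP_NOFS too */
  void *buf = __vmalloc(len, GFP_NOFS | __GFP_ZERO);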
This patch:
- Extends kasan_populate_vmalloc() and helpers to take gfp_mask;
- Passes gfp_mask down to alloc_pages_bulk() and __get_free_page();
- Enforces GFP_NOFS/NOIO semantics with memalloc_*_save()/restore()
around apply_to_page_range();
- Updates vmalloc.c and percpu allocator call sites accordingly.
Link: https://lkml.kernel.org/r/20250831121058.92971-1-urezki@gmail.com
Fixes: 451769ebb7 ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc")
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reported-by: syzbot+3470c9ffee63e4abafeb@syzkaller.appspotmail.com
Reviewed-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 79357cd06d
parent 04100f775c
committed by Andrew Morton
@@ -562,7 +562,7 @@ static inline void kasan_init_hw_tags(void) { }
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 
 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end,
@@ -574,7 +574,7 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
 						       unsigned long size)
 { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-					unsigned long size)
+					unsigned long size, gfp_t gfp_mask)
 {
 	return 0;
 }
@@ -610,7 +610,7 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
 static inline void kasan_populate_early_vm_area_shadow(void *start,
 							unsigned long size) { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-					unsigned long size)
+					unsigned long size, gfp_t gfp_mask)
 {
 	return 0;
 }
@@ -336,13 +336,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
 	}
 }
 
-static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
 {
 	unsigned long nr_populated, nr_total = nr_pages;
 	struct page **page_array = pages;
 
 	while (nr_pages) {
-		nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+		nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
 		if (!nr_populated) {
 			___free_pages_bulk(page_array, nr_total - nr_pages);
 			return -ENOMEM;
@@ -354,25 +354,42 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
 	return 0;
 }
 
-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
 {
 	unsigned long nr_pages, nr_total = PFN_UP(end - start);
 	struct vmalloc_populate_data data;
+	unsigned int flags;
 	int ret = 0;
 
-	data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
 	if (!data.pages)
 		return -ENOMEM;
 
 	while (nr_total) {
 		nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
-		ret = ___alloc_pages_bulk(data.pages, nr_pages);
+		ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
 		if (ret)
 			break;
 
 		data.start = start;
+
+		/*
+		 * page tables allocations ignore external gfp mask, enforce it
+		 * by the scope API
+		 */
+		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+			flags = memalloc_nofs_save();
+		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+			flags = memalloc_noio_save();
+
 		ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
 					  kasan_populate_vmalloc_pte, &data);
+
+		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+			memalloc_nofs_restore(flags);
+		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+			memalloc_noio_restore(flags);
+
 		___free_pages_bulk(data.pages, nr_pages);
 		if (ret)
 			break;
@@ -386,7 +403,7 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
 	return ret;
 }
 
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 {
 	unsigned long shadow_start, shadow_end;
 	int ret;
@@ -415,7 +432,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
 	shadow_end = PAGE_ALIGN(shadow_end);
 
-	ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
+	ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
 	if (ret)
 		return ret;
 
@@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	if (unlikely(!vmap_initialized))
 		return ERR_PTR(-EBUSY);
 
+	/* Only reclaim behaviour flags are relevant. */
+	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
 	might_sleep();
 
 	/*
@@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	 */
 	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
 	if (!va) {
-		gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
-
 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
 		if (unlikely(!va))
 			return ERR_PTR(-ENOMEM);
@@ -2089,7 +2089,7 @@ retry:
 	BUG_ON(va->va_start < vstart);
 	BUG_ON(va->va_end > vend);
 
-	ret = kasan_populate_vmalloc(addr, size);
+	ret = kasan_populate_vmalloc(addr, size, gfp_mask);
 	if (ret) {
 		free_vmap_area(va);
 		return ERR_PTR(ret);
@@ -4826,7 +4826,7 @@ retry:
 
 	/* populate the kasan shadow space */
 	for (area = 0; area < nr_vms; area++) {
-		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
 			goto err_free_shadow;
 	}
 