linux/mm/memfd.c
Deepanshu Kartikey de8798965f mm/memfd: fix information leak in hugetlb folios
When allocating hugetlb folios for memfd, three initialization steps are
missing:

1. Folios are not zeroed, leading to kernel memory disclosure to userspace
2. Folios are not marked uptodate before adding to page cache
3. hugetlb_fault_mutex is not taken before hugetlb_add_to_page_cache()

The memfd allocation path bypasses the normal page fault handler
(hugetlb_no_page), which would otherwise handle all of these
initialization steps. This is especially problematic for udmabuf use
cases, where folios are pinned and directly accessed by userspace via
DMA.

Fix by matching the initialization pattern used in hugetlb_no_page():
- Zero the folio using folio_zero_user() which is optimized for huge pages
- Mark it uptodate with folio_mark_uptodate()
- Take hugetlb_fault_mutex before adding to page cache to prevent races

The folio_zero_user() change also fixes a potential security issue where
uninitialized kernel memory could be disclosed to userspace through read()
or mmap() operations on the memfd.
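
In condensed form (the full context is in memfd_alloc_folio() in the
file below), the fixed path now does:

	folio_zero_user(folio, 0);	/* avoid disclosing stale kernel memory */
	__folio_mark_uptodate(folio);

	hash = hugetlb_fault_mutex_hash(memfd->f_mapping, idx);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	err = hugetlb_add_to_page_cache(folio, memfd->f_mapping, idx);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);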

Link: https://lkml.kernel.org/r/20251112145034.2320452-1-kartikey406@gmail.com
Fixes: 89c1905d9c ("mm/gup: introduce memfd_pin_folios() for pinning memfd folios")
Signed-off-by: Deepanshu Kartikey <kartikey406@gmail.com>
Reported-by: syzbot+f64019ba229e3a5c411b@syzkaller.appspotmail.com
Link: https://lore.kernel.org/all/20251112031631.2315651-1-kartikey406@gmail.com/ [v1]
Closes: https://syzkaller.appspot.com/bug?extid=f64019ba229e3a5c411b
Suggested-by: Oscar Salvador <osalvador@suse.de>
Suggested-by: David Hildenbrand <david@redhat.com>
Tested-by: syzbot+f64019ba229e3a5c411b@syzkaller.appspotmail.com
Acked-by: Oscar Salvador <osalvador@suse.de>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Vivek Kasireddy <vivek.kasireddy@intel.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de> (v6)
Cc: Dave Airlie <airlied@redhat.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-11-24 14:25:17 -08:00

/*
 * memfd_create system call and file sealing support
 *
 * Code was originally included in shmem.c, and broken out to facilitate
 * use by hugetlbfs as well as tmpfs.
 *
 * This file is released under the GPL.
 */
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/khugepaged.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/memfd.h>
#include <linux/pid_namespace.h>
#include <uapi/linux/memfd.h>
#include "swap.h"

/*
 * We need a tag: a new tag would expand every xa_node by 8 bytes,
 * so reuse a tag which we firmly believe is never set or cleared on tmpfs
 * or hugetlbfs because they are memory only filesystems.
 */
#define MEMFD_TAG_PINNED	PAGECACHE_TAG_TOWRITE
#define LAST_SCAN		4	/* about 150ms max */
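
/*
 * A folio has "extra" references when its refcount exceeds what the page
 * cache and its mappings account for, i.e. when it is pinned, for example
 * by GUP or in-flight direct-IO.
 */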
static bool memfd_folio_has_extra_refs(struct folio *folio)
{
	return folio_ref_count(folio) != folio_expected_ref_count(folio);
}
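
/*
 * Tag every folio in the mapping that still carries extra references so
 * that memfd_wait_for_pins() can wait for those pins to be dropped. The
 * xarray lock is dropped periodically to keep irq-off latency bounded.
 */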
static void memfd_tag_pins(struct xa_state *xas)
{
	struct folio *folio;
	int latency = 0;

	lru_add_drain();

	xas_lock_irq(xas);
	xas_for_each(xas, folio, ULONG_MAX) {
		if (!xa_is_value(folio) && memfd_folio_has_extra_refs(folio))
			xas_set_mark(xas, MEMFD_TAG_PINNED);

		if (++latency < XA_CHECK_SCHED)
			continue;
		latency = 0;

		xas_pause(xas);
		xas_unlock_irq(xas);
		cond_resched();
		xas_lock_irq(xas);
	}
	xas_unlock_irq(xas);
}

/*
 * This is a helper function used by memfd_pin_folios() in GUP (gup.c).
 * It is mainly called to allocate a folio in a memfd when the caller
 * (memfd_pin_folios()) cannot find a folio in the page cache at a given
 * index in the mapping.
 */
struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct folio *folio;
	gfp_t gfp_mask;

	if (is_file_hugepages(memfd)) {
		/*
		 * The folio would most likely be accessed by a DMA driver,
		 * therefore, we have zone memory constraints where we can
		 * alloc from. Also, the folio will be pinned for an indefinite
		 * amount of time, so it is not expected to be migrated away.
		 */
		struct inode *inode = file_inode(memfd);
		struct hstate *h = hstate_file(memfd);
		int err = -ENOMEM;
		long nr_resv;

		gfp_mask = htlb_alloc_mask(h);
		gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE);
		idx >>= huge_page_order(h);

		nr_resv = hugetlb_reserve_pages(inode, idx, idx + 1, NULL, 0);
		if (nr_resv < 0)
			return ERR_PTR(nr_resv);

		folio = alloc_hugetlb_folio_reserve(h,
						    numa_node_id(),
						    NULL,
						    gfp_mask);
		if (folio) {
			u32 hash;

			/*
			 * Zero the folio to prevent information leaks to
			 * userspace. Use folio_zero_user() which is optimized
			 * for huge/gigantic pages. Pass 0 as addr_hint since
			 * this is not a faulting path and we don't have a
			 * user virtual address yet.
			 */
			folio_zero_user(folio, 0);

			/*
			 * Mark the folio uptodate before adding to page cache,
			 * as required by filemap.c and other hugetlb paths.
			 */
			__folio_mark_uptodate(folio);

			/*
			 * Serialize hugepage allocation and instantiation to
			 * prevent races with concurrent allocations, as
			 * required by all other callers of
			 * hugetlb_add_to_page_cache().
			 */
			hash = hugetlb_fault_mutex_hash(memfd->f_mapping, idx);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);
			err = hugetlb_add_to_page_cache(folio,
							memfd->f_mapping,
							idx);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			if (err) {
				folio_put(folio);
				goto err_unresv;
			}

			hugetlb_set_folio_subpool(folio, subpool_inode(inode));
			folio_unlock(folio);
			return folio;
		}
err_unresv:
		if (nr_resv > 0)
			hugetlb_unreserve_pages(inode, idx, idx + 1, 0);
		return ERR_PTR(err);
	}
#endif
	return shmem_read_folio(memfd->f_mapping, idx);
}

/*
 * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
 * via get_user_pages(), drivers might have some pending I/O without any active
 * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all folios
 * and see whether any has an elevated ref-count. If so, we tag them and wait
 * for them to be dropped.
 * The caller must guarantee that no new user will acquire writable references
 * to those folios to avoid races.
 */
static int memfd_wait_for_pins(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	struct folio *folio;
	int error, scan;

	memfd_tag_pins(&xas);

	error = 0;
	for (scan = 0; scan <= LAST_SCAN; scan++) {
		int latency = 0;

		if (!xas_marked(&xas, MEMFD_TAG_PINNED))
			break;

		if (!scan)
			lru_add_drain_all();
		else if (schedule_timeout_killable((HZ << scan) / 200))
			scan = LAST_SCAN;

		xas_set(&xas, 0);
		xas_lock_irq(&xas);
		xas_for_each_marked(&xas, folio, ULONG_MAX, MEMFD_TAG_PINNED) {
			bool clear = true;

			if (!xa_is_value(folio) &&
			    memfd_folio_has_extra_refs(folio)) {
				/*
				 * On the last scan, we clean up all those tags
				 * we inserted; but make a note that we still
				 * found folios pinned.
				 */
				if (scan == LAST_SCAN)
					error = -EBUSY;
				else
					clear = false;
			}
			if (clear)
				xas_clear_mark(&xas, MEMFD_TAG_PINNED);

			if (++latency < XA_CHECK_SCHED)
				continue;
			latency = 0;

			xas_pause(&xas);
			xas_unlock_irq(&xas);
			cond_resched();
			xas_lock_irq(&xas);
		}
		xas_unlock_irq(&xas);
	}

	return error;
}
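
/*
 * Return a pointer to the inode's seal state for the memfd-backing
 * filesystems (tmpfs and hugetlbfs), or NULL if the file type does not
 * support sealing.
 */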
static unsigned int *memfd_file_seals_ptr(struct file *file)
{
	if (shmem_file(file))
		return &SHMEM_I(file_inode(file))->seals;

#ifdef CONFIG_HUGETLBFS
	if (is_file_hugepages(file))
		return &HUGETLBFS_I(file_inode(file))->seals;
#endif

	return NULL;
}

#define F_ALL_SEALS (F_SEAL_SEAL | \
		     F_SEAL_EXEC | \
		     F_SEAL_SHRINK | \
		     F_SEAL_GROW | \
		     F_SEAL_WRITE | \
		     F_SEAL_FUTURE_WRITE)

static int memfd_add_seals(struct file *file, unsigned int seals)
{
	struct inode *inode = file_inode(file);
	unsigned int *file_seals;
	int error;

	/*
	 * SEALING
	 * Sealing allows multiple parties to share a tmpfs or hugetlbfs file
	 * but restrict access to a specific subset of file operations. Seals
	 * can only be added, but never removed. This way, mutually untrusted
	 * parties can share common memory regions with a well-defined policy.
	 * A malicious peer can thus never perform unwanted operations on a
	 * shared object.
	 *
	 * Seals are only supported on special tmpfs or hugetlbfs files and
	 * always affect the whole underlying inode. Once a seal is set, it
	 * may prevent some kinds of access to the file. Currently, the
	 * following seals are defined:
	 *   SEAL_SEAL: Prevent further seals from being set on this file
	 *   SEAL_SHRINK: Prevent the file from shrinking
	 *   SEAL_GROW: Prevent the file from growing
	 *   SEAL_WRITE: Prevent write access to the file
	 *   SEAL_EXEC: Prevent modification of the exec bits in the file mode
	 *
	 * As we don't require any trust relationship between two parties, we
	 * must prevent seals from being removed. Therefore, sealing a file
	 * only adds a given set of seals to the file, it never touches
	 * existing seals. Furthermore, the "setting seals"-operation can be
	 * sealed itself, which basically prevents any further seal from being
	 * added.
	 *
	 * Semantics of sealing are only defined on volatile files. Only
	 * anonymous tmpfs and hugetlbfs files support sealing. More
	 * importantly, seals are never written to disk. Therefore, there's
	 * no plan to support it on other file types.
	 */

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;
	if (seals & ~(unsigned int)F_ALL_SEALS)
		return -EINVAL;

	inode_lock(inode);

	file_seals = memfd_file_seals_ptr(file);
	if (!file_seals) {
		error = -EINVAL;
		goto unlock;
	}

	if (*file_seals & F_SEAL_SEAL) {
		error = -EPERM;
		goto unlock;
	}

	if ((seals & F_SEAL_WRITE) && !(*file_seals & F_SEAL_WRITE)) {
		error = mapping_deny_writable(file->f_mapping);
		if (error)
			goto unlock;

		error = memfd_wait_for_pins(file->f_mapping);
		if (error) {
			mapping_allow_writable(file->f_mapping);
			goto unlock;
		}
	}

	/*
	 * SEAL_EXEC implies SEAL_WRITE, making W^X from the start.
	 */
	if (seals & F_SEAL_EXEC && inode->i_mode & 0111)
		seals |= F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_FUTURE_WRITE;

	*file_seals |= seals;
	error = 0;

unlock:
	inode_unlock(inode);
	return error;
}
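
/*
 * Return the file's current seal set, or -EINVAL if the file does not
 * support sealing.
 */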
static int memfd_get_seals(struct file *file)
{
	unsigned int *seals = memfd_file_seals_ptr(file);

	return seals ? *seals : -EINVAL;
}

long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
{
	long error;

	switch (cmd) {
	case F_ADD_SEALS:
		error = memfd_add_seals(file, arg);
		break;
	case F_GET_SEALS:
		error = memfd_get_seals(file);
		break;
	default:
		error = -EINVAL;
		break;
	}

	return error;
}

#define MFD_NAME_PREFIX "memfd:"
#define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
#define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)

#define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB | MFD_NOEXEC_SEAL | MFD_EXEC)
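
/*
 * Apply the pid-namespace vm.memfd_noexec policy: default new memfds to
 * MFD_NOEXEC_SEAL or MFD_EXEC when the caller passed neither, and refuse
 * executable memfds entirely once the policy is set to "enforced".
 */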
static int check_sysctl_memfd_noexec(unsigned int *flags)
{
#ifdef CONFIG_SYSCTL
	struct pid_namespace *ns = task_active_pid_ns(current);
	int sysctl = pidns_memfd_noexec_scope(ns);

	if (!(*flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
		if (sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL)
			*flags |= MFD_NOEXEC_SEAL;
		else
			*flags |= MFD_EXEC;
	}

	if (!(*flags & MFD_NOEXEC_SEAL) && sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED) {
		pr_err_ratelimited(
			"%s[%d]: memfd_create() requires MFD_NOEXEC_SEAL with vm.memfd_noexec=%d\n",
			current->comm, task_pid_nr(current), sysctl);
		return -EACCES;
	}
#endif
	return 0;
}

static inline bool is_write_sealed(unsigned int seals)
{
	return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
}

static int check_write_seal(vm_flags_t *vm_flags_ptr)
{
	vm_flags_t vm_flags = *vm_flags_ptr;
	vm_flags_t mask = vm_flags & (VM_SHARED | VM_WRITE);

	/* If a private mapping then writability is irrelevant. */
	if (!(mask & VM_SHARED))
		return 0;

	/*
	 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
	 * write seals are active.
	 */
	if (mask & VM_WRITE)
		return -EPERM;

	/*
	 * This is a read-only mapping, disallow mprotect() from making a
	 * write-sealed mapping writable in future.
	 */
	*vm_flags_ptr &= ~VM_MAYWRITE;

	return 0;
}
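
/*
 * Called from the mmap() path to enforce write seals: deny new writable
 * shared mappings and strip VM_MAYWRITE from read-only shared ones.
 */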
int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr)
{
	int err = 0;
	unsigned int *seals_ptr = memfd_file_seals_ptr(file);
	unsigned int seals = seals_ptr ? *seals_ptr : 0;

	if (is_write_sealed(seals))
		err = check_write_seal(vm_flags_ptr);

	return err;
}
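
/*
 * Validate the memfd_create() flags: a huge page size may only be encoded
 * together with MFD_HUGETLB, and MFD_EXEC and MFD_NOEXEC_SEAL are mutually
 * exclusive. The sysctl policy may then adjust the exec-related flags.
 */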
static int sanitize_flags(unsigned int *flags_ptr)
{
	unsigned int flags = *flags_ptr;

	if (!(flags & MFD_HUGETLB)) {
		if (flags & ~MFD_ALL_FLAGS)
			return -EINVAL;
	} else {
		/* Allow huge page size encoding in flags. */
		if (flags & ~(MFD_ALL_FLAGS |
				(MFD_HUGE_MASK << MFD_HUGE_SHIFT)))
			return -EINVAL;
	}

	/* Invalid if both EXEC and NOEXEC_SEAL are set. */
	if ((flags & MFD_EXEC) && (flags & MFD_NOEXEC_SEAL))
		return -EINVAL;

	return check_sysctl_memfd_noexec(flags_ptr);
}
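
/*
 * Copy the name from userspace and prepend the "memfd:" prefix, bounding
 * the total length by NAME_MAX. Returns a kmalloc()ed string or an ERR_PTR.
 */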
static char *alloc_name(const char __user *uname)
{
	int error;
	char *name;
	long len;

	name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name)
		return ERR_PTR(-ENOMEM);

	memcpy(name, MFD_NAME_PREFIX, MFD_NAME_PREFIX_LEN);
	/* returned length does not include terminating zero */
	len = strncpy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, MFD_NAME_MAX_LEN + 1);
	if (len < 0) {
		error = -EFAULT;
		goto err_name;
	} else if (len > MFD_NAME_MAX_LEN) {
		error = -EINVAL;
		goto err_name;
	}

	return name;

err_name:
	kfree(name);
	return ERR_PTR(error);
}
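
/*
 * Create the backing tmpfs or hugetlbfs file and apply the initial exec
 * mode and seal state implied by the flags.
 */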
static struct file *alloc_file(const char *name, unsigned int flags)
{
	unsigned int *file_seals;
	struct file *file;

	if (flags & MFD_HUGETLB) {
		file = hugetlb_file_setup(name, 0, VM_NORESERVE,
					  HUGETLB_ANONHUGE_INODE,
					  (flags >> MFD_HUGE_SHIFT) &
					  MFD_HUGE_MASK);
	} else {
		file = shmem_file_setup(name, 0, VM_NORESERVE);
	}
	if (IS_ERR(file))
		return file;
	file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
	file->f_flags |= O_LARGEFILE;

	if (flags & MFD_NOEXEC_SEAL) {
		struct inode *inode = file_inode(file);

		inode->i_mode &= ~0111;
		file_seals = memfd_file_seals_ptr(file);
		if (file_seals) {
			*file_seals &= ~F_SEAL_SEAL;
			*file_seals |= F_SEAL_EXEC;
		}
	} else if (flags & MFD_ALLOW_SEALING) {
		/* MFD_EXEC and MFD_ALLOW_SEALING are set */
		file_seals = memfd_file_seals_ptr(file);
		if (file_seals)
			*file_seals &= ~F_SEAL_SEAL;
	}

	return file;
}
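
/*
 * memfd_create() creates an anonymous, sealable file backed by tmpfs (or
 * hugetlbfs with MFD_HUGETLB) and returns a file descriptor for it.
 */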
SYSCALL_DEFINE2(memfd_create,
		const char __user *, uname,
		unsigned int, flags)
{
	struct file *file;
	int fd, error;
	char *name;

	error = sanitize_flags(&flags);
	if (error < 0)
		return error;

	name = alloc_name(uname);
	if (IS_ERR(name))
		return PTR_ERR(name);

	fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
	if (fd < 0) {
		error = fd;
		goto err_free_name;
	}

	file = alloc_file(name, flags);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_free_fd;
	}

	fd_install(fd, file);
	kfree(name);
	return fd;

err_free_fd:
	put_unused_fd(fd);
err_free_name:
	kfree(name);
	return error;
}