mirror of https://github.com/torvalds/linux.git
synced 2025-12-01 07:26:02 +07:00
93 lines · 2.7 KiB · C
// SPDX-License-Identifier: GPL-2.0-only

/*
 * Functions explicitly implemented for exec functionality which however are
 * explicitly VMA-only logic.
 */
#include "vma_internal.h"
#include "vma.h"
/*
|
||
|
|
* Relocate a VMA downwards by shift bytes. There cannot be any VMAs between
|
||
|
|
* this VMA and its relocated range, which will now reside at [vma->vm_start -
|
||
|
|
* shift, vma->vm_end - shift).
|
||
|
|
*
|
||
|
|
* This function is almost certainly NOT what you want for anything other than
|
||
|
|
* early executable temporary stack relocation.
|
||
|
|
*/
|
||
|
|
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
|
||
|
|
{
|
||
|
|
/*
|
||
|
|
* The process proceeds as follows:
|
||
|
|
*
|
||
|
|
* 1) Use shift to calculate the new vma endpoints.
|
||
|
|
* 2) Extend vma to cover both the old and new ranges. This ensures the
|
||
|
|
* arguments passed to subsequent functions are consistent.
|
||
|
|
* 3) Move vma's page tables to the new range.
|
||
|
|
* 4) Free up any cleared pgd range.
|
||
|
|
* 5) Shrink the vma to cover only the new range.
|
||
|
|
*/
|
||
|
|
|
||
|
|
struct mm_struct *mm = vma->vm_mm;
|
||
|
|
unsigned long old_start = vma->vm_start;
|
||
|
|
unsigned long old_end = vma->vm_end;
|
||
|
|
unsigned long length = old_end - old_start;
|
||
|
|
unsigned long new_start = old_start - shift;
|
||
|
|
unsigned long new_end = old_end - shift;
|
||
|
|
VMA_ITERATOR(vmi, mm, new_start);
|
||
|
|
VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
|
||
|
|
struct vm_area_struct *next;
|
||
|
|
struct mmu_gather tlb;
|
||
|
|
PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length);
|
||
|
|
|
||
|
|
BUG_ON(new_start > new_end);
|
||
|
|
|
||
|
|
/*
|
||
|
|
* ensure there are no vmas between where we want to go
|
||
|
|
* and where we are
|
||
|
|
*/
|
||
|
|
if (vma != vma_next(&vmi))
|
||
|
|
return -EFAULT;
|
||
|
|
|
||
|
|
vma_iter_prev_range(&vmi);
|
||
|
|
/*
|
||
|
|
* cover the whole range: [new_start, old_end)
|
||
|
|
*/
|
||
|
|
vmg.middle = vma;
|
||
|
|
if (vma_expand(&vmg))
|
||
|
|
return -ENOMEM;
|
||
|
|
|
||
|
|
/*
|
||
|
|
* move the page tables downwards, on failure we rely on
|
||
|
|
* process cleanup to remove whatever mess we made.
|
||
|
|
*/
|
||
|
|
pmc.for_stack = true;
|
||
|
|
if (length != move_page_tables(&pmc))
|
||
|
|
return -ENOMEM;
|
||
|
|
|
||
|
|
tlb_gather_mmu(&tlb, mm);
|
||
|
|
next = vma_next(&vmi);
|
||
|
|
if (new_end > old_start) {
|
||
|
|
/*
|
||
|
|
* when the old and new regions overlap clear from new_end.
|
||
|
|
*/
|
||
|
|
free_pgd_range(&tlb, new_end, old_end, new_end,
|
||
|
|
next ? next->vm_start : USER_PGTABLES_CEILING);
|
||
|
|
} else {
|
||
|
|
/*
|
||
|
|
* otherwise, clean from old_start; this is done to not touch
|
||
|
|
* the address space in [new_end, old_start) some architectures
|
||
|
|
* have constraints on va-space that make this illegal (IA64) -
|
||
|
|
* for the others its just a little faster.
|
||
|
|
*/
|
||
|
|
free_pgd_range(&tlb, old_start, old_end, new_end,
|
||
|
|
next ? next->vm_start : USER_PGTABLES_CEILING);
|
||
|
|
}
|
||
|
|
tlb_finish_mmu(&tlb);
|
||
|
|
|
||
|
|
vma_prev(&vmi);
|
||
|
|
/* Shrink the vma to just the new range */
|
||
|
|
return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
|
||
|
|
}
|