Merge tag 'loongarch-fixes-6.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
Pull LoongArch fixes from Huacai Chen:
- Fix a Rust build error
- Fix exception/interrupt, memory management, perf event, hardware
breakpoint, kexec and KVM bugs
* tag 'loongarch-fixes-6.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
LoongArch: KVM: Fix max supported vCPUs set with EIOINTC
LoongArch: KVM: Skip PMU checking on vCPU context switch
LoongArch: KVM: Restore guest PMU if it is enabled
LoongArch: KVM: Add delay until timer interrupt injected
LoongArch: KVM: Set page with write attribute if dirty track disabled
LoongArch: kexec: Print out debugging message if required
LoongArch: kexec: Initialize the kexec_buf structure
LoongArch: Use correct accessor to read FWPC/MWPC
LoongArch: Refine the init_hw_perf_events() function
LoongArch: Remove __GFP_HIGHMEM masking in pud_alloc_one()
LoongArch: Let {pte,pmd}_modify() record the status of _PAGE_DIRTY
LoongArch: Consolidate max_pfn & max_low_pfn calculation
LoongArch: Consolidate early_ioremap()/ioremap_prot()
LoongArch: Use physical addresses for CSR_MERRENTRY/CSR_TLBRENTRY
LoongArch: Clarify 3 MSG interrupt features
rust: Add -fno-isolate-erroneous-paths-dereference to bindgen_skip_c_flags
@@ -67,6 +67,8 @@
 #define cpu_has_hypervisor	cpu_opt(LOONGARCH_CPU_HYPERVISOR)
 #define cpu_has_ptw		cpu_opt(LOONGARCH_CPU_PTW)
 #define cpu_has_lspw		cpu_opt(LOONGARCH_CPU_LSPW)
+#define cpu_has_msgint		cpu_opt(LOONGARCH_CPU_MSGINT)
 #define cpu_has_avecint		cpu_opt(LOONGARCH_CPU_AVECINT)
+#define cpu_has_redirectint	cpu_opt(LOONGARCH_CPU_REDIRECTINT)
 
 #endif /* __ASM_CPU_FEATURES_H */
@@ -101,7 +101,9 @@ enum cpu_type_enum {
 #define CPU_FEATURE_HYPERVISOR		26	/* CPU has hypervisor (running in VM) */
 #define CPU_FEATURE_PTW			27	/* CPU has hardware page table walker */
 #define CPU_FEATURE_LSPW		28	/* CPU has LSPW (lddir/ldpte instructions) */
-#define CPU_FEATURE_AVECINT		29	/* CPU has AVEC interrupt */
+#define CPU_FEATURE_MSGINT		29	/* CPU has MSG interrupt */
+#define CPU_FEATURE_AVECINT		30	/* CPU has AVEC interrupt */
+#define CPU_FEATURE_REDIRECTINT		31	/* CPU has interrupt remapping */
 
 #define LOONGARCH_CPU_CPUCFG		BIT_ULL(CPU_FEATURE_CPUCFG)
 #define LOONGARCH_CPU_LAM		BIT_ULL(CPU_FEATURE_LAM)
@@ -132,6 +134,8 @@ enum cpu_type_enum {
 #define LOONGARCH_CPU_HYPERVISOR	BIT_ULL(CPU_FEATURE_HYPERVISOR)
 #define LOONGARCH_CPU_PTW		BIT_ULL(CPU_FEATURE_PTW)
 #define LOONGARCH_CPU_LSPW		BIT_ULL(CPU_FEATURE_LSPW)
+#define LOONGARCH_CPU_MSGINT		BIT_ULL(CPU_FEATURE_MSGINT)
 #define LOONGARCH_CPU_AVECINT		BIT_ULL(CPU_FEATURE_AVECINT)
+#define LOONGARCH_CPU_REDIRECTINT	BIT_ULL(CPU_FEATURE_REDIRECTINT)
 
 #endif /* _ASM_CPU_H */
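Note on the renumbering above: each CPU_FEATURE_* value indexes one bit of the 64-bit c->options word, so MSGINT and AVECINT could not both use index 29; the hunk moves AVECINT to 30 and adds REDIRECTINT as 31. A standalone C sketch of the bitmap mechanics (only BIT_ULL and the indices are taken from the hunk, the rest is illustrative):

#include <assert.h>
#include <stdint.h>

#define BIT_ULL(n)		(1ULL << (n))
#define CPU_FEATURE_MSGINT	29
#define CPU_FEATURE_AVECINT	30

int main(void)
{
	uint64_t options = 0;

	options |= BIT_ULL(CPU_FEATURE_MSGINT);	/* probe found MSG interrupts */
	/* Distinct indices keep the two features independently testable. */
	assert(!(options & BIT_ULL(CPU_FEATURE_AVECINT)));
	return 0;
}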
@@ -134,13 +134,13 @@ static inline void hw_breakpoint_thread_switch(struct task_struct *next)
 /* Determine number of BRP registers available. */
 static inline int get_num_brps(void)
 {
-	return csr_read64(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM;
+	return csr_read32(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM;
 }
 
 /* Determine number of WRP registers available. */
 static inline int get_num_wrps(void)
 {
-	return csr_read64(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM;
+	return csr_read32(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM;
 }
 
 #endif /* __KERNEL__ */
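Note: per the commit "Use correct accessor to read FWPC/MWPC", these CSRs are 32-bit wide, so csr_read32() is the matching accessor. A userspace sketch of the field extraction (the CSR_FWPC_NUM mask value is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define CSR_FWPC_NUM	0x3fu	/* assumed low-bits mask holding the count */

static int num_brps(uint32_t fwpc)
{
	return fwpc & CSR_FWPC_NUM;	/* number of fetch watchpoints */
}

int main(void)
{
	printf("%d\n", num_brps(0x808));	/* prints 8 for this sample value */
	return 0;
}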
@@ -14,7 +14,7 @@
 #include <asm/pgtable-bits.h>
 #include <asm/string.h>
 
-extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
+extern void __init __iomem *early_ioremap(phys_addr_t phys_addr, unsigned long size);
 extern void __init early_iounmap(void __iomem *addr, unsigned long size);
 
 #define early_memremap early_ioremap
@@ -25,6 +25,9 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size);
 static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
 					 pgprot_t prot)
 {
+	if (offset > TO_PHYS_MASK)
+		return NULL;
+
 	switch (pgprot_val(prot) & _CACHE_MASK) {
 	case _CACHE_CC:
 		return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
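Note: the added guard makes ioremap_prot() refuse physical addresses beyond the range the CACHE_BASE/UNCACHE_BASE windows can address. A sketch of the check in isolation (the 48-bit physical address space is an assumption):

#include <stdint.h>
#include <stdio.h>

#define TO_PHYS_MASK	((1ULL << 48) - 1)	/* assumed PA-space mask */

static int ioremap_in_range(uint64_t phys)
{
	return phys <= TO_PHYS_MASK;	/* mirrors the new early return */
}

int main(void)
{
	printf("%d %d\n", ioremap_in_range(0x1000),
	       ioremap_in_range(1ULL << 50));	/* prints "1 0" */
	return 0;
}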
@@ -128,6 +128,7 @@
 #define  CPUCFG6_PMNUM			GENMASK(7, 4)
 #define  CPUCFG6_PMNUM_SHIFT		4
 #define  CPUCFG6_PMBITS			GENMASK(13, 8)
+#define  CPUCFG6_PMBITS_SHIFT		8
 #define  CPUCFG6_UPM			BIT(14)
 
 #define LOONGARCH_CPUCFG16		0x10
@@ -1137,6 +1138,7 @@
 #define  IOCSRF_FLATMODE		BIT_ULL(10)
 #define  IOCSRF_VM			BIT_ULL(11)
 #define  IOCSRF_AVEC			BIT_ULL(15)
+#define  IOCSRF_REDIRECT		BIT_ULL(16)
 
 #define LOONGARCH_IOCSR_VENDOR		0x10
 
@@ -88,7 +88,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pud_t *pud;
-	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
+	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, 0);
 
 	if (!ptdesc)
 		return NULL;
@@ -424,6 +424,9 @@ static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
+	if (pte_val(pte) & _PAGE_DIRTY)
+		pte_val(pte) |= _PAGE_MODIFIED;
+
 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
 		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
 }
@@ -547,9 +550,11 @@ static inline struct page *pmd_page(pmd_t pmd)
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
-		       (pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
-	return pmd;
+	if (pmd_val(pmd) & _PAGE_DIRTY)
+		pmd_val(pmd) |= _PAGE_MODIFIED;
+
+	return __pmd((pmd_val(pmd) & _HPAGE_CHG_MASK) |
+		     (pgprot_val(newprot) & ~_HPAGE_CHG_MASK));
 }
 
 static inline pmd_t pmd_mkinvalid(pmd_t pmd)
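Note: {pte,pmd}_modify() keep only the bits in the _CHG_MASK, so a hardware _PAGE_DIRTY bit that was never latched into the software _PAGE_MODIFIED bit would be lost across a protection change. A userspace sketch (bit positions and masks are assumptions, the control flow mirrors the hunk):

#include <assert.h>
#include <stdint.h>

#define _PAGE_DIRTY	(1u << 1)	/* assumed hardware dirty bit */
#define _PAGE_MODIFIED	(1u << 2)	/* assumed software dirty bit */
#define _PAGE_CHG_MASK	(_PAGE_MODIFIED | 0xfffff000u)	/* PFN + kept bits */

static uint32_t pte_modify(uint32_t pte, uint32_t newprot)
{
	if (pte & _PAGE_DIRTY)
		pte |= _PAGE_MODIFIED;	/* record dirtiness before masking */
	return (pte & _PAGE_CHG_MASK) | (newprot & ~_PAGE_CHG_MASK);
}

int main(void)
{
	uint32_t pte = 0x1000 | _PAGE_DIRTY;	/* dirty, not yet latched */

	assert(pte_modify(pte, 0) & _PAGE_MODIFIED);	/* dirtiness survives */
	return 0;
}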
@@ -157,6 +157,8 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
 		c->options |= LOONGARCH_CPU_TLB;
 	if (config & CPUCFG1_IOCSR)
 		c->options |= LOONGARCH_CPU_IOCSR;
+	if (config & CPUCFG1_MSGINT)
+		c->options |= LOONGARCH_CPU_MSGINT;
 	if (config & CPUCFG1_UAL) {
 		c->options |= LOONGARCH_CPU_UAL;
 		elf_hwcap |= HWCAP_LOONGARCH_UAL;
@@ -331,6 +333,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
 		c->options |= LOONGARCH_CPU_EIODECODE;
 	if (config & IOCSRF_AVEC)
 		c->options |= LOONGARCH_CPU_AVECINT;
+	if (config & IOCSRF_REDIRECT)
+		c->options |= LOONGARCH_CPU_REDIRECTINT;
 	if (config & IOCSRF_VM)
 		c->options |= LOONGARCH_CPU_HYPERVISOR;
 }
@@ -42,7 +42,7 @@ static void *efi_kexec_load(struct kimage *image,
 {
 	int ret;
 	unsigned long text_offset, kernel_segment_number;
-	struct kexec_buf kbuf;
+	struct kexec_buf kbuf = {};
 	struct kexec_segment *kernel_segment;
 	struct loongarch_image_header *h;
 
@@ -59,7 +59,7 @@ static void *elf_kexec_load(struct kimage *image,
 	int ret;
 	unsigned long text_offset, kernel_segment_number;
 	struct elfhdr ehdr;
-	struct kexec_buf kbuf;
+	struct kexec_buf kbuf = {};
 	struct kexec_elf_info elf_info;
 	struct kexec_segment *kernel_segment;
 
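Note: both hunks (and the load_other_segments() one below) fix the same bug class: kexec_add_buffer() reads fields of the caller's kexec_buf that the caller never assigned, so an on-stack struct must start zeroed. A minimal illustration of the difference (struct and field names here are stand-ins, not the kernel's):

#include <assert.h>

struct buf {
	void *image;
	unsigned long mem;
	int top_down;	/* consumed by the allocator even if unset by caller */
};

int main(void)
{
	struct buf kbuf = {};	/* empty initializer zeroes every field */

	assert(kbuf.mem == 0 && kbuf.top_down == 0);
	return 0;
}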
@@ -39,34 +39,12 @@ static unsigned long systable_ptr;
 static unsigned long start_addr;
 static unsigned long first_ind_entry;
 
-static void kexec_image_info(const struct kimage *kimage)
-{
-	unsigned long i;
-
-	pr_debug("kexec kimage info:\n");
-	pr_debug("\ttype:        %d\n", kimage->type);
-	pr_debug("\tstart:       %lx\n", kimage->start);
-	pr_debug("\thead:        %lx\n", kimage->head);
-	pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);
-
-	for (i = 0; i < kimage->nr_segments; i++) {
-		pr_debug("\t    segment[%lu]: %016lx - %016lx", i,
-			 kimage->segment[i].mem,
-			 kimage->segment[i].mem + kimage->segment[i].memsz);
-		pr_debug("\t\t0x%lx bytes, %lu pages\n",
-			 (unsigned long)kimage->segment[i].memsz,
-			 (unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
-	}
-}
-
 int machine_kexec_prepare(struct kimage *kimage)
 {
 	int i;
 	char *bootloader = "kexec";
 	void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
 
-	kexec_image_info(kimage);
-
 	kimage->arch.efi_boot = fw_arg0;
 	kimage->arch.systable_ptr = fw_arg2;
 
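Note: the removed kexec_image_info() dumped the image layout through pr_debug(), which depends on dynamic debug. Per the commit title, the intent is to print only when kexec debugging is actually requested, the pattern the generic kexec_dprintk() helper implements. A standalone sketch of that gating (an illustration, not the kernel's helper):

#include <stdio.h>

static int kexec_debug;	/* set when userspace asks for kexec debugging */

#define kexec_dprintk(fmt, ...) do { \
	if (kexec_debug) \
		printf(fmt, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	kexec_dprintk("nr_segments: %d\n", 2);	/* silent by default */
	kexec_debug = 1;
	kexec_dprintk("nr_segments: %d\n", 2);	/* now printed */
	return 0;
}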
@@ -143,7 +143,7 @@ int load_other_segments(struct kimage *image,
 	unsigned long initrd_load_addr = 0;
 	unsigned long orig_segments = image->nr_segments;
 	char *modified_cmdline = NULL;
-	struct kexec_buf kbuf;
+	struct kexec_buf kbuf = {};
 
 	kbuf.image = image;
 	/* Don't allocate anything below the kernel */
@@ -13,7 +13,7 @@
 void __init memblock_init(void)
 {
 	u32 mem_type;
-	u64 mem_start, mem_end, mem_size;
+	u64 mem_start, mem_size;
 	efi_memory_desc_t *md;
 
 	/* Parse memory information */
@@ -21,7 +21,6 @@ void __init memblock_init(void)
 		mem_type = md->type;
 		mem_start = md->phys_addr;
 		mem_size = md->num_pages << EFI_PAGE_SHIFT;
-		mem_end = mem_start + mem_size;
 
 		switch (mem_type) {
 		case EFI_LOADER_CODE:
@@ -31,8 +30,6 @@ void __init memblock_init(void)
 		case EFI_PERSISTENT_MEMORY:
 		case EFI_CONVENTIONAL_MEMORY:
 			memblock_add(mem_start, mem_size);
-			if (max_low_pfn < (mem_end >> PAGE_SHIFT))
-				max_low_pfn = mem_end >> PAGE_SHIFT;
 			break;
 		case EFI_PAL_CODE:
 		case EFI_UNUSABLE_MEMORY:
@@ -49,6 +46,8 @@ void __init memblock_init(void)
 		}
 	}
 
+	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	max_low_pfn = min(PFN_DOWN(HIGHMEM_START), max_pfn);
 	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
 
 	/* Reserve the first 2MB */
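Note: this is the consolidated rule the series title refers to: max_pfn comes from the top of DRAM and max_low_pfn is clamped at HIGHMEM_START, replacing the per-EFI-range accumulation deleted above. A numeric sketch (the page size and HIGHMEM_START value are assumptions):

#include <stdio.h>

#define PAGE_SHIFT	14			/* 16KB pages, the LoongArch default */
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define HIGHMEM_START	(1ULL << 34)		/* assumed boundary */

int main(void)
{
	unsigned long long max_pfn, max_low_pfn;
	unsigned long long end_of_dram = 1ULL << 35;	/* sample: 32GB of RAM */

	max_pfn = PFN_DOWN(end_of_dram);
	max_low_pfn = PFN_DOWN(HIGHMEM_START) < max_pfn ?
		      PFN_DOWN(HIGHMEM_START) : max_pfn;
	printf("max_pfn=%llu max_low_pfn=%llu\n", max_pfn, max_low_pfn);
	return 0;
}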
@@ -272,7 +272,8 @@ int __init init_numa_memory(void)
 		node_mem_init(node);
 		node_set_online(node);
 	}
-	max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	max_low_pfn = min(PFN_DOWN(HIGHMEM_START), max_pfn);
 
 	setup_nr_node_ids();
 	loongson_sysconf.nr_nodes = nr_node_ids;
@@ -283,26 +284,6 @@ int __init init_numa_memory(void)
 
 #endif
 
-void __init paging_init(void)
-{
-	unsigned int node;
-	unsigned long zones_size[MAX_NR_ZONES] = {0, };
-
-	for_each_online_node(node) {
-		unsigned long start_pfn, end_pfn;
-
-		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-
-		if (end_pfn > max_low_pfn)
-			max_low_pfn = end_pfn;
-	}
-#ifdef CONFIG_ZONE_DMA32
-	zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
-#endif
-	zones_size[ZONE_NORMAL] = max_low_pfn;
-	free_area_init(zones_size);
-}
-
 int pcibus_to_node(struct pci_bus *bus)
 {
 	return dev_to_node(&bus->dev);
@@ -845,13 +845,14 @@ static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config
 
 static int __init init_hw_perf_events(void)
 {
-	int counters;
+	int bits, counters;
 
 	if (!cpu_has_pmp)
 		return -ENODEV;
 
 	pr_info("Performance counters: ");
-	counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> 4) + 1;
+	bits = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMBITS) >> CPUCFG6_PMBITS_SHIFT) + 1;
+	counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT) + 1;
 
 	loongarch_pmu.num_counters = counters;
 	loongarch_pmu.max_period = (1ULL << 63) - 1;
@@ -867,7 +868,7 @@ static int __init init_hw_perf_events(void)
 	on_each_cpu(reset_counters, NULL, 1);
 
 	pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n",
-		loongarch_pmu.name, counters, 64);
+		loongarch_pmu.name, counters, bits);
 
 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 
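Note: the refinement reads the counter width from CPUCFG6.PMBITS instead of hard-coding 64. A userspace sketch of the decode, reusing the masks from the header hunk above with a sample register value:

#include <stdio.h>

#define GENMASK(h, l)		(((1u << ((h) - (l) + 1)) - 1) << (l))
#define CPUCFG6_PMNUM		GENMASK(7, 4)
#define CPUCFG6_PMNUM_SHIFT	4
#define CPUCFG6_PMBITS		GENMASK(13, 8)
#define CPUCFG6_PMBITS_SHIFT	8

int main(void)
{
	unsigned int cfg6 = 0x3f31;	/* sample: PMBITS field = 63, PMNUM field = 3 */
	int counters = ((cfg6 & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT) + 1;
	int bits = ((cfg6 & CPUCFG6_PMBITS) >> CPUCFG6_PMBITS_SHIFT) + 1;

	printf("%d %d-bit counters\n", counters, bits);	/* "4 64-bit counters" */
	return 0;
}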
@@ -294,8 +294,6 @@ static void __init fdt_setup(void)
 
 	early_init_dt_scan(fdt_pointer, __pa(fdt_pointer));
 	early_init_fdt_reserve_self();
-
-	max_low_pfn = PFN_PHYS(memblock_end_of_DRAM());
 #endif
 }
 
@@ -390,7 +388,8 @@ static void __init check_kernel_sections_mem(void)
 static void __init arch_mem_init(char **cmdline_p)
 {
 	/* Recalculate max_low_pfn for "mem=xxx" */
-	max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	max_low_pfn = min(PFN_DOWN(HIGHMEM_START), max_pfn);
 
 	if (usermem)
 		pr_info("User-defined physical RAM map overwrite\n");
@@ -1131,8 +1131,8 @@ static void configure_exception_vector(void)
 	tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
 
 	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
-	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
-	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
+	csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY);
+	csr_write64(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY);
 }
 
 void per_cpu_trap_init(int cpu)
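Note: per the commit title, CSR_MERRENTRY/CSR_TLBRENTRY must hold physical addresses, since machine-error and TLB-refill exceptions are taken with address translation unavailable; __pa() strips the direct-map offset. A sketch of that conversion (the window base is an assumption for illustration):

#include <stdio.h>

#define CACHE_BASE	0x9000000000000000ULL	/* assumed DMW base */
#define __pa(x)		((unsigned long long)(x) - CACHE_BASE)

int main(void)
{
	unsigned long long eentry = CACHE_BASE + 0x200000;	/* sample VA */

	printf("0x%llx\n", __pa(eentry));	/* 0x200000: the physical address */
	return 0;
}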
@@ -439,7 +439,7 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
 	spin_lock_irqsave(&s->lock, flags);
 	switch (type) {
 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
-		if (val >= EIOINTC_ROUTE_MAX_VCPUS)
+		if (val > EIOINTC_ROUTE_MAX_VCPUS)
 			ret = -EINVAL;
 		else
 			s->num_cpu = val;
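Note: val carries a vCPU count rather than a vCPU index, so a value equal to EIOINTC_ROUTE_MAX_VCPUS is legal and only larger counts should be rejected; the old ">=" refused the maximum configuration. Standalone illustration (the constant's value is an assumption):

#include <assert.h>

#define EIOINTC_ROUTE_MAX_VCPUS	256	/* assumed value */

static int set_num_cpu(unsigned long val, unsigned int *num_cpu)
{
	if (val > EIOINTC_ROUTE_MAX_VCPUS)	/* was >=, off by one */
		return -1;
	*num_cpu = val;
	return 0;
}

int main(void)
{
	unsigned int n;

	assert(set_num_cpu(EIOINTC_ROUTE_MAX_VCPUS, &n) == 0 && n == 256);
	assert(set_num_cpu(EIOINTC_ROUTE_MAX_VCPUS + 1, &n) == -1);
	return 0;
}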
@@ -857,7 +857,7 @@ retry:
 
 	if (writeable) {
 		prot_bits = kvm_pte_mkwriteable(prot_bits);
-		if (write)
+		if (write || !kvm_slot_dirty_track_enabled(memslot))
 			prot_bits = kvm_pte_mkdirty(prot_bits);
 	}
 
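Note: when dirty logging is disabled on the memslot there is nothing to learn from trapping the guest's first write, so the page can be mapped dirty (and hence truly writable) even on a read fault, avoiding a spurious write fault later. A sketch of the decision with stand-in bit names:

#include <stdbool.h>
#include <stdio.h>

#define PROT_WRITABLE	(1u << 0)	/* stand-in for kvm_pte_mkwriteable() */
#define PROT_DIRTY	(1u << 1)	/* stand-in for kvm_pte_mkdirty() */

static unsigned int make_prot(bool writeable, bool write, bool dirty_track)
{
	unsigned int prot = 0;

	if (writeable) {
		prot |= PROT_WRITABLE;
		if (write || !dirty_track)
			prot |= PROT_DIRTY;
	}
	return prot;
}

int main(void)
{
	/* read fault on a slot without dirty logging: map dirty+writable */
	printf("0x%x\n", make_prot(true, false, false));	/* prints 0x3 */
	return 0;
}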
@@ -4,6 +4,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <asm/delay.h>
 #include <asm/kvm_csr.h>
 #include <asm/kvm_vcpu.h>
 
@@ -95,6 +96,7 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
 	 * and set CSR TVAL with -1
 	 */
 	write_gcsr_timertick(0);
+	__delay(2); /* Wait cycles until timer interrupt injected */
 
 	/*
 	 * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear
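Note: writing the tick register raises the timer interrupt asynchronously, so without a short wait the TINTCLR write further down could execute before the stale interrupt is ever injected, leaving it to fire later inside the guest. A stand-in sketch of the helper (the in-tree __delay() burns stable-counter cycles):

static inline void delay_cycles(unsigned long n)
{
	while (n--)
		__asm__ __volatile__("" ::: "memory");	/* compiler barrier */
}

int main(void)
{
	delay_cycles(2);	/* matches the __delay(2) added above */
	return 0;
}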
@@ -132,6 +132,9 @@ static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
 	 * Clear KVM_LARCH_PMU if the guest is not using PMU CSRs when
 	 * exiting the guest, so that the next time trap into the guest.
 	 * We don't need to deal with PMU CSRs contexts.
+	 *
+	 * Otherwise set the request bit KVM_REQ_PMU to restore guest PMU
+	 * before entering guest VM
 	 */
 	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
@@ -139,16 +142,12 @@ static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
 	if (!(val & KVM_PMU_EVENT_ENABLED))
 		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
+	else
+		kvm_make_request(KVM_REQ_PMU, vcpu);
 
 	kvm_restore_host_pmu(vcpu);
 }
 
-static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
-{
-	if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
-		kvm_make_request(KVM_REQ_PMU, vcpu);
-}
-
 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
 {
 	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
@@ -299,7 +298,10 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
 
 	if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
-		kvm_lose_pmu(vcpu);
+		if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
+			kvm_lose_pmu(vcpu);
+			kvm_make_request(KVM_REQ_PMU, vcpu);
+		}
 		/* make sure the vcpu mode has been written */
 		smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
 		local_irq_enable();
@@ -1604,9 +1606,6 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	kvm_restore_timer(vcpu);
 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
-	/* Restore hardware PMU CSRs */
-	kvm_restore_pmu(vcpu);
-
 	/* Don't bother restoring registers multiple times unless necessary */
 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
 		return 0;
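Note: the PMU hunks replace the eager restore in _kvm_vcpu_load() with a request bit: whoever tears down the guest PMU state sets KVM_REQ_PMU, and kvm_check_pmu() consumes it right before guest entry. A standalone sketch of that producer/consumer pattern (names are stand-ins, not KVM's API):

#include <assert.h>

#define REQ_PMU	(1UL << 0)

static unsigned long requests;

static void make_request(unsigned long req)
{
	requests |= req;	/* e.g. from kvm_lose_pmu() */
}

static int check_request(unsigned long req)
{
	if (!(requests & req))
		return 0;
	requests &= ~req;	/* consumed exactly once */
	return 1;
}

int main(void)
{
	make_request(REQ_PMU);			/* guest PMU state was dropped */
	assert(check_request(REQ_PMU));		/* restore before guest entry */
	assert(!check_request(REQ_PMU));	/* no duplicate restore */
	return 0;
}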
@@ -60,7 +60,6 @@ int __ref page_is_ram(unsigned long pfn)
 	return memblock_is_memory(addr) && !memblock_is_reserved(addr);
 }
 
-#ifndef CONFIG_NUMA
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -72,7 +71,6 @@ void __init paging_init(void)
 
 	free_area_init(max_zone_pfns);
 }
-#endif /* !CONFIG_NUMA */
 
 void __ref free_initmem(void)
 {
@@ -6,7 +6,7 @@
 #include <asm/io.h>
 #include <asm-generic/early_ioremap.h>
 
-void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
+void __init __iomem *early_ioremap(phys_addr_t phys_addr, unsigned long size)
 {
 	return ((void __iomem *)TO_CACHE(phys_addr));
 }
@@ -298,7 +298,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
 	-fno-inline-functions-called-once -fsanitize=bounds-strict \
 	-fstrict-flex-arrays=% -fmin-function-alignment=% \
 	-fzero-init-padding-bits=% -mno-fdpic \
-	--param=% --param asan-%
+	--param=% --param asan-% -fno-isolate-erroneous-paths-dereference
 
 # Derived from `scripts/Makefile.clang`.
 BINDGEN_TARGET_x86	:= x86_64-linux-gnu