Pull KVM updates from Gleb Natapov: "The highlights of the release are nested EPT and pv-ticketlocks support (hypervisor part, guest part, which is most of the code, goes through tip tree). Apart from that there are many fixes for all arches" Fix up semantic conflicts as discussed in the pull request thread. * 'next' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (88 commits) ARM: KVM: Add newlines to panic strings ARM: KVM: Work around older compiler bug ARM: KVM: Simplify tracepoint text ARM: KVM: Fix kvm_set_pte assignment ARM: KVM: vgic: Bump VGIC_NR_IRQS to 256 ARM: KVM: Bugfix: vgic_bytemap_get_reg per cpu regs ARM: KVM: vgic: fix GICD_ICFGRn access ARM: KVM: vgic: simplify vgic_get_target_reg KVM: MMU: remove unused parameter KVM: PPC: Book3S PR: Rework kvmppc_mmu_book3s_64_xlate() KVM: PPC: Book3S PR: Make instruction fetch fallback work for system calls KVM: PPC: Book3S PR: Don't corrupt guest state when kernel uses VMX KVM: x86: update masterclock when kvmclock_offset is calculated (v2) KVM: PPC: Book3S: Fix compile error in XICS emulation KVM: PPC: Book3S PR: return appropriate error when allocation fails arch: powerpc: kvm: add signed type cast for comparation KVM: x86: add comments where MMIO does not return to the emulator KVM: vmx: count exits to userspace during invalid guest emulation KVM: rename __kvm_io_bus_sort_cmp to kvm_io_bus_cmp kvm: optimize away THP checks in kvm_is_mmio_pfn() ...
88 lines
2.3 KiB
C
88 lines
2.3 KiB
C
/*
|
|
* S390 version
|
|
*
|
|
* Derived from "include/asm-i386/mmu_context.h"
|
|
*/
|
|
|
|
#ifndef __S390_MMU_CONTEXT_H
|
|
#define __S390_MMU_CONTEXT_H
|
|
|
|
#include <asm/pgalloc.h>
|
|
#include <asm/uaccess.h>
|
|
#include <asm/tlbflush.h>
|
|
#include <asm/ctl_reg.h>
|
|
|
|
/*
 * Initialize the architecture part of a freshly created mm_struct.
 *
 * Resets the attach count and the deferred-flush flag, selects the
 * address-space-control-element (ASCE) bits — region-third-table type
 * on 64-bit builds — clears the pgste flag, sets the address space
 * limit and initializes the top-level (crst) page table.
 *
 * @tsk: task the mm is created for (unused here)
 * @mm:  mm_struct to initialize
 *
 * Always returns 0; this path cannot fail.
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* No CPU has this mm attached yet. */
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
	/* 64-bit starts out with a region-third table. */
	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
	mm->context.has_pgste = 0;
	mm->context.asce_limit = STACK_TOP_MAX;
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}
|
/* No architecture-specific teardown is needed when an mm goes away. */
#define destroy_context(mm)             do { } while (0)
|
/*
 * Load-control mnemonic matching the build: "lctl" loads 32-bit
 * control registers (31-bit kernel), "lctlg" loads 64-bit ones.
 */
#ifndef CONFIG_64BIT
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif
|
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
|
|
{
|
|
pgd_t *pgd = mm->pgd;
|
|
|
|
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
|
|
if (s390_user_mode != HOME_SPACE_MODE) {
|
|
/* Load primary space page table origin. */
|
|
asm volatile(LCTL_OPCODE" 1,1,%0\n"
|
|
: : "m" (S390_lowcore.user_asce) );
|
|
} else
|
|
/* Load home space page table origin. */
|
|
asm volatile(LCTL_OPCODE" 13,13,%0"
|
|
: : "m" (S390_lowcore.user_asce) );
|
|
set_fs(current->thread.mm_segment);
|
|
}
|
|
|
|
/*
 * Switch this CPU's address space from @prev to @next.
 *
 * Marks the CPU in @next's cpumask and installs @next's page tables
 * via update_mm(), then moves the attach count from @prev to @next
 * and runs any TLB flush that was deferred for @next.
 *
 * NOTE(review): @next's attach count is incremented only after
 * update_mm() has already installed its ASCE; the ordering relative
 * to the lazy-flush logic looks deliberate but is subtle — confirm
 * against __tlb_flush_mm_lazy() before reordering anything here.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	update_mm(next, tsk);
	atomic_dec(&prev->context.attach_count);
	/* The attach count must never go negative. */
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
	atomic_inc(&next->context.attach_count);
	/* Check for TLBs not flushed yet */
	__tlb_flush_mm_lazy(next);
}
|
/* Both hooks are no-ops on s390. */
#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)
|
/*
 * Activate @next for the current task; on s390 this is simply a full
 * address space switch from @prev to @next.
 */
static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
	switch_mm(prev, next, current);
}
|
/*
 * Fork-time hook: if the new mm was set up with a larger address
 * space limit than the parent's, downgrade its page table hierarchy
 * to the parent's limit (64-bit only; 31-bit has nothing to do).
 *
 * @oldmm: parent address space
 * @mm:    freshly duplicated address space
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
	if (mm->context.asce_limit > oldmm->context.asce_limit)
		crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}
|
/* Intentionally empty: s390 needs no work when an mm's mappings are torn down. */
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
|
#endif /* __S390_MMU_CONTEXT_H */
|