* More selftests
* Improved KVM_S390_MEM_OP ioctl input checking
* Add kvm_valid_regs and kvm_dirty_regs invalid bit checking
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJdb8MuAAoJEONU5rjiOLn4w80P/0oFvdohxQuk2KAVxs9u4I2A
 lMcoer637WukI8K5r9oBacofzG+6ODlv75VOrm4DXVmluaLMD8X5XbKmIXKK2k9Q
 YrkdUo/h+g+O9e6oLcawhkDr+BrTnAoBt9ox1W2SEKQjMe1hbgacrnogktYc7WPY
 diPSovQ3g53BX0W/OXw4ym5C0Qeyseegewl1Vc110fXKPH0eMlnXbWdkHpe9tNxV
 DjtikIC6/NNHL4shwDFZtxao0jUpjlOMASdfTJpNk6g+16XFpUJwm0Frca8qplzt
 4HJyuWPeZeyMKzCPOqJbqvwzxMmAoft+fcBeX4YhtqMerOVIZ0wM7bcf1zm99jbq
 PYMW9KXIdYEdljnQBgrK7vdZ91z0KUKUa1QkxXbPPfzD2nDo3f/hOiBcpyP8cGHO
 DZ10rkv6sNG6Y5COVDD0HMxsFh3fxDPjvHvpsU/77bS/JNHBzvcRNhafzr20en6g
 PAuBqkjWFbGbPwdINN01v0LDiHTzsZ8Z2mzv02+1UYGTOxDopbDZyB6l5Nbi51lE
 fxJKHiyqHjEO4eGzhL7vc+Cl1w/k6yvIoprM2sV+gTXdHgwh8GxzNomhRwkunXlp
 2hvCFS9XyD7M89T09hhHkDaSDP0hWcCaAp00ZuBFLRKmXJYz+Im7wqmEwRuZwOhV
 P/MiQjOnCDQ/+qW5VPgp
 =gYMG
 -----END PGP SIGNATURE-----

Merge tag 'kvm-s390-next-5.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

* More selftests
* Improved KVM_S390_MEM_OP ioctl input checking
* Add kvm_valid_regs and kvm_dirty_regs invalid bit checking
Paolo Bonzini 2019-09-11 18:06:15 +02:00
commit 17a81bdb4e
12 changed files with 503 additions and 185 deletions


@@ -3092,12 +3092,14 @@ This exception is also raised directly at the corresponding VCPU if the
flag KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in the "flags" field.
The start address of the memory region has to be specified in the "gaddr"
field, and the length of the region in the "size" field. "buf" is the buffer
supplied by the userspace application where the read data should be written
to for KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written
is stored for a KVM_S390_MEMOP_LOGICAL_WRITE. "buf" is unused and can be NULL
when KVM_S390_MEMOP_F_CHECK_ONLY is specified. "ar" designates the access
register number to be used.
field, and the length of the region in the "size" field (which must not
be 0). The maximum value for "size" can be obtained by checking the
KVM_CAP_S390_MEM_OP capability. "buf" is the buffer supplied by the
userspace application where the read data should be written to for
KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written is
stored for a KVM_S390_MEMOP_LOGICAL_WRITE. When KVM_S390_MEMOP_F_CHECK_ONLY
is specified, "buf" is unused and can be NULL. "ar" designates the access
register number to be used; the valid range is 0..15.
The "reserved" field is meant for future extensions. It is not used by
KVM with the currently defined set of flags.
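The field layout described above maps directly onto how a userspace VMM drives the ioctl. A minimal sketch, assuming an already created vcpu file descriptor "vcpu_fd" (everything else comes from the uapi header and the field descriptions above):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read "size" bytes starting at guest logical address "gaddr" into "buf". */
static int read_guest_logical(int vcpu_fd, uint64_t gaddr,
			      void *buf, uint32_t size)
{
	struct kvm_s390_mem_op ksmo;

	memset(&ksmo, 0, sizeof(ksmo));	/* leaves "flags" and "reserved" zeroed */
	ksmo.gaddr = gaddr;		/* start of the guest memory region */
	ksmo.size  = size;		/* > 0, at most KVM_CAP_S390_MEM_OP's value */
	ksmo.op    = KVM_S390_MEMOP_LOGICAL_READ;
	ksmo.buf   = (uintptr_t)buf;	/* userspace destination buffer */
	ksmo.ar    = 0;			/* access register 0..15 */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &ksmo);
}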


@@ -231,6 +231,12 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_GSCB (1UL << 9)
#define KVM_SYNC_BPBC (1UL << 10)
#define KVM_SYNC_ETOKEN (1UL << 11)
#define KVM_SYNC_S390_VALID_FIELDS \
(KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \
KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \
KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN)
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
#define SDNXL (1UL << SDNXC)


@@ -3998,6 +3998,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (kvm_run->immediate_exit)
return -EINTR;
if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
return -EINVAL;
vcpu_load(vcpu);
if (guestdbg_exit_pending(vcpu)) {
@@ -4255,7 +4259,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
| KVM_S390_MEMOP_F_CHECK_ONLY;
if (mop->flags & ~supported_flags)
if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
return -EINVAL;
if (mop->size > MEM_OP_MAX_SIZE)
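From userspace, the new check in kvm_arch_vcpu_ioctl_run() is visible as KVM_RUN failing with EINVAL whenever an undefined bit is set in kvm_valid_regs or kvm_dirty_regs. A hedged sketch (not part of the diff); "vcpu_fd" and the mmap'ed "run" structure of an s390 vcpu are assumed to exist already:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns 1 if KVM rejects an undefined sync-regs bit, 0 otherwise. */
static int probe_sync_regs_check(int vcpu_fd, struct kvm_run *run)
{
	int rejected;

	run->kvm_valid_regs = 1ULL << 63;	/* not a KVM_SYNC_* bit on s390 */
	rejected = ioctl(vcpu_fd, KVM_RUN, 0) == -1 && errno == EINVAL;
	run->kvm_valid_regs = 0;		/* restore a sane request */

	return rejected;
}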


@@ -7,10 +7,10 @@ top_srcdir = ../../../..
KSFT_KHDR_INSTALL := 1
UNAME_M := $(shell uname -m)
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
LIBKVM_aarch64 = lib/aarch64/processor.c
LIBKVM_s390x = lib/s390x/processor.c
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/ucall.c
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
@@ -32,7 +32,9 @@ TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_s390x = s390x/memop
TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))


@@ -26,8 +26,8 @@
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
/* Default guest test memory offset, 1G */
#define DEFAULT_GUEST_TEST_MEM 0x40000000
/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM 0xc0000000
/* How many pages to dirty for each guest loop */
#define TEST_PAGES_PER_LOOP 1024
@@ -38,6 +38,27 @@
/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL 10UL
/* Dirty bitmaps are always little endian, so we need to swap on big endian */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
# define test_bit_le(nr, addr) \
test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define set_bit_le(nr, addr) \
set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define clear_bit_le(nr, addr) \
clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_set_bit_le(nr, addr) \
test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_clear_bit_le(nr, addr) \
test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le test_bit
# define set_bit_le set_bit
# define clear_bit_le clear_bit
# define test_and_set_bit_le test_and_set_bit
# define test_and_clear_bit_le test_and_clear_bit
#endif
/*
* Guest/Host shared variables. Ensure addr_gva2hva() and/or
* sync_global_to/from_guest() are used when accessing from
@@ -69,11 +90,23 @@ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
*/
static void guest_code(void)
{
uint64_t addr;
int i;
/*
* On s390x, all pages of a 1M segment are initially marked as dirty
* when a page of the segment is written to for the very first time.
* To compensate this specialty in this test, we need to touch all
* pages during the first iteration.
*/
for (i = 0; i < guest_num_pages; i++) {
addr = guest_test_virt_mem + i * guest_page_size;
*(uint64_t *)addr = READ_ONCE(iteration);
}
while (true) {
for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
uint64_t addr = guest_test_virt_mem;
addr = guest_test_virt_mem;
addr += (READ_ONCE(random_array[i]) % guest_num_pages)
* guest_page_size;
addr &= ~(host_page_size - 1);
@@ -158,15 +191,15 @@ static void vm_dirty_log_verify(unsigned long *bmap)
value_ptr = host_test_mem + page * host_page_size;
/* If this is a special page that we were tracking... */
if (test_and_clear_bit(page, host_bmap_track)) {
if (test_and_clear_bit_le(page, host_bmap_track)) {
host_track_next_count++;
TEST_ASSERT(test_bit(page, bmap),
TEST_ASSERT(test_bit_le(page, bmap),
"Page %"PRIu64" should have its dirty bit "
"set in this iteration but it is missing",
page);
}
if (test_bit(page, bmap)) {
if (test_bit_le(page, bmap)) {
host_dirty_count++;
/*
* If the bit is set, the value written onto
@@ -209,7 +242,7 @@ static void vm_dirty_log_verify(unsigned long *bmap)
* should report its dirtyness in the
* next run
*/
set_bit(page, host_bmap_track);
set_bit_le(page, host_bmap_track);
}
}
}
@@ -293,6 +326,10 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
* case where the size is not aligned to 64 pages.
*/
guest_num_pages = (1ul << (30 - guest_page_shift)) + 16;
#ifdef __s390x__
/* Round up to multiple of 1M (segment size) */
guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
#endif
host_page_size = getpagesize();
host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
!!((guest_num_pages * guest_page_size) % host_page_size);
@@ -304,6 +341,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
guest_test_phys_mem = phys_offset;
}
#ifdef __s390x__
/* Align to 1M (segment size) */
guest_test_phys_mem &= ~((1 << 20) - 1);
#endif
DEBUG("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
bmap = bitmap_alloc(host_num_pages);
@@ -337,7 +379,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
#endif
#ifdef __aarch64__
ucall_init(vm, UCALL_MMIO, NULL);
ucall_init(vm, NULL);
#endif
/* Export the shared variables to the guest */
@@ -454,6 +496,9 @@ int main(int argc, char *argv[])
vm_guest_mode_params_init(VM_MODE_P48V48_64K, true, true);
}
#endif
#ifdef __s390x__
vm_guest_mode_params_init(VM_MODE_P40V48_4K, true, true);
#endif
while ((opt = getopt(argc, argv, "hi:I:p:m:")) != -1) {
switch (opt) {

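The BITOP_LE_SWIZZLE arithmetic added earlier in this file is easier to see with concrete numbers: with 64-bit longs the constant is (64 - 1) & ~0x7 = 56, so XOR-ing a bit number with it reverses the byte index inside the long while keeping the position within the byte. On a big-endian host that is exactly the translation from the little-endian bitmap layout KVM uses for dirty logs to the native long-based bit operations. A stand-alone sketch (not from the patch) that just prints the mapping:

#include <stdio.h>

#define BITS_PER_LONG	 64				/* assume a 64-bit host */
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG - 1) & ~0x7)	/* == 56 */

int main(void)
{
	unsigned long nr;

	/*
	 * Little-endian bit 0 lives in the first byte in memory; on a
	 * big-endian long that byte holds native bits 56..63, so the
	 * swizzled number points at the same physical bit.
	 */
	for (nr = 0; nr < 64; nr += 8)
		printf("LE bit %2lu -> native big-endian bit %2lu\n",
		       nr, nr ^ BITOP_LE_SWIZZLE);
	return 0;
}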

@@ -165,12 +165,6 @@ int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
memcpy(&(g), _p, sizeof(g)); \
})
/* ucall implementation types */
typedef enum {
UCALL_PIO,
UCALL_MMIO,
} ucall_type_t;
/* Common ucalls */
enum {
UCALL_NONE,
@@ -186,7 +180,7 @@ struct ucall {
uint64_t args[UCALL_MAX_ARGS];
};
void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg);
void ucall_init(struct kvm_vm *vm, void *arg);
void ucall_uninit(struct kvm_vm *vm);
void ucall(uint64_t cmd, int nargs, ...);
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
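The declarations above are the whole ucall contract: the guest side calls ucall() (usually through helpers such as GUEST_SYNC()), and the host side turns the resulting exit back into a struct ucall with get_ucall(). A hedged usage sketch; UCALL_SYNC, UCALL_DONE, GUEST_DONE(), TEST_ASSERT() and VCPU_ID are assumed from the selftest headers (kvm_util.h / test_util.h) and the test's own defines:

static void guest_code(void)
{
	GUEST_SYNC(1);			/* arrives as uc.cmd == UCALL_SYNC */
	GUEST_DONE();			/* arrives as uc.cmd == UCALL_DONE */
}

static void run_until_done(struct kvm_vm *vm)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vm, VCPU_ID);
		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			/* uc.args[] carries the GUEST_SYNC() arguments */
			break;
		case UCALL_DONE:
			return;
		default:
			TEST_ASSERT(false, "unexpected ucall %lu", uc.cmd);
		}
	}
}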


@@ -0,0 +1,112 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ucall support. A ucall is a "hypercall to userspace".
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#include "kvm_util.h"
#include "../kvm_util_internal.h"
static vm_vaddr_t *ucall_exit_mmio_addr;
static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
{
if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
return false;
virt_pg_map(vm, gpa, gpa, 0);
ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
sync_global_to_guest(vm, ucall_exit_mmio_addr);
return true;
}
void ucall_init(struct kvm_vm *vm, void *arg)
{
vm_paddr_t gpa, start, end, step, offset;
unsigned int bits;
bool ret;
if (arg) {
gpa = (vm_paddr_t)arg;
ret = ucall_mmio_init(vm, gpa);
TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
return;
}
/*
* Find an address within the allowed physical and virtual address
* spaces, that does _not_ have a KVM memory region associated with
* it. Identity mapping an address like this allows the guest to
* access it, but as KVM doesn't know what to do with it, it
* will assume it's something userspace handles and exit with
* KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
* Here we start with a guess that the addresses around 5/8th
* of the allowed space are unmapped and then work both down and
* up from there in 1/16th allowed space sized steps.
*
* Note, we need to use VA-bits - 1 when calculating the allowed
* virtual address space for an identity mapping because the upper
* half of the virtual address space is the two's complement of the
* lower and won't match physical addresses.
*/
bits = vm->va_bits - 1;
bits = vm->pa_bits < bits ? vm->pa_bits : bits;
end = 1ul << bits;
start = end * 5 / 8;
step = end / 16;
for (offset = 0; offset < end - start; offset += step) {
if (ucall_mmio_init(vm, start - offset))
return;
if (ucall_mmio_init(vm, start + offset))
return;
}
TEST_ASSERT(false, "Can't find a ucall mmio address");
}
void ucall_uninit(struct kvm_vm *vm)
{
ucall_exit_mmio_addr = 0;
sync_global_to_guest(vm, ucall_exit_mmio_addr);
}
void ucall(uint64_t cmd, int nargs, ...)
{
struct ucall uc = {
.cmd = cmd,
};
va_list va;
int i;
nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
uc.args[i] = va_arg(va, uint64_t);
va_end(va);
*ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
}
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
{
struct kvm_run *run = vcpu_state(vm, vcpu_id);
struct ucall ucall = {};
if (run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
vm_vaddr_t gva;
TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
"Unexpected ucall exit mmio address access");
memcpy(&gva, run->mmio.data, sizeof(gva));
memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));
vcpu_run_complete_io(vm, vcpu_id);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
return ucall.cmd;
}


@@ -0,0 +1,56 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ucall support. A ucall is a "hypercall to userspace".
*
* Copyright (C) 2019 Red Hat, Inc.
*/
#include "kvm_util.h"
void ucall_init(struct kvm_vm *vm, void *arg)
{
}
void ucall_uninit(struct kvm_vm *vm)
{
}
void ucall(uint64_t cmd, int nargs, ...)
{
struct ucall uc = {
.cmd = cmd,
};
va_list va;
int i;
nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
uc.args[i] = va_arg(va, uint64_t);
va_end(va);
/* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
asm volatile ("diag 0,%0,0x501" : : "a"(&uc) : "memory");
}
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
{
struct kvm_run *run = vcpu_state(vm, vcpu_id);
struct ucall ucall = {};
if (run->exit_reason == KVM_EXIT_S390_SIEIC &&
run->s390_sieic.icptcode == 4 &&
(run->s390_sieic.ipa >> 8) == 0x83 && /* 0x83 means DIAGNOSE */
(run->s390_sieic.ipb >> 16) == 0x501) {
int reg = run->s390_sieic.ipa & 0xf;
memcpy(&ucall, addr_gva2hva(vm, run->s.regs.gprs[reg]),
sizeof(ucall));
vcpu_run_complete_io(vm, vcpu_id);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
return ucall.cmd;
}


@@ -1,157 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ucall support. A ucall is a "hypercall to userspace".
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#include "kvm_util.h"
#include "kvm_util_internal.h"
#define UCALL_PIO_PORT ((uint16_t)0x1000)
static ucall_type_t ucall_type;
static vm_vaddr_t *ucall_exit_mmio_addr;
static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
{
if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
return false;
virt_pg_map(vm, gpa, gpa, 0);
ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
sync_global_to_guest(vm, ucall_exit_mmio_addr);
return true;
}
void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
{
ucall_type = type;
sync_global_to_guest(vm, ucall_type);
if (type == UCALL_PIO)
return;
if (type == UCALL_MMIO) {
vm_paddr_t gpa, start, end, step, offset;
unsigned bits;
bool ret;
if (arg) {
gpa = (vm_paddr_t)arg;
ret = ucall_mmio_init(vm, gpa);
TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
return;
}
/*
* Find an address within the allowed physical and virtual address
* spaces, that does _not_ have a KVM memory region associated with
* it. Identity mapping an address like this allows the guest to
* access it, but as KVM doesn't know what to do with it, it
* will assume it's something userspace handles and exit with
* KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
* Here we start with a guess that the addresses around 5/8th
* of the allowed space are unmapped and then work both down and
* up from there in 1/16th allowed space sized steps.
*
* Note, we need to use VA-bits - 1 when calculating the allowed
* virtual address space for an identity mapping because the upper
* half of the virtual address space is the two's complement of the
* lower and won't match physical addresses.
*/
bits = vm->va_bits - 1;
bits = vm->pa_bits < bits ? vm->pa_bits : bits;
end = 1ul << bits;
start = end * 5 / 8;
step = end / 16;
for (offset = 0; offset < end - start; offset += step) {
if (ucall_mmio_init(vm, start - offset))
return;
if (ucall_mmio_init(vm, start + offset))
return;
}
TEST_ASSERT(false, "Can't find a ucall mmio address");
}
}
void ucall_uninit(struct kvm_vm *vm)
{
ucall_type = 0;
sync_global_to_guest(vm, ucall_type);
ucall_exit_mmio_addr = 0;
sync_global_to_guest(vm, ucall_exit_mmio_addr);
}
static void ucall_pio_exit(struct ucall *uc)
{
#ifdef __x86_64__
asm volatile("in %[port], %%al"
: : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax");
#endif
}
static void ucall_mmio_exit(struct ucall *uc)
{
*ucall_exit_mmio_addr = (vm_vaddr_t)uc;
}
void ucall(uint64_t cmd, int nargs, ...)
{
struct ucall uc = {
.cmd = cmd,
};
va_list va;
int i;
nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
uc.args[i] = va_arg(va, uint64_t);
va_end(va);
switch (ucall_type) {
case UCALL_PIO:
ucall_pio_exit(&uc);
break;
case UCALL_MMIO:
ucall_mmio_exit(&uc);
break;
};
}
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
{
struct kvm_run *run = vcpu_state(vm, vcpu_id);
struct ucall ucall = {};
bool got_ucall = false;
#ifdef __x86_64__
if (ucall_type == UCALL_PIO && run->exit_reason == KVM_EXIT_IO &&
run->io.port == UCALL_PIO_PORT) {
struct kvm_regs regs;
vcpu_regs_get(vm, vcpu_id, &regs);
memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(ucall));
got_ucall = true;
}
#endif
if (ucall_type == UCALL_MMIO && run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
vm_vaddr_t gva;
TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
"Unexpected ucall exit mmio address access");
memcpy(&gva, run->mmio.data, sizeof(gva));
memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));
got_ucall = true;
}
if (got_ucall) {
vcpu_run_complete_io(vm, vcpu_id);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
return ucall.cmd;
}


@@ -0,0 +1,56 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ucall support. A ucall is a "hypercall to userspace".
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#include "kvm_util.h"
#define UCALL_PIO_PORT ((uint16_t)0x1000)
void ucall_init(struct kvm_vm *vm, void *arg)
{
}
void ucall_uninit(struct kvm_vm *vm)
{
}
void ucall(uint64_t cmd, int nargs, ...)
{
struct ucall uc = {
.cmd = cmd,
};
va_list va;
int i;
nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
uc.args[i] = va_arg(va, uint64_t);
va_end(va);
asm volatile("in %[port], %%al"
: : [port] "d" (UCALL_PIO_PORT), "D" (&uc) : "rax");
}
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
{
struct kvm_run *run = vcpu_state(vm, vcpu_id);
struct ucall ucall = {};
if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
struct kvm_regs regs;
vcpu_regs_get(vm, vcpu_id, &regs);
memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi),
sizeof(ucall));
vcpu_run_complete_io(vm, vcpu_id);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
return ucall.cmd;
}


@@ -0,0 +1,166 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test for s390x KVM_S390_MEM_OP
*
* Copyright (C) 2019, Red Hat, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#define VCPU_ID 1
static uint8_t mem1[65536];
static uint8_t mem2[65536];
static void guest_code(void)
{
int i;
for (;;) {
for (i = 0; i < sizeof(mem2); i++)
mem2[i] = mem1[i];
GUEST_SYNC(0);
}
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_s390_mem_op ksmo;
int rv, i, maxsize;
setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
maxsize = kvm_check_cap(KVM_CAP_S390_MEM_OP);
if (!maxsize) {
fprintf(stderr, "CAP_S390_MEM_OP not supported -> skip test\n");
exit(KSFT_SKIP);
}
if (maxsize > sizeof(mem1))
maxsize = sizeof(mem1);
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
run = vcpu_state(vm, VCPU_ID);
for (i = 0; i < sizeof(mem1); i++)
mem1[i] = i * i + i;
/* Set the first array */
ksmo.gaddr = addr_gva2gpa(vm, (uintptr_t)mem1);
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
/* Let the guest code copy the first array to the second */
vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
memset(mem2, 0xaa, sizeof(mem2));
/* Get the second array */
ksmo.gaddr = (uintptr_t)mem2;
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
ksmo.buf = (uintptr_t)mem2;
ksmo.ar = 0;
vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
"Memory contents do not match!");
/* Check error conditions - first bad size: */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = 0;
ksmo.size = -1;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");
/* Zero size: */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = 0;
ksmo.size = 0;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
"ioctl allows 0 as size");
/* Bad flags: */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = -1;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");
/* Bad operation: */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = -1;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
/* Bad guest address: */
ksmo.gaddr = ~0xfffUL;
ksmo.flags = KVM_S390_MEMOP_F_CHECK_ONLY;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");
/* Bad host address: */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = 0;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == EFAULT,
"ioctl does not report bad host memory address");
/* Bad access register: */
run->psw_mask &= ~(3UL << (63 - 17));
run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */
vcpu_run(vm, VCPU_ID); /* To sync new state to SIE block */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 17;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */
vcpu_run(vm, VCPU_ID); /* Run to sync new state */
kvm_vm_free(vm);
return 0;
}


@@ -25,9 +25,11 @@
static void guest_code(void)
{
register u64 stage asm("11") = 0;
for (;;) {
asm volatile ("diag 0,0,0x501");
asm volatile ("ahi 11,1");
GUEST_SYNC(0);
asm volatile ("ahi %0,1" : : "r"(stage));
}
}
@@ -83,6 +85,36 @@ int main(int argc, char *argv[])
run = vcpu_state(vm, VCPU_ID);
/* Request reading invalid register set from VCPU. */
run->kvm_valid_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
/* Request setting invalid register set into VCPU. */
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
/* Request and verify all valid register sets. */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
rv = _vcpu_run(vm, VCPU_ID);