crosvm/tests/plugin_async_write.c
Matt Delco a52b2a6c81 crosvm: add plugin API for async writes
A plugin might want to be notified immediately when a write is made to a
port, without the VM staying stopped until the plugin calls back to
resume it.

Unfortunately this means that multiple messages can be queued up in the
pipe and read() together by the plugin API.  Protobuf's parsing function
doesn't report how many bytes it consumed, so I've resorted to having crosvm
prefix every message with its length and having the plugin lib read that
length to split the stream back into individual messages.  Impact on
performance has not been measured.
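
For illustration, the plugin-side read under this framing might look roughly
like the sketch below. read_full and read_framed_msg are hypothetical names,
and the 32-bit native-endian length prefix is an assumption for the sketch,
not something this change specifies:

    #include <stdint.h>
    #include <unistd.h>

    /* Read exactly len bytes from fd, looping over short reads. */
    static int read_full(int fd, void *buf, size_t len) {
        uint8_t *p = buf;
        while (len > 0) {
            ssize_t n = read(fd, p, len);
            if (n <= 0)
                return -1;
            p += n;
            len -= (size_t)n;
        }
        return 0;
    }

    /* Read one length-prefixed message into buf (capacity cap) and return
     * its length, so exactly that many bytes can be handed to the protobuf
     * parser even when several messages are queued in the pipe. */
    static ssize_t read_framed_msg(int fd, uint8_t *buf, size_t cap) {
        uint32_t len; /* assumed prefix: native-endian u32 */
        if (read_full(fd, &len, sizeof(len)) || len > cap)
            return -1;
        if (read_full(fd, buf, len))
            return -1;
        return (ssize_t)len;
    }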

BUG=b:143294496
TEST=Local build and run of build_test.  Verified that new unit
test was executed, exercised the case where multiple msgs are
received together, and completed successfully.

Change-Id: If6ef463e7b4d2e688e649f832a764fa644bf2d36
Signed-off-by: Matt Delco <delco@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/1896376
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Stephen Barber <smbarber@chromium.org>
2019-11-06 23:01:15 +00:00


/*
 * Copyright 2019 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
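
/*
 * Exercises the async-write plugin API: guest writes to an ioport range
 * reserved with crosvm_reserve_async_write_range() should reach the plugin
 * as no-resume events.
 */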
#include <errno.h>
#include <fcntl.h>
#include <linux/memfd.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include "crosvm.h"
#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif
#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#endif
#ifndef F_SEAL_SHRINK
#define F_SEAL_SHRINK 0x0002
#endif
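
/*
 * I/O ports used by the guest code in main(): a write to KILL_ADDRESS ends
 * the test, while ASYNC_ADDRESS is reserved below for asynchronous
 * (no-resume) write notification.
 */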
#define KILL_ADDRESS 0x3f9
#define ASYNC_ADDRESS 0x500
int g_kill_evt;
int got_error = 0;
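
/*
 * Per-vcpu event loop: seeds registers at INIT, then expects each of the
 * guest's three writes to ASYNC_ADDRESS to arrive as a no-resume event that
 * must not be answered with crosvm_vcpu_resume().
 */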
void *vcpu_thread(void *arg) {
    struct crosvm_vcpu *vcpu = arg;
    struct crosvm_vcpu_event evt;
    while (crosvm_vcpu_wait(vcpu, &evt) == 0) {
        if (evt.kind == CROSVM_VCPU_EVENT_KIND_INIT) {
            struct kvm_sregs sregs;
            crosvm_vcpu_get_sregs(vcpu, &sregs);
            sregs.cs.base = 0;
            sregs.cs.selector = 0;
            sregs.es.base = KILL_ADDRESS;
            sregs.es.selector = 0;
            crosvm_vcpu_set_sregs(vcpu, &sregs);

            struct kvm_regs regs;
            crosvm_vcpu_get_regs(vcpu, &regs);
            regs.rip = 0x1000;
            regs.rax = 2;
            regs.rbx = 7;
            regs.rflags = 2;
            crosvm_vcpu_set_regs(vcpu, &regs);
        }
        if (evt.kind == CROSVM_VCPU_EVENT_KIND_IO_ACCESS) {
            if (evt.io_access.address_space == CROSVM_ADDRESS_SPACE_IOPORT &&
                evt.io_access.address == ASYNC_ADDRESS &&
                evt.io_access.is_write &&
                evt.io_access.length == 1) {
                int ret;
                if (!evt.io_access.no_resume) {
                    fprintf(stderr, "should have been told not to resume\n");
                    got_error = 1;
                }
                ret = crosvm_vcpu_wait(vcpu, &evt);
                if (ret == 0) {
                    if (evt.kind != CROSVM_VCPU_EVENT_KIND_IO_ACCESS ||
                        evt.io_access.address_space !=
                            CROSVM_ADDRESS_SPACE_IOPORT ||
                        evt.io_access.address != ASYNC_ADDRESS ||
                        !evt.io_access.is_write ||
                        !evt.io_access.no_resume ||
                        evt.io_access.length != 1) {
                        fprintf(stderr, "got unexpected wait #1 result\n");
                        got_error = 1;
                    }
                } else {
                    fprintf(stderr, "crosvm_vcpu_wait() #1 failed: %d\n", ret);
                    got_error = 1;
                }
                ret = crosvm_vcpu_wait(vcpu, &evt);
                if (ret == 0) {
                    if (evt.kind != CROSVM_VCPU_EVENT_KIND_IO_ACCESS ||
                        evt.io_access.address_space !=
                            CROSVM_ADDRESS_SPACE_IOPORT ||
                        evt.io_access.address != ASYNC_ADDRESS ||
                        !evt.io_access.is_write ||
                        !evt.io_access.no_resume ||
                        evt.io_access.length != 1) {
                        fprintf(stderr, "got unexpected wait #2 result\n");
                        got_error = 1;
                    }
                } else {
                    fprintf(stderr, "crosvm_vcpu_wait() #2 failed: %d\n", ret);
                    got_error = 1;
                }
                // skip the crosvm_vcpu_resume()
                continue;
            }
            if (evt.io_access.address_space == CROSVM_ADDRESS_SPACE_IOPORT &&
                evt.io_access.address == KILL_ADDRESS &&
                evt.io_access.is_write &&
                evt.io_access.length == 1 &&
                evt.io_access.data[0] == 1) {
                uint64_t dummy = 1;
                write(g_kill_evt, &dummy, sizeof(dummy));
                return NULL;
            }
        }
        crosvm_vcpu_resume(vcpu);
    }
    return NULL;
}
int main(int argc, char** argv) {
    const uint8_t code[] = {
        /*
        B007    mov al,0x7
        BA0005  mov dx,0x500
        EE      out dx,al
        EE      out dx,al
        EE      out dx,al
        BAF903  mov dx,0x3f9
        B001    mov al,0x1
        EE      out dx,al
        F4      hlt
        */
        0xb0, 0x7,
        0xba, (ASYNC_ADDRESS & 0xFF), ((ASYNC_ADDRESS >> 8) & 0xFF),
        0xee,
        0xee,
        0xee,
        0xba, (KILL_ADDRESS & 0xFF), ((KILL_ADDRESS >> 8) & 0xFF),
        0xb0, 0x01,
        0xee,
        0xf4
    };
    struct crosvm *crosvm;
    int ret = crosvm_connect(&crosvm);
    if (ret) {
        fprintf(stderr, "failed to connect to crosvm: %d\n", ret);
        return 1;
    }

    /*
     * Not strictly necessary, but demonstrates we can have as many
     * connections as we please.
     */
    struct crosvm *extra_crosvm;
    ret = crosvm_new_connection(crosvm, &extra_crosvm);
    if (ret) {
        fprintf(stderr, "failed to make new socket: %d\n", ret);
        return 1;
    }
    /* We need this eventfd to know when to exit before being killed. */
    g_kill_evt = crosvm_get_shutdown_eventfd(crosvm);
    if (g_kill_evt < 0) {
        fprintf(stderr, "failed to get kill eventfd: %d\n", g_kill_evt);
        return 1;
    }
    ret = crosvm_reserve_async_write_range(crosvm, CROSVM_ADDRESS_SPACE_IOPORT,
                                           ASYNC_ADDRESS, 1);
    if (ret) {
        fprintf(stderr, "failed to reserve async ioport range: %d\n", ret);
        return 1;
    }

    ret = crosvm_reserve_range(crosvm, CROSVM_ADDRESS_SPACE_IOPORT,
                               KILL_ADDRESS, 1);
    if (ret) {
        fprintf(stderr, "failed to reserve kill ioport range: %d\n", ret);
        return 1;
    }

    int mem_size = 0x2000;
    int mem_fd = syscall(SYS_memfd_create, "guest_mem",
                         MFD_CLOEXEC | MFD_ALLOW_SEALING);
    if (mem_fd < 0) {
        fprintf(stderr, "failed to create guest memfd: %d\n", errno);
        return 1;
    }

    ret = ftruncate(mem_fd, mem_size);
    if (ret) {
        fprintf(stderr, "failed to set size of guest memory: %d\n", errno);
        return 1;
    }

    uint8_t *mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        mem_fd, 0x1000);
    if (mem == MAP_FAILED) {
        fprintf(stderr, "failed to mmap guest memory: %d\n", errno);
        return 1;
    }

    fcntl(mem_fd, F_ADD_SEALS, F_SEAL_SHRINK);
    memcpy(mem, code, sizeof(code));

    struct crosvm_memory *mem_obj;
    ret = crosvm_create_memory(crosvm, mem_fd, 0x1000, 0x1000, 0x1000, false,
                               false, &mem_obj);
    if (ret) {
        fprintf(stderr, "failed to create memory in crosvm: %d\n", ret);
        return 1;
    }
    /* get and create a thread for each vcpu */
    struct crosvm_vcpu *vcpus[32];
    pthread_t vcpu_threads[32];
    uint32_t vcpu_count;
    for (vcpu_count = 0; vcpu_count < 32; vcpu_count++) {
        ret = crosvm_get_vcpu(crosvm, vcpu_count, &vcpus[vcpu_count]);
        if (ret == -ENOENT)
            break;
        if (ret) {
            fprintf(stderr, "error while getting all vcpus: %d\n", ret);
            return 1;
        }
        pthread_create(&vcpu_threads[vcpu_count], NULL, vcpu_thread,
                       vcpus[vcpu_count]);
    }
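
    /* The vcpu threads block in crosvm_vcpu_wait(); now let the VM run. */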
    ret = crosvm_start(extra_crosvm);
    if (ret) {
        fprintf(stderr, "failed to tell crosvm to start: %d\n", ret);
        return 1;
    }

    /* Wait for crosvm to request that we exit, otherwise we will be killed. */
    uint64_t dummy;
    read(g_kill_evt, &dummy, 8);

    ret = crosvm_destroy_memory(crosvm, &mem_obj);
    if (ret) {
        fprintf(stderr, "failed to destroy memory in crosvm: %d\n", ret);
        return 1;
    }

    ret = crosvm_reserve_async_write_range(crosvm, CROSVM_ADDRESS_SPACE_IOPORT,
                                           ASYNC_ADDRESS, 0);
    if (ret) {
        fprintf(stderr, "failed to unreserve async ioport range: %d\n", ret);
        return 1;
    }

    ret = crosvm_reserve_range(crosvm, CROSVM_ADDRESS_SPACE_IOPORT,
                               KILL_ADDRESS, 0);
    if (ret) {
        fprintf(stderr, "failed to unreserve kill ioport range: %d\n", ret);
        return 1;
    }

    if (got_error) {
        fprintf(stderr, "vm ran to completion but with an error\n");
        return 1;
    }

    return 0;
}