Date: Sun, 5 Jul 2015 10:24:17 GMT
From: mihai@FreeBSD.org
To: svn-soc-all@FreeBSD.org
Subject: socsvn commit: r287975 - in soc2015/mihai/bhyve-on-arm-head: lib lib/libvmmapiarm sys/arm/include usr.sbin usr.sbin/bhyvearm
Message-ID: <201507051024.t65AOHfk057179@socsvn.freebsd.org>
Author: mihai
Date: Sun Jul 5 10:24:17 2015
New Revision: 287975
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=287975

Log:
  soc2015: mihai: bhyve-on-arm: added a stub bhyvearm/libvmmapiarm used to control the VMM

Added:
  soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/
  soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/Makefile
  soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/vmmapi.c
  soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/vmmapi.h
  soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/vmmapi_freebsd.c
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/Makefile
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/bhyverun.c
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/bhyverun.h
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/block_if.c
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/block_if.h
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/mem.c
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/mem.h
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/mevent.c
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/mevent.h
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/mevent_test.c
Modified:
  soc2015/mihai/bhyve-on-arm-head/lib/Makefile
  soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm.h
  soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm_dev.h
  soc2015/mihai/bhyve-on-arm-head/usr.sbin/Makefile.arm

Modified: soc2015/mihai/bhyve-on-arm-head/lib/Makefile
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/lib/Makefile	Sun Jul 5 09:48:03 2015	(r287974)
+++ soc2015/mihai/bhyve-on-arm-head/lib/Makefile	Sun Jul 5 10:24:17 2015	(r287975)
@@ -111,6 +111,7 @@
 	libutil \
 	${_libvgl} \
 	${_libvmmapi} \
+	${_libvmmapiarm} \
 	libwrap \
 	libxo \
 	liby \
@@ -258,6 +259,12 @@
 .endif
 .endif
 
+.if ${MACHINE_CPUARCH} == "arm"
+.if ${MK_BHYVE} != "no"
+_libvmmapiarm=	libvmmapiarm
+.endif
+.endif
+
 .if ${MACHINE_CPUARCH} == "mips"
 _libproc=	libproc
 _librtld_db=	librtld_db

Added: soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/Makefile
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/Makefile	Sun Jul 5 10:24:17 2015	(r287975)
@@ -0,0 +1,11 @@
+# $FreeBSD: head/lib/libvmmapi/Makefile 245678 2013-01-20 03:42:49Z neel $
+
+LIB=	vmmapiarm
+SRCS=	vmmapi.c vmmapi_freebsd.c
+INCS=	vmmapi.h
+
+WARNS?=	2
+
+CFLAGS+= -I${.CURDIR}
+
+.include <bsd.lib.mk>

Added: soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/vmmapi.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/vmmapi.c	Sun Jul 5 10:24:17 2015	(r287975)
@@ -0,0 +1,398 @@
+#include <sys/cdefs.h>
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include<sys/errno.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <machine/vmm.h>
+#include <machine/vmm_dev.h>
+
+#include "vmmapi.h"
+
+#define	GB	(1024 * 1024 * 1024UL)
+
+struct vmctx {
+	int	fd;
+	uint32_t lowmem_limit;
+	enum vm_mmap_style vms;
+	size_t	lowmem;
+	char	*lowmem_addr;
+	size_t	highmem;
+	char	*highmem_addr;
+	char	*name;
+};
+
+#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
+#define	DESTROY(x) sysctlbyname("hw.vmm.destroy",
NULL, NULL, (x), strlen((x))) + +static int +vm_device_open(const char *name) +{ + int fd, len; + char *vmfile; + + len = strlen("/dev/vmm/") + strlen(name) + 1; + vmfile = malloc(len); + assert(vmfile != NULL); + snprintf(vmfile, len, "/dev/vmm/%s", name); + + /* Open the device file */ + fd = open(vmfile, O_RDWR, 0); + + free(vmfile); + return (fd); +} + +int +vm_create(const char *name) +{ + + return (CREATE((char *)name)); +} + +struct vmctx * +vm_open(const char *name) +{ + struct vmctx *vm; + + vm = malloc(sizeof(struct vmctx) + strlen(name) + 1); + assert(vm != NULL); + + vm->fd = -1; + vm->lowmem_limit = 3 * GB; + vm->name = (char *)(vm + 1); + strcpy(vm->name, name); + + if ((vm->fd = vm_device_open(vm->name)) < 0) + goto err; + + return (vm); +err: + vm_destroy(vm); + return (NULL); +} + +void +vm_destroy(struct vmctx *vm) +{ + assert(vm != NULL); + + if (vm->fd >= 0) + close(vm->fd); + DESTROY(vm->name); + + free(vm); +} + +int +vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len) +{ + int error; + struct vm_memory_segment seg; + + bzero(&seg, sizeof(seg)); + seg.gpa = gpa; + error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg); + *ret_len = seg.len; + return (error); +} + +uint32_t +vm_get_lowmem_limit(struct vmctx *ctx) +{ + + return (ctx->lowmem_limit); +} + +void +vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit) +{ + + ctx->lowmem_limit = limit; +} + +static int +setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr) +{ + int error; + struct vm_memory_segment seg; + + /* + * Create and optionally map 'len' bytes of memory at guest + * physical address 'gpa' + */ + bzero(&seg, sizeof(seg)); + seg.gpa = gpa; + seg.len = len; + error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg); + if (error == 0 && addr != NULL) { + *addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, + ctx->fd, gpa); + } + return (error); +} + +int +vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms) +{ + char **addr; + int error; + + /* XXX VM_MMAP_SPARSE not implemented yet */ + assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL); + ctx->vms = vms; + + /* + * If 'memsize' cannot fit entirely in the 'lowmem' segment then + * create another 'highmem' segment above 4GB for the remainder. + */ + if (memsize > ctx->lowmem_limit) { + ctx->lowmem = ctx->lowmem_limit; + ctx->highmem = memsize - ctx->lowmem; + } else { + ctx->lowmem = memsize; + ctx->highmem = 0; + } + + if (ctx->lowmem > 0) { + addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL; + error = setup_memory_segment(ctx, 0, ctx->lowmem, addr); + if (error) + return (error); + } + + if (ctx->highmem > 0) { + addr = (vms == VM_MMAP_ALL) ? 
&ctx->highmem_addr : NULL; + error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr); + if (error) + return (error); + } + + return (0); +} + +void * +vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len) +{ + + /* XXX VM_MMAP_SPARSE not implemented yet */ + assert(ctx->vms == VM_MMAP_ALL); + + if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem) + return ((void *)(ctx->lowmem_addr + gaddr)); + + if (gaddr >= 4*GB) { + gaddr -= 4*GB; + if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem) + return ((void *)(ctx->highmem_addr + gaddr)); + } + + return (NULL); +} + + +int +vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val) +{ + int error; + struct vm_register vmreg; + + bzero(&vmreg, sizeof(vmreg)); + vmreg.cpuid = vcpu; + vmreg.regnum = reg; + vmreg.regval = val; + + error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg); + return (error); +} + +int +vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val) +{ + int error; + struct vm_register vmreg; + + bzero(&vmreg, sizeof(vmreg)); + vmreg.cpuid = vcpu; + vmreg.regnum = reg; + + error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg); + *ret_val = vmreg.regval; + return (error); +} + +int +vm_run(struct vmctx *ctx, int vcpu, uint64_t pc, struct vm_exit *vmexit) +{ + int error; + struct vm_run vmrun; + + bzero(&vmrun, sizeof(vmrun)); + vmrun.cpuid = vcpu; + vmrun.pc = pc; + + error = ioctl(ctx->fd, VM_RUN, &vmrun); + bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit)); + return (error); +} + +static struct { + const char *name; + int type; +} capstrmap[] = { + { "hlt_exit", VM_CAP_HALT_EXIT }, + { "mtrap_exit", VM_CAP_MTRAP_EXIT }, + { "pause_exit", VM_CAP_PAUSE_EXIT }, + { "unrestricted_guest", VM_CAP_UNRESTRICTED_GUEST }, + { 0 } +}; + +int +vm_capability_name2type(const char *capname) +{ + int i; + + for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) { + if (strcmp(capstrmap[i].name, capname) == 0) + return (capstrmap[i].type); + } + + return (-1); +} + +const char * +vm_capability_type2name(int type) +{ + int i; + + for (i = 0; capstrmap[i].name != NULL; i++) { + if (capstrmap[i].type == type) + return (capstrmap[i].name); + } + + return (NULL); +} + +int +vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, + int *retval) +{ + int error; + struct vm_capability vmcap; + + bzero(&vmcap, sizeof(vmcap)); + vmcap.cpuid = vcpu; + vmcap.captype = cap; + + error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap); + *retval = vmcap.capval; + return (error); +} + +int +vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val) +{ + struct vm_capability vmcap; + + bzero(&vmcap, sizeof(vmcap)); + vmcap.cpuid = vcpu; + vmcap.captype = cap; + vmcap.capval = val; + + return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap)); +} + +int +vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func) +{ + struct vm_pptdev pptdev; + + bzero(&pptdev, sizeof(pptdev)); + pptdev.bus = bus; + pptdev.slot = slot; + pptdev.func = func; + + return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev)); +} + +int +vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func) +{ + struct vm_pptdev pptdev; + + bzero(&pptdev, sizeof(pptdev)); + pptdev.bus = bus; + pptdev.slot = slot; + pptdev.func = func; + + return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev)); +} + +int +vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, + vm_paddr_t gpa, size_t len, vm_paddr_t hpa) +{ + struct vm_pptdev_mmio pptmmio; + + bzero(&pptmmio, sizeof(pptmmio)); + pptmmio.bus = bus; + pptmmio.slot = slot; + 
pptmmio.func = func; + pptmmio.gpa = gpa; + pptmmio.len = len; + pptmmio.hpa = hpa; + + return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio)); +} + +uint64_t * +vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv, + int *ret_entries) +{ + int error; + + static struct vm_stats vmstats; + + vmstats.cpuid = vcpu; + + error = ioctl(ctx->fd, VM_STATS, &vmstats); + if (error == 0) { + if (ret_entries) + *ret_entries = vmstats.num_entries; + if (ret_tv) + *ret_tv = vmstats.tv; + return (vmstats.statbuf); + } else + return (NULL); +} + +const char * +vm_get_stat_desc(struct vmctx *ctx, int index) +{ + static struct vm_stat_desc statdesc; + + statdesc.index = index; + if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0) + return (statdesc.desc); + else + return (NULL); +} + +/* + * From Intel Vol 3a: + * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT + */ +int +vcpu_reset(struct vmctx *vmctx, int vcpu) +{ + return (ENXIO); +} Added: soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/vmmapi.h ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/vmmapi.h Sun Jul 5 10:24:17 2015 (r287975) @@ -0,0 +1,86 @@ +/*- + * Copyright (c) 2011 NetApp, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD: head/lib/libvmmapi/vmmapi.h 249905 2013-04-25 20:42:21Z neel $ + */ + +#ifndef _VMMAPI_H_ +#define _VMMAPI_H_ + +struct vmctx; + +#if defined(__amd64__) +enum x2apic_state; +#endif + +/* + * Different styles of mapping the memory assigned to a VM into the address + * space of the controlling process. 
+ */ +enum vm_mmap_style { + VM_MMAP_NONE, /* no mapping */ + VM_MMAP_ALL, /* fully and statically mapped */ + VM_MMAP_SPARSE, /* mappings created on-demand */ +}; + +int vm_create(const char *name); +struct vmctx *vm_open(const char *name); +void vm_destroy(struct vmctx *ctx); +int vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len); +int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s); +void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len); +uint32_t vm_get_lowmem_limit(struct vmctx *ctx); +void vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit); +int vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val); +int vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *retval); +int vm_run(struct vmctx *ctx, int vcpu, uint64_t rip, + struct vm_exit *ret_vmexit); +const char *vm_capability_type2name(int type); +int vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, + int *retval); +int vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, + int val); +int vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func); +int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func); +int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, + vm_paddr_t gpa, size_t len, vm_paddr_t hpa); + +/* + * Return a pointer to the statistics buffer. Note that this is not MT-safe. + */ +uint64_t *vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv, + int *ret_entries); +const char *vm_get_stat_desc(struct vmctx *ctx, int index); + +/* Reset vcpu register state */ +int vcpu_reset(struct vmctx *ctx, int vcpu); + +/* + * FreeBSD specific APIs + */ +int vm_setup_freebsd_registers(struct vmctx *ctx, int vcpu, + uint64_t pc, uint64_t sp); +void vm_setup_freebsd_gdt(uint64_t *gdtr); +#endif /* _VMMAPI_H_ */ Added: soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/vmmapi_freebsd.c ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2015/mihai/bhyve-on-arm-head/lib/libvmmapiarm/vmmapi_freebsd.c Sun Jul 5 10:24:17 2015 (r287975) @@ -0,0 +1,30 @@ +#include <sys/cdefs.h> + +#include <sys/types.h> + +#include <machine/vmm.h> + +#include "vmmapi.h" + + + +/* + * Setup the 'vcpu' register set such that it will begin execution at + * 'rip' in long mode. + */ +int +vm_setup_freebsd_registers(struct vmctx *vmctx, int vcpu, + uint64_t pc, uint64_t sp) +{ + int error; + + if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_PC, pc)) != 0) + goto done; + + if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SP, sp)) != 0) + goto done; + + error = 0; +done: + return (error); +} Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm.h ============================================================================== --- soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm.h Sun Jul 5 09:48:03 2015 (r287974) +++ soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm.h Sun Jul 5 10:24:17 2015 (r287975) @@ -1,31 +1,3 @@ -/*- - * Copyright (c) 2011 NetApp, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD: soc2015/mihai/bhyve-on-arm-head/sys/amd64/include/vmm.h 285151 2015-05-06 23:40:24Z neel $ - */ - #ifndef _VMM_H_ #define _VMM_H_ @@ -43,46 +15,21 @@ * Identifiers for architecturally defined registers. */ enum vm_reg_name { - VM_REG_GUEST_RAX, - VM_REG_GUEST_RBX, - VM_REG_GUEST_RCX, - VM_REG_GUEST_RDX, - VM_REG_GUEST_RSI, - VM_REG_GUEST_RDI, - VM_REG_GUEST_RBP, + VM_REG_GUEST_R1, + VM_REG_GUEST_R2, + VM_REG_GUEST_R3, + VM_REG_GUEST_R4, + VM_REG_GUEST_R5, + VM_REG_GUEST_R6, + VM_REG_GUEST_R7, VM_REG_GUEST_R8, VM_REG_GUEST_R9, VM_REG_GUEST_R10, VM_REG_GUEST_R11, VM_REG_GUEST_R12, - VM_REG_GUEST_R13, - VM_REG_GUEST_R14, - VM_REG_GUEST_R15, - VM_REG_GUEST_CR0, - VM_REG_GUEST_CR3, - VM_REG_GUEST_CR4, - VM_REG_GUEST_DR7, - VM_REG_GUEST_RSP, - VM_REG_GUEST_RIP, - VM_REG_GUEST_RFLAGS, - VM_REG_GUEST_ES, - VM_REG_GUEST_CS, - VM_REG_GUEST_SS, - VM_REG_GUEST_DS, - VM_REG_GUEST_FS, - VM_REG_GUEST_GS, - VM_REG_GUEST_LDTR, - VM_REG_GUEST_TR, - VM_REG_GUEST_IDTR, - VM_REG_GUEST_GDTR, - VM_REG_GUEST_EFER, - VM_REG_GUEST_CR2, - VM_REG_GUEST_PDPTE0, - VM_REG_GUEST_PDPTE1, - VM_REG_GUEST_PDPTE2, - VM_REG_GUEST_PDPTE3, - VM_REG_GUEST_INTR_SHADOW, - VM_REG_LAST + VM_REG_GUEST_SP, + VM_REG_GUEST_LR, + VM_REG_GUEST_PC }; enum x2apic_state { Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm_dev.h ============================================================================== --- soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm_dev.h Sun Jul 5 09:48:03 2015 (r287974) +++ soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm_dev.h Sun Jul 5 10:24:17 2015 (r287975) @@ -54,7 +54,9 @@ struct vm_run { int cpuid; + uint64_t pc; struct vm_exit vm_exit; + }; struct vm_exception { @@ -181,11 +183,6 @@ int vcpuid; }; -struct vm_cpuset { - int which; - int cpusetsize; - cpuset_t *cpus; -}; #define VM_ACTIVE_CPUS 0 #define VM_SUSPENDED_CPUS 1 Modified: soc2015/mihai/bhyve-on-arm-head/usr.sbin/Makefile.arm ============================================================================== --- soc2015/mihai/bhyve-on-arm-head/usr.sbin/Makefile.arm Sun Jul 5 09:48:03 2015 (r287974) +++ soc2015/mihai/bhyve-on-arm-head/usr.sbin/Makefile.arm Sun Jul 5 10:24:17 2015 (r287975) @@ -2,3 +2,6 @@ SUBDIR+= ofwdump SUBDIR+= kgmon +.if ${MK_BHYVE} != "no" +SUBDIR+= bhyvearm +.endif Added: soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/Makefile ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/Makefile Sun 
Jul 5 10:24:17 2015 (r287975) @@ -0,0 +1,20 @@ +# +# $FreeBSD: head/usr.sbin/bhyve/Makefile 256057 2013-10-04 18:44:47Z grehan $ +# + +PROG= bhyvearm + +DEBUG_FLAGS= -g -O0 + +SRCS= bhyverun.c block_if.c mem.c mevent.c + +.PATH: ${.CURDIR}/../../sys/arm/vmm + +NO_MAN= + +DPADD= ${LIBVMMAPIARM} ${LIBMD} ${LIBPTHREAD} +LDADD= -lvmmapiarm -lmd -lpthread + +WARNS?= 2 + +.include <bsd.prog.mk> Added: soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/bhyverun.c ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2015/mihai/bhyve-on-arm-head/usr.sbin/bhyvearm/bhyverun.c Sun Jul 5 10:24:17 2015 (r287975) @@ -0,0 +1,485 @@ +/*- + * Copyright (c) 2011 NetApp, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD: head/usr.sbin/bhyve/bhyverun.c 256062 2013-10-04 23:29:07Z grehan $ + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD: head/usr.sbin/bhyve/bhyverun.c 256062 2013-10-04 23:29:07Z grehan $"); + +#include <sys/types.h> +#include <sys/mman.h> +#include <sys/time.h> + +#include <stdio.h> +#include <stdlib.h> +#include <libgen.h> +#include <unistd.h> +#include <assert.h> +#include <errno.h> +#include <pthread.h> +#include <pthread_np.h> + +#include <machine/vmm.h> +#include <vmmapi.h> + +#include "bhyverun.h" +#include "mem.h" +#include "mevent.h" + +#define GUEST_NIO_PORT 0x488 /* guest upcalls via i/o port */ + +#define VMEXIT_SWITCH 0 /* force vcpu switch in mux mode */ +#define VMEXIT_CONTINUE 1 /* continue from next instruction */ +#define VMEXIT_RESTART 2 /* restart current instruction */ +#define VMEXIT_ABORT 3 /* abort the vm run loop */ +#define VMEXIT_RESET 4 /* guest machine has reset */ + +#define MB (1024UL * 1024) +#define GB (1024UL * MB) + +typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu); + +char *vmname; + +int guest_ncpus; + +static int pincpu = -1; +static int guest_vmexit_on_hlt, guest_vmexit_on_pause; + +static int foundcpus; + +static char *progname; +static const int BSP = 0; + +static int cpumask; + +static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip); + +struct vm_exit vmexit[VM_MAXCPU]; + +struct bhyvestats { + uint64_t vmexit_bogus; + uint64_t vmexit_bogus_switch; + uint64_t vmexit_hlt; + uint64_t vmexit_pause; + uint64_t vmexit_mtrap; + uint64_t vmexit_paging; + uint64_t cpu_switch_rotate; + uint64_t cpu_switch_direct; + int io_reset; +} stats; + +struct mt_vmm_info { + pthread_t mt_thr; + struct vmctx *mt_ctx; + int mt_vcpu; +} mt_vmm_info[VM_MAXCPU]; + +static void +usage(int code) +{ + + fprintf(stderr, + "Usage: %s [-aehAHIP][-g <gdb port>][-s <pci>][-S <pci>]" + "[-c vcpus][-p pincpu][-m mem]" + " <vmname>\n" + " -c: # cpus (default 1)\n" + " -p: pin vcpu 'n' to host cpu 'pincpu + n'\n" + " -H: vmexit from the guest on hlt\n" + " -P: vmexit from the guest on pause\n" + " -h: help\n" + " -m: memory size in MB\n", + progname); + + exit(code); +} + +void * +paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len) +{ + + return (vm_map_gpa(ctx, gaddr, len)); +} + +int +fbsdrun_vmexit_on_pause(void) +{ + + return (guest_vmexit_on_pause); +} + +int +fbsdrun_vmexit_on_hlt(void) +{ + + return (guest_vmexit_on_hlt); +} + +static void * +fbsdrun_start_thread(void *param) +{ + char tname[MAXCOMLEN + 1]; + struct mt_vmm_info *mtp; + int vcpu; + + mtp = param; + vcpu = mtp->mt_vcpu; + + snprintf(tname, sizeof(tname), "%s vcpu %d", vmname, vcpu); + pthread_set_name_np(mtp->mt_thr, tname); + + vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip); + + /* not reached */ + exit(1); + return (NULL); +} + +void +fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip) +{ + int error; + + if (cpumask & (1 << vcpu)) { + fprintf(stderr, "addcpu: attempting to add existing cpu %d\n", + vcpu); + exit(1); + } + + cpumask |= 1 << vcpu; + foundcpus++; + + /* + * Set up the vmexit struct to allow execution to start + * at the given RIP + */ + vmexit[vcpu].rip = rip; + vmexit[vcpu].inst_length = 0; + + if (vcpu == BSP) { + mt_vmm_info[vcpu].mt_ctx = ctx; + mt_vmm_info[vcpu].mt_vcpu = vcpu; + + error = pthread_create(&mt_vmm_info[vcpu].mt_thr, NULL, + fbsdrun_start_thread, &mt_vmm_info[vcpu]); + assert(error == 0); + } +} + +static int +fbsdrun_get_next_cpu(int curcpu) +{ + + /* + * Get the next available CPU. 
Assumes they arrive + * in ascending order with no gaps. + */ + return ((curcpu + 1) % foundcpus); +} + +static int +vmexit_catch_reset(void) +{ + stats.io_reset++; + return (VMEXIT_RESET); +} + +static int +vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu, + uint32_t eax) +{ +#if BHYVE_DEBUG + /* + * put guest-driven debug here + */ +#endif + return (VMEXIT_CONTINUE); +} + +static int +vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) +{ + + fprintf(stderr, "vm exit[%d]\n", *pvcpu); + fprintf(stderr, "\treason\t\tVMX\n"); + fprintf(stderr, "\trip\t\t0x%016llx\n", vmexit->rip); + fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length); + + return (VMEXIT_ABORT); +} + +static int +vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) +{ + + stats.vmexit_bogus++; + + return (VMEXIT_RESTART); +} + +static int +vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) +{ + + stats.vmexit_hlt++; + + /* + * Just continue execution with the next instruction. We use + * the HLT VM exit as a way to be friendly with the host + * scheduler. + */ + return (VMEXIT_CONTINUE); +} + +static int +vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) +{ + + stats.vmexit_pause++; + + return (VMEXIT_CONTINUE); +} + +static int +vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) +{ + + stats.vmexit_mtrap++; + + return (VMEXIT_RESTART); +} + +static int +vmexit_paging(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu) +{ + int err; + stats.vmexit_paging++; + + err = emulate_mem(ctx, *pvcpu, vmexit->u.paging.gpa, NULL); +// &vmexit->u.paging.vie); + + if (err) { + if (err == EINVAL) { + fprintf(stderr, + "Failed to emulate instruction at 0x%llx\n", + vmexit->rip); + } else if (err == ESRCH) { + fprintf(stderr, "Unhandled memory access to 0x%llx\n", *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***