Message-Id: <201405250057.s4P0vOWv011967@svn.freebsd.org>
From: Neel Natu
Date: Sun, 25 May 2014 00:57:24 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r266641 - in head: sys/amd64/include sys/amd64/vmm sys/amd64/vmm/intel usr.sbin/bhyve
List-Id: SVN commit messages for the src tree for head/-current

Author: neel
Date: Sun May 25 00:57:24 2014
New Revision: 266641
URL: http://svnweb.freebsd.org/changeset/base/266641

Log:
  Do the linear address calculation for the ins/outs emulation using a new
  API function 'vie_calculate_gla()'.

  While the current implementation is simplistic it forms the basis of doing
  segmentation checks if the guest is in 32-bit protected mode.

Modified:
  head/sys/amd64/include/vmm.h
  head/sys/amd64/include/vmm_instruction_emul.h
  head/sys/amd64/vmm/intel/vmx.c
  head/sys/amd64/vmm/vmm_instruction_emul.c
  head/sys/amd64/vmm/vmm_ioport.c
  head/usr.sbin/bhyve/inout.c

Modified: head/sys/amd64/include/vmm.h
==============================================================================
--- head/sys/amd64/include/vmm.h	Sun May 25 00:57:07 2014	(r266640)
+++ head/sys/amd64/include/vmm.h	Sun May 25 00:57:24 2014	(r266641)
@@ -426,7 +426,6 @@ struct vm_inout_str {
 	int addrsize;
 	enum vm_reg_name seg_name;
 	struct seg_desc seg_desc;
-	uint64_t gla;	/* may be set to VIE_INVALID_GLA */
 };
 
 struct vm_exit {

Modified: head/sys/amd64/include/vmm_instruction_emul.h
==============================================================================
--- head/sys/amd64/include/vmm_instruction_emul.h	Sun May 25 00:57:07 2014	(r266640)
+++ head/sys/amd64/include/vmm_instruction_emul.h	Sun May 25 00:57:24 2014	(r266641)
@@ -67,6 +67,9 @@ int vie_canonical_check(enum vm_cpu_mode
 
 uint64_t vie_size2mask(int size);
 
+int vie_calculate_gla(enum vm_cpu_mode cpu_mode, int addrsize,
+    enum vm_reg_name seg, struct seg_desc *desc, uint64_t off, uint64_t *gla);
+
 #ifdef _KERNEL
 /*
  * APIs to fetch and decode the instruction from nested page fault handler.
@@ -89,9 +92,6 @@ int vmm_gla2gpa(struct vm *vm, int vcpui
 
 void vie_init(struct vie *vie);
 
-uint64_t vie_segbase(enum vm_reg_name segment, enum vm_cpu_mode cpu_mode,
-    const struct seg_desc *desc);
-
 /*
  * Decode the instruction fetched into 'vie' so it can be emulated.
  *

Modified: head/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- head/sys/amd64/vmm/intel/vmx.c	Sun May 25 00:57:07 2014	(r266640)
+++ head/sys/amd64/vmm/intel/vmx.c	Sun May 25 00:57:24 2014	(r266641)
@@ -2012,7 +2012,6 @@ vmx_exit_process(struct vmx *vmx, int vc
 			vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
 			vis->addrsize = inout_str_addrsize(inst_info);
 			inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
-			vis->gla = vmcs_gla();
 		}
 		break;
 	case EXIT_REASON_CPUID:

Modified: head/sys/amd64/vmm/vmm_instruction_emul.c
==============================================================================
--- head/sys/amd64/vmm/vmm_instruction_emul.c	Sun May 25 00:57:07 2014	(r266640)
+++ head/sys/amd64/vmm/vmm_instruction_emul.c	Sun May 25 00:57:24 2014	(r266641)
@@ -607,6 +607,38 @@ vie_size2mask(int size)
 	return (size2mask[size]);
 }
 
+int
+vie_calculate_gla(enum vm_cpu_mode cpu_mode, int addrsize, enum vm_reg_name seg,
+    struct seg_desc *desc, uint64_t offset, uint64_t *gla)
+{
+	uint64_t segbase;
+	int glasize;
+
+	KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
+	    ("%s: invalid segment %d", __func__, seg));
+
+	glasize = (cpu_mode == CPU_MODE_64BIT) ? 8 : 4;
+
+	/*
+	 * In 64-bit mode all segments except %fs and %gs have a segment
+	 * base address of 0.
+	 */
+	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
+	    seg != VM_REG_GUEST_GS) {
+		segbase = 0;
+	} else {
+		segbase = desc->base;
+	}
+
+	/*
+	 * Truncate 'offset' to the effective address size before adding
+	 * it to the segment base.
+	 */
+	offset &= vie_size2mask(addrsize);
+	*gla = (segbase + offset) & vie_size2mask(glasize);
+	return (0);
+}
+
 #ifdef _KERNEL
 void
 vie_init(struct vie *vie)
@@ -1271,42 +1303,4 @@ vmm_decode_instruction(struct vm *vm, in
 
 	return (0);
 }
-
-uint64_t
-vie_segbase(enum vm_reg_name seg, enum vm_cpu_mode cpu_mode,
-    const struct seg_desc *desc)
-{
-	int basesize;
-
-	basesize = 4;	/* default segment width in bytes */
-
-	switch (seg) {
-	case VM_REG_GUEST_ES:
-	case VM_REG_GUEST_CS:
-	case VM_REG_GUEST_SS:
-	case VM_REG_GUEST_DS:
-		if (cpu_mode == CPU_MODE_64BIT) {
-			/*
-			 * Segments having an implicit base address of 0
-			 * in 64-bit mode.
-			 */
-			return (0);
-		}
-		break;
-	case VM_REG_GUEST_FS:
-	case VM_REG_GUEST_GS:
-		if (cpu_mode == CPU_MODE_64BIT) {
-			/*
-			 * In 64-bit mode the FS and GS base address is 8 bytes
-			 * wide.
-			 */
-			basesize = 8;
-		}
-		break;
-	default:
-		panic("%s: invalid segment register %d", __func__, seg);
-	}
-
-	return (desc->base & size2mask[basesize]);
-}
 #endif /* _KERNEL */

Modified: head/sys/amd64/vmm/vmm_ioport.c
==============================================================================
--- head/sys/amd64/vmm/vmm_ioport.c	Sun May 25 00:57:07 2014	(r266640)
+++ head/sys/amd64/vmm/vmm_ioport.c	Sun May 25 00:57:24 2014	(r266641)
@@ -144,7 +144,6 @@ static int
 emulate_inout_str(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
 {
 	struct vm_inout_str *vis;
-	uint64_t gla, index, segbase;
 	int in;
 
 	vis = &vmexit->u.inout_str;
@@ -182,21 +181,6 @@ emulate_inout_str(struct vm *vm, int vcp
 		return (EINVAL);
 	}
 
-	segbase = vie_segbase(vis->seg_name, vis->paging.cpu_mode,
-	    &vis->seg_desc);
-	index = vis->index & vie_size2mask(vis->addrsize);
-	gla = segbase + index;
-
-	/*
-	 * Verify that the computed linear address matches with the one
-	 * provided by hardware.
-	 */
-	if (vis->gla != VIE_INVALID_GLA) {
-		KASSERT(gla == vis->gla, ("%s: gla mismatch "
-		    "%#lx/%#lx", __func__, gla, vis->gla));
-	}
-	vis->gla = gla;
-
 	*retu = true;
 	return (0);	/* Return to userspace to finish emulation */
 }

Modified: head/usr.sbin/bhyve/inout.c
==============================================================================
--- head/usr.sbin/bhyve/inout.c	Sun May 25 00:57:07 2014	(r266640)
+++ head/usr.sbin/bhyve/inout.c	Sun May 25 00:57:24 2014	(r266641)
@@ -147,15 +147,25 @@ emulate_inout(struct vmctx *ctx, int vcp
 
 		/* Count register */
 		count = vis->count & vie_size2mask(addrsize);
 
-		if (vie_alignment_check(vis->paging.cpl, bytes, vis->cr0,
-		    vis->rflags, vis->gla)) {
-			error = vm_inject_exception2(ctx, vcpu, IDT_AC, 0);
-			assert(error == 0);
-			return (INOUT_RESTART);
-		}
-
-		gla = vis->gla;
 		while (count) {
+			if (vie_calculate_gla(vis->paging.cpu_mode,
+			    vis->addrsize, vis->seg_name, &vis->seg_desc,
+			    index, &gla)) {
+				error = vm_inject_exception2(ctx, vcpu,
+				    IDT_GP, 0);
+				assert(error == 0);
+				return (INOUT_RESTART);
+			}
+
+			if (vie_alignment_check(vis->paging.cpl, bytes,
+			    vis->cr0, vis->rflags, gla)) {
+				error = vm_inject_exception2(ctx, vcpu,
+				    IDT_AC, 0);
+				assert(error == 0);
+				return (INOUT_RESTART);
+			}
+
+			val = 0;
 			if (!in) {
 				error = vm_copyin(ctx, vcpu, &vis->paging,
@@ -190,7 +200,6 @@ emulate_inout(struct vmctx *ctx, int vcp
 
 			index += bytes;
 			count--;
-			gla += bytes;
 		}
 
 		/* Update index register */
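
For readers following the new API: below is a minimal, illustrative sketch (not part of the commit) of how a userspace consumer such as bhyve can use vie_calculate_gla() together with the existing vie_size2mask()/vie_alignment_check() helpers when emulating one element of an ins/outs string operation. The helper name calc_element_gla() and its return convention are hypothetical; the struct vm_inout_str fields and the library calls are the ones shown in the diff above.

/*
 * Illustrative sketch only: compute the guest linear address for one element
 * of an ins/outs string operation the way the new usr.sbin/bhyve/inout.c loop
 * does, then run the alignment check against it.  'vis' is the vm_inout_str
 * exit-info structure from the VM exit; 'index' is the (already masked) value
 * of the index register and 'bytes' is the element size.
 */
#include <machine/vmm.h>			/* struct vm_inout_str */
#include <machine/vmm_instruction_emul.h>	/* vie_calculate_gla() etc. */

static int
calc_element_gla(struct vm_inout_str *vis, uint64_t index, int bytes,
    uint64_t *gla)
{

	/*
	 * vie_calculate_gla() truncates the offset to the effective address
	 * size internally, so the raw index register value may be passed.
	 * A non-zero return will indicate a segmentation violation once the
	 * 32-bit protected mode checks land; at this revision it is always 0.
	 * The caller would inject #GP, as emulate_inout() does.
	 */
	if (vie_calculate_gla(vis->paging.cpu_mode, vis->addrsize,
	    vis->seg_name, &vis->seg_desc, index, gla) != 0)
		return (-1);

	/* Misaligned access with #AC checks enabled: caller injects #AC. */
	if (vie_alignment_check(vis->paging.cpl, bytes, vis->cr0,
	    vis->rflags, *gla) != 0)
		return (1);

	return (0);
}

Note the design point visible in the inout.c hunk: the linear address is now recomputed from the segment descriptor and index on every loop iteration (the old 'gla += bytes' increment is gone), so both the future segmentation check and the alignment check apply to each element of a rep string operation rather than only to the first.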