From: Neel Natu
Date: Thu, 22 May 2014 03:14:54 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r266524 - in head/sys/amd64: include vmm

Author: neel
Date: Thu May 22 03:14:54 2014
New Revision: 266524
URL: http://svnweb.freebsd.org/changeset/base/266524

Log:
  Inject page fault into the guest if the page table walker detects an
  invalid translation for the guest linear address.

Modified:
  head/sys/amd64/include/vmm.h
  head/sys/amd64/include/vmm_instruction_emul.h
  head/sys/amd64/vmm/vmm.c
  head/sys/amd64/vmm/vmm_instruction_emul.c
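The change gives the guest-linear-to-guest-physical translation path a
three-way return convention, documented in the vmm_instruction_emul.h hunk
below and consumed by vm_handle_inst_emul() in vmm.c.  A minimal caller-side
sketch, not part of this diff, with 'vm', 'vcpuid', 'gla', 'cr3',
'paging_mode', 'cpl' and 'prot' assumed to be set up by the surrounding
emulation code:

	int error;
	uint64_t gpa;

	error = vmm_gla2gpa(vm, vcpuid, gla, cr3, &gpa, paging_mode, cpl, prot);
	if (error == 1) {
		/* A page fault was injected; resume the guest to handle it. */
		return (0);
	} else if (error == -1) {
		/* Translation failed without an exception being injected. */
		return (EFAULT);
	}
	/* error == 0: 'gpa' now holds the translated guest physical address. */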
Modified: head/sys/amd64/include/vmm.h
==============================================================================
--- head/sys/amd64/include/vmm.h	Thu May 22 00:46:03 2014	(r266523)
+++ head/sys/amd64/include/vmm.h	Thu May 22 03:14:54 2014	(r266524)
@@ -236,6 +236,7 @@ int vm_exception_pending(struct vm *vm,
 
 void vm_inject_gp(struct vm *vm, int vcpuid); /* general protection fault */
 void vm_inject_ud(struct vm *vm, int vcpuid); /* undefined instruction fault */
+void vm_inject_pf(struct vm *vm, int vcpuid, int error_code); /* page fault */
 
 #endif	/* KERNEL */

Modified: head/sys/amd64/include/vmm_instruction_emul.h
==============================================================================
--- head/sys/amd64/include/vmm_instruction_emul.h	Thu May 22 00:46:03 2014	(r266523)
+++ head/sys/amd64/include/vmm_instruction_emul.h	Thu May 22 03:14:54 2014	(r266524)
@@ -122,6 +122,16 @@ int vmm_fetch_instruction(struct vm *vm,
 			   enum vie_paging_mode paging_mode, int cpl,
 			   struct vie *vie);
 
+/*
+ * Translate the guest linear address 'gla' to a guest physical address.
+ *
+ * Returns 0 on success and '*gpa' contains the result of the translation.
+ * Returns 1 if a page fault exception was injected into the guest.
+ * Returns -1 otherwise.
+ */
+int vmm_gla2gpa(struct vm *vm, int vcpuid, uint64_t gla, uint64_t cr3,
+    uint64_t *gpa, enum vie_paging_mode paging_mode, int cpl, int prot);
+
 void	vie_init(struct vie *vie);
 
 /*

Modified: head/sys/amd64/vmm/vmm.c
==============================================================================
--- head/sys/amd64/vmm/vmm.c	Thu May 22 00:46:03 2014	(r266523)
+++ head/sys/amd64/vmm/vmm.c	Thu May 22 03:14:54 2014	(r266524)
@@ -1155,9 +1155,14 @@ vm_handle_inst_emul(struct vm *vm, int v
 	vie_init(vie);
 
 	/* Fetch, decode and emulate the faulting instruction */
-	if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3,
-	    paging_mode, cpl, vie) != 0)
+	error = vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3,
+	    paging_mode, cpl, vie);
+	if (error == 1)
+		return (0);	/* Resume guest to handle page fault */
+	else if (error == -1)
 		return (EFAULT);
+	else if (error != 0)
+		panic("%s: vmm_fetch_instruction error %d", __func__, error);
 
 	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, vie) != 0)
 		return (EFAULT);
@@ -1431,6 +1436,18 @@ vm_inject_fault(struct vm *vm, int vcpui
 }
 
 void
+vm_inject_pf(struct vm *vm, int vcpuid, int error_code)
+{
+	struct vm_exception pf = {
+		.vector = IDT_PF,
+		.error_code_valid = 1,
+		.error_code = error_code
+	};
+
+	vm_inject_fault(vm, vcpuid, &pf);
+}
+
+void
 vm_inject_gp(struct vm *vm, int vcpuid)
 {
 	struct vm_exception gpf = {
Modified: head/sys/amd64/vmm/vmm_instruction_emul.c
==============================================================================
--- head/sys/amd64/vmm/vmm_instruction_emul.c	Thu May 22 00:46:03 2014	(r266523)
+++ head/sys/amd64/vmm/vmm_instruction_emul.c	Thu May 22 03:14:54 2014	(r266524)
@@ -572,6 +572,23 @@ vie_init(struct vie *vie)
 	vie->index_register = VM_REG_LAST;
 }
 
+static int
+pf_error_code(int usermode, int prot, uint64_t pte)
+{
+	int error_code = 0;
+
+	if (pte & PG_V)
+		error_code |= PGEX_P;
+	if (prot & VM_PROT_WRITE)
+		error_code |= PGEX_W;
+	if (usermode)
+		error_code |= PGEX_U;
+	if (prot & VM_PROT_EXECUTE)
+		error_code |= PGEX_I;
+
+	return (error_code);
+}
+
 static void
 ptp_release(void **cookie)
 {
@@ -591,11 +608,11 @@ ptp_hold(struct vm *vm, vm_paddr_t ptpph
 	return (ptr);
 }
 
-static int
-gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys, uint64_t *gpa,
-    enum vie_paging_mode paging_mode, int cpl, int prot)
+int
+vmm_gla2gpa(struct vm *vm, int vcpuid, uint64_t gla, uint64_t ptpphys,
+    uint64_t *gpa, enum vie_paging_mode paging_mode, int cpl, int prot)
 {
-	int nlevels, ptpshift, ptpindex, retval, usermode, writable;
+	int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
 	u_int retries;
 	uint64_t *ptpbase, pte, pgsize;
 	uint32_t *ptpbase32, pte32;
@@ -604,7 +621,7 @@ gla2gpa(struct vm *vm, uint64_t gla, uin
 	usermode = (cpl == 3 ? 1 : 0);
 	writable = prot & VM_PROT_WRITE;
 	cookie = NULL;
-	retval = -1;
+	retval = 0;
 	retries = 0;
 restart:
 	ptp_release(&cookie);
@@ -633,11 +650,13 @@ restart:
 
 		pte32 = ptpbase32[ptpindex];
 
-		if ((pte32 & PG_V) == 0)
-			goto error;
-
-		if (usermode && (pte32 & PG_U) == 0)
-			goto error;
+		if ((pte32 & PG_V) == 0 ||
+		    (usermode && (pte32 & PG_U) == 0) ||
+		    (writable && (pte32 & PG_RW) == 0)) {
+			pfcode = pf_error_code(usermode, prot, pte32);
+			vm_inject_pf(vm, vcpuid, pfcode);
+			goto pagefault;
+		}
 
 		if (writable && (pte32 & PG_RW) == 0)
 			goto error;
@@ -689,8 +708,11 @@ restart:
 
 		pte = ptpbase[ptpindex];
 
-		if ((pte & PG_V) == 0)
-			goto error;
+		if ((pte & PG_V) == 0) {
+			pfcode = pf_error_code(usermode, prot, pte);
+			vm_inject_pf(vm, vcpuid, pfcode);
+			goto pagefault;
+		}
 
 		ptpphys = pte;
 
@@ -711,11 +733,13 @@ restart:
 
 		pte = ptpbase[ptpindex];
 
-		if ((pte & PG_V) == 0)
-			goto error;
-
-		if (usermode && (pte & PG_U) == 0)
-			goto error;
+		if ((pte & PG_V) == 0 ||
+		    (usermode && (pte & PG_U) == 0) ||
+		    (writable && (pte & PG_RW) == 0)) {
+			pfcode = pf_error_code(usermode, prot, pte);
+			vm_inject_pf(vm, vcpuid, pfcode);
+			goto pagefault;
+		}
 
 		if (writable && (pte & PG_RW) == 0)
 			goto error;
@@ -748,10 +772,14 @@ restart:
 	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
 	*gpa = pte | (gla & (pgsize - 1));
 done:
-	retval = 0;
-error:
 	ptp_release(&cookie);
 	return (retval);
+error:
+	retval = -1;
+	goto done;
+pagefault:
+	retval = 1;
+	goto done;
 }
 
 int
@@ -759,7 +787,7 @@ vmm_fetch_instruction(struct vm *vm, int
     uint64_t cr3, enum vie_paging_mode paging_mode, int cpl,
     struct vie *vie)
 {
-	int n, err, prot;
+	int n, error, prot;
 	uint64_t gpa, off;
 	void *hpa, *cookie;
 
@@ -773,9 +801,10 @@ vmm_fetch_instruction(struct vm *vm, int
 
 	/* Copy the instruction into 'vie' */
 	while (vie->num_valid < inst_length) {
-		err = gla2gpa(vm, rip, cr3, &gpa, paging_mode, cpl, prot);
-		if (err)
-			break;
+		error = vmm_gla2gpa(vm, cpuid, rip, cr3, &gpa, paging_mode,
+		    cpl, prot);
+		if (error)
+			return (error);
 
 		off = gpa & PAGE_MASK;
 		n = min(inst_length - vie->num_valid, PAGE_SIZE - off);
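The error code handed to vm_inject_pf() is assembled by the new pf_error_code()
helper above from the standard x86 page-fault error-code bits (PGEX_P, PGEX_W,
PGEX_U, PGEX_I).  As a worked example, again only a sketch and not part of the
diff, a CPL-3 write that reaches a present, user-accessible but read-only
mapping produces PGEX_P | PGEX_W | PGEX_U:

	uint64_t pte = PG_V | PG_U;	/* present, user, not writable */
	int pfcode;

	pfcode = pf_error_code(1, VM_PROT_WRITE, pte);	/* usermode = 1 */
	/* pfcode == (PGEX_P | PGEX_W | PGEX_U) == 0x7 */
	vm_inject_pf(vm, vcpuid, pfcode);	/* 'vm' and 'vcpuid' assumed in scope */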