From: Neel Natu <neel@FreeBSD.org>
Date: Sat, 29 Sep 2012 01:15:45 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject: svn commit: r241041 - in projects/bhyve/sys/amd64: include vmm

Author: neel
Date: Sat Sep 29 01:15:45 2012
New Revision: 241041
URL: http://svn.freebsd.org/changeset/base/241041

Log:
  Get rid of assumptions in the hypervisor that the host physical memory
  associated with guest physical memory is contiguous.

  In this case vm_malloc() was using vm_gpa2hpa() to indirectly infer
  whether or not the address range had already been allocated.

  Replace this with an explicit API 'vm_gpa_available()' that returns
  TRUE if a page is available for allocation in guest physical address
  space.
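To illustrate the new contract (a sketch only, not code from this commit):
a page-aligned range that is entirely free gets allocated, a range that is
already entirely allocated is treated as a no-op, and a range that
partially overlaps an existing segment, or whose gpa/len is unaligned or
zero, is rejected with EINVAL. The function name below is hypothetical and
the first allocation is assumed to succeed.

/*
 * Hypothetical sketch of the vm_malloc() contract after this change.
 * 'vm' is an existing struct vm; the expected return values mirror the
 * checks added in this revision.
 */
static void
vm_malloc_contract_sketch(struct vm *vm)
{
	int error;

	/* Page-aligned, previously unmapped range: allocated fresh. */
	error = vm_malloc(vm, 0x100000000UL, 16 * PAGE_SIZE);
	KASSERT(error == 0, ("allocation failed: %d", error));

	/* Exact same range again: already fully allocated, so a no-op. */
	error = vm_malloc(vm, 0x100000000UL, 16 * PAGE_SIZE);
	KASSERT(error == 0, ("re-allocation should be a no-op: %d", error));

	/* Range straddling allocated and free pages: rejected. */
	error = vm_malloc(vm, 0x100000000UL + 8 * PAGE_SIZE, 16 * PAGE_SIZE);
	KASSERT(error == EINVAL, ("partial overlap not rejected: %d", error));

	/* Unaligned gpa or len, or len == 0: rejected. */
	error = vm_malloc(vm, 0x1000, PAGE_SIZE - 1);
	KASSERT(error == EINVAL, ("unaligned length not rejected: %d", error));
}
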
Modified:
  projects/bhyve/sys/amd64/include/vmm.h
  projects/bhyve/sys/amd64/vmm/vmm.c
  projects/bhyve/sys/amd64/vmm/vmm_dev.c

Modified: projects/bhyve/sys/amd64/include/vmm.h
==============================================================================
--- projects/bhyve/sys/amd64/include/vmm.h	Fri Sep 28 22:26:44 2012	(r241040)
+++ projects/bhyve/sys/amd64/include/vmm.h	Sat Sep 29 01:15:45 2012	(r241041)
@@ -89,7 +89,7 @@ extern struct vmm_ops vmm_ops_amd;
 struct vm *vm_create(const char *name);
 void vm_destroy(struct vm *vm);
 const char *vm_name(struct vm *vm);
-int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa);
+int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len);
 int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
 int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
 vm_paddr_t vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t size);

Modified: projects/bhyve/sys/amd64/vmm/vmm.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/vmm.c	Fri Sep 28 22:26:44 2012	(r241040)
+++ projects/bhyve/sys/amd64/vmm/vmm.c	Sat Sep 29 01:15:45 2012	(r241041)
@@ -315,20 +315,63 @@ vm_unmap_mmio(struct vm *vm, vm_paddr_t
 	    VM_PROT_NONE, spok));
 }
 
+/*
+ * Returns TRUE if 'gpa' is available for allocation and FALSE otherwise
+ */
+static boolean_t
+vm_gpa_available(struct vm *vm, vm_paddr_t gpa)
+{
+	int i;
+	vm_paddr_t gpabase, gpalimit;
+
+	if (gpa & PAGE_MASK)
+		panic("vm_gpa_available: gpa (0x%016lx) not page aligned", gpa);
+
+	for (i = 0; i < vm->num_mem_segs; i++) {
+		gpabase = vm->mem_segs[i].gpa;
+		gpalimit = gpabase + vm->mem_segs[i].len;
+		if (gpa >= gpabase && gpa < gpalimit)
+			return (FALSE);
+	}
+
+	return (TRUE);
+}
+
 int
-vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa)
+vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
 {
-	int error;
-	vm_paddr_t hpa;
+	int error, available, allocated;
+	vm_paddr_t g, hpa;
 	const boolean_t spok = TRUE;	/* superpage mappings are ok */
 
+	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
+		return (EINVAL);
+
+	available = allocated = 0;
+	g = gpa;
+	while (g < gpa + len) {
+		if (vm_gpa_available(vm, g))
+			available++;
+		else
+			allocated++;
+
+		g += PAGE_SIZE;
+	}
+
 	/*
-	 * find the hpa if already it was already vm_malloc'd.
+	 * If there are some allocated and some available pages in the address
+	 * range then it is an error.
 	 */
-	hpa = vm_gpa2hpa(vm, gpa, len);
-	if (hpa != ((vm_paddr_t)-1))
-		goto out;
+	if (allocated && available)
+		return (EINVAL);
+
+	/*
+	 * If the entire address range being requested has already been
+	 * allocated then there isn't anything more to do.
+	 */
+	if (allocated && available == 0)
+		return (0);
 
 	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
 		return (E2BIG);
 
@@ -350,8 +393,7 @@ vm_malloc(struct vm *vm, vm_paddr_t gpa,
 	vm->mem_segs[vm->num_mem_segs].hpa = hpa;
 	vm->mem_segs[vm->num_mem_segs].len = len;
 	vm->num_mem_segs++;
-out:
-	*ret_hpa = hpa;
+
 	return (0);
 }

Modified: projects/bhyve/sys/amd64/vmm/vmm_dev.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/vmm_dev.c	Fri Sep 28 22:26:44 2012	(r241040)
+++ projects/bhyve/sys/amd64/vmm/vmm_dev.c	Sat Sep 29 01:15:45 2012	(r241041)
@@ -295,7 +295,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long c
 		break;
 	case VM_MAP_MEMORY:
 		seg = (struct vm_memory_segment *)data;
-		error = vm_malloc(sc->vm, seg->gpa, seg->len, &seg->hpa);
+		error = vm_malloc(sc->vm, seg->gpa, seg->len);
 		break;
 	case VM_GET_MEMORY_SEG:
 		seg = (struct vm_memory_segment *)data;
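
Since the VM_MAP_MEMORY ioctl no longer hands a host physical address back
through seg->hpa, code that still needs the HPA presumably has to translate
guest physical addresses a page at a time, because the backing host memory
may not be contiguous. A minimal sketch of such a walk using the existing
vm_gpa2hpa() interface follows; the helper name walk_gpa_range() is
hypothetical and not part of this commit.

/*
 * Hypothetical helper: resolve a guest physical range to host physical
 * pages one page at a time.  vm_gpa2hpa() is the existing interface
 * declared in vmm.h and returns (vm_paddr_t)-1 for an unmapped page.
 */
static int
walk_gpa_range(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	vm_paddr_t g, hpa;

	for (g = gpa; g < gpa + len; g += PAGE_SIZE) {
		hpa = vm_gpa2hpa(vm, g, PAGE_SIZE);
		if (hpa == (vm_paddr_t)-1)
			return (EINVAL);	/* page is not mapped */
		/* 'hpa' is valid for this single page only. */
	}

	return (0);
}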