Date: Fri, 6 Jun 2014 09:23:59 GMT
From: mihai@FreeBSD.org
To: svn-soc-all@FreeBSD.org
Subject: socsvn commit: r269167 - soc2014/mihai/bhyve-icache-head/sys/amd64/vmm
Message-ID: <201406060923.s569NxAn020124@socsvn.freebsd.org>
Author: mihai
Date: Fri Jun  6 09:23:59 2014
New Revision: 269167
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=269167

Log:
  sys: amd64: vmm: vmm_instruction_cache.c: change locking type to sx locks

Modified:
  soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c
  soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c

Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c	Fri Jun  6 08:42:03 2014	(r269166)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c	Fri Jun  6 09:23:59 2014	(r269167)
@@ -1108,6 +1108,12 @@
 	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
 	    ("vm_handle_paging: invalid fault_type %d", ftype));
 
+	if (ftype == VM_PROT_WRITE) {
+		/* Remove all the instructions that resides in this page */
+		vm_inst_cache_delete(vm, vme->u.paging.gpa, vme->u.paging.cr3);
+	}
+
+
 	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
 		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
 		    vme->u.paging.gpa, ftype);
@@ -1115,11 +1121,6 @@
 			goto done;
 	}
 
-	if (ftype == VM_PROT_WRITE) {
-		/* Remove all the instructions that resides in this page */
-		vm_inst_cache_delete(vm, vme->u.paging.gla, vme->u.paging.cr3);
-	}
-
 	map = &vm->vmspace->vm_map;
 	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c	Fri Jun  6 08:42:03 2014	(r269166)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c	Fri Jun  6 09:23:59 2014	(r269167)
@@ -34,14 +34,14 @@
 #include <sys/kernel.h>
 #include <sys/sysctl.h>
 #include <sys/lock.h>
-#include <sys/rmlock.h>
+#include <sys/sx.h>
 #include <sys/queue.h>
 #include <sys/hash.h>
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
 #include <vm/vm_map.h>
-
+#include <vm/vm_param.h>
 #include <machine/vmparam.h>
 #include <machine/vmm.h>
 #include <machine/vmm_instruction_cache.h>
@@ -76,7 +76,7 @@
 
 struct vie_cached_hash {
 	struct vie_cached_head	vie_cached_head;
-	struct rmlock		vie_cached_lock;
+	struct sx		vie_cached_lock;
 };
 
 static struct vie_cached_hash	vie_cached_hash[VIE_CACHE_HASH_SIZE];
@@ -92,26 +92,40 @@
 	for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
 		LIST_INIT(&vie_cached_hash[i].vie_cached_head);
-		rm_init(&vie_cached_hash[i].vie_cached_lock,
+		sx_init(&vie_cached_hash[i].vie_cached_lock,
 				"VIE CACHED HASH LOCK");
 	}
 
 	return (0);
 }
 
+/* Clean-up a hash entry */
+static void
+vmm_inst_cache_cleanup_helper(int hash)
+{
+	struct vie_cached *vie_cached, *vie_cached_safe;
+	int i;
+
+	LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[hash].vie_cached_head,
+			vie_link, vie_cached_safe)
+	{
+		for (i = 0; i < vie_cached->pages_count; i++)
+			vm_map_protect(&(vm_get_vmspace(vie_cached->vm)->vm_map),
+				vie_cached->pages[i],
+				vie_cached->pages[i] + vie_cached->pages_mask[i] + 1,
+				VM_PROT_ALL, 0);
+
+		LIST_REMOVE(vie_cached, vie_link);
+		free(vie_cached, M_VIECACHED);
+	}
+}
 int
 vmm_inst_cache_cleanup(void)
 {
-	struct vie_cached *vie_cached, *vie_cached_safe;
 	int i;
 
 	for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
-		LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[i].vie_cached_head,
-				vie_link, vie_cached_safe)
-		{
-			LIST_REMOVE(vie_cached, vie_link);
-			free(vie_cached, M_VIECACHED);
-		}
-		rm_destroy(&vie_cached_hash[i].vie_cached_lock);
+		vmm_inst_cache_cleanup_helper(i);
+		sx_destroy(&vie_cached_hash[i].vie_cached_lock);
 	}
 	return (0);
 }
@@ -119,7 +133,6 @@
 static int
 sysctl_vmm_cached_instruction(SYSCTL_HANDLER_ARGS)
 {
-	struct vie_cached *vie_cached, *vie_cached_safe;
 	int error, temp, i;
 
 	temp = vmm_cached_instruction_enable;
@@ -134,14 +147,9 @@
 		vmm_cached_instruction_enable = temp;
 		if (temp == 0) {
 			for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
-				rm_wlock(&vie_cached_hash[i].vie_cached_lock);
-				LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[i].vie_cached_head,
-						vie_link, vie_cached_safe)
-				{
-					LIST_REMOVE(vie_cached, vie_link);
-					free(vie_cached, M_VIECACHED);
-				}
-				rm_wunlock(&vie_cached_hash[i].vie_cached_lock);
+				sx_xlock(&vie_cached_hash[i].vie_cached_lock);
+				vmm_inst_cache_cleanup_helper(i);
+				sx_xunlock(&vie_cached_hash[i].vie_cached_lock);
 			}
 		}
 	}
@@ -158,7 +166,7 @@
 {
 	int nlevels, ptpshift, ptpindex;
 	uint64_t *ptpbase, pte, pgsize;
-	uint32_t *ptpbase32, pte32, i;
+	uint32_t *ptpbase32, pte32;
 	void *cookie;
 	uint64_t gla;
 
@@ -209,7 +217,7 @@
 		vie_cached->pages_mask[vie_cached->pages_count] = (1 << ptpshift) - 1;
 		vie_cached->pages[vie_cached->pages_count++] = pte32;
 
-		goto protect;
+		return (0);
 	}
 
 	if (paging_mode == PAGING_MODE_PAE) {
@@ -279,16 +287,7 @@
 	vie_cached->pages_mask[vie_cached->pages_count] = (1 << ptpshift) - 1;
 	vie_cached->pages[vie_cached->pages_count++] = pte;
 
-protect:
-	i=0;
-	for (i = 0; i < vie_cached->pages_count; i++) {
-//		printf("vm_map_protect %d, %lx -> %lx\n",i, vie_cached->pages[i], vie_cached->pages_mask[i]);
-		vm_map_protect(&(vm_get_vmspace(vm)->vm_map), vie_cached->pages[i],
-			vie_cached->pages[i] + vie_cached->pages_mask[i] + 1,
-			VM_PROT_READ | VM_PROT_EXECUTE, 0);
-	}
 	return (0);
-
 error:
 	return (-1);
 }
@@ -298,7 +297,7 @@
 		struct vie *vie)
 {
 	struct vie_cached *vie_cached;
-	int hash;
+	int hash, i;
 
 	/* Check to see if caching is enabled */
 	if (!vmm_cached_instruction_enable)
@@ -318,9 +317,16 @@
 	hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
 
-	rm_wlock(&vie_cached_hash[hash].vie_cached_lock);
+	sx_xlock(&vie_cached_hash[hash].vie_cached_lock);
+
 	LIST_INSERT_HEAD(&vie_cached_hash[hash].vie_cached_head, vie_cached, vie_link);
-	rm_wunlock(&vie_cached_hash[hash].vie_cached_lock);
+
+	for (i = 0; i < vie_cached->pages_count; i++)
+		vm_map_protect(&(vm_get_vmspace(vm)->vm_map), vie_cached->pages[i],
+			vie_cached->pages[i] + vie_cached->pages_mask[i] + 1,
+			VM_PROT_READ | VM_PROT_EXECUTE, 0);
+
+	sx_xunlock(&vie_cached_hash[hash].vie_cached_lock);
 
 	return (0);
 }
@@ -330,7 +336,6 @@
 		struct vie *vie)
 {
 	struct vie_cached *vie_cached;
-	struct rm_priotracker tracker;
 	int hash;
 
 	/* Check to see if caching is enabled */
@@ -339,7 +344,7 @@
 	hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
 
-	rm_rlock(&vie_cached_hash[hash].vie_cached_lock, &tracker);
+	sx_slock(&vie_cached_hash[hash].vie_cached_lock);
 
 	LIST_FOREACH(vie_cached, &vie_cached_hash[hash].vie_cached_head, vie_link) {
 		if (vie_cached->vm == vm &&
@@ -347,20 +352,20 @@
 			vie_cached->cr3 == cr3)
 		{
 			bcopy(&vie_cached->vie, vie, sizeof(struct vie));
-			rm_runlock(&vie_cached_hash[hash].vie_cached_lock, &tracker);
+			sx_sunlock(&vie_cached_hash[hash].vie_cached_lock);
 			return(0);
 		}
 	}
 
-	rm_runlock(&vie_cached_hash[hash].vie_cached_lock, &tracker);
+	sx_sunlock(&vie_cached_hash[hash].vie_cached_lock);
 
 	return (-1);
 }
 
 int
 vm_inst_cache_delete(struct vm *vm, uint64_t fault_address, uint64_t cr3)
 {
-	struct vie_cached *vie_cached;
+	struct vie_cached *vie_cached, *vie_cached_safe;
 	int hash, i;
-
+	uint64_t page_addr, page_size;
 	/* Check to see if caching is enabled */
 	if (!vmm_cached_instruction_enable)
 		return (0);
@@ -368,9 +373,14 @@
 	hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
 
-	rm_wlock(&vie_cached_hash[hash].vie_cached_lock);
+	page_addr = fault_address & ~PAGE_MASK;
+	page_size = PAGE_SIZE;
 
-	LIST_FOREACH(vie_cached, &vie_cached_hash[hash].vie_cached_head, vie_link) {
+	sx_xlock(&vie_cached_hash[hash].vie_cached_lock);
+
+	LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[hash].vie_cached_head,
+			vie_link, vie_cached_safe)
+	{
 		if (vie_cached->vm == vm &&
 			vie_cached->cr3 == cr3)
 		{
@@ -381,6 +391,10 @@
 			{
 				/* Remove the RIP found and continue searching */
 				LIST_REMOVE(vie_cached, vie_link);
+
+				page_addr = vie_cached->pages[i];
+				page_size = vie_cached->pages_mask[i] + 1;
+
 				/* Free the removed node */
 				free(vie_cached, M_VIECACHED);
 				break;
@@ -389,7 +403,10 @@
 		}
 	}
 
-	rm_wunlock(&vie_cached_hash[hash].vie_cached_lock);
+	vm_map_protect(&(vm_get_vmspace(vm)->vm_map), page_addr,
+		page_addr + page_size, VM_PROT_ALL, 0);
+
+	sx_xunlock(&vie_cached_hash[hash].vie_cached_lock);
 
 	return (0);
 }
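
For readers less familiar with the sx(9) API: the patch replaces the per-bucket
read-mostly (rm) locks with shared/exclusive (sx) locks, presumably so the lock
can stay held across vm_map_protect() and free(), which may sleep. Below is a
minimal sketch of the lock pattern the cache now follows; it is illustrative
only, not part of r269167, and the demo_* names are made up for the example.

/*
 * Illustrative sketch only (not from the commit): shared acquires for
 * lookups, exclusive acquires for insert/delete, as with the VIE cache.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx demo_lock;

static void
demo_init(void)
{
	sx_init(&demo_lock, "demo lock");	/* was rm_init() before the change */
}

static void
demo_read(void)
{
	sx_slock(&demo_lock);			/* shared: concurrent readers */
	/* walk a list; sleeping while the sx lock is held is permitted */
	sx_sunlock(&demo_lock);
}

static void
demo_write(void)
{
	sx_xlock(&demo_lock);			/* exclusive: single writer */
	/* modify the list, call sleepable routines such as vm_map_protect() */
	sx_xunlock(&demo_lock);
}

static void
demo_fini(void)
{
	sx_destroy(&demo_lock);
}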
