Date:      Tue, 20 May 2014 20:32:06 GMT
From:      mihai@FreeBSD.org
To:        svn-soc-all@FreeBSD.org
Subject:   socsvn commit: r268393 - in soc2014/mihai/bhyve-icache-head/sys/amd64: include vmm
Message-ID:  <201405202032.s4KKW6sJ094394@socsvn.freebsd.org>

Author: mihai
Date: Tue May 20 20:32:05 2014
New Revision: 268393
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=268393

Log:
  soc2014: mihai: sys: amd64: vmm: fix interface naming for the instruction cache and add copyright headers
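
For reference, a minimal caller-side sketch of the renamed instruction-cache
interface, pieced together from the vmm.c hunks below; the surrounding
module-event and fault-handling code is simplified here and is not a verbatim
excerpt:

        #include <machine/vmm_instruction_cache.h>

        /* MOD_LOAD / MOD_UNLOAD: set up and tear down the cache. */
        vmm_inst_cache_init();
        /* ... */
        vmm_inst_cache_cleanup();

        /*
         * Write fault on a guest page: drop any cached instructions
         * that live in that page.
         */
        if (ftype == VM_PROT_WRITE)
                vm_inst_cache_delete(vm, vme->u.paging.gla, vme->u.paging.cr3);

        /*
         * Instruction-emulation exit: reuse a cached decode when one
         * exists; otherwise fetch, decode and cache the instruction
         * (as the hunk reads, the lookup returns nonzero on a miss).
         */
        if (vm_inst_cache_lookup(vm, rip, cr3, vie)) {
                vie_init(vie);
                /* fetch and decode the faulting instruction ... */
                vm_inst_cache_add(vm, rip, cr3, vie);
        }

The naming split matches the arguments: the module-wide init/cleanup routines
keep the vmm_ prefix, while the per-VM operations that take a struct vm
pointer move to the vm_ prefix.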

Modified:
  soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h
  soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_emul.h
  soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c
  soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c

Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h	Tue May 20 19:55:59 2014	(r268392)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h	Tue May 20 20:32:05 2014	(r268393)
@@ -1,15 +1,41 @@
+/*-
+ * Copyright (c) 2014 Mihai Carabas.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
 #ifndef	_VMM_INSTRUCTION_CACHE_H_
 #define	_VMM_INSTRUCTION_CACHE_H_
 
-#ifdef _KERNEL
+struct vm;
+struct vie;
 
-int vmm_init_cached_instruction(void);
-int vmm_cleanup_cached_instruction(void);
-int vmm_add_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
+int vmm_inst_cache_init(void);
+int vmm_inst_cache_cleanup(void);
+int vm_inst_cache_add(struct vm *vm, uint64_t rip, uint64_t cr3,
 		    struct vie *vie);
-int vmm_get_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
+int vm_inst_cache_lookup(struct vm *vm, uint64_t rip, uint64_t cr3,
 		    struct vie *vie);
-int vmm_rm_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3);
-#endif	/* _KERNEL */
+int vm_inst_cache_delete(struct vm *vm, uint64_t rip, uint64_t cr3);
 
 #endif	/* _VMM_INSTRUCTION_EMUL_H_ */

Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_emul.h
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_emul.h	Tue May 20 19:55:59 2014	(r268392)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_emul.h	Tue May 20 20:32:05 2014	(r268393)
@@ -138,8 +138,6 @@
 int vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
 			   enum vie_cpu_mode cpu_mode, struct vie *vie);
 
-#include <machine/vmm_instruction_cache.h>
-
 #endif	/* _KERNEL */
 
 #endif	/* _VMM_INSTRUCTION_EMUL_H_ */

Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c	Tue May 20 19:55:59 2014	(r268392)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c	Tue May 20 20:32:05 2014	(r268393)
@@ -62,6 +62,7 @@
 
 #include <machine/vmm.h>
 #include <machine/vmm_dev.h>
+#include <machine/vmm_instruction_cache.h>
 
 #include "vmm_ktr.h"
 #include "vmm_host.h"
@@ -291,7 +292,7 @@
 		error = vmm_init();
 		if (error == 0) {
 			vmm_initialized = 1;
-			vmm_init_cached_instruction();
+			vmm_inst_cache_init();
 		}
 		break;
 	case MOD_UNLOAD:
@@ -309,7 +310,7 @@
 			if (error)
 				vmm_initialized = 0;
 
-			vmm_cleanup_cached_instruction();
+			vmm_inst_cache_cleanup();
 		}
 		break;
 	default:
@@ -1116,7 +1117,7 @@
 
 	if (ftype == VM_PROT_WRITE) {
 		/* Remove all the instructions that resides in this page */
-		vmm_rm_cached_instruction(vm, vme->u.paging.gla, vme->u.paging.cr3);
+		vm_inst_cache_delete(vm, vme->u.paging.gla, vme->u.paging.cr3);
 	}
 
 	map = &vm->vmspace->vm_map;
@@ -1161,7 +1162,7 @@
 	vie = &vme->u.inst_emul.vie;
 
 	/* Check to see if the instruction is cached */
-	if (vmm_get_cached_instruction(vm, rip, cr3, vie)) {
+	if (vm_inst_cache_lookup(vm, rip, cr3, vie)) {
 		vie_init(vie);
 
 		/* Fetch, decode and emulate the faulting instruction */
@@ -1178,7 +1179,7 @@
 		    VM_PROT_READ | VM_PROT_EXECUTE, 0);
 
 		/* Cache decoded instruction for further use */
-		vmm_add_cached_instruction(vm, rip, cr3, vie);
+		vm_inst_cache_add(vm, rip, cr3, vie);
 	}
 
 	/* return to userland unless this is an in-kernel emulated device */

Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c	Tue May 20 19:55:59 2014	(r268392)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c	Tue May 20 20:32:05 2014	(r268393)
@@ -1,4 +1,28 @@
-#ifdef _KERNEL
+/*-
+ * Copyright (c) 2014 Mihai Carabas.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
 
 #include <sys/cdefs.h>
 
@@ -19,7 +43,7 @@
 
 #include <machine/vmparam.h>
 #include <machine/vmm.h>
-
+#include <machine/vmm_instruction_cache.h>
 
 /* Instruction caching */
 
@@ -54,25 +78,27 @@
 static int vmm_cached_instruction_enable = 1;
 
 int
-vmm_init_cached_instruction(void)
+vmm_inst_cache_init(void)
 {
 	int i;
 
 	for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
 		LIST_INIT(&vie_cached_hash[i].vie_cached_head);
-		rm_init(&vie_cached_hash[i].vie_cached_lock, "VIE CACHED HASH LOCK");
+		rm_init(&vie_cached_hash[i].vie_cached_lock,
+		    "VIE CACHED HASH LOCK");
 	}
 	return (0);
 }
 
 int
-vmm_cleanup_cached_instruction(void)
+vmm_inst_cache_cleanup(void)
 {
 	struct vie_cached *vie_cached, *vie_cached_safe;
 	int i;
 
 	for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
-		LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[i].vie_cached_head, vie_link, vie_cached_safe)
+		LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[i].vie_cached_head,
+		    vie_link, vie_cached_safe)
 		{
 			LIST_REMOVE(vie_cached, vie_link);
 			free(vie_cached, M_VIECACHED);
@@ -101,7 +127,8 @@
 			if (temp == 0) {
 				for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
 					rm_wlock(&vie_cached_hash[i].vie_cached_lock);
-					LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[i].vie_cached_head, vie_link, vie_cached_safe)
+					LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[i].vie_cached_head,
+					    vie_link, vie_cached_safe)
 					{
 						LIST_REMOVE(vie_cached, vie_link);
 						free(vie_cached, M_VIECACHED);
@@ -118,12 +145,14 @@
 
 
 int
-vmm_add_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
+vm_inst_cache_add(struct vm *vm, uint64_t rip, uint64_t cr3,
 		    struct vie *vie)
 {
-	struct vie_cached *vie_cached = malloc(sizeof(struct vie_cached), M_VIECACHED, M_WAITOK | M_ZERO);
+	struct vie_cached *vie_cached;
 	int hash;
 
+	vie_cached = malloc(sizeof(struct vie_cached), M_VIECACHED, M_WAITOK | M_ZERO);
+
 	/* Check to see if caching is enabled */
 	if (!vmm_cached_instruction_enable)
 		return (0);
@@ -142,7 +171,7 @@
 }
 
 int
-vmm_get_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
+vm_inst_cache_lookup(struct vm *vm, uint64_t rip, uint64_t cr3,
 		    struct vie *vie)
 {
 	struct vie_cached *vie_cached;
@@ -172,7 +201,7 @@
 }
 
 int
-vmm_rm_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3)
+vm_inst_cache_delete(struct vm *vm, uint64_t rip, uint64_t cr3)
 {
 	struct vie_cached *vie_cached;
 	int hash;
@@ -201,4 +230,3 @@
 	rm_wunlock(&vie_cached_hash[hash].vie_cached_lock);
 	return (0);
 }
-#endif	/* _KERNEL */


