From owner-svn-src-head@FreeBSD.ORG Wed Jun 25 22:13:36 2014
Message-Id: <201406252213.s5PMDZtT072978@svn.freebsd.org>
From: Peter Grehan
Date: Wed, 25 Jun 2014 22:13:35 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r267884 - head/sys/amd64/vmm
X-SVN-Group: head
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
List-Id: SVN commit messages for the src tree for head/-current

Author: grehan
Date: Wed Jun 25 22:13:35 2014
New Revision: 267884
URL: http://svnweb.freebsd.org/changeset/base/267884

Log:
  Expose the amount of resident and wired memory from the guest's vmspace.
  This is different from the amount shown for the bhyve process by e.g.
  /usr/bin/top - that figure reflects the mappings faulted in through the
  mmap'd region of guest memory.

  The values can be fetched with bhyvectl:

   # bhyvectl --get-stats --vm=myvm
   ...
   Resident memory                             413749248
   Wired memory                                0
   ...

  vmm_stat.[ch] -
   Modify the counter code in bhyve to allow direct setting of a counter,
   as opposed to incrementing it, and to provide a callback to fetch a
   counter's value.

  Reviewed by:	neel

Modified:
  head/sys/amd64/vmm/vmm.c
  head/sys/amd64/vmm/vmm_stat.c
  head/sys/amd64/vmm/vmm_stat.h
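For context, these counters travel the same path as every other vmm statistic:
bhyvectl asks libvmmapi for the per-vcpu stats buffer, and the kernel fills it
in via vmm_stat_copy() (see the vmm_stat.c change below).  A minimal userland
sketch in the spirit of "bhyvectl --get-stats" follows; it is illustrative
only, assumes the libvmmapi interfaces of this period (vm_open(),
vm_get_stats(), vm_get_stat_desc()), uses the placeholder guest name "myvm",
trims error handling, and would be built with something like
"cc stats.c -lvmmapi".

#include <sys/types.h>
#include <sys/time.h>

#include <machine/vmm.h>

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#include <vmmapi.h>

int
main(void)
{
	struct vmctx *ctx;
	struct timeval tv;
	uint64_t *stats;
	const char *desc;
	int i, num_stats;

	/* "myvm" is a placeholder guest name */
	ctx = vm_open("myvm");
	if (ctx == NULL)
		return (1);

	/* The new memory stats are global, so they are reported via vcpu 0 */
	stats = vm_get_stats(ctx, 0, &tv, &num_stats);
	if (stats == NULL)
		return (1);

	/* Print only the memory counters, in the same style as bhyvectl */
	for (i = 0; i < num_stats; i++) {
		desc = vm_get_stat_desc(ctx, i);
		if (desc != NULL && strstr(desc, "memory") != NULL)
			printf("%-40s %" PRIu64 "\n", desc, stats[i]);
	}
	return (0);
}
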
Modified: head/sys/amd64/vmm/vmm.c
==============================================================================
--- head/sys/amd64/vmm/vmm.c	Wed Jun 25 20:30:47 2014	(r267883)
+++ head/sys/amd64/vmm/vmm.c	Wed Jun 25 22:13:35 2014	(r267884)
@@ -1992,3 +1992,34 @@ vm_segment_name(int seg)
 	    ("%s: invalid segment encoding %d", __func__, seg));
 	return (seg_names[seg]);
 }
+
+
+/*
+ * Return the amount of in-use and wired memory for the VM. Since
+ * these are global stats, only return the values for vCPU 0.
+ */
+VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
+VMM_STAT_DECLARE(VMM_MEM_WIRED);
+
+static void
+vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
+{
+
+	if (vcpu == 0) {
+		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
+		    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
+	}
+}
+
+static void
+vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
+{
+
+	if (vcpu == 0) {
+		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
+		    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
+	}
+}
+
+VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
+VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);

Modified: head/sys/amd64/vmm/vmm_stat.c
==============================================================================
--- head/sys/amd64/vmm/vmm_stat.c	Wed Jun 25 20:30:47 2014	(r267883)
+++ head/sys/amd64/vmm/vmm_stat.c	Wed Jun 25 22:13:35 2014	(r267884)
@@ -83,12 +83,21 @@ vmm_stat_register(void *arg)
 int
 vmm_stat_copy(struct vm *vm, int vcpu, int *num_stats, uint64_t *buf)
 {
-	int i;
+	struct vmm_stat_type *vst;
 	uint64_t *stats;
+	int i;
 
 	if (vcpu < 0 || vcpu >= VM_MAXCPU)
 		return (EINVAL);
-
+
+	/* Let stats functions update their counters */
+	for (i = 0; i < vst_num_types; i++) {
+		vst = vsttab[i];
+		if (vst->func != NULL)
+			(*vst->func)(vm, vcpu, vst);
+	}
+
+	/* Copy over the stats */
 	stats = vcpu_stats(vm, vcpu);
 	for (i = 0; i < vst_num_elems; i++)
 		buf[i] = stats[i];

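The vmm.c and vmm_stat.c hunks above are the whole pattern for a counter that
is computed at fetch time rather than incremented on an event: declare the
stat, provide a callback that calls the new vmm_stat_set() for vcpu 0, and
register it with VMM_STAT_FUNC() (added in the vmm_stat.h hunk below).  As a
rough sketch only, a hypothetical third counter for swapped-out guest memory
could follow the same shape; VMM_MEM_SWAPPED, vm_get_swapcnt() and the use of
vmspace_swap_count() are illustrative assumptions, not part of this commit.

/* Hypothetical example, not part of r267884 */
VMM_STAT_DECLARE(VMM_MEM_SWAPPED);

static void
vm_get_swapcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	/* Global per-VM value, so only report it against vcpu 0 */
	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_SWAPPED,
		    PAGE_SIZE * vmspace_swap_count(vm->vmspace));
	}
}

VMM_STAT_FUNC(VMM_MEM_SWAPPED, "Swapped memory", vm_get_swapcnt);
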
Modified: head/sys/amd64/vmm/vmm_stat.h
==============================================================================
--- head/sys/amd64/vmm/vmm_stat.h	Wed Jun 25 20:30:47 2014	(r267883)
+++ head/sys/amd64/vmm/vmm_stat.h	Wed Jun 25 22:13:35 2014	(r267884)
@@ -42,21 +42,29 @@ enum vmm_stat_scope {
 	VMM_STAT_SCOPE_AMD,		/* AMD SVM specific statistic */
 };
 
+struct vmm_stat_type;
+typedef void (*vmm_stat_func_t)(struct vm *vm, int vcpu,
+    struct vmm_stat_type *stat);
+
 struct vmm_stat_type {
 	int	index;			/* position in the stats buffer */
 	int	nelems;			/* standalone or array */
 	const char *desc;		/* description of statistic */
+	vmm_stat_func_t func;
 	enum vmm_stat_scope scope;
 };
 
 void	vmm_stat_register(void *arg);
 
-#define	VMM_STAT_DEFINE(type, nelems, desc, scope)			\
+#define	VMM_STAT_FDEFINE(type, nelems, desc, func, scope)		\
 	struct vmm_stat_type type[1] = {				\
-		{ -1, nelems, desc, scope }				\
+		{ -1, nelems, desc, func, scope }			\
 	};								\
 	SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)
 
+#define	VMM_STAT_DEFINE(type, nelems, desc, scope)			\
+	VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)
+
 #define	VMM_STAT_DECLARE(type)						\
 	extern struct vmm_stat_type type[1]
 
@@ -67,6 +75,9 @@ void	vmm_stat_register(void *arg);
 #define	VMM_STAT_AMD(type, desc)	\
 	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)
 
+#define	VMM_STAT_FUNC(type, desc, func)	\
+	VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)
+
 #define	VMM_STAT_ARRAY(type, nelems, desc)	\
 	VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
 
@@ -93,9 +104,22 @@ vmm_stat_array_incr(struct vm *vm, int v
 	stats[vst->index + statidx] += x;
 #endif
 }
-
 static void __inline
+vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
+		   int statidx, uint64_t val)
+{
+#ifdef VMM_KEEP_STATS
+	uint64_t *stats;
+
+	stats = vcpu_stats(vm, vcpu);
+
+	if (vst->index >= 0 && statidx < vst->nelems)
+		stats[vst->index + statidx] = val;
+#endif
+}
+
+static void __inline
 vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
 {
 
@@ -104,6 +128,15 @@ vmm_stat_incr(struct vm *vm, int vcpu, s
 #endif
 }
 
+static void __inline
+vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
+{
+
+#ifdef VMM_KEEP_STATS
+	vmm_stat_array_set(vm, vcpu, vst, 0, val);
+#endif
+}
+
 VMM_STAT_DECLARE(VCPU_MIGRATIONS);
 VMM_STAT_DECLARE(VMEXIT_COUNT);
 VMM_STAT_DECLARE(VMEXIT_EXTINT);
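
For reference, VMM_STAT_FUNC() is just VMM_STAT_FDEFINE() with nelems fixed at
1 and a scope of VMM_STAT_SCOPE_ANY, so a registration such as

	VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);

expands to roughly

	struct vmm_stat_type VMM_MEM_RESIDENT[1] = {
		{ -1, 1, "Resident memory", vm_get_rescnt, VMM_STAT_SCOPE_ANY }
	};
	SYSINIT(VMM_MEM_RESIDENT_stat, SI_SUB_KLD, SI_ORDER_ANY,
	    vmm_stat_register, VMM_MEM_RESIDENT);

The -1 is the placeholder index that vmm_stat_register() fills in with the
stat's slot in the per-vcpu buffer when the SYSINIT runs, and the non-NULL
func pointer is what vmm_stat_copy() now checks so the callback can refresh
the counter just before the stats are copied out to userland.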