From owner-svn-src-all@FreeBSD.ORG Fri Feb 22 00:46:33 2013 Return-Path: Delivered-To: svn-src-all@freebsd.org Received: from mx1.freebsd.org (mx1.FreeBSD.org [8.8.178.115]) by hub.freebsd.org (Postfix) with ESMTP id AFC7C399; Fri, 22 Feb 2013 00:46:33 +0000 (UTC) (envelope-from grehan@FreeBSD.org) Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:1900:2254:2068::e6a:0]) by mx1.freebsd.org (Postfix) with ESMTP id 89F6E9A2; Fri, 22 Feb 2013 00:46:33 +0000 (UTC) Received: from svn.freebsd.org ([127.0.1.70]) by svn.freebsd.org (8.14.5/8.14.5) with ESMTP id r1M0kXN7039941; Fri, 22 Feb 2013 00:46:33 GMT (envelope-from grehan@svn.freebsd.org) Received: (from grehan@localhost) by svn.freebsd.org (8.14.5/8.14.5/Submit) id r1M0kWvD039938; Fri, 22 Feb 2013 00:46:32 GMT (envelope-from grehan@svn.freebsd.org) Message-Id: <201302220046.r1M0kWvD039938@svn.freebsd.org> From: Peter Grehan Date: Fri, 22 Feb 2013 00:46:32 +0000 (UTC) To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org Subject: svn commit: r247144 - head/usr.sbin/bhyve X-SVN-Group: head MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-BeenThere: svn-src-all@freebsd.org X-Mailman-Version: 2.1.14 Precedence: list List-Id: "SVN commit messages for the entire src tree \(except for " user" and " projects" \)" List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Fri, 22 Feb 2013 00:46:33 -0000 Author: grehan Date: Fri Feb 22 00:46:32 2013 New Revision: 247144 URL: http://svnweb.freebsd.org/changeset/base/247144 Log: Add the ability to have a 'fallback' search for memory ranges. This set of ranges will be looked at if a standard memory range isn't found, and won't be installed in the cache. Use this to implement the memory behaviour of the PCI hole on x86 systems, where writes are ignored and reads always return -1. 
This allows breakpoints to be set when issuing a 'boot -d', which has the side effect of accessing the PCI hole when changing the PTE protection on kernel code, since the pmap layer hasn't been initialized (a bug, but present in existing FreeBSD releases so has to be handled). Reviewed by: neel Obtained from: NetApp Modified: head/usr.sbin/bhyve/mem.c head/usr.sbin/bhyve/mem.h head/usr.sbin/bhyve/pci_emul.c Modified: head/usr.sbin/bhyve/mem.c ============================================================================== --- head/usr.sbin/bhyve/mem.c Thu Feb 21 22:48:25 2013 (r247143) +++ head/usr.sbin/bhyve/mem.c Fri Feb 22 00:46:32 2013 (r247144) @@ -62,7 +62,7 @@ struct mmio_rb_range { struct mmio_rb_tree; RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare); -RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rbroot; +RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback; /* * Per-vCPU cache. Since most accesses from a vCPU will be to @@ -82,13 +82,14 @@ mmio_rb_range_compare(struct mmio_rb_ran } static int -mmio_rb_lookup(uint64_t addr, struct mmio_rb_range **entry) +mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr, + struct mmio_rb_range **entry) { struct mmio_rb_range find, *res; find.mr_base = find.mr_end = addr; - res = RB_FIND(mmio_rb_tree, &mmio_rbroot, &find); + res = RB_FIND(mmio_rb_tree, rbt, &find); if (res != NULL) { *entry = res; @@ -99,11 +100,11 @@ mmio_rb_lookup(uint64_t addr, struct mmi } static int -mmio_rb_add(struct mmio_rb_range *new) +mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new) { struct mmio_rb_range *overlap; - overlap = RB_INSERT(mmio_rb_tree, &mmio_rbroot, new); + overlap = RB_INSERT(mmio_rb_tree, rbt, new); if (overlap != NULL) { #ifdef RB_DEBUG @@ -120,11 +121,11 @@ mmio_rb_add(struct mmio_rb_range *new) #if 0 static void -mmio_rb_dump(void) +mmio_rb_dump(struct mmio_rb_tree *rbt) { struct mmio_rb_range *np; - RB_FOREACH(np, mmio_rb_tree, &mmio_rbroot) { + RB_FOREACH(np, 
mmio_rb_tree, rbt) { printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end, np->mr_param.name); } @@ -172,22 +173,22 @@ emulate_mem(struct vmctx *ctx, int vcpu, entry = NULL; if (entry == NULL) { - if (mmio_rb_lookup(paddr, &entry)) + if (!mmio_rb_lookup(&mmio_rb_root, paddr, &entry)) { + /* Update the per-vCPU cache */ + mmio_hint[vcpu] = entry; + } else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) { return (ESRCH); - - /* Update the per-vCPU cache */ - mmio_hint[vcpu] = entry; + } } - assert(entry != NULL && entry == mmio_hint[vcpu]); - + assert(entry != NULL); err = vmm_emulate_instruction(ctx, vcpu, paddr, vie, mem_read, mem_write, &entry->mr_param); return (err); } -int -register_mem(struct mem_range *memp) +static int +register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp) { struct mmio_rb_range *mrp; int err; @@ -201,7 +202,7 @@ register_mem(struct mem_range *memp) mrp->mr_base = memp->base; mrp->mr_end = memp->base + memp->size - 1; - err = mmio_rb_add(mrp); + err = mmio_rb_add(rbt, mrp); if (err) free(mrp); } else @@ -210,9 +211,24 @@ register_mem(struct mem_range *memp) return (err); } +int +register_mem(struct mem_range *memp) +{ + + return (register_mem_int(&mmio_rb_root, memp)); +} + +int +register_mem_fallback(struct mem_range *memp) +{ + + return (register_mem_int(&mmio_rb_fallback, memp)); +} + void init_mem(void) { - RB_INIT(&mmio_rbroot); + RB_INIT(&mmio_rb_root); + RB_INIT(&mmio_rb_fallback); } Modified: head/usr.sbin/bhyve/mem.h ============================================================================== --- head/usr.sbin/bhyve/mem.h Thu Feb 21 22:48:25 2013 (r247143) +++ head/usr.sbin/bhyve/mem.h Fri Feb 22 00:46:32 2013 (r247144) @@ -53,5 +53,6 @@ void init_mem(void); int emulate_mem(struct vmctx *, int vcpu, uint64_t paddr, struct vie *vie); int register_mem(struct mem_range *memp); +int register_mem_fallback(struct mem_range *memp); #endif /* _MEM_H_ */ Modified: head/usr.sbin/bhyve/pci_emul.c 
============================================================================== --- head/usr.sbin/bhyve/pci_emul.c Thu Feb 21 22:48:25 2013 (r247143) +++ head/usr.sbin/bhyve/pci_emul.c Fri Feb 22 00:46:32 2013 (r247144) @@ -846,12 +846,29 @@ pci_emul_iscap(struct pci_devinst *pi, i return (found); } +static int +pci_emul_fallback_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, + int size, uint64_t *val, void *arg1, long arg2) +{ + /* + * Ignore writes; return 0xff's for reads. The mem read code + * will take care of truncating to the correct size. + */ + if (dir == MEM_F_READ) { + *val = 0xffffffffffffffff; + } + + return (0); +} + void init_pci(struct vmctx *ctx) { + struct mem_range memp; struct pci_devemu *pde; struct slotinfo *si; int slot, func; + int error; pci_emul_iobase = PCI_EMUL_IOBASE; pci_emul_membase32 = PCI_EMUL_MEMBASE32; @@ -879,6 +896,20 @@ init_pci(struct vmctx *ctx) lirq[11].li_generic = 1; lirq[12].li_generic = 1; lirq[15].li_generic = 1; + + /* + * Setup the PCI hole to return 0xff's when accessed in a region + * with no devices + */ + memset(&memp, 0, sizeof(struct mem_range)); + memp.name = "PCI hole"; + memp.flags = MEM_F_RW; + memp.base = lomem_sz; + memp.size = (4ULL * 1024 * 1024 * 1024) - lomem_sz; + memp.handler = pci_emul_fallback_handler; + + error = register_mem_fallback(&memp); + assert(error == 0); } int