From: Marcel Moolenaar <marcel@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject: svn commit: r219626 - projects/altix/sys/boot/ia64/common
Date: Sun, 13 Mar 2011 22:04:19 +0000 (UTC)

Author: marcel
Date: Sun Mar 13 22:04:19 2011
New Revision: 219626
URL: http://svn.freebsd.org/changeset/base/219626

Log:
  o   Make sure the page table has a size that is mappable; certain page
      sizes are not supported.
  o   Map the PBVM page table.
  o   Map the PBVM using the largest possible power of 2 that is no larger
      than the amount of PBVM used, rounded down to a valid page size.
      Note that the current kernel is between 8MB and 16MB in size, which
      would make 8MB the typical size of the mapping, if only 8MB weren't
      an invalid page size. In practice, we end up mapping the first 4MB
      of PBVM in most cases.

Modified:
  projects/altix/sys/boot/ia64/common/copy.c
  projects/altix/sys/boot/ia64/common/exec.c

Modified: projects/altix/sys/boot/ia64/common/copy.c
==============================================================================
--- projects/altix/sys/boot/ia64/common/copy.c	Sun Mar 13 21:51:47 2011	(r219625)
+++ projects/altix/sys/boot/ia64/common/copy.c	Sun Mar 13 22:04:19 2011	(r219626)
@@ -43,6 +43,8 @@ pgtbl_extend(u_int idx)
 	u_int pot;
 
 	pgtblsz = (idx + 1) << 3;
+
+	/* The minimum size is 4KB. */
 	if (pgtblsz < 4096)
 		pgtblsz = 4096;
 
@@ -52,6 +54,14 @@ pgtbl_extend(u_int idx)
 		pgtblsz = pgtblsz | (pgtblsz >> pot);
 	pgtblsz++;
 
+	/* The maximum size is 1MB. */
+	if (pgtblsz > 1048576)
+		return (ENOMEM);
+
+	/* Make sure the size is a valid (mappable) page size. */
+	if (pgtblsz == 32*1024 || pgtblsz == 128*1024 || pgtblsz == 512*1024)
+		pgtblsz <<= 1;
+
 	/* Allocate naturally aligned memory. */
 	pgtbl = (void *)ia64_platform_alloc(0, pgtblsz);
 	if (pgtbl == NULL)
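
For reference, the sizes the fixup above has to deal with are powers of 2
between 4KB and 1MB, and in that range 32KB, 128KB and 512KB are not page
sizes the processor can insert a translation for, which is why they are
doubled. A small standalone sketch of the resulting size computation
(pgtbl_size() is a hypothetical helper written for illustration; the
power-of-2 round-up stands in for the shift/or sequence already present in
pgtbl_extend()):

#include <errno.h>
#include <stdio.h>

/*
 * Illustrative stand-in for the size fixup in pgtbl_extend(): given the
 * number of bytes the page table needs, compute a size that can be wired
 * with a single translation.  The 4KB minimum, 1MB maximum and the
 * 32KB/128KB/512KB exclusions mirror the checks added by this commit.
 */
static int
pgtbl_size(size_t need, size_t *szp)
{
	size_t sz;

	/* Round up to a power of 2, with a 4KB minimum. */
	sz = 4096;
	while (sz < need)
		sz <<= 1;

	/* The maximum size is 1MB. */
	if (sz > 1048576)
		return (ENOMEM);

	/* 32KB, 128KB and 512KB are not valid (mappable) page sizes. */
	if (sz == 32 * 1024 || sz == 128 * 1024 || sz == 512 * 1024)
		sz <<= 1;

	*szp = sz;
	return (0);
}

int
main(void)
{
	size_t need[] = { 1000, 20000, 100000, 400000, 2000000 };
	size_t sz;
	unsigned int i;

	for (i = 0; i < sizeof(need) / sizeof(need[0]); i++) {
		if (pgtbl_size(need[i], &sz) == 0)
			printf("%7zu bytes needed -> %7zu byte page table\n",
			    need[i], sz);
		else
			printf("%7zu bytes needed -> too large\n", need[i]);
	}
	return (0);
}
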
Modified: projects/altix/sys/boot/ia64/common/exec.c
==============================================================================
--- projects/altix/sys/boot/ia64/common/exec.c	Sun Mar 13 21:51:47 2011	(r219625)
+++ projects/altix/sys/boot/ia64/common/exec.c	Sun Mar 13 22:04:19 2011	(r219626)
@@ -85,9 +85,52 @@ enter_kernel(uint64_t start, struct boot
 }
 
 static void
-mmu_setup_legacy(uint64_t entry)
+mmu_wire(vm_offset_t va, vm_paddr_t pa, u_int sz, u_int acc)
 {
+	static u_int iidx = 0, didx = 0;
 	pt_entry_t pte;
+	u_int shft;
+
+	/* Round up to the smallest possible page size. */
+	if (sz < 4096)
+		sz = 4096;
+	/* Determine the exponent (base 2). */
+	shft = 0;
+	while (sz > 1) {
+		shft++;
+		sz >>= 1;
+	}
+	/* Truncate to the largest possible page size (256MB). */
+	if (shft > 28)
+		shft = 28;
+	/* Round down to a valid (mappable) page size. */
+	if (shft > 14 && (shft & 1) != 0)
+		shft--;
+
+	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
+	    PTE_PL_KERN | (acc & PTE_AR_MASK) | (pa & PTE_PPN_MASK);
+
+	__asm __volatile("mov cr.ifa=%0" :: "r"(va));
+	__asm __volatile("mov cr.itir=%0" :: "r"(shft << 2));
+	__asm __volatile("srlz.d;;");
+	__asm __volatile("ptr.d %0,%1" :: "r"(va), "r"(shft << 2));
+	__asm __volatile("srlz.d;;");
+	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(didx), "r"(pte));
+	__asm __volatile("srlz.d;;");
+	didx++;
+
+	if (acc == PTE_AR_RWX) {
+		__asm __volatile("ptr.i %0,%1;;" :: "r"(va), "r"(shft << 2));
+		__asm __volatile("srlz.i;;");
+		__asm __volatile("itr.i itr[%0]=%1;;" :: "r"(iidx), "r"(pte));
+		__asm __volatile("srlz.i;;");
+		iidx++;
+	}
+}
+
+static void
+mmu_setup_legacy(uint64_t entry)
+{
 
 	/*
 	 * Region 6 is direct mapped UC and region 7 is direct mapped
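
The new mmu_wire() above performs the analogous rounding for translation
sizes: the size is rounded up to at least 4KB, reduced to a base-2
exponent, capped at 28 (256MB), and odd exponents above 14 are decremented,
since 32KB, 128KB, 512KB, 2MB, 8MB, 32MB and 128MB cannot be inserted as
single translations. A standalone sketch of just that exponent computation
(size_to_pageshift() is an illustrative name, not part of the commit):

#include <stdio.h>

/*
 * Illustrative re-statement of the page-size rounding in mmu_wire():
 * returns the log2 page-size exponent that would be programmed into
 * cr.itir for a mapping of 'sz' bytes.
 */
static unsigned int
size_to_pageshift(unsigned long sz)
{
	unsigned int shft;

	/* Round up to the smallest possible page size (4KB). */
	if (sz < 4096)
		sz = 4096;
	/* Exponent (base 2); non-power-of-2 sizes round down. */
	shft = 0;
	while (sz > 1) {
		shft++;
		sz >>= 1;
	}
	/* Truncate to the largest possible page size (256MB). */
	if (shft > 28)
		shft = 28;
	/* Odd exponents above 14 (32KB, 128KB, ...) are not mappable. */
	if (shft > 14 && (shft & 1) != 0)
		shft--;
	return (shft);
}

int
main(void)
{
	unsigned long sizes[] = { 1024, 8192, 32768, 8 << 20, 1UL << 30 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%10lu bytes -> page shift %u\n", sizes[i],
		    size_to_pageshift(sizes[i]));
	return (0);
}
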
@@ -99,53 +142,28 @@ mmu_setup_legacy(uint64_t entry)
 	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));
 	__asm __volatile("srlz.i;;");
 
-	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
-	    PTE_PL_KERN | PTE_AR_RWX | PTE_ED;
-	pte |= IA64_RR_MASK(entry) & PTE_PPN_MASK;
-
-	__asm __volatile("mov cr.ifa=%0" :: "r"(entry));
-	__asm __volatile("mov cr.itir=%0" :: "r"(28 << 2));
-	__asm __volatile("ptr.i %0,%1" :: "r"(entry), "r"(28<<2));
-	__asm __volatile("ptr.d %0,%1" :: "r"(entry), "r"(28<<2));
-	__asm __volatile("srlz.i;;");
-	__asm __volatile("itr.i itr[%0]=%1;;" :: "r"(0), "r"(pte));
-	__asm __volatile("srlz.i;;");
-	__asm __volatile("itr.d dtr[%0]=%1;;" :: "r"(0), "r"(pte));
-	__asm __volatile("srlz.i;;");
+	mmu_wire(entry, IA64_RR_MASK(entry), 1UL << 28, PTE_AR_RWX);
 }
 
 static void
-mmu_setup_paged(void)
+mmu_setup_paged(vm_offset_t pbvm_top)
 {
-	pt_entry_t pte;
 	u_int sz;
 
-	ia64_set_rr(IA64_RR_BASE(4), (4 << 8) | (IA64_PBVM_PAGE_SHIFT << 2));
+	ia64_set_rr(IA64_RR_BASE(IA64_PBVM_RR),
+	    (IA64_PBVM_RR << 8) | (IA64_PBVM_PAGE_SHIFT << 2));
 	__asm __volatile("srlz.i;;");
 
-	/*
-	 * Wire the PBVM page table.
-	 */
-
-	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
-	    PTE_PL_KERN | PTE_AR_RWX | PTE_ED;
-	pte |= ia64_pgtbl[0] & PTE_PPN_MASK;
-
-	/*
-	 * Size of the translation. This should be the largest power of 2
-	 * smaller than the LVM in use.
-	 */
-	sz = 24;
-
-	__asm __volatile("mov cr.ifa=%0" :: "r"(IA64_PBVM_BASE));
-	__asm __volatile("mov cr.itir=%0" :: "r"(sz << 2));
-	__asm __volatile("ptr.i %0,%1" :: "r"(IA64_PBVM_BASE), "r"(sz << 2));
-	__asm __volatile("ptr.d %0,%1" :: "r"(IA64_PBVM_BASE), "r"(sz << 2));
-	__asm __volatile("srlz.i;;");
-	__asm __volatile("itr.i itr[%0]=%1;;" :: "r"(0), "r"(pte));
-	__asm __volatile("srlz.i;;");
-	__asm __volatile("itr.d dtr[%0]=%1;;" :: "r"(0), "r"(pte));
-	__asm __volatile("srlz.i;;");
+	/* Wire the PBVM page table. */
+	mmu_wire(IA64_PBVM_PGTBL, (uintptr_t)ia64_pgtbl, ia64_pgtblsz,
+	    PTE_AR_RW);
+
+	/* Wire as much of the PBVM we can. This must be a power of 2. */
+	pbvm_top = (pbvm_top + IA64_PBVM_PAGE_MASK) & ~IA64_PBVM_PAGE_MASK;
+	sz = pbvm_top - IA64_PBVM_BASE;
+	while (sz & (sz - 1))
+		sz -= IA64_PBVM_PAGE_SIZE;
+	mmu_wire(IA64_PBVM_BASE, ia64_pgtbl[0], sz, PTE_AR_RWX);
 }
 
 static int
@@ -177,7 +195,7 @@ elf64_exec(struct preloaded_file *fp)
 	if (IS_LEGACY_KERNEL())
 		mmu_setup_legacy(hdr->e_entry);
 	else
-		mmu_setup_paged();
+		mmu_setup_paged((uintptr_t)(bi + 1));
 
 	enter_kernel(hdr->e_entry, bi);
 	/* NOTREACHED */
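
Taken together, the two roundings explain the note in the log message about
mapping only the first 4MB of PBVM in practice: mmu_setup_paged() shrinks
the wired size to the largest power of 2 that still fits in the PBVM
actually used, and mmu_wire() then rounds that down to a valid page size.
A worked sketch with hypothetical numbers (the 64KB PBVM page size and the
12MB kernel are assumptions made for illustration; PBVM_PAGE_SIZE and
PBVM_PAGE_MASK stand in for the IA64_PBVM_* constants):

#include <stdio.h>

/*
 * Worked example of the PBVM sizing in mmu_setup_paged(), assuming a
 * 64KB PBVM page size; the constant is an assumption for illustration,
 * not taken from this commit.
 */
#define	PBVM_PAGE_SIZE	(1UL << 16)
#define	PBVM_PAGE_MASK	(PBVM_PAGE_SIZE - 1)

int
main(void)
{
	unsigned long used, sz;
	unsigned int shft;

	/* Hypothetical amount of PBVM used by a ~12MB kernel. */
	used = 12UL << 20;

	/* Round up to a PBVM page boundary, as mmu_setup_paged() does. */
	used = (used + PBVM_PAGE_MASK) & ~PBVM_PAGE_MASK;

	/* Shrink to the largest power of 2 not larger than the usage. */
	sz = used;
	while (sz & (sz - 1))
		sz -= PBVM_PAGE_SIZE;

	/* mmu_wire() then rounds down to a valid page size. */
	shft = 0;
	while (sz > 1) {
		shft++;
		sz >>= 1;
	}
	if (shft > 28)
		shft = 28;
	if (shft > 14 && (shft & 1) != 0)
		shft--;

	/* For 12MB used: 8MB is the power of 2, 4MB the valid mapping. */
	printf("PBVM used: %lu MB, wired mapping: %lu MB\n",
	    used >> 20, (1UL << shft) >> 20);
	return (0);
}
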