Date:      Sun, 20 Apr 2014 14:45:18 +0000 (UTC)
From:      Andrew Turner <andrew@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r264695 - in projects/arm64/sys/arm64: arm64 include
Message-ID:  <201404201445.s3KEjIpj075315@svn.freebsd.org>

Author: andrew
Date: Sun Apr 20 14:45:17 2014
New Revision: 264695
URL: http://svnweb.freebsd.org/changeset/base/264695

Log:
  Start working on the pmap code. For now just allocate the full 1GiB
  available in the L2 table.
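
  For reference, a minimal standalone sketch (not part of the commit itself,
  based on the constants this change adds to pte.h below) of why one fully
  populated L2 table covers exactly the 1GiB behind a single L1 entry:

	#include <assert.h>
	#include <stdint.h>

	/* Based on the constants added to pte.h in this commit. */
	#define	L1_SHIFT	30
	#define	L1_SIZE		(UINT64_C(1) << L1_SHIFT)	/* 1 GiB per L1 entry */
	#define	L2_SHIFT	21
	#define	L2_SIZE		(UINT64_C(1) << L2_SHIFT)	/* 2 MiB per L2 block */
	#define	Ln_ENTRIES	(1 << 9)			/* 512 entries per table */

	int
	main(void)
	{
		/* A full L2 table maps exactly one L1 entry's worth of VA. */
		assert((uint64_t)Ln_ENTRIES * L2_SIZE == L1_SIZE);
		return (0);
	}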

Added:
  projects/arm64/sys/arm64/include/machdep.h   (contents, props changed)
Modified:
  projects/arm64/sys/arm64/arm64/locore.S
  projects/arm64/sys/arm64/arm64/machdep.c
  projects/arm64/sys/arm64/arm64/pmap.c
  projects/arm64/sys/arm64/include/pmap.h
  projects/arm64/sys/arm64/include/pte.h

Modified: projects/arm64/sys/arm64/arm64/locore.S
==============================================================================
--- projects/arm64/sys/arm64/arm64/locore.S	Sun Apr 20 13:37:22 2014	(r264694)
+++ projects/arm64/sys/arm64/arm64/locore.S	Sun Apr 20 14:45:17 2014	(r264695)
@@ -66,31 +66,55 @@ _start:
 	/* Create the page tables */
 	bl	create_pagetables
 
+	/*
+	 * At this point:
+	 * x27 = TTBR0 table
+	 * x26 = TTBR1 table
+	 */
+
 	/* Enable the mmu */
 	bl	start_mmu
 
-	ldr	x29, .Lvirtdone
-	br	x29
+	/* Jump to the virtual address space */
+	ldr	x15, .Lvirtdone
+	br	x15
 
 virtdone:
 	/* Set up the stack */
-	adr	x29, initstack
-	mov	sp, x29
+	adr	x15, initstack
+	mov	sp, x15
 
 	/* Zero the BSS */
-	ldr	x29, .Lbss
-	ldr	x28, .Lend
+	ldr	x15, .Lbss
+	ldr	x14, .Lend
 	b	2f
 1:
-	str	xzr, [x29], #8
-	cmp	x29, x28
+	str	xzr, [x15], #8
+	cmp	x15, x14
 	b.lo	1b
 2:
 
+	/* Back up the module pointer */
+	mov	x1, x0
+
+	/* Make the page table base a virtual address */
+	sub	x26, x26, x29
+
+	sub	sp, sp, #(64 * 4)
+	mov	x0, sp
+
+	/* Negate the delta so it is VA -> PA */
+	neg	x29, x29
+
+	str	x1,  [x0]	/* modulep */
+	str	x26, [x0, 8]	/* kern_l1pt */
+	str	x29, [x0, 16]	/* kern_delta */
+
 	/* Branch to C code */
 	bl	initarm
 
-3:	b	3b
+3:	wfi
+	b	3b
 
 	.align 3
 .Lvirtdone:
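
The three str instructions added in virtdone lay the boot parameters out at
8-byte offsets that must match struct arm64_bootparams, added in machdep.h
below. A minimal compile-time sketch of that assumption, with vm_offset_t
taken here to be a 64-bit type (an assumption for the sketch):

	#include <stddef.h>
	#include <stdint.h>

	typedef uint64_t vm_offset_t;	/* assumption: 64-bit VAs on arm64 */

	struct arm64_bootparams {
		vm_offset_t	modulep;
		vm_offset_t	kern_l1pt;
		uint64_t	kern_delta;
	};

	/* The str offsets used in virtdone (0, 8, 16) must line up with these. */
	_Static_assert(offsetof(struct arm64_bootparams, modulep) == 0,
	    "modulep offset");
	_Static_assert(offsetof(struct arm64_bootparams, kern_l1pt) == 8,
	    "kern_l1pt offset");
	_Static_assert(offsetof(struct arm64_bootparams, kern_delta) == 16,
	    "kern_delta offset");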

Modified: projects/arm64/sys/arm64/arm64/machdep.c
==============================================================================
--- projects/arm64/sys/arm64/arm64/machdep.c	Sun Apr 20 13:37:22 2014	(r264694)
+++ projects/arm64/sys/arm64/arm64/machdep.c	Sun Apr 20 14:45:17 2014	(r264695)
@@ -43,7 +43,10 @@ __FBSDID("$FreeBSD$");
 #include <sys/sysproto.h>
 #include <sys/ucontext.h>
 
+#include <vm/pmap.h>
+
 #include <machine/cpu.h>
+#include <machine/machdep.h>
 #include <machine/metadata.h>
 #include <machine/pcb.h>
 #include <machine/reg.h>
@@ -260,8 +263,6 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, 
 	panic("sendsig");
 }
 
-void initarm(vm_offset_t);
-
 #ifdef EARLY_PRINTF
 static void 
 foundation_early_putc(int c)
@@ -440,24 +441,35 @@ add_efi_map_entries(struct efi_map_heade
 			break;
 	}
 }
+
 void
-initarm(vm_offset_t modulep)
+initarm(struct arm64_bootparams *abp)
 {
 	vm_paddr_t physmap[PHYSMAP_SIZE];
 	struct efi_map_header *efihdr;
+	vm_offset_t lastaddr;
 	int physmap_idx;
 	caddr_t kmdp;
 	vm_paddr_t mem_len;
 	int i;
 
-	printf("In initarm on arm64 %llx\n", modulep);
+	printf("In initarm on arm64\n");
+
+	/* Set the module data location */
+	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);
 
 	/* Find the kernel address */
-	preload_metadata = (caddr_t)(uintptr_t)(modulep);
 	kmdp = preload_search_by_type("elf kernel");
 	if (kmdp == NULL)
 		kmdp = preload_search_by_type("elf64 kernel");
 
+	/* Find the address to start allocating from */
+	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
+
+	/* Bootstrap enough of pmap to enter the kernel proper */
+	pmap_bootstrap(abp->kern_l1pt, KERNBASE - abp->kern_delta,
+	    lastaddr - KERNBASE);
+
 	/* Load the physical memory ranges */
 	physmap_idx = 0;
 	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
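
The kern_delta handed over from locore is the kernel's VA-to-PA offset, so
KERNBASE - kern_delta recovers the physical load address that initarm passes
to pmap_bootstrap. A minimal sketch of that relation; both addresses below are
hypothetical, for illustration only:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical values, for illustration only. */
	#define	KERNBASE	UINT64_C(0xffffff8000000000)	/* kernel virtual base */
	#define	LOAD_PA		UINT64_C(0x0000000080100000)	/* where locore was loaded */

	int
	main(void)
	{
		uint64_t kern_delta = KERNBASE - LOAD_PA;	/* VA -> PA delta from locore */

		/* The kernstart argument built above for pmap_bootstrap(). */
		printf("kernstart = %#jx\n", (uintmax_t)(KERNBASE - kern_delta));
		return (0);
	}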

Modified: projects/arm64/sys/arm64/arm64/pmap.c
==============================================================================
--- projects/arm64/sys/arm64/arm64/pmap.c	Sun Apr 20 13:37:22 2014	(r264694)
+++ projects/arm64/sys/arm64/arm64/pmap.c	Sun Apr 20 14:45:17 2014	(r264695)
@@ -35,6 +35,8 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_page.h>
 #include <vm/vm_map.h>
 
+#include <machine/vmparam.h>
+
 #if !defined(DIAGNOSTIC)
 #ifdef __GNUC_GNU_INLINE__
 #define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
@@ -56,6 +58,84 @@ struct pmap kernel_pmap_store;
 
 struct msgbuf *msgbufp = NULL;
 
+void
+pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
+{
+	u_int l1_slot, l2_slot;
+	uint64_t kern_delta;
+	pt_entry_t *ptep;
+	pd_entry_t *pde;
+	vm_offset_t va;
+	vm_paddr_t pa;
+
+	kern_delta = KERNBASE - kernstart;
+
+	printf("pmap_bootstrap %llx %llx %llx\n", l1pt, kernstart, kernlen);
+	printf("%llx\n", l1pt);
+	printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
+
+	/*
+	 * Read the page table to find out what is already mapped.
+	 * This assumes we have mapped a block of memory from KERNBASE
+	 * using a single L1 entry.
+	 */
+	pde = (pd_entry_t *)l1pt;
+	l1_slot = (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK;
+
+	/* Sanity check the index, KERNBASE should be the first VA */
+	KASSERT(l1_slot == 0, ("The L1 index is non-zero"));
+	/* Check locore has used a table L1 map */
+	KASSERT((pde[l1_slot] & ATTR_DESCR_MASK) == L1_TABLE,
+	   ("Invalid bootstrap L1 table"));
+
+	/* Find the address of the L2 table */
+	ptep = (pt_entry_t *)((pde[l1_slot] & ~ATTR_MASK) + kern_delta);
+	l2_slot = (KERNBASE >> L2_SHIFT) & Ln_ADDR_MASK;
+	/* Sanity check the index, KERNBASE should be the first VA */
+	KASSERT(l2_slot == 0, ("The L2 index is non-zero"));
+
+	va = KERNBASE;
+	pa = KERNBASE - kern_delta; /* Set to an invalid address */
+
+	/* Find how many pages we have mapped */
+	for (; l2_slot < Ln_ENTRIES; l2_slot++) {
+		if ((ptep[l2_slot] & ATTR_DESCR_MASK) == 0)
+			break;
+
+		printf("ptep[%u] = %016llx\n", l2_slot, ptep[l2_slot]);
+
+		/* Check locore used L2 blocks */
+		KASSERT((ptep[l2_slot] & ATTR_DESCR_MASK) == L2_BLOCK,
+		    ("Invalid bootstrap L2 table"));
+		KASSERT((ptep[l2_slot] & ~ATTR_DESCR_MASK) == pa,
+		    ("Incorrect PA in L2 table"));
+
+		va += L2_SIZE;
+		pa += L2_SIZE;
+	}
+	/* And map the rest of L2 table */
+	for (; l2_slot < Ln_ENTRIES; l2_slot++) {
+		KASSERT(ptep[l2_slot] == 0, ("Invalid bootstrap L2 table"));
+		KASSERT(((va >> L2_SHIFT) & Ln_ADDR_MASK) == l2_slot,
+		    ("VA inconsistency detected"));
+
+		/* TODO: Check if this pa is valid */
+		ptep[l2_slot] = (pa & ~L2_OFFSET) | ATTR_AF | L2_BLOCK;
+
+		va += L2_SIZE;
+		pa += L2_SIZE;
+	}
+
+	/* Flush the cache and tlb to ensure the new entries are valid */
+	/* TODO: Flush the cache, we are relying on it being off */
+	/* TODO: Move this to a function */
+	__asm __volatile(
+	    "dsb  sy		\n"
+	    "tlbi vmalle1is	\n"
+	    "dsb  sy		\n"
+	    "isb		\n");
+}
+
 /*
  * Initialize a vm_page's machine-dependent fields.
  */
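
pmap_bootstrap above indexes the L1 and L2 tables by shifting and masking the
virtual address. A minimal standalone sketch of that calculation; the KERNBASE
value here is hypothetical (the real one comes from vmparam.h), chosen so that
both indices start at slot 0 as the KASSERTs expect:

	#include <stdint.h>
	#include <stdio.h>

	#define	L1_SHIFT	30
	#define	L2_SHIFT	21
	#define	Ln_ADDR_MASK	((1 << 9) - 1)	/* 512 entries per table */

	/* Hypothetical kernel VA base, for illustration only. */
	#define	KERNBASE	UINT64_C(0xffffff8000000000)

	int
	main(void)
	{
		uint64_t va = KERNBASE + ((uint64_t)3 << L2_SHIFT);	/* third 2 MiB block */

		/* Same index arithmetic as pmap_bootstrap. */
		printf("l1_slot = %u\n", (unsigned)((va >> L1_SHIFT) & Ln_ADDR_MASK));
		printf("l2_slot = %u\n", (unsigned)((va >> L2_SHIFT) & Ln_ADDR_MASK));
		return (0);
	}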

Added: projects/arm64/sys/arm64/include/machdep.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/arm64/sys/arm64/include/machdep.h	Sun Apr 20 14:45:17 2014	(r264695)
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MACHDEP_H_
+#define	_MACHINE_MACHDEP_H_
+
+struct arm64_bootparams {
+	vm_offset_t	modulep;
+	vm_offset_t	kern_l1pt;	/* L1 page table for the kernel */
+	uint64_t	kern_delta;
+};
+
+void initarm(struct arm64_bootparams *);
+
+#endif /* _MACHINE_MACHDEP_H_ */

Modified: projects/arm64/sys/arm64/include/pmap.h
==============================================================================
--- projects/arm64/sys/arm64/include/pmap.h	Sun Apr 20 13:37:22 2014	(r264694)
+++ projects/arm64/sys/arm64/include/pmap.h	Sun Apr 20 14:45:17 2014	(r264695)
@@ -111,7 +111,14 @@ extern vm_paddr_t dump_avail[];
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
-void	pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt);
+/*
+ * Macros to test if a mapping is mappable with an L1 Section mapping
+ * or an L2 Large Page mapping.
+ */
+#define	L1_MAPPABLE_P(va, pa, size)					\
+	((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)
+
+void	pmap_bootstrap(vm_offset_t, vm_paddr_t, vm_size_t);
 void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void	pmap_kremove(vm_offset_t);

Modified: projects/arm64/sys/arm64/include/pte.h
==============================================================================
--- projects/arm64/sys/arm64/include/pte.h	Sun Apr 20 13:37:22 2014	(r264694)
+++ projects/arm64/sys/arm64/include/pte.h	Sun Apr 20 14:45:17 2014	(r264695)
@@ -44,7 +44,9 @@ typedef	uint64_t	pt_entry_t;		/* page ta
 
 /* Block and Page attributes */
 /* TODO: Add the upper attributes */
-#define	ATTR_MASK_L	0xfff
+#define	ATTR_MASK_H	UINT64_C(0xfff0000000000000)
+#define	ATTR_MASK_L	UINT64_C(0x0000000000000fff)
+#define	ATTR_MASK	(ATTR_MASK_H | ATTR_MASK_L)
 #define	ATTR_nG		(1 << 11)
 #define	ATTR_AF		(1 << 10)
 #define	ATTR_SH(x)	((x) << 8)
@@ -52,6 +54,8 @@ typedef	uint64_t	pt_entry_t;		/* page ta
 #define	ATTR_NS		(1 << 5)
 #define	ATTR_IDX(x)	((x) << 3)
 
+#define	ATTR_DESCR_MASK	3
+
 /* Level 0 table, 512GiB per entry */
 #define	L0_SHIFT	39
 #define	L0_INVAL	0x0 /* An invalid address */
@@ -61,12 +65,16 @@ typedef	uint64_t	pt_entry_t;		/* page ta
 
 /* Level 1 table, 1GiB per entry */
 #define	L1_SHIFT	30
+#define	L1_SIZE 	(1 << L1_SHIFT)
+#define	L1_OFFSET 	(L1_SIZE - 1)
 #define	L1_INVAL	L0_INVAL
 #define	L1_BLOCK	L0_BLOCK
 #define	L1_TABLE	L0_TABLE
 
 /* Level 2 table, 2MiB per entry */
 #define	L2_SHIFT	21
+#define	L2_SIZE 	(1 << L2_SHIFT)
+#define	L2_OFFSET 	(L2_SIZE - 1)
 #define	L2_INVAL	L0_INVAL
 #define	L2_BLOCK	L0_BLOCK
 #define	L2_TABLE	L0_TABLE
@@ -80,7 +88,8 @@ typedef	uint64_t	pt_entry_t;		/* page ta
 	/* 0x2 also marks an invalid address */
 #define	L3_TABLE	0x3
 
-#define	Ln_ADDR_MASK	((1 << 9) - 1)
+#define	Ln_ENTRIES	(1 << 9)
+#define	Ln_ADDR_MASK	(Ln_ENTRIES - 1)
 
 #endif /* !_MACHINE_PTE_H_ */
 


