Date:      Sun, 2 Aug 2009 02:10:27 +0000 (UTC)
From:      Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r196029 - in projects/ppc64/sys/powerpc: aim aim64 include powerpc
Message-ID:  <200908020210.n722ARDg099075@svn.freebsd.org>

Author: nwhitehorn
Date: Sun Aug  2 02:10:27 2009
New Revision: 196029
URL: http://svn.freebsd.org/changeset/base/196029

Log:
  Fix the SLB handling to do something reasonable now. Processes have a full
  64-bit address space, but cannot map more than 16 GB at a time for now
  (64 SLB-table entries of 256 MB segments each). Note that this breaks the
  onfault handling used by copyinout, which I will fix later.

  This is done by assigning new, random VSIDs on demand, per segment, instead
  of using a per-pmap hash and appending the ESID to it. This requires a
  look-up table and is otherwise worse, but is required by the small 65-bit
  VA space on a wide variety of CPUs, including at least the POWER5, 970,
  and Cell.
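
  For illustration (a sketch, not code from this commit): with a 65-bit
  implemented VA and 256 MB (1 << ADDR_SR_SHFT) segments, only 65 - 28 = 37
  VSID bits are usable, so appending a full 36-bit ESID to a per-pmap hash
  leaves almost no bits to tell pmaps apart. The new scheme instead treats
  the per-pmap SLB table as the authoritative ESID-to-VSID map; the lookup
  side amounts to the following (the real va_to_vsid() below also allocates
  a fresh random VSID on a miss):

	/* Hypothetical helper; names follow machine/slb.h below. */
	static uint64_t
	slb_vsid_lookup(const struct slb *slb, int nents, vm_offset_t va)
	{
		uint64_t esid = (uint64_t)va >> ADDR_SR_SHFT;
		int i;

		for (i = 0; i < nents; i++)
			if ((slb[i].slbe & SLBE_VALID) &&
			    (slb[i].slbe >> SLBE_ESID_SHIFT) == esid)
				return ((slb[i].slbv & SLBV_VSID_MASK) >>
				    SLBV_VSID_SHIFT);
		return (0);	/* miss: caller allocates a new VSID */
	}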

Added:
  projects/ppc64/sys/powerpc/include/slb.h
     - copied, changed from r195954, projects/ppc64/sys/powerpc/include/sr.h
Modified:
  projects/ppc64/sys/powerpc/aim/copyinout.c
  projects/ppc64/sys/powerpc/aim/ofw_machdep.c
  projects/ppc64/sys/powerpc/aim/trap.c
  projects/ppc64/sys/powerpc/aim64/mmu_oea64.c
  projects/ppc64/sys/powerpc/aim64/trap_subr.S
  projects/ppc64/sys/powerpc/include/pmap.h
  projects/ppc64/sys/powerpc/include/sr.h
  projects/ppc64/sys/powerpc/powerpc/genassym.c

Modified: projects/ppc64/sys/powerpc/aim/copyinout.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/copyinout.c	Sun Aug  2 00:20:40 2009	(r196028)
+++ projects/ppc64/sys/powerpc/aim/copyinout.c	Sun Aug  2 02:10:27 2009	(r196029)
@@ -66,24 +66,17 @@ __FBSDID("$FreeBSD$");
 
 #include <machine/pcb.h>
 #include <machine/sr.h>
+#include <machine/slb.h>
 
 int	setfault(faultbuf);	/* defined in locore.S */
 
 /*
  * Makes sure that the right segment of userspace is mapped in.
  */
-static __inline register_t
-va_to_vsid(pmap_t pm, const volatile void *va)
-{
-        #ifdef __powerpc64__
-        return (((uint64_t)pm->pm_context << 17) |
-            ((uintptr_t)va >> ADDR_SR_SHFT));
-        #else
-        return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
-        #endif
-}
 
 #ifdef __powerpc64__
+uint64_t va_to_vsid(pmap_t pm, const volatile void *va);
+
 static __inline void
 set_user_sr(register_t vsid)
 {
@@ -99,6 +92,12 @@ set_user_sr(register_t vsid)
 	isync();
 }
 #else
+static __inline register_t
+va_to_vsid(pmap_t pm, const volatile void *va)
+{
+        return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
+}
+
 static __inline void
 set_user_sr(register_t vsid)
 {

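For orientation, a sketch of the assumed caller pattern in copyin()/copyout()
(hypothetical helper; the real functions are not shown in this diff):

	int
	copyin_prologue_sketch(pmap_t pm, const void *udaddr, faultbuf env,
	    struct thread *td)
	{
		if (setfault(env)) {		/* catch faults in the copy */
			td->td_pcb->pcb_onfault = NULL;
			return (EFAULT);
		}
		set_user_sr(va_to_vsid(pm, udaddr));	/* map user segment */
		/* ... copy through the USER_SR window, then clear onfault */
		return (0);
	}
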
Modified: projects/ppc64/sys/powerpc/aim/ofw_machdep.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/ofw_machdep.c	Sun Aug  2 00:20:40 2009	(r196028)
+++ projects/ppc64/sys/powerpc/aim/ofw_machdep.c	Sun Aug  2 02:10:27 2009	(r196029)
@@ -383,21 +383,24 @@ OF_bootstrap()
 static int
 openfirmware(void *args)
 {
-	long	oldmsr;
-	int	result;
-	u_int	srsave[16];
-	u_int   i;
+	long		oldmsr;
+	int		result;
+	#ifndef __powerpc64__
+	register_t	srsave[16];
+	u_int		i;
+	#endif
 
 	if (pmap_bootstrapped && ofw_real_mode)
 		args = (void *)pmap_kextract((vm_offset_t)args);
 
 	ofw_sprg_prepare();
 
+	#ifndef __powerpc64__
 	if (pmap_bootstrapped && !ofw_real_mode) {
 		/*
 		 * Swap the kernel's address space with Open Firmware's
 		 */
 		for (i = 0; i < 16; i++) {
 			srsave[i] = mfsrin(i << ADDR_SR_SHFT);
 			mtsrin(i << ADDR_SR_SHFT, ofw_pmap.pm_sr[i]);
 		}
@@ -411,6 +414,7 @@ openfirmware(void *args)
 		}
 		isync();
 	}
+	#endif
 
 	__asm __volatile(	"\t"
 		"sync\n\t"
@@ -429,6 +433,7 @@ openfirmware(void *args)
 		: : "r" (oldmsr)
 	);
 
+	#ifndef __powerpc64__
 	if (pmap_bootstrapped && !ofw_real_mode) {
 		/*
 		 * Restore the kernel's addr space. The isync() doesn't
@@ -440,6 +445,7 @@ openfirmware(void *args)
 			isync();
 		}
 	}
+	#endif
 
 	ofw_sprg_restore();
 

Modified: projects/ppc64/sys/powerpc/aim/trap.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/trap.c	Sun Aug  2 00:20:40 2009	(r196028)
+++ projects/ppc64/sys/powerpc/aim/trap.c	Sun Aug  2 02:10:27 2009	(r196029)
@@ -523,6 +523,8 @@ trap_pfault(struct trapframe *frame, int
 			    : "=r"(user_sr)
 			    : "r"(USER_SR));
 
+			/* XXX: THIS DOES NOT WORK */
+
 			user_sr >>= 12;
 
 			/* XXX - limit to 46-bit EA space */

Modified: projects/ppc64/sys/powerpc/aim64/mmu_oea64.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/mmu_oea64.c	Sun Aug  2 00:20:40 2009	(r196028)
+++ projects/ppc64/sys/powerpc/aim64/mmu_oea64.c	Sun Aug  2 02:10:27 2009	(r196029)
@@ -159,6 +159,7 @@ __FBSDID("$FreeBSD$");
 #define	MOEA_DEBUG
 
 #define TODO	panic("%s: not implemented", __func__);
+static uintptr_t moea64_get_unique_vsid(void); 
 
 static __inline register_t
 cntlzd(volatile register_t a) {
@@ -167,16 +168,52 @@ cntlzd(volatile register_t a) {
 	return b;
 }
 
+#ifdef __powerpc64__
+uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
+
+uint64_t
+va_to_vsid(pmap_t pm, vm_offset_t va)
+{
+	uint64_t slbe, slbv, i;
+
+	slbe = (uintptr_t)va >> ADDR_SR_SHFT;
+	slbe = (slbe << SLBE_ESID_SHIFT) | SLBE_VALID;
+	slbv = 0;
+
+	for (i = 0; i < sizeof(pm->pm_slb)/sizeof(pm->pm_slb[0]); i++) {
+		if (pm->pm_slb[i].slbe == (slbe | i)) {
+			slbv = pm->pm_slb[i].slbv;
+			break;
+		}
+	}
+
+	/* XXX: Have a long list for processes mapping more than 16 GB */
+
+	/*
+	 * If there is no vsid for this VA, we need to add a new entry
+	 * to the PMAP's segment table.
+	 */
+
+	if (slbv == 0) {
+		slbv = moea64_get_unique_vsid() << SLBV_VSID_SHIFT;
+		for (i = 0; i < sizeof(pm->pm_slb)/sizeof(pm->pm_slb[0]); i++) {
+			if (!(pm->pm_slb[i].slbe & SLBE_VALID)) {
+				pm->pm_slb[i].slbv = slbv;
+				pm->pm_slb[i].slbe = slbe | i;
+				break;
+			}
+		}
+	}
+
+	return ((slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
+}
+#else
 static __inline uint64_t
 va_to_vsid(pmap_t pm, vm_offset_t va)
 {
-	#ifdef __powerpc64__
-	return (((uint64_t)pm->pm_context << 17) |
-	    ((uintptr_t)va >> ADDR_SR_SHFT));
-	#else
 	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
-	#endif
 }
+#endif
 
 #define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
 #define	SYNC()		__asm __volatile("sync");
@@ -327,7 +364,14 @@ static struct	pvo_entry *moea64_bpvo_poo
 static int	moea64_bpvo_pool_index = 0;
 
 #define	VSID_NBPW	(sizeof(u_int32_t) * 8)
-static u_int	moea64_vsid_bitmap[NPMAPS / VSID_NBPW];
+#ifdef __powerpc64__
+#define	NVSIDS		(NPMAPS * 16)
+#define VSID_HASHMASK	0xffffffffUL
+#else
+#define NVSIDS		NPMAPS
+#define VSID_HASHMASK	0xfffffUL
+#endif
+static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
 
 static boolean_t moea64_initialized = FALSE;
 
@@ -720,10 +764,6 @@ moea64_bridge_cpu_bootstrap(mmu_t mmup, 
 {
 	int i = 0;
 
-	#ifdef __powerpc64__
-	register_t slb1, slb2;
-	#endif
-
 	/*
 	 * Initialize segment registers and MMU
 	 */
@@ -737,21 +777,16 @@ moea64_bridge_cpu_bootstrap(mmu_t mmup, 
 	#ifdef __powerpc64__
 		slbia();
 
-		for (i = 0; i < NSEGS; i++) {
-			if (!kernel_pmap->pm_sr[i])
+		for (i = 0; i < 64; i++) {
+			if (!(kernel_pmap->pm_slb[i].slbe & SLBE_VALID))
 				continue;
 
-			/* The right-most bit is a validity bit */
-			slb1 = ((register_t)kernel_pmap->pm_context << 17) |
-			    (kernel_pmap->pm_sr[i] >> 1);
-			slb1 <<= 12;
-			slb2 = kernel_pmap->pm_sr[i] << 27 | i;
-			
-			__asm __volatile ("slbmte %0, %1" :: "r"(slb1),
-			    "r"(slb2)); 
+			__asm __volatile ("slbmte %0, %1" :: 
+			    "r"(kernel_pmap->pm_slb[i].slbv),
+			    "r"(kernel_pmap->pm_slb[i].slbe)); 
 		}
 	#else
-		for (i = 0; i < NSEGS; i++)
+		for (i = 0; i < 16; i++)
 			mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
 	#endif
 
@@ -919,18 +954,21 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_o
 	/*
 	 * Make sure kernel vsid is allocated as well as VSID 0.
 	 */
-	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
+	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
 	moea64_vsid_bitmap[0] |= 1;
 
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
-	kernel_pmap->pm_context = 0xfffff;
 	#ifdef __powerpc64__
-	for (i = 0; i < 16; i++) 
-		kernel_pmap->pm_sr[i] = (i << 1) | 1;
-	kernel_pmap->pm_sr[USER_SR] = 0;
+	for (i = 0; i < 16; i++) {
+		kernel_pmap->pm_slb[i].slbv = ((KERNEL_VSIDBITS << 17) | i) <<
+		    SLBV_VSID_SHIFT;
+		kernel_pmap->pm_slb[i].slbe = ((uint64_t)i << SLBE_ESID_SHIFT) |
+		    SLBE_VALID | i;
+	}
+	kernel_pmap->pm_slb[USER_SR].slbe = 0;
 	#else
 	for (i = 0; i < 16; i++) 
 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
@@ -1787,29 +1825,20 @@ moea64_page_wired_mappings(mmu_t mmu, vm
 	return (count);
 }
 
-static u_int	moea64_vsidcontext;
-
-void
-moea64_pinit(mmu_t mmu, pmap_t pmap)
-{
-	int	i, mask;
-	u_int	entropy;
+static uintptr_t	moea64_vsidcontext;
 
-	PMAP_LOCK_INIT(pmap);
+static uintptr_t
+moea64_get_unique_vsid(void) {
+	u_int entropy;
+	register_t hash;
+	uint32_t mask;
+	int i;
 
 	entropy = 0;
 	__asm __volatile("mftb %0" : "=r"(entropy));
 
-	if (pmap_bootstrapped)
-		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, (vm_offset_t)pmap);
-	else
-		pmap->pmap_phys = pmap;
-
-	/*
-	 * Allocate some segment registers for this pmap.
-	 */
-	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
-		u_int	hash, n;
+	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
+		u_int	n;
 
 		/*
 		 * Create a new value by multiplying by a prime and adding in
@@ -1819,12 +1848,12 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
 		 * instead of a multiply.)
 		 */
 		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
-		hash = moea64_vsidcontext & (NPMAPS - 1);
+		hash = moea64_vsidcontext & (NVSIDS - 1);
 		if (hash == 0)		/* 0 is special, avoid it */
 			continue;
 		n = hash >> 5;
 		mask = 1 << (hash & (VSID_NBPW - 1));
-		hash = (moea64_vsidcontext & 0xfffff);
+		hash = (moea64_vsidcontext & VSID_HASHMASK);
 		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
 			/* anything free in this bucket? */
 			if (moea64_vsid_bitmap[n] == 0xffffffff) {
@@ -1833,23 +1862,53 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
 			}
 			i = ffs(~moea64_vsid_bitmap[n]) - 1;
 			mask = 1 << i;
-			hash &= 0xfffff & ~(VSID_NBPW - 1);
+			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
 			hash |= i;
 		}
 		moea64_vsid_bitmap[n] |= mask;
+		return (hash);
+	}
 
-		#ifdef __powerpc64__
-			pmap->pm_context = hash;
-			for (i = 0; i < NSEGS; i++) 
-				pmap->pm_sr[i] = 0;
-		#else
-			for (i = 0; i < 16; i++) 
-				pmap->pm_sr[i] = VSID_MAKE(i, hash);
-		#endif
-		return;
+	panic("%s: out of segments",__func__);
+}
+
+void
+moea64_pinit(mmu_t mmu, pmap_t pmap)
+{
+	int	i;
+	#ifndef __powerpc64__
+	register_t hash;
+	#endif
+
+	PMAP_LOCK_INIT(pmap);
+
+	if (pmap_bootstrapped)
+		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
+		    (vm_offset_t)pmap);
+	else
+		pmap->pmap_phys = pmap;
+
+	#ifdef __powerpc64__
+	/*
+	 * 64-bit PowerPC uses lazy segment allocation, so NULL
+	 * all the segment entries for now.
+	 */
+	for (i = 0; i < sizeof(pmap->pm_slb)/sizeof(pmap->pm_slb[0]); i++) {
+		pmap->pm_slb[i].slbv = 0;
+		pmap->pm_slb[i].slbe = 0;
 	}
 
-	panic("moea64_pinit: out of segments");
+	#else
+
+	/*
+	 * Allocate some segment registers for this pmap.
+	 */
+	hash = moea64_get_unique_vsid();
+
+	for (i = 0; i < 16; i++) 
+		pmap->pm_sr[i] = VSID_MAKE(i, hash);
+
+	#endif
 }
 
 /*
@@ -1954,21 +2013,36 @@ moea64_qremove(mmu_t mmu, vm_offset_t va
 	}
 }
 
+static __inline void
+moea64_release_vsid(uint64_t vsid)
+{
+        int idx, mask;
+
+        idx = vsid & (NVSIDS-1);
+        mask = 1 << (idx % VSID_NBPW);
+        idx /= VSID_NBPW;
+        moea64_vsid_bitmap[idx] &= ~mask;
+}
+	
+
 void
 moea64_release(mmu_t mmu, pmap_t pmap)
 {
-        int idx, mask;
         
 	/*
-	 * Free segment register's VSID
+	 * Free segment registers' VSIDs
 	 */
+    #ifdef __powerpc64__
+	int i;
+	for (i = 0; i < sizeof(pmap->pm_slb)/sizeof(pmap->pm_slb[0]); i++)
+		moea64_release_vsid(pmap->pm_slb[i].slbv);
+    #else
         if (pmap->pm_sr[0] == 0)
                 panic("moea64_release");
 
-        idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
-        mask = 1 << (idx % VSID_NBPW);
-        idx /= VSID_NBPW;
-        moea64_vsid_bitmap[idx] &= ~mask;
+	moea64_release_vsid(pmap->pm_sr[0]);
+    #endif
+
 	PMAP_LOCK_DESTROY(pmap);
 }
 
@@ -2111,6 +2185,7 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
 	 */
 	va &= ~ADDR_POFF;
 	vsid = va_to_vsid(pm, va);
+
 	ptegidx = va_to_pteg(vsid, va);
 
 	/*

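As a worked example of the VSID bitmap indexing in moea64_get_unique_vsid()
above (illustrative values only):

	/* hash = moea64_vsidcontext & (NVSIDS - 1); say hash = 0x12345. */
	n    = hash >> 5;		/* word index: 0x12345 >> 5 = 0x91a */
	mask = 1 << (hash & 31);	/* bit in word: 1 << 5 = 0x20 */
	/* moea64_vsid_bitmap[n] & mask != 0 means that VSID is taken. */
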
Modified: projects/ppc64/sys/powerpc/aim64/trap_subr.S
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/trap_subr.S	Sun Aug  2 00:20:40 2009	(r196028)
+++ projects/ppc64/sys/powerpc/aim64/trap_subr.S	Sun Aug  2 02:10:27 2009	(r196029)
@@ -46,34 +46,26 @@
 /*
  * Restore SRs for a pmap
  *
- * Requires that r27-r31 be scratch, with r27 initialized to the pmap
+ * Requires that r28-r31 be scratch, with r28 initialized to the pmap
  */
 
 restoresrs: 
 	slbia;
-	li	%r29, 0;
-	mr	%r28, %r27
-	lwz	%r27, PM_CONTEXT(%r27);
+	li	%r29, 0;		/* Set the counter to zero */
 instslb:
-	li	%r30, 12;
-	sld	%r30, %r27, %r30;
-	ld	%r31, PM_SR(%r28);
+	ld	%r31, PM_SLB+8(%r28);	/* Load SLBE */
 
-	cmpli	0, %r31, 0;
+	cmpli	0, %r31, 0;		/* If SLBE is not valid, get the next */
 	beq	nslb;
-	
-	srdi	%r31, %r31, 1;
-	or	%r30, %r30, %r31;
-	sldi	%r30, %r30, 12;
-	ld	%r31, PM_SR(%r28);
-	sldi	%r31, %r31, 27;
-	or 	%r31, %r31, %r29;
-	slbmte	%r30, %r31;
+		
+
+	ld	%r30, PM_SLB(%r28)	/* Load SLBV */
+	slbmte	%r30, %r31;		/* Install SLB entry */
 
 nslb:
-	addi	%r28, %r28, 8;
+	addi	%r28, %r28, 16;		/* Advance to the next 16-byte SLB entry */
 	addi	%r29, %r29, 1;
-	cmpli	0, %r29, 63;
+	cmpli	0, %r29, 63;		/* Repeat if we are not at the end */
 	blt instslb;
 
 	blr;
@@ -82,16 +74,16 @@ nslb:
  * User SRs are loaded through a pointer to the current pmap.
  */
 #define RESTORE_USER_SRS() \
-	GET_CPUINFO(%r27); \
-	ld	%r27,PC_CURPMAP(%r27); \
+	GET_CPUINFO(%r28); \
+	ld	%r28,PC_CURPMAP(%r28); \
 	bl	restoresrs;
 
 /*
  * Kernel SRs are loaded directly from kernel_pmap_
  */
 #define RESTORE_KERN_SRS() \
-	lis	%r27,CNAME(kernel_pmap_store)@ha; \
-	addi	%r27,%r27,CNAME(kernel_pmap_store)@l; \
+	lis	%r28,CNAME(kernel_pmap_store)@ha; \
+	addi	%r28,%r28,CNAME(kernel_pmap_store)@l; \
 	bl	restoresrs;
 
 /*

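In C terms, restoresrs above amounts to the following (an equivalence sketch;
the assembly walks the pmap's 64-entry pm_slb table):

	static void
	restoresrs_sketch(struct pmap *pm)
	{
		int i;

		slbia();			/* flush the hardware SLB */
		for (i = 0; i < 64; i++) {
			if (pm->pm_slb[i].slbe == 0)	/* empty slot */
				continue;
			__asm __volatile("slbmte %0, %1" ::
			    "r"(pm->pm_slb[i].slbv),	/* SLBV operand */
			    "r"(pm->pm_slb[i].slbe));	/* SLBE operand */
		}
	}
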
Modified: projects/ppc64/sys/powerpc/include/pmap.h
==============================================================================
--- projects/ppc64/sys/powerpc/include/pmap.h	Sun Aug  2 00:20:40 2009	(r196028)
+++ projects/ppc64/sys/powerpc/include/pmap.h	Sun Aug  2 02:10:27 2009	(r196029)
@@ -70,6 +70,7 @@
 #include <machine/sr.h>
 #include <machine/pte.h>
 #include <machine/tlb.h>
+#include <machine/slb.h>
 
 struct pmap_md {
 	u_int		md_index;
@@ -84,18 +85,15 @@ struct pmap_md {
 #define	NPMAPS		32768
 #endif /* !defined(NPMAPS) */
 
-#ifdef __powerpc64__
-#define NSEGS	64	/* Typical SLB size. */
-#else
-#define NSEGS	16
-#endif
-
 struct	pmap {
 	struct	mtx	pm_mtx;
 	
-	register_t	pm_sr[NSEGS];
+    #ifdef __powerpc64__
+	struct slb	pm_slb[64];
+    #else
+	register_t	pm_sr[16];
+    #endif
 	u_int		pm_active;
-	u_int		pm_context;
 
 	struct pmap	*pmap_phys;
 	struct		pmap_statistics	pm_stats;

Copied and modified: projects/ppc64/sys/powerpc/include/slb.h (from r195954, projects/ppc64/sys/powerpc/include/sr.h)
==============================================================================
--- projects/ppc64/sys/powerpc/include/sr.h	Wed Jul 29 21:54:34 2009	(r195954, copy source)
+++ projects/ppc64/sys/powerpc/include/slb.h	Sun Aug  2 02:10:27 2009	(r196029)
@@ -1,5 +1,5 @@
 /*-
- * Copyright (C) 2002 Benno Rice.
+ * Copyright (C) 2009 Nathan Whitehorn
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
- * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
@@ -25,32 +25,36 @@
  * $FreeBSD$
  */
 
-#ifndef _MACHINE_SR_H_
-#define	_MACHINE_SR_H_
+#ifndef _MACHINE_SLB_H_
+#define	_MACHINE_SLB_H_
 
 /*
- * Bit definitions for segment registers.
+ * Bit definitions for segment lookaside buffer entries.
  *
- * PowerPC Microprocessor Family: The Programming Environments for 32-bit
- * Microprocessors, section 2.3.5
+ * PowerPC Microprocessor Family: The Programming Environments for 64-bit
+ * Microprocessors, section 7.4.2.1
+ *
+ * Note that these bitmasks are relative to the two register operands
+ * of slbmte, slbmfee, and slbmfev, not to the internal SLB
+ * representation.
  */
 
-#define	SR_TYPE		0x80000000	/* Type selector */
-#define	SR_KS		0x40000000	/* Supervisor-state protection key */
-#define	SR_KP		0x20000000	/* User-state protection key */
-#define	SR_N		0x10000000	/* No-execute protection */
-#define	SR_VSID_MASK	0x00ffffff	/* Virtual Segment ID mask */
-
-/* Kernel segment register usage */
-#define	USER_SR		12
-#define	KERNEL_SR	13
-#define	KERNEL2_SR	14
-#define	KERNEL_VSIDBITS	0xfffff
-#define	KERNEL_SEGMENT	(0xfffff0 + KERNEL_SR)
-#define	KERNEL2_SEGMENT	(0xfffff0 + KERNEL2_SR)
-#define	EMPTY_SEGMENT	0xfffff0
-#define	USER_ADDR	((void *)((register_t)USER_SR << ADDR_SR_SHFT))
-#define	SEGMENT_LENGTH	0x10000000
-#define	SEGMENT_MASK	0xf0000000
+#define	SLBV_KS		0x0000000000000800UL /* Supervisor-state prot key */
+#define	SLBV_KP		0x0000000000000400UL /* User-state prot key */
+#define	SLBV_N		0x0000000000000200UL /* No-execute protection */
+#define	SLBV_L		0x0000000000000100UL /* Large page selector */
+#define	SLBV_CLASS	0x0000000000000080UL /* Class selector */
+#define	SLBV_VSID_MASK	0xfffffffffffff000UL /* Virtual segment ID mask */
+#define	SLBV_VSID_SHIFT	12
+
+#define	SLBE_VALID	0x0000000008000000UL /* SLB entry valid */
+#define	SLBE_INDEX_MASK	0x0000000000000fffUL /* SLB index mask */
+#define	SLBE_ESID_MASK	0xfffffffff0000000UL /* Effective segment ID mask */
+#define	SLBE_ESID_SHIFT	28
+
+struct slb {
+	uint64_t	slbv;
+	uint64_t	slbe;
+};
 
-#endif /* !_MACHINE_SR_H_ */
+#endif /* !_MACHINE_SLB_H_ */

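As a usage sketch for these definitions (assumed values `vsid', `esid', and
`slot', not from the commit), a valid slbmte operand pair would be built as:

	uint64_t slbv, slbe;

	slbv = (vsid << SLBV_VSID_SHIFT) & SLBV_VSID_MASK;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID |
	    (slot & SLBE_INDEX_MASK);
	__asm __volatile("slbmte %0, %1" :: "r"(slbv), "r"(slbe));
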
Modified: projects/ppc64/sys/powerpc/include/sr.h
==============================================================================
--- projects/ppc64/sys/powerpc/include/sr.h	Sun Aug  2 00:20:40 2009	(r196028)
+++ projects/ppc64/sys/powerpc/include/sr.h	Sun Aug  2 02:10:27 2009	(r196029)
@@ -45,7 +45,7 @@
 #define	USER_SR		12
 #define	KERNEL_SR	13
 #define	KERNEL2_SR	14
-#define	KERNEL_VSIDBITS	0xfffff
+#define	KERNEL_VSIDBITS	0xfffffUL
 #define	KERNEL_SEGMENT	(0xfffff0 + KERNEL_SR)
 #define	KERNEL2_SEGMENT	(0xfffff0 + KERNEL2_SR)
 #define	EMPTY_SEGMENT	0xfffff0

Modified: projects/ppc64/sys/powerpc/powerpc/genassym.c
==============================================================================
--- projects/ppc64/sys/powerpc/powerpc/genassym.c	Sun Aug  2 00:20:40 2009	(r196028)
+++ projects/ppc64/sys/powerpc/powerpc/genassym.c	Sun Aug  2 02:10:27 2009	(r196029)
@@ -102,10 +102,11 @@ ASSYM(TLBSAVE_BOOKE_R31, TLBSAVE_BOOKE_R
 ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
 
 #if defined(AIM)
-ASSYM(PM_KERNELSR, offsetof(struct pmap, pm_sr[KERNEL_SR]));
-ASSYM(PM_USRSR, offsetof(struct pmap, pm_sr[USER_SR]));
+#ifdef __powerpc64__
+ASSYM(PM_SLB, offsetof(struct pmap, pm_slb));
+#else
 ASSYM(PM_SR, offsetof(struct pmap, pm_sr));
-ASSYM(PM_CONTEXT, offsetof(struct pmap, pm_context));
+#endif
 #elif defined(E500)
 ASSYM(PM_PDIR, offsetof(struct pmap, pm_pdir));
 #endif


