Skip site navigation (1)Skip section navigation (2)
Date:      Mon, 10 Jan 2011 10:21:54 +0530
From:      "Jayachandran C." <c.jayachandran@gmail.com>
To:        freebsd-mips@freebsd.org
Subject:   [PATCH] Support for 64-bit PTEs on n64
Message-ID:  <AANLkTinpdhjO_TXqkS4GRRZDtSG91GZYHhLzYmWi5yf6@mail.gmail.com>

index | next in thread | raw e-mail

[-- Attachment #1 --]
Here's my initial code to support >4GB physical memory on MIPS.  This
patch adds 64 bit page table entries in n64 compilation.

I would like to add this as an optional feature for n32/o32
compilation with 'options PHYSADDR_64_BIT' later.

Comments welcome, I will check this in some time later this week if
all goes well.

JC.

[-- Attachment #2 --]
Index: sys/mips/include/param.h
===================================================================
--- sys/mips/include/param.h	(revision 217166)
+++ sys/mips/include/param.h	(working copy)
@@ -123,13 +123,22 @@
 #define	NPTEPG		(PAGE_SIZE/(sizeof (pt_entry_t)))
 #define	NPDEPG		(PAGE_SIZE/(sizeof (pd_entry_t)))
 
+#ifdef __mips_n64 /* PHYSADDR_64_BIT */
+#define	NPTEPGSHIFT	9               /* LOG2(NPTEPG) */
+#define	NPDEPGSHIFT	9               /* LOG2(NPDEPG) */
+#else
+#define	NPTEPGSHIFT	10               /* LOG2(NPTEPG) */
+#define	NPDEPGSHIFT	9               /* LOG2(NPDEPG) */
+#endif
+
+
 #if defined(__mips_n64)
-#define	SEGSHIFT	31		/* LOG2(NBSEG) */
-#define	NBSEG		(1ul << SEGSHIFT)	/* bytes/segment */
-#define	PDRSHIFT	22              /* second level */
+#define	SEGSHIFT	(PAGE_SHIFT + NPTEPGSHIFT + NPDEPGSHIFT)
+#define	NBSEG		(1ul << SEGSHIFT)
+#define	PDRSHIFT	(PAGE_SHIFT + NPTEPGSHIFT)
 #define	PDRMASK		((1 << PDRSHIFT) - 1)
 #else
-#define	SEGSHIFT	22		/* LOG2(NBSEG) */
+#define	SEGSHIFT	(PAGE_SHIFT + NPTEPGSHIFT)
 #define	NBSEG		(1 << SEGSHIFT)	/* bytes/segment */
 #define	PDRSHIFT	SEGSHIFT	/* alias for SEG in 32 bit */
 #define	PDRMASK		((1 << PDRSHIFT) - 1)
Index: sys/mips/include/cpufunc.h
===================================================================
--- sys/mips/include/cpufunc.h	(revision 217166)
+++ sys/mips/include/cpufunc.h	(working copy)
@@ -137,12 +137,13 @@
 } struct __hack
 
 #if defined(__mips_n64)
-MIPS_RW64_COP0(excpc, MIPS_COP_0_EXC_PC);
-MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
-MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
 MIPS_RW64_COP0(entryhi, MIPS_COP_0_TLB_HI);
 MIPS_RW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
 #endif
+#if defined(__mips_n64) /* PHYSADDR_64_BIT */
+MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
+MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
+#endif
 MIPS_RW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT);
 
 #undef	MIPS_RW64_COP0
@@ -221,11 +222,13 @@
 
 /* XXX: Some of these registers are specific to MIPS32. */
 #if !defined(__mips_n64)
-MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
-MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
 MIPS_RW32_COP0(entryhi, MIPS_COP_0_TLB_HI);
 MIPS_RW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
 #endif
+#if !defined(__mips_n64) /* PHYSADDR_64_BIT */
+MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
+MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
+#endif
 MIPS_RW32_COP0(prid, MIPS_COP_0_PRID);
 /* XXX 64-bit?  */
 MIPS_RW32_COP0_SEL(ebase, MIPS_COP_0_PRID, 1);
Index: sys/mips/include/pte.h
===================================================================
--- sys/mips/include/pte.h	(revision 217166)
+++ sys/mips/include/pte.h	(working copy)
@@ -30,8 +30,11 @@
 #define	_MACHINE_PTE_H_
 
 #ifndef _LOCORE
-/* pt_entry_t is 32 bit for now, has to be made 64 bit for n64 */
+#ifdef __mips_n64 /* PHYSADDR_64_BIT */
+typedef	uint64_t pt_entry_t;
+#else
 typedef	uint32_t pt_entry_t;
+#endif
 typedef	pt_entry_t *pd_entry_t;
 #endif
 
@@ -61,10 +64,15 @@
  * written as anything, but otherwise they have as much meaning as
  * other 0 fields.
  */
+#ifdef __mips_n64 /* PHYSADDR_64_BIT */
+#define	TLBLO_SWBITS_SHIFT	(34)
+#define	TLBLO_PFN_MASK		0x3FFFFFFC0ULL
+#else
 #define	TLBLO_SWBITS_SHIFT	(30)
-#define	TLBLO_SWBITS_MASK	(0x3U << TLBLO_SWBITS_SHIFT)
+#define	TLBLO_PFN_MASK		(0x3FFFFFC0)
+#endif
 #define	TLBLO_PFN_SHIFT		(6)
-#define	TLBLO_PFN_MASK		(0x3FFFFFC0)
+#define	TLBLO_SWBITS_MASK	((pt_entry_t)0x3 << TLBLO_SWBITS_SHIFT)
 #define	TLBLO_PA_TO_PFN(pa)	((((pa) >> TLB_PAGE_SHIFT) << TLBLO_PFN_SHIFT) & TLBLO_PFN_MASK)
 #define	TLBLO_PFN_TO_PA(pfn)	((vm_paddr_t)((pfn) >> TLBLO_PFN_SHIFT) << TLB_PAGE_SHIFT)
 #define	TLBLO_PTE_TO_PFN(pte)	((pte) & TLBLO_PFN_MASK)
@@ -96,10 +104,10 @@
 #define	TLBHI_ENTRY(va, asid)	((TLBHI_VA_R((va))) /* Region. */ | \
 				 (TLBHI_VA_TO_VPN2((va))) /* VPN2. */ | \
 				 ((asid) & TLBHI_ASID_MASK))
-#else
+#else /* !defined(__mips_n64) */
 #define	TLBHI_PAGE_MASK		(2 * PAGE_SIZE - 1)
 #define	TLBHI_ENTRY(va, asid)	(((va) & ~TLBHI_PAGE_MASK) | ((asid) & TLBHI_ASID_MASK))
-#endif
+#endif /* defined(__mips_n64) */
 
 /*
  * TLB flags managed in hardware:
@@ -125,8 +133,8 @@
  * 		listen to requests to write to it.
  * 	W:	Wired.  ???
  */
-#define	PTE_RO	(0x01 << TLBLO_SWBITS_SHIFT)
-#define	PTE_W	(0x02 << TLBLO_SWBITS_SHIFT)
+#define	PTE_RO	((pt_entry_t)0x01 << TLBLO_SWBITS_SHIFT)
+#define	PTE_W	((pt_entry_t)0x02 << TLBLO_SWBITS_SHIFT)
 
 /*
  * PTE management functions for bits defined above.
Index: sys/mips/include/proc.h
===================================================================
--- sys/mips/include/proc.h	(revision 217166)
+++ sys/mips/include/proc.h	(working copy)
@@ -43,17 +43,17 @@
  * Machine-dependent part of the proc structure.
  */
 struct mdthread {
-	int	md_flags;		/* machine-dependent flags */
-	int	md_upte[KSTACK_PAGES];	/* ptes for mapping u pcb */
-	int	md_ss_addr;		/* single step address for ptrace */
-	int	md_ss_instr;		/* single step instruction for ptrace */
+	int		md_flags;	/* machine-dependent flags */
+	long		md_upte[KSTACK_PAGES];	/* PHYSADDR_64_BIT ptes for mapping u pcb */
+	int		md_ss_addr;	/* single step address for ptrace */
+	int		md_ss_instr;	/* single step instruction for ptrace */
 	register_t	md_saved_intr;
-	u_int	md_spinlock_count;
+	u_int		md_spinlock_count;
 /* The following is CPU dependent, but kept in for compatibility */
-	int	md_pc_ctrl;		/* performance counter control */
-	int	md_pc_count;		/* performance counter */
-	int	md_pc_spill;		/* performance counter spill */
-	void	*md_tls;
+	int		md_pc_ctrl;	/* performance counter control */
+	int		md_pc_count;	/* performance counter */
+	int		md_pc_spill;	/* performance counter spill */
+	void		*md_tls;
 };
 
 /* md_flags */
Index: sys/mips/rmi/xlr_machdep.c
===================================================================
--- sys/mips/rmi/xlr_machdep.c	(revision 217166)
+++ sys/mips/rmi/xlr_machdep.c	(working copy)
@@ -347,13 +347,11 @@
 				break;
 			}
 			if (j == 0) {
-				/* TODO FIXME  */
 				/* start after kernel end */
 				phys_avail[0] = (vm_paddr_t)
 				    MIPS_KSEG0_TO_PHYS(&_end) + 0x20000;
 				/* boot loader start */
 				/* HACK to Use bootloaders memory region */
-				/* TODO FIXME  */
 				if (boot_map->physmem_map[0].size == 0x0c000000) {
 					boot_map->physmem_map[0].size = 0x0ff00000;
 				}
@@ -367,11 +365,6 @@
 				dump_avail[1] = phys_avail[1];
 
 			} else {
-/*
- * Can't use this code yet, because most of the fixed allocations happen from
- * the biggest physical area. If we have more than 512M memory the kernel will try
- * to map from the second are which is not in KSEG0 and not mapped
- */
 				phys_avail[j] = (vm_paddr_t)
 				    boot_map->physmem_map[i].addr;
 				phys_avail[j + 1] = phys_avail[j] +
@@ -401,7 +394,6 @@
 		}
 	}
 
-	/* FIXME XLR TODO */
 	phys_avail[j] = phys_avail[j + 1] = 0;
 	realmem = physmem = btoc(physsz);
 }
Index: sys/mips/mips/exception.S
===================================================================
--- sys/mips/mips/exception.S	(revision 217166)
+++ sys/mips/mips/exception.S	(working copy)
@@ -66,21 +66,34 @@
 
 #include "assym.s"
 
-/*
- * Clear the software-managed bits in a PTE in register pr.
- */
-#define	CLEAR_PTE_SWBITS(pr)							\
-	sll		pr, 2 ;							\
-	srl		pr, 2			# keep bottom 30 bits
+	.set	noreorder		# Noreorder is default style!
 
-	.set	noreorder			# Noreorder is default style!
-
 /*
  * Reasonable limit
  */
 #define	INTRCNT_COUNT	128
 
 /* Pointer size and mask for n64 */
+#ifdef __mips_n64 /* PHYSADDR_64_BIT */
+#define	PTESHIFT	3
+#define	PTE2MASK	0xff0		/* for the 2-page lo0/lo1 */
+#define	PTEMASK		0xff8
+#define	PTESIZE		8
+#define	PTE_L		ld
+#define	PTE_MTC0	dmtc0
+#define	CLEAR_PTE_SWBITS(pr)
+#else
+#define	PTESHIFT	2
+#define	PTE2MASK	0xff8		/* for the 2-page lo0/lo1 */
+#define	PTEMASK		0xffc
+#define	PTESIZE		4
+#define	PTE_L		lw
+#define	PTE_MTC0	mtc0
+#define	CLEAR_PTE_SWBITS(pr)						\
+	sll		pr, 2 ;						\
+	srl		pr, 2		# keep bottom 30 bits
+#endif
+
 #if defined(__mips_n64)
 #define	PTRSHIFT	3
 #define	PTRMASK		0xff8
@@ -145,16 +158,16 @@
 	MFC0		k0, MIPS_COP_0_BAD_VADDR	# k0=bad address (again)
 	beq		k1, zero, 2f			# ==0 -- no page table
 #endif
-	PTR_SRL		k0, PAGE_SHIFT - 2		#0b: k0=VPN (aka va>>10)
-	andi		k0, k0, 0xff8			#0c: k0=page tab offset
+	PTR_SRL		k0, PAGE_SHIFT - PTESHIFT	#0b: k0=VPN (aka va >> (PAGE_SHIFT - PTESHIFT))
+	andi		k0, k0, PTE2MASK		#0c: k0=page tab offset
 	PTR_ADDU	k1, k1, k0			#0d: k1=pte address
-	lw		k0, 0(k1)			#0e: k0=lo0 pte
-	lw		k1, 4(k1)			#0f: k1=lo0 pte
+	PTE_L		k0, 0(k1)			#0e: k0=lo0 pte
+	PTE_L		k1, PTESIZE(k1)			#0f: k1=lo1 pte
 	CLEAR_PTE_SWBITS(k0)
-	MTC0		k0, MIPS_COP_0_TLB_LO0		#12: lo0 is loaded
+	PTE_MTC0	k0, MIPS_COP_0_TLB_LO0		#12: lo0 is loaded
 	COP0_SYNC
 	CLEAR_PTE_SWBITS(k1)
-	MTC0		k1, MIPS_COP_0_TLB_LO1		#15: lo1 is loaded
+	PTE_MTC0	k1, MIPS_COP_0_TLB_LO1		#15: lo1 is loaded
 	COP0_SYNC
 	tlbwr						#1a: write to tlb
 	HAZARD_DELAY
@@ -845,10 +858,10 @@
 	nop
 #endif
 	MFC0		k0, MIPS_COP_0_BAD_VADDR	# k0=bad address (again)
-	PTR_SRL		k0, PAGE_SHIFT - 2		# k0=VPN
-	andi		k0, k0, 0xffc			# k0=page tab offset
+	PTR_SRL		k0, PAGE_SHIFT - PTESHIFT	# k0=VPN
+	andi		k0, k0, PTEMASK			# k0=page tab offset
 	PTR_ADDU	k1, k1, k0			# k1=pte address
-	lw		k0, 0(k1)			# k0=this PTE
+	PTE_L		k0, 0(k1)			# k0=this PTE
 
 	/* Validate page table entry.  */
 	andi		k0, PTE_V
@@ -856,30 +869,30 @@
 	nop
 
 	/* Check whether this is an even or odd entry.  */
-	andi		k0, k1, 4
+	andi		k0, k1, PTESIZE
 	bnez		k0, odd_page
 	nop
 
-	lw		k0, 0(k1)
-	lw		k1, 4(k1)
+	PTE_L		k0, 0(k1)
+	PTE_L		k1, PTESIZE(k1)
 	CLEAR_PTE_SWBITS(k0)
-	MTC0		k0, MIPS_COP_0_TLB_LO0
+	PTE_MTC0	k0, MIPS_COP_0_TLB_LO0
 	COP0_SYNC
 	CLEAR_PTE_SWBITS(k1)
-	MTC0		k1, MIPS_COP_0_TLB_LO1
+	PTE_MTC0	k1, MIPS_COP_0_TLB_LO1
 	COP0_SYNC
 
 	b		tlb_insert_entry
 	nop
 
 odd_page:
-	lw		k0, -4(k1)
-	lw		k1, 0(k1)
+	PTE_L		k0, -PTESIZE(k1)
+	PTE_L		k1, 0(k1)
 	CLEAR_PTE_SWBITS(k0)
-	MTC0		k0, MIPS_COP_0_TLB_LO0
+	PTE_MTC0	k0, MIPS_COP_0_TLB_LO0
 	COP0_SYNC
 	CLEAR_PTE_SWBITS(k1)
-	MTC0		k1, MIPS_COP_0_TLB_LO1
+	PTE_MTC0	k1, MIPS_COP_0_TLB_LO1
 	COP0_SYNC
 
 tlb_insert_entry:
@@ -1012,16 +1025,16 @@
 	MFC0		k0, MIPS_COP_0_BAD_VADDR	# k0=bad address (again)
   	beq		k1, zero, MipsKernGenException	# ==0 -- no page table
 #endif
-	PTR_SRL		k0, PAGE_SHIFT - 2		# k0=VPN
-	andi		k0, k0, 0xff8			# k0=page tab offset
+	PTR_SRL		k0, PAGE_SHIFT - PTESHIFT	# k0=VPN
+	andi		k0, k0, PTE2MASK		# k0=page tab offset
 	PTR_ADDU	k1, k1, k0			# k1=pte address
-	lw		k0, 0(k1)			# k0=lo0 pte
-	lw		k1, 4(k1)			# k1=lo1 pte
+	PTE_L		k0, 0(k1)			# k0=lo0 pte
+	PTE_L		k1, PTESIZE(k1)			# k1=lo1 pte
 	CLEAR_PTE_SWBITS(k0)
-	MTC0		k0, MIPS_COP_0_TLB_LO0		# lo0 is loaded
+	PTE_MTC0	k0, MIPS_COP_0_TLB_LO0		# lo0 is loaded
 	COP0_SYNC
 	CLEAR_PTE_SWBITS(k1)
-	MTC0		k1, MIPS_COP_0_TLB_LO1		# lo1 is loaded
+	PTE_MTC0	k1, MIPS_COP_0_TLB_LO1		# lo1 is loaded
 	COP0_SYNC
 	tlbwr					# write to tlb
 	HAZARD_DELAY
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c	(revision 217166)
+++ sys/mips/mips/pmap.c	(working copy)
@@ -184,7 +184,7 @@
 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
-static int init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
+static vm_paddr_t init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
 
 #ifdef SMP
 static void pmap_invalidate_page_action(void *arg);
@@ -1562,7 +1562,7 @@
 {
 	pt_entry_t oldpte;
 	vm_page_t m;
-	vm_offset_t pa;
+	vm_paddr_t pa;
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1850,12 +1850,12 @@
 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     vm_prot_t prot, boolean_t wired)
 {
-	vm_offset_t pa, opa;
+	vm_paddr_t pa, opa;
 	pt_entry_t *pte;
 	pt_entry_t origpte, newpte;
 	pv_entry_t pv;
 	vm_page_t mpte, om;
-	int rw = 0;
+	pt_entry_t rw = 0;
 
 	if (pmap == NULL)
 		return;
@@ -2063,7 +2063,7 @@
     vm_prot_t prot, vm_page_t mpte)
 {
 	pt_entry_t *pte;
-	vm_offset_t pa;
+	vm_paddr_t pa;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
@@ -2903,7 +2903,7 @@
 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
 	pt_entry_t *ptep, pte;
-	vm_offset_t pa;
+	vm_paddr_t pa;
 	vm_page_t m;
 	int val;
 	boolean_t managed;
@@ -3066,8 +3066,8 @@
 					continue;
 				pa = TLBLO_PTE_TO_PA(pte);
 				va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT);
-				db_printf("\t\t[%04d] va: %p pte: %8x pa:%lx\n",
-				       k, (void *)va, pte, (u_long)pa);
+				db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n",
+				       k, (void *)va, (uintmax_t)pte, (uintmax_t)pa);
 			}
 		}
 	}
@@ -3171,10 +3171,10 @@
 	return (0);
 }
 
-static int
+static pt_entry_t
 init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
-	int rw;
+	pt_entry_t rw;
 
 	if (!(prot & VM_PROT_WRITE))
 		rw =  PTE_V | PTE_RO | PTE_C_CACHE;
@@ -3203,7 +3203,7 @@
 {
 	vm_page_t m;
 	pt_entry_t *pte;
- 	vm_offset_t pa;
+ 	vm_paddr_t pa;
 
 	PMAP_LOCK(pmap);
 	pte = pmap_pte(pmap, va);
Index: sys/mips/mips/trap.c
===================================================================
--- sys/mips/mips/trap.c	(revision 217166)
+++ sys/mips/mips/trap.c	(working copy)
@@ -1276,8 +1276,8 @@
 	if (!(pc & 3) &&
 	    useracc((caddr_t)(intptr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
 		/* dump page table entry for faulting instruction */
-		log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#x\n",
-		    (intmax_t)pc, (void *)(intptr_t)*pdep, ptep ? *ptep : 0);
+		log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
+		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
 
 		addr = (unsigned int *)(intptr_t)pc;
 		log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
@@ -1285,8 +1285,8 @@
 		log(LOG_ERR, "%08x %08x %08x %08x\n",
 		    addr[0], addr[1], addr[2], addr[3]);
 	} else {
-		log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#x\n",
-		    (intmax_t)pc, (void *)(intptr_t)*pdep, ptep ? *ptep : 0);
+		log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
+		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
 	}
 }
 
@@ -1340,8 +1340,8 @@
 	    (trap_type != T_BUS_ERR_IFETCH) &&
 	    useracc((caddr_t)(intptr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
 		/* dump page table entry for faulting instruction */
-		log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#x\n",
-		    (intmax_t)pc, (void *)(intptr_t)*pdep, ptep ? *ptep : 0);
+		log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
+		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
 
 		addr = (unsigned int *)(intptr_t)pc;
 		log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
@@ -1349,13 +1349,13 @@
 		log(LOG_ERR, "%08x %08x %08x %08x\n",
 		    addr[0], addr[1], addr[2], addr[3]);
 	} else {
-		log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#x\n",
-		    (intmax_t)pc, (void *)(intptr_t)*pdep, ptep ? *ptep : 0);
+		log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
+		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
 	}
 
 	get_mapping_info((vm_offset_t)frame->badvaddr, &pdep, &ptep);
-	log(LOG_ERR, "Page table info for bad address %#jx: pde = %p, pte = %#x\n",
-	    (intmax_t)frame->badvaddr, (void *)(intptr_t)*pdep, ptep ? *ptep : 0);
+	log(LOG_ERR, "Page table info for bad address %#jx: pde = %p, pte = %#jx\n",
+	    (intmax_t)frame->badvaddr, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
 }
 
 
Index: sys/mips/mips/swtch.S
===================================================================
--- sys/mips/mips/swtch.S	(revision 217166)
+++ sys/mips/mips/swtch.S	(working copy)
@@ -91,6 +91,16 @@
 #define	RESTORE_U_PCB_CONTEXT(reg, offs, base) \
 	REG_L	reg, U_PCB_CONTEXT + (SZREG * offs) (base)
 
+#ifdef __mips_n64 /* PHYSADDR_64_BIT */
+#define	PTE_L		ld
+#define	PTE_MTC0	dmtc0
+#define	PTESIZE		8
+#else
+#define	PTE_L		lw
+#define	PTE_MTC0	mtc0
+#define	PTESIZE		4
+#endif
+
 /*
  * Setup for and return to user.
  */
@@ -284,8 +294,8 @@
 	PTR_LI	s0, MIPS_KSEG2_START		# If Uarea addr is below kseg2,
 #endif
 	bltu	v0, s0, sw2			# no need to insert in TLB.
-	lw	a1, TD_UPTE + 0(s7)		# a1 = u. pte #0
-	lw	a2, TD_UPTE + 4(s7)		# a2 = u. pte #1
+	PTE_L	a1, TD_UPTE + 0(s7)		# a1 = u. pte #0
+	PTE_L	a2, TD_UPTE + PTESIZE(s7)	# a2 = u. pte #1
 /*
  * Wiredown the USPACE of newproc in TLB entry#0.  Check whether target
  * USPACE is already in another place of TLB before that, and if so
@@ -306,8 +316,8 @@
 	sll	s0, PAGE_SHIFT + 1
 	addu	t1, s0
 	MTC0	t1, MIPS_COP_0_TLB_HI
-	mtc0	zero, MIPS_COP_0_TLB_LO0
-	mtc0	zero, MIPS_COP_0_TLB_LO1
+	PTE_MTC0	zero, MIPS_COP_0_TLB_LO0
+	PTE_MTC0	zero, MIPS_COP_0_TLB_LO1
 	HAZARD_DELAY
 	tlbwi
 	HAZARD_DELAY
@@ -317,9 +327,9 @@
 /* SMP!! - Works only for  unshared TLB case - i.e. no v-cpus */
 	mtc0	zero, MIPS_COP_0_TLB_INDEX		# TLB entry #0
 	HAZARD_DELAY
-	mtc0	a1, MIPS_COP_0_TLB_LO0		# upte[0]
+	PTE_MTC0	a1, MIPS_COP_0_TLB_LO0		# upte[0]
 	HAZARD_DELAY
-	mtc0	a2, MIPS_COP_0_TLB_LO1		# upte[1]
+	PTE_MTC0	a2, MIPS_COP_0_TLB_LO1		# upte[1]
 	HAZARD_DELAY
 	tlbwi					# set TLB entry #0
 	HAZARD_DELAY
help

Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?AANLkTinpdhjO_TXqkS4GRRZDtSG91GZYHhLzYmWi5yf6>