Date:      Mon, 16 Aug 2010 21:40:00 +0530
From:      "Jayachandran C." <c.jayachandran@gmail.com>
To:        Juli Mallett <jmallett@freebsd.org>, "M. Warner Losh" <imp@bsdimp.com>, Alan Cox <alc@cs.rice.edu>,  Randall Stewart <rrs@lakerest.net>, Neel Natu <neelnatu@gmail.com>
Cc:        freebsd-mips@freebsd.org
Subject:   [PATCH] Move from kseg0 to xkphys for 64 bit.
Message-ID:  <AANLkTinc2P7mO2qu+AiDtB=+oH3Winfc0AOAUxXS2XBh@mail.gmail.com>


[-- Attachment #1 --]
I've attached the changes to move the 64-bit port to the 64-bit XKPHYS
mapping of physical memory instead of the current KSEG0.  With
these changes the 64-bit port will use just one freelist, and can
allocate page table pages from anywhere in memory.
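
For reference, an XKPHYS direct-mapped address is built by setting
bits 63:62 to 2 and putting the cache coherency attribute (CCA) in
bits 61:59 above the physical address, which is what the
MIPS_PHYS_TO_XKPHYS_CACHED() macro in the patch does.  A minimal
standalone sketch (the CCA value 3 here is an assumption standing in
for MIPS_CCA_CACHED, which is CPU dependent):

  #include <stdio.h>

  #define CCA_CACHED  0x3ULL  /* assumed value of MIPS_CCA_CACHED */
  #define PHYS_TO_XKPHYS(cca, pa) \
      ((0x2ULL << 62) | ((cca) << 59) | (unsigned long long)(pa))

  int
  main(void)
  {
      /* 1G is beyond the 512M KSEG0 window, but fine via XKPHYS */
      printf("%#llx\n", PHYS_TO_XKPHYS(CCA_CACHED, 0x40000000ULL));
      return (0);
  }

This prints 0x9800000040000000, i.e. physical address 0x40000000
mapped cached through XKPHYS.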

The changes mainly introduce macros like MIPS_PHYS_TO_DIRECT(pa) and
MIPS_DIRECT_TO_PHYS(va), which use KSEG0 in the 32-bit compilation
and XKPHYS in the 64-bit compilation.  I also ended up converting the
macro-based PMAP_LMEM_MAP1(), PMAP_LMEM_MAP2(), and PMAP_LMEM_UNMAP()
into inline functions.
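
As an aside, the old statement macros were fragile: each one declared
locals and assigned through a caller-supplied "intr", so a macro could
not be used twice in the same block and could not hand back the mapped
address as a value.  A toy illustration of the problem, not kernel
code:

  #include <stdio.h>

  static int
  save_state(void)
  {
      return (42);  /* stands in for intr_disable() */
  }

  #define LMEM_MAP1(va, phys)    \
      int cpu = 0;               \
      intr = save_state();       \
      (va) = 0x1000 + (phys) + cpu

  int
  main(void)
  {
      int va, intr;

      LMEM_MAP1(va, 0x200);
      /*
       * A second LMEM_MAP1() in this block would not compile:
       * 'cpu' would be redefined.  An inline function that
       * returns the va has neither problem.
       */
      printf("va=%#x intr=%d\n", va, intr);
      return (0);
  }

The inline versions keep all of that state inside the function, and
save the interrupt state in struct local_sysmaps.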

I have also introduced a macro MIPS_DIRECT_MAPPABLE(pa), which
further reduces the number of places that need a special case for the
64-bit compilation.
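
To see the effect, pmap_zero_page() in the attached patch reduces to
the shape below; on n64 the compiler drops the else branch entirely,
since MIPS_DIRECT_MAPPABLE(pa) is the constant 1 there (the 32-bit
pmap_lmem_map1()/pmap_lmem_unmap() get empty n64 stubs so this still
compiles):

  if (MIPS_DIRECT_MAPPABLE(phys)) {
      /* n64: always taken; 32 bit: only for pa below 512M */
      va = MIPS_PHYS_TO_DIRECT(phys);
      bzero((caddr_t)va, PAGE_SIZE);
      mips_dcache_wbinv_range(va, PAGE_SIZE);
  } else {
      /* 32 bit highmem: temporary per-CPU mapping */
      va = pmap_lmem_map1(phys);
      bzero((caddr_t)va, PAGE_SIZE);
      mips_dcache_wbinv_range(va, PAGE_SIZE);
      pmap_lmem_unmap();
  }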

Please let me know your comments.

Thanks,
JC.

[-- Attachment #2 --]
Index: sys/mips/include/cpuregs.h
===================================================================
--- sys/mips/include/cpuregs.h	(revision 211332)
+++ sys/mips/include/cpuregs.h	(working copy)
@@ -76,8 +76,10 @@
  * Caching of mapped addresses is controlled by bits in the TLB entry.
  */
 
-#define	MIPS_KSEG0_LARGEST_PHYS         (0x20000000)
+#define	MIPS_KSEG0_LARGEST_PHYS		(0x20000000)
 #define	MIPS_PHYS_MASK			(0x1fffffff)
+#define	MIPS_XKPHYS_LARGEST_PHYS	(0x10000000000)
+#define	MIPS_XKPHYS_MASK		(0x0ffffffffff)
 
 #ifndef LOCORE
 #define	MIPS_KUSEG_START		0x00000000
@@ -107,9 +109,6 @@
 #define	MIPS_IS_VALID_PTR(x)		(MIPS_IS_KSEG0_ADDR(x) || \
 					    MIPS_IS_KSEG1_ADDR(x))
 
-#define	MIPS_XKPHYS_START		0x8000000000000000
-#define	MIPS_XKPHYS_END			0xbfffffffffffffff
-
 /*
  * Cache Coherency Attributes:
  *	UC:	Uncached.
@@ -180,19 +179,34 @@
 #define	MIPS_PHYS_TO_XKPHYS_UNCACHED(x) \
 	((0x2ULL << 62) | ((unsigned long long)(MIPS_CCA_UNCACHED) << 59) | (x))
 
-#define	MIPS_XKPHYS_TO_PHYS(x)		((x) & 0x07ffffffffffffffULL)
+#define	MIPS_XKPHYS_TO_PHYS(x)		((uintptr_t)(x) & 0x07ffffffffffffff)
 
+#define	MIPS_XKPHYS_START		0x8000000000000000
+#define	MIPS_XKPHYS_END			0xbfffffffffffffff
 #define	MIPS_XUSEG_START		0x0000000000000000
 #define	MIPS_XUSEG_END			0x0000010000000000
-
 #define	MIPS_XKSEG_START		0xc000000000000000
 #define	MIPS_XKSEG_END			0xc00000ff80000000
 
+#ifdef __mips_n64
+#define	MIPS_DIRECT_MAPPABLE(pa)	1
+#define	MIPS_PHYS_TO_DIRECT(pa)		MIPS_PHYS_TO_XKPHYS_CACHED(pa)
+#define	MIPS_PHYS_TO_DIRECT_UNCACHED(pa)	MIPS_PHYS_TO_XKPHYS_UNCACHED(pa)
+#define	MIPS_DIRECT_TO_PHYS(va)		MIPS_XKPHYS_TO_PHYS(va)
+#else
+#define	MIPS_DIRECT_MAPPABLE(pa)	((pa) < MIPS_KSEG0_LARGEST_PHYS)
+#define	MIPS_PHYS_TO_DIRECT(pa)		MIPS_PHYS_TO_KSEG0(pa)
+#define	MIPS_PHYS_TO_DIRECT_UNCACHED(pa)	MIPS_PHYS_TO_KSEG1(pa)
+#define	MIPS_DIRECT_TO_PHYS(va)		MIPS_KSEG0_TO_PHYS(va)
+#endif
+
 /* CPU dependent mtc0 hazard hook */
 #ifdef CPU_CNMIPS
 #define	COP0_SYNC  nop; nop; nop; nop; nop;
 #elif defined(CPU_SB1)
 #define COP0_SYNC  ssnop; ssnop; ssnop; ssnop; ssnop; ssnop; ssnop; ssnop; ssnop
+#elif defined(CPU_RMI)
+#define COP0_SYNC
 #else
 /*
  * Pick a reasonable default based on the "typical" spacing described in the
Index: sys/mips/include/vmparam.h
===================================================================
--- sys/mips/include/vmparam.h	(revision 211332)
+++ sys/mips/include/vmparam.h	(working copy)
@@ -96,7 +96,7 @@
 #define	VM_MINUSER_ADDRESS	((vm_offset_t)0x00000000)
 #define	VM_MAX_MMAP_ADDR	VM_MAXUSER_ADDRESS
 
-#if defined(__mips_n64)
+#ifdef __mips_n64
 #define	VM_MAXUSER_ADDRESS	(VM_MINUSER_ADDRESS + (NPDEPG * NBSEG))
 #define	VM_MIN_KERNEL_ADDRESS	((vm_offset_t)0xc000000000000000)
 #define	VM_MAX_KERNEL_ADDRESS	(VM_MIN_KERNEL_ADDRESS + (NPDEPG * NBSEG))
@@ -185,7 +185,7 @@
  *	  allocations use HIGHMEM if available, and then DEFAULT. 
  *	- HIGHMEM for other pages 
  */
-#if 0 /* Not yet, change n64 to use xkphys */
+#ifdef __mips_n64
 #define	VM_NFREELIST		1
 #define	VM_FREELIST_DEFAULT	0
 #define	VM_FREELIST_DIRECT	VM_FREELIST_DEFAULT
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c	(revision 211332)
+++ sys/mips/mips/pmap.c	(working copy)
@@ -198,75 +198,143 @@
 static void pmap_update_page_action(void *arg);
 #endif
 
-#if !defined(__mips_n64)
+#ifndef __mips_n64
+/*
+ * This structure is for high memory (memory above 512Meg in 32 bit).
+ * This memory area does not have a direct mapping, so we need a
+ * mechanism to do temporary per-CPU mappings to access these addresses.
+ *
+ * At bootup we reserve 2 virtual pages per CPU for mapping highmem pages.
+ * To access a highmem physical address on a CPU, we will disable interrupts
+ * and add the mapping from the reserved virtual address for the CPU to the
+ * physical address in the kernel pagetable.
+ */
 struct local_sysmaps {
-	vm_offset_t base;
-	uint16_t valid1, valid2;
+	vm_offset_t	base;
+	uint32_t	saved_intr;
+	uint16_t	valid1, valid2;
 };
-
-/* This structure is for large memory
- * above 512Meg. We can't (in 32 bit mode)
- * just use the direct mapped MIPS_KSEG0_TO_PHYS()
- * macros since we can't see the memory and must
- * map it in when we need to access it. In 64
- * bit mode this goes away.
- */
 static struct local_sysmaps sysmap_lmem[MAXCPU];
 
-#define	PMAP_LMEM_MAP1(va, phys)					\
-	int cpu;							\
-	struct local_sysmaps *sysm;					\
-	pt_entry_t *pte, npte;						\
-									\
-	intr = intr_disable();						\
-	cpu = PCPU_GET(cpuid);						\
-	sysm = &sysmap_lmem[cpu];					\
-	va = sysm->base;						\
-	npte = TLBLO_PA_TO_PFN(phys) |					\
-	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;		\
-	pte = pmap_pte(kernel_pmap, va);				\
-	*pte = npte;							\
-	sysm->valid1 = 1
+static __inline void
+pmap_alloc_lmem_map(void)
+{
+	int i;
 
-#define	PMAP_LMEM_MAP2(va1, phys1, va2, phys2)				\
-	int cpu;							\
-	struct local_sysmaps *sysm;					\
-	pt_entry_t *pte, npte;						\
-									\
-	intr = intr_disable();						\
-	cpu = PCPU_GET(cpuid);						\
-	sysm = &sysmap_lmem[cpu];					\
-	va1 = sysm->base;						\
-	va2 = sysm->base + PAGE_SIZE;					\
-	npte = TLBLO_PA_TO_PFN(phys1) |					\
-	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;		\
-	pte = pmap_pte(kernel_pmap, va1);				\
-	*pte = npte;							\
-	npte =  TLBLO_PA_TO_PFN(phys2) |				\
-	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;		\
-	pte = pmap_pte(kernel_pmap, va2);				\
-	*pte = npte;							\
-	sysm->valid1 = 1;						\
+	for (i = 0; i < MAXCPU; i++) {
+		sysmap_lmem[i].base = virtual_avail;
+		virtual_avail += PAGE_SIZE * 2;
+		sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
+	}
+}
+
+static __inline vm_offset_t
+pmap_lmem_map1(vm_paddr_t phys)
+{
+	struct local_sysmaps *sysm;
+	pt_entry_t *pte, npte;
+	vm_offset_t va;
+	uint32_t intr;
+	int cpu;
+
+	intr = intr_disable();
+	cpu = PCPU_GET(cpuid);
+	sysm = &sysmap_lmem[cpu];
+	sysm->saved_intr = intr;
+	va = sysm->base;
+	npte = TLBLO_PA_TO_PFN(phys) |
+	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
+	pte = pmap_pte(kernel_pmap, va);
+	*pte = npte;
+	sysm->valid1 = 1;
+	return va;
+}
+
+static __inline vm_offset_t
+pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
+{
+	struct local_sysmaps *sysm;
+	pt_entry_t *pte, npte;
+	vm_offset_t va1, va2;
+	uint32_t intr;
+	int cpu;
+
+	intr = intr_disable();
+	cpu = PCPU_GET(cpuid);
+	sysm = &sysmap_lmem[cpu];
+	sysm->saved_intr = intr;
+	va1 = sysm->base;
+	va2 = sysm->base + PAGE_SIZE;
+	npte = TLBLO_PA_TO_PFN(phys1) |
+	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
+	pte = pmap_pte(kernel_pmap, va1);
+	*pte = npte;
+	npte = TLBLO_PA_TO_PFN(phys2) |
+	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
+	pte = pmap_pte(kernel_pmap, va2);
+	*pte = npte;
+	sysm->valid1 = 1;
 	sysm->valid2 = 1;
+	return va1;
+}
 
-#define	PMAP_LMEM_UNMAP()						\
-	pte = pmap_pte(kernel_pmap, sysm->base);			\
-	*pte = PTE_G;							\
-	tlb_invalidate_address(kernel_pmap, sysm->base);		\
-	sysm->valid1 = 0;						\
-	pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);		\
-	*pte = PTE_G;							\
-	tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);	\
-	sysm->valid2 = 0;						\
-	intr_restore(intr)
-#endif
+static __inline void
+pmap_lmem_unmap(void)
+{
+	struct local_sysmaps *sysm;
+	pt_entry_t *pte;
+	int cpu;
 
+	cpu = PCPU_GET(cpuid);
+	sysm = &sysmap_lmem[cpu];
+	pte = pmap_pte(kernel_pmap, sysm->base);
+	*pte = PTE_G;
+	tlb_invalidate_address(kernel_pmap, sysm->base);
+	sysm->valid1 = 0;
+	if (sysm->valid2) {
+		pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
+		*pte = PTE_G;
+		tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
+		sysm->valid2 = 0;
+	}
+	intr_restore(sysm->saved_intr);
+}
+#else  /* __mips_n64 */
+
+static __inline void
+pmap_alloc_lmem_map(void)
+{
+}
+
+static __inline vm_offset_t
+pmap_lmem_map1(vm_paddr_t phys)
+{
+
+	return (0);
+}
+
+static __inline vm_offset_t
+pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
+{
+
+	return (0);
+}
+
+static __inline void
+pmap_lmem_unmap(void)
+{
+}
+#endif /* !__mips_n64 */
+
 /*
  * Page table entry lookup routines.
  */
 static __inline pd_entry_t *
 pmap_segmap(pmap_t pmap, vm_offset_t va)
 {
+
 	return (&pmap->pm_segtab[pmap_seg_index(va)]);
 }
 
@@ -295,12 +363,14 @@
 static __inline pd_entry_t *
 pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
 {
+
 	return pdpe;
 }
 
 static __inline 
 pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va)
 {
+
 	return pmap_segmap(pmap, va);
 }
 #endif
@@ -351,10 +421,9 @@
 
 	pa = phys_avail[0];
 	phys_avail[0] += size;
-	if (pa >= MIPS_KSEG0_LARGEST_PHYS) {
+	if (MIPS_DIRECT_MAPPABLE(pa) == 0)
 		panic("Out of memory below 512Meg?");
-	}
-	va = MIPS_PHYS_TO_KSEG0(pa);
+	va = MIPS_PHYS_TO_DIRECT(pa);
 	bzero((caddr_t)va, size);
 	return va;
 }
@@ -426,9 +495,7 @@
 pmap_bootstrap(void)
 {
 	int i;
-#if !defined(__mips_n64)
-	int memory_larger_than_512meg = 0;
-#endif
+	int need_local_mappings = 0;
 
 	/* Sort. */
 again:
@@ -456,10 +523,13 @@
 		}
 	}
 
-#if !defined(__mips_n64)
-	if (phys_avail[i - 1] >= MIPS_KSEG0_LARGEST_PHYS)
-		memory_larger_than_512meg = 1;
-#endif
+	/*
+	 * In 32 bit, we may have memory which cannot be mapped directly.
+	 * This memory will need temporary mappings before it can be
+	 * accessed.
+	 */
+	if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1]))
+		need_local_mappings = 1;
 
 	/*
 	 * Copy the phys_avail[] array before we start stealing memory from it.
@@ -518,22 +588,8 @@
 		printf("pcpu is available at virtual address %p.\n", pcpup);
 #endif
 
-#if !defined(__mips_n64)
-	/*
-	 * Steal some virtual space that will not be in kernel_segmap. This
-	 * va memory space will be used to map in kernel pages that are
-	 * outside the 512Meg region. Note that we only do this steal when
-	 * we do have memory in this region, that way for systems with
-	 * smaller memory we don't "steal" any va ranges :-)
-	 */
-	if (memory_larger_than_512meg) {
-		for (i = 0; i < MAXCPU; i++) {
-			sysmap_lmem[i].base = virtual_avail;
-			virtual_avail += PAGE_SIZE * 2;
-			sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
-		}
-	}
-#endif
+	if (need_local_mappings)
+		pmap_alloc_lmem_map();
 	pmap_create_kernel_pagetable();
 	pmap_max_asid = VMNUM_PIDS;
 	mips_wr_entryhi(0);
@@ -847,20 +903,13 @@
  *
  *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
  */
-#if defined(__mips_n64)
 vm_offset_t
 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
-	return (MIPS_PHYS_TO_XKPHYS_CACHED(start));
-}
-#else
-vm_offset_t
-pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
-{
 	vm_offset_t va, sva;
 
-	if (end <= MIPS_KSEG0_LARGEST_PHYS)
-		return (MIPS_PHYS_TO_KSEG0(start));
+	if (MIPS_DIRECT_MAPPABLE(end))
+		return (MIPS_PHYS_TO_DIRECT(start));
 
 	va = sva = *virt;
 	while (start < end) {
@@ -871,7 +920,6 @@
 	*virt = va;
 	return (sva);
 }
-#endif
 
 /*
  * Add a list of wired pages to the kva
@@ -968,7 +1016,7 @@
 		 * Recursively decrement next level pagetable refcount
 		 */
 		pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
-		pdpg = PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(pdp));
+		pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
 		pmap_unwire_pte_hold(pmap, va, pdpg);
 	}
 #endif
@@ -1003,7 +1051,7 @@
 			mpte = pmap->pm_ptphint;
 		} else {
 			pteva = *pmap_pde(pmap, va);
-			mpte = PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(pteva));
+			mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pteva));
 			pmap->pm_ptphint = mpte;
 		}
 	}
@@ -1032,7 +1080,11 @@
 pmap_grow_pte_page_cache()
 {
 
+#ifdef __mips_n64
+	vm_contig_grow_cache(3, 0, 0xffffffffffUL);
+#else
 	vm_contig_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
+#endif
 }
 
 static vm_page_t
@@ -1072,7 +1124,7 @@
 	while ((ptdpg = pmap_alloc_pte_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
 	       pmap_grow_pte_page_cache();
 
-	ptdva = MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(ptdpg));
+	ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
 	pmap->pm_segtab = (pd_entry_t *)ptdva;
 	pmap->pm_active = 0;
 	pmap->pm_ptphint = NULL;
@@ -1123,7 +1175,7 @@
 	 * Map the pagetable page into the process address space, if it
 	 * isn't already there.
 	 */
-	pageva = MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(m));
+	pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
 
 #ifdef __mips_n64
 	if (ptepindex >= NUPDE) {
@@ -1146,7 +1198,7 @@
 				return (NULL);
 			}
 		} else {
-			pg = PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(*pdep));
+			pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
 			pg->wire_count++;
 		}
 		/* Next level entry */
@@ -1199,7 +1251,7 @@
 		    (pmap->pm_ptphint->pindex == ptepindex)) {
 			m = pmap->pm_ptphint;
 		} else {
-			m = PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(*pde));
+			m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
 			pmap->pm_ptphint = m;
 		}
 		m->wire_count++;
@@ -1247,7 +1299,7 @@
 	    pmap->pm_stats.resident_count));
 
 	ptdva = (vm_offset_t)pmap->pm_segtab;
-	ptdpg = PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(ptdva));
+	ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));
 
 	ptdpg->wire_count--;
 	atomic_subtract_int(&cnt.v_wire_count, 1);
@@ -1278,7 +1330,7 @@
 			nkpg = pmap_alloc_pte_page(nkpt, VM_ALLOC_INTERRUPT);
 			if (nkpg == NULL)
 				panic("pmap_growkernel: no memory to grow kernel");
-			*pdpe = (pd_entry_t)MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(nkpg));
+			*pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
 			continue; /* try again */
 		}
 #endif
@@ -1299,7 +1351,7 @@
 		if (!nkpg)
 			panic("pmap_growkernel: no memory to grow kernel");
 		nkpt++;
-		*pde = (pd_entry_t)MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(nkpg));
+		*pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
 
 		/*
 		 * The R[4-7]?00 stores only one copy of the Global bit in
@@ -2077,7 +2129,7 @@
 					mpte = pmap->pm_ptphint;
 				} else {
 					mpte = PHYS_TO_VM_PAGE(
-						MIPS_KSEG0_TO_PHYS(*pde));
+						MIPS_DIRECT_TO_PHYS(*pde));
 					pmap->pm_ptphint = mpte;
 				}
 				mpte->wire_count++;
@@ -2153,30 +2205,21 @@
  *
  * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
  */
-#if defined(__mips_n64)
 void *
 pmap_kenter_temporary(vm_paddr_t pa, int i)
 {
-	return ((void *)MIPS_PHYS_TO_XKPHYS_CACHED(pa));
-}
-void
-pmap_kenter_temporary_free(vm_paddr_t pa)
-{
-}
-#else
-void *
-pmap_kenter_temporary(vm_paddr_t pa, int i)
-{
 	vm_offset_t va;
-	register_t intr;
+
 	if (i != 0)
 		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
 		    __func__);
 
-	if (pa < MIPS_KSEG0_LARGEST_PHYS) {
-		va = MIPS_PHYS_TO_KSEG0(pa);
+	if (MIPS_DIRECT_MAPPABLE(pa)) {
+		va = MIPS_PHYS_TO_DIRECT(pa);
 	} else {
+#ifndef __mips_n64    /* XXX : to be converted to new style */
 		int cpu;
+		register_t intr;
 		struct local_sysmaps *sysm;
 		pt_entry_t *pte, npte;
 
@@ -2196,6 +2239,7 @@
 		pmap_update_page(kernel_pmap, sysm->base, npte);
 		va = sysm->base;
 		intr_restore(intr);
+#endif
 	}
 	return ((void *)va);
 }
@@ -2203,14 +2247,17 @@
 void
 pmap_kenter_temporary_free(vm_paddr_t pa)
 {
+#ifndef __mips_n64    /* XXX : to be converted to new style */
 	int cpu;
 	register_t intr;
 	struct local_sysmaps *sysm;
+#endif
 
-	if (pa < MIPS_KSEG0_LARGEST_PHYS) {
+	if (MIPS_DIRECT_MAPPABLE(pa)) {
 		/* nothing to do for this case */
 		return;
 	}
+#ifndef __mips_n64    /* XXX : to be converted to new style */
 	cpu = PCPU_GET(cpuid);
 	sysm = &sysmap_lmem[cpu];
 	if (sysm->valid1) {
@@ -2223,8 +2270,8 @@
 		intr_restore(intr);
 		sysm->valid1 = 0;
 	}
-}
 #endif
+}
 
 /*
  * Moved the code to Machine Independent
@@ -2333,113 +2380,65 @@
  *
  * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
  */
-#if defined (__mips_n64)
 void
 pmap_zero_page(vm_page_t m)
 {
 	vm_offset_t va;
 	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
 
-	va = MIPS_PHYS_TO_XKPHYS_CACHED(phys);
-	bzero((caddr_t)va, PAGE_SIZE);
-	mips_dcache_wbinv_range(va, PAGE_SIZE);
-}
-#else
-void
-pmap_zero_page(vm_page_t m)
-{
-	vm_offset_t va;
-	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-	register_t intr;
-
-	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
-		va = MIPS_PHYS_TO_KSEG0(phys);
-
+	if (MIPS_DIRECT_MAPPABLE(phys)) {
+		va = MIPS_PHYS_TO_DIRECT(phys);
 		bzero((caddr_t)va, PAGE_SIZE);
 		mips_dcache_wbinv_range(va, PAGE_SIZE);
 	} else {
-		PMAP_LMEM_MAP1(va, phys);
-
+		va = pmap_lmem_map1(phys);
 		bzero((caddr_t)va, PAGE_SIZE);
 		mips_dcache_wbinv_range(va, PAGE_SIZE);
-
-		PMAP_LMEM_UNMAP();
+		pmap_lmem_unmap();
 	}
 }
-#endif
+
 /*
  *	pmap_zero_page_area zeros the specified hardware page by mapping
  *	the page into KVM and using bzero to clear its contents.
  *
  *	off and size may not cover an area beyond a single hardware page.
  */
-#if defined (__mips_n64)
 void
 pmap_zero_page_area(vm_page_t m, int off, int size)
 {
 	vm_offset_t va;
 	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
 
-	va = MIPS_PHYS_TO_XKPHYS_CACHED(phys);
-	bzero((char *)(caddr_t)va + off, size);
-	mips_dcache_wbinv_range(va + off, size);
-}
-#else
-void
-pmap_zero_page_area(vm_page_t m, int off, int size)
-{
-	vm_offset_t va;
-	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-	register_t intr;
-
-	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
-		va = MIPS_PHYS_TO_KSEG0(phys);
+	if (MIPS_DIRECT_MAPPABLE(phys)) {
+		va = MIPS_PHYS_TO_DIRECT(phys);
 		bzero((char *)(caddr_t)va + off, size);
 		mips_dcache_wbinv_range(va + off, size);
 	} else {
-		PMAP_LMEM_MAP1(va, phys);
-
+		va = pmap_lmem_map1(phys);
 		bzero((char *)va + off, size);
 		mips_dcache_wbinv_range(va + off, size);
-
-		PMAP_LMEM_UNMAP();
+		pmap_lmem_unmap();
 	}
 }
-#endif
 
-#if defined (__mips_n64)
 void
 pmap_zero_page_idle(vm_page_t m)
 {
 	vm_offset_t va;
 	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
 
-	va = MIPS_PHYS_TO_XKPHYS_CACHED(phys);
-	bzero((caddr_t)va, PAGE_SIZE);
-	mips_dcache_wbinv_range(va, PAGE_SIZE);
-}
-#else
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	vm_offset_t va;
-	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-	register_t intr;
-
-	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
-		va = MIPS_PHYS_TO_KSEG0(phys);
+	if (MIPS_DIRECT_MAPPABLE(phys)) {
+		va = MIPS_PHYS_TO_DIRECT(phys);
 		bzero((caddr_t)va, PAGE_SIZE);
 		mips_dcache_wbinv_range(va, PAGE_SIZE);
 	} else {
-		PMAP_LMEM_MAP1(va, phys);
-
+		va = pmap_lmem_map1(phys);
 		bzero((caddr_t)va, PAGE_SIZE);
 		mips_dcache_wbinv_range(va, PAGE_SIZE);
-
-		PMAP_LMEM_UNMAP();
+		pmap_lmem_unmap();
 	}
 }
-#endif
 
 /*
  *	pmap_copy_page copies the specified (machine independent)
@@ -2449,31 +2448,14 @@
  *
  * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
  */
-#if defined (__mips_n64)
 void
 pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
 	vm_offset_t va_src, va_dst;
-	vm_paddr_t phy_src = VM_PAGE_TO_PHYS(src);
-	vm_paddr_t phy_dst = VM_PAGE_TO_PHYS(dst);
+	vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
+	vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);
 
-	pmap_flush_pvcache(src);
-	mips_dcache_wbinv_range_index(MIPS_PHYS_TO_XKPHYS_CACHED(phy_dst), PAGE_SIZE);
-	va_src = MIPS_PHYS_TO_XKPHYS_CACHED(phy_src);
-	va_dst = MIPS_PHYS_TO_XKPHYS_CACHED(phy_dst);
-	bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
-	mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
-}
-#else
-void
-pmap_copy_page(vm_page_t src, vm_page_t dst)
-{
-	vm_offset_t va_src, va_dst;
-	vm_paddr_t phy_src = VM_PAGE_TO_PHYS(src);
-	vm_paddr_t phy_dst = VM_PAGE_TO_PHYS(dst);
-	register_t intr;
-
-	if ((phy_src < MIPS_KSEG0_LARGEST_PHYS) && (phy_dst < MIPS_KSEG0_LARGEST_PHYS)) {
+	if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
 		/* easy case, all can be accessed via KSEG0 */
 		/*
 		 * Flush all caches for VA that are mapped to this page
@@ -2481,21 +2463,19 @@
 		 */
 		pmap_flush_pvcache(src);
 		mips_dcache_wbinv_range_index(
-		    MIPS_PHYS_TO_KSEG0(phy_dst), PAGE_SIZE);
-		va_src = MIPS_PHYS_TO_KSEG0(phy_src);
-		va_dst = MIPS_PHYS_TO_KSEG0(phy_dst);
+		    MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
+		va_src = MIPS_PHYS_TO_DIRECT(phys_src);
+		va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
 		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
 		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
 	} else {
-		PMAP_LMEM_MAP2(va_src, phy_src, va_dst, phy_dst);
-
+		va_src = pmap_lmem_map2(phys_src, phys_dst);
+		va_dst = va_src + PAGE_SIZE;
 		bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
 		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
-
-		PMAP_LMEM_UNMAP();
+		pmap_lmem_unmap();
 	}
 }
-#endif
 
 /*
  * Returns true if the pmap's pv is one of the first
@@ -2911,29 +2891,17 @@
  *
  * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
  */
-#if defined(__mips_n64)
 void *
 pmap_mapdev(vm_offset_t pa, vm_size_t size)
 {
-	return ((void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(pa));
-}
-
-void
-pmap_unmapdev(vm_offset_t va, vm_size_t size)
-{
-}
-#else
-void *
-pmap_mapdev(vm_offset_t pa, vm_size_t size)
-{
         vm_offset_t va, tmpva, offset;
 
 	/* 
 	 * KSEG1 maps only first 512M of phys address space. For 
 	 * pa > 0x20000000 we should make proper mapping * using pmap_kenter.
 	 */
-	if ((pa + size - 1) < MIPS_KSEG0_LARGEST_PHYS)
-		return (void *)MIPS_PHYS_TO_KSEG1(pa);
+	if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
+		return (void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa);
 	else {
 		offset = pa & PAGE_MASK;
 		size = roundup(size + offset, PAGE_SIZE);
@@ -2956,6 +2924,7 @@
 void
 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 {
+#ifndef __mips_n64
 	vm_offset_t base, offset, tmpva;
 
 	/* If the address is within KSEG1 then there is nothing to do */
@@ -2968,8 +2937,8 @@
 	for (tmpva = base; tmpva < base + size; tmpva += PAGE_SIZE)
 		pmap_kremove(tmpva);
 	kmem_free(kernel_map, base, size);
-}
 #endif
+}
 
 /*
  * perform the pmap work for mincore
