Date:      Sun, 25 May 2003 14:46:54 -0700 (PDT)
From:      Juli Mallett <jmallett@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 31868 for review
Message-ID:  <200305252146.h4PLksGP011950@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=31868

Change 31868 by jmallett@jmallett_dalek on 2003/05/25 14:46:14

	o) Add END addresses for various segments.
	o) Add TBIA() wrappers.
	o) Switch to using 8K pages.
	o) Add ASID related stuff to pcpu.
	o) Fill in pmap.h with stuff Alpha pmap uses.
	o) Use a 64-bit PTE.
	o) Add a SW managed bit to the PTE.
	o) Note that we have UMA_MD_SMALL_ALLOC, which is required for
	   the Alpha pmap to work properly (otherwise we would need a
	   custom allocator for PV tables, as on i386).
	o) Point kernel addresses into XKSEG.
	o) Add bogus vectors for the new exception code and nuke the
	   old ones from NetBSD.  All of the TLB-related exceptions
	   will need new code to use the new pmap.
	o) Use dm[ft]c0 for exception program counter.
	o) Move calls that are not platform-ordered into mips_init.
	o) Move physsz/physmem stuff into mips_init.
	o) Remove the exception switch.
	o) Put a lightly-modified Alpha pmap in place.  It will need a
	   lot of hacking, but it is a fleshed-out enough placeholder
	   to get us just as far, in real terms, as the fake
	   direct-mapped pmap did.  It is still missing code to
	   actually use the TLB and to make proper use of the page
	   tables, and it carries extra complexity from being shaped
	   around what PAL provides on Alpha.  That will change; this
	   is mostly something to work from.
	o) Make phys_avail contain real physical addresses (see the
	   sketch after this list).  This is key to keeping a real
	   physical vs. virtual distinction; the fake values were just
	   a remnant of SPARC64 influence and of the lazy pmap.
	o) Sneak in a cpu_intr that hands off to iointr, though it is
	   not really used yet.
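
Since phys_avail now holds real physical addresses, later code can
walk its start/end pairs directly to size memory, which is what the
physsz/physmem computation feeds on.  A minimal userland sketch of
that walk, assuming the usual zero-terminated pair convention (the
array contents are made up for illustration):

#include <stdio.h>

/* phys_avail holds start/end pairs of physical RAM, zero-terminated. */
static unsigned long phys_avail[] = {
	0x00100000UL, 0x04000000UL,	/* hypothetical 63MB chunk */
	0x08000000UL, 0x10000000UL,	/* hypothetical 128MB chunk */
	0, 0
};

int
main(void)
{
	unsigned long physsz = 0;
	int i;

	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		physsz += phys_avail[i + 1] - phys_avail[i];
	printf("physsz = %#lx bytes\n", physsz);
	return (0);
}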

Affected files ...

.. //depot/projects/mips/sys/mips/include/cpuregs.h#9 edit
.. //depot/projects/mips/sys/mips/include/locore.h#5 edit
.. //depot/projects/mips/sys/mips/include/param.h#10 edit
.. //depot/projects/mips/sys/mips/include/pcpu.h#4 edit
.. //depot/projects/mips/sys/mips/include/pmap.h#10 edit
.. //depot/projects/mips/sys/mips/include/pte.h#4 edit
.. //depot/projects/mips/sys/mips/include/vmparam.h#8 edit
.. //depot/projects/mips/sys/mips/mips/exception.S#2 edit
.. //depot/projects/mips/sys/mips/mips/machdep.c#27 edit
.. //depot/projects/mips/sys/mips/mips/mips_subr.S#6 edit
.. //depot/projects/mips/sys/mips/mips/pmap.c#14 edit
.. //depot/projects/mips/sys/mips/sgimips/machdep_sgimips.c#18 edit

Differences ...

==== //depot/projects/mips/sys/mips/include/cpuregs.h#9 (text+ko) ====

@@ -76,9 +76,13 @@
  */
 
 #define	MIPS_KUSEG_START		0x0
+#define	MIPS_KUSEG_END			0x000000ffffffffff
 #define	MIPS_XKSEG_START		0xc000000000000000
+#define	MIPS_XKSEG_END			0xc00000ff7fffffff
 #define	MIPS_KSEG0_START		0xffffffff80000000
+#define	MIPS_KSEG0_END			0xffffffff9fffffff
 #define	MIPS_KSEG1_START		0xffffffffa0000000
+#define	MIPS_KSEG1_END			0xffffffffbfffffff
 #define	MIPS_KSEG2_START		0xffffffffc0000000
 #define	MIPS_MAX_MEM_ADDR		0xbe000000
 #define	MIPS_RESERVED_ADDR		0xbfc80000
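
The new END constants pair with the existing START constants to bound
each segment.  A standalone sketch of classifying an address against
these bounds (userland C with addresses as plain 64-bit integers;
mips_segname is an invented name):

#include <stdint.h>
#include <stdio.h>

#define	MIPS_KUSEG_END		0x000000ffffffffffULL
#define	MIPS_XKSEG_START	0xc000000000000000ULL
#define	MIPS_XKSEG_END		0xc00000ff7fffffffULL
#define	MIPS_KSEG0_START	0xffffffff80000000ULL
#define	MIPS_KSEG0_END		0xffffffff9fffffffULL
#define	MIPS_KSEG1_START	0xffffffffa0000000ULL
#define	MIPS_KSEG1_END		0xffffffffbfffffffULL

/* Return a segment name for a 64-bit virtual address. */
static const char *
mips_segname(uint64_t va)
{
	if (va <= MIPS_KUSEG_END)
		return ("useg");
	if (va >= MIPS_XKSEG_START && va <= MIPS_XKSEG_END)
		return ("xkseg");
	if (va >= MIPS_KSEG0_START && va <= MIPS_KSEG0_END)
		return ("kseg0");
	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
		return ("kseg1");
	return ("other");
}

int
main(void)
{
	printf("%s\n", mips_segname(0xffffffff80001000ULL)); /* kseg0 */
	return (0);
}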

==== //depot/projects/mips/sys/mips/include/locore.h#5 (text+ko) ====

@@ -88,6 +88,7 @@
  */
 typedef struct  {
 	void (*setTLBpid)(int pid);
+	void (*TBIA)(int);
 	void (*TBIAP)(int);
 	void (*TBIS)(vm_paddr_t);
 	int  (*tlbUpdate)(u_int highreg, u_int lowreg);
@@ -103,6 +104,7 @@
 extern long *mips_locoresw[];
 
 #define MachSetPID		mips64_SetPID
+#define	MIPS_TBIA		mips64_TBIA
 #define MIPS_TBIAP()		mips64_TBIAP(mips_num_tlb_entries)
 #define MIPS_TBIS		mips64_TBIS
 #define MachTLBUpdate		mips64_TLBUpdate
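
MIPS_TBIA routes through the locore jump vector, so MI code can flush
the entire TLB without knowing which CPU-specific routine is
installed.  A standalone sketch of the indirection pattern; the mock
functions are stand-ins, not the real mips64 routines:

#include <stdio.h>

/* Miniature of the jump-vector idea: MI code calls through pointers. */
typedef struct {
	void (*TBIA)(int);
	void (*TBIAP)(int);
} jumpvec_t;

static void
mock_TBIA(int ntlb)
{
	printf("invalidate all %d TLB entries\n", ntlb);
}

static void
mock_TBIAP(int ntlb)
{
	printf("invalidate this ASID's entries among %d\n", ntlb);
}

static jumpvec_t locore_vec = { mock_TBIA, mock_TBIAP };

int
main(void)
{
	locore_vec.TBIA(48);	/* e.g., an R4000-class TLB size */
	return (0);
}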

==== //depot/projects/mips/sys/mips/include/param.h#10 (text+ko) ====

@@ -10,10 +10,10 @@
 #endif
 
 /*
- * We use a 4K page on MIPS systems.  Override PAGE_* definitions
+ * We use an 8K page on MIPS systems.  Override PAGE_* definitions
  * to compile-time constants.
  */
-#define	PAGE_SHIFT	12
+#define	PAGE_SHIFT	13
 #define	PAGE_SIZE	(1 << PAGE_SHIFT)
 #define	PAGE_MASK	(PAGE_SIZE - 1)
 
@@ -68,9 +68,9 @@
 #define ALIGNBYTES	_ALIGNBYTES
 #define ALIGN(p)	_ALIGN(p)
 
-#define	NBPG		4096		/* bytes/page */
+#define	NBPG		8192		/* bytes/page */
 #define	PGOFSET		(NBPG-1)	/* byte offset into page */
-#define	PGSHIFT		12		/* LOG2(NBPG) */
+#define	PGSHIFT		13		/* LOG2(NBPG) */
 #define	NPTEPG		(NBPG/4)
 
 #define NBSEG		0x400000	/* bytes/segment */
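
With PAGE_SHIFT now 13, every derived constant describes 8K pages.  A
quick userland sketch of the usual trunc/round idioms built from these
definitions:

#include <stdio.h>

#define	PAGE_SHIFT	13
#define	PAGE_SIZE	(1 << PAGE_SHIFT)
#define	PAGE_MASK	(PAGE_SIZE - 1)

#define	trunc_page(x)	((x) & ~(unsigned long)PAGE_MASK)
#define	round_page(x)	(((x) + PAGE_MASK) & ~(unsigned long)PAGE_MASK)

int
main(void)
{
	unsigned long va = 0x12345UL;

	/* 0x12345 truncates to 0x12000 and rounds up to 0x14000. */
	printf("trunc %#lx round %#lx\n", trunc_page(va), round_page(va));
	return (0);
}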

==== //depot/projects/mips/sys/mips/include/pcpu.h#4 (text+ko) ====

@@ -35,7 +35,8 @@
 #include <machine/cpufunc.h>
 
 #define	PCPU_MD_FIELDS							\
-	/* XXX Nothing to see here. */
+	u_char	pc_next_asid;		/* next ASID to alloc */	\
+	u_int	pc_current_asidgen;	/* ASID rollover check */
 
 #define PCPUP	(pcpup)
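
The two new fields support Alpha-style ASID allocation: ASIDs are
handed out sequentially, and the generation counter is bumped on
rollover so that stale ASIDs held by other pmaps can be detected
lazily.  A sketch of that allocator, modeled on the Alpha pmap; the
function name and the plain-int stand-ins for the pcpu fields are
illustrative only:

#include <stdio.h>

#define	ASID_BITS	8
#define	MAX_ASID	((1 << ASID_BITS) - 1)

/* Stand-ins for pc_next_asid and pc_current_asidgen. */
static unsigned int next_asid = 1;	/* keep ASID 0 reserved here */
static unsigned int current_asidgen = 1;

struct pmap {
	int	pm_asid;
	int	pm_asidgen;
};

/* Give pm a usable ASID, starting a new generation on rollover. */
static void
pmap_asid_alloc(struct pmap *pm)
{
	if (pm->pm_asid != 0 && pm->pm_asidgen == (int)current_asidgen)
		return;			/* still valid this generation */
	if (next_asid > MAX_ASID) {
		next_asid = 1;		/* rollover */
		current_asidgen++;	/* invalidates all old ASIDs */
		/* Real code would flush per-ASID TLB entries (TBIAP) here. */
	}
	pm->pm_asid = next_asid++;
	pm->pm_asidgen = current_asidgen;
}

int
main(void)
{
	struct pmap pm = { 0, 0 };

	pmap_asid_alloc(&pm);
	printf("asid %d gen %d\n", pm.pm_asid, pm.pm_asidgen);
	return (0);
}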
 

==== //depot/projects/mips/sys/mips/include/pmap.h#10 (text+ko) ====

@@ -31,22 +31,42 @@
 
 #include <machine/pte.h>
 
-struct md_page {
-};
+#ifndef	LOCORE
 
-#define	PMAP_SEGTABSIZE	512
+struct pv_entry;
 
-struct segtab {
-	union pt_entry	*seg_tab[PMAP_SEGTABSIZE];
-};
-
-struct pmap {
+typedef	struct pmap {
 	struct pmap_statistics	pm_stats;
 	int			pm_asid;
 	int			pm_asidgen;
+	int			pm_active;
+	pt_entry_t		*pm_lev1;	/* KVA of lev1map */
+	vm_object_t		pm_pteobj;	/* Container for pte's */
+	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
+	struct	vm_page		*pm_ptphint;	/* pmap ptp hint */
+	LIST_ENTRY(pmap)	pm_list;	/* list of all pmaps. */
+} *pmap_t;
+
+struct md_page {
+	int pv_list_count;
+	TAILQ_HEAD(,pv_entry)	pv_list;
 };
 
-typedef	struct pmap *pmap_t;
+#define	ASID_BITS	8
+#define	ASIDGEN_BITS	(sizeof(int) * 8 - ASID_BITS)
+#define	ASIDGEN_MASK	((1 << ASIDGEN_BITS) - 1)
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
+ */
+typedef struct pv_entry {
+	pmap_t		pv_pmap;	/* pmap where mapping lies */
+	vm_offset_t	pv_va;		/* virtual address for mapping */
+	TAILQ_ENTRY(pv_entry)	pv_list;
+	TAILQ_ENTRY(pv_entry)	pv_plist;
+	vm_page_t	pv_ptem;	/* VM page for pte */
+} *pv_entry_t;
 
 extern	pmap_t kernel_pmap;
 extern	vm_offset_t avail_start;
@@ -55,13 +75,41 @@
 extern	vm_size_t physsz;
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;
-extern	struct segtab *segbase;
 
-void pmap_bootstrap(vm_offset_t);
+void pmap_bootstrap(void);
 vm_offset_t pmap_kextract(vm_offset_t);
 vm_offset_t pmap_steal_memory(vm_size_t);
 
 #define	pmap_resident_count(pm)	((pm)->pm_stats.resident_count)
 #define	vtophys(va)	pmap_kextract((vm_offset_t)(va))
 
+#endif /*!LOCORE*/
+
+#define	NLPT			3	/* levels of page tables */
+#define MIPS_PTSHIFT		(PAGE_SHIFT-NLPT) /* bits that index within page tables */
+#define	MIPS_PGBYTES		(1 << PAGE_SHIFT)
+#define MIPS_L3SHIFT		PAGE_SHIFT
+#define MIPS_L2SHIFT		(MIPS_L3SHIFT+MIPS_PTSHIFT)
+#define MIPS_L1SHIFT		(MIPS_L2SHIFT+MIPS_PTSHIFT)
+
+/*
+ * Pte related macros
+ */
+#define VADDR(l1, l2, l3)	(((l1) << MIPS_L1SHIFT)		\
+				 + ((l2) << MIPS_L2SHIFT)	\
+				 + ((l3) << MIPS_L3SHIFT))
+
+#ifndef NKPT
+#define	NKPT			9	/* initial number of kernel page tables */
+#endif
+#define NKLEV2MAPS		255	/* max number of lev2 page tables */
+#define NKLEV3MAPS		(NKLEV2MAPS << MIPS_PTSHIFT) /* max number of lev3 page tables */
+
+#define PTLEV1I		(NPTEPG-1)	/* Lev1 entry that points to Lev1 */
+#define K0SEGLEV1I	(NPTEPG/2)
+#define K1SEGLEV1I	(K0SEGLEV1I+(NPTEPG/4))
+
+#define NUSERLEV2MAPS	(NPTEPG/2)
+#define NUSERLEV3MAPS	(NUSERLEV2MAPS << MIPS_PTSHIFT)
+
 #endif	/* !_MACHINE_PMAP_H_ */
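
These shifts describe a three-level tree: with 8K pages and 8-byte
PTEs, each table page holds 2^10 entries, so MIPS_PTSHIFT is 10 and a
virtual address splits into three 10-bit indices above the 13-bit page
offset.  A standalone sketch of that decomposition (the sample address
is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define	PAGE_SHIFT	13
#define	NLPT		3
#define	MIPS_PTSHIFT	(PAGE_SHIFT - NLPT)		/* 10 */
#define	MIPS_L3SHIFT	PAGE_SHIFT
#define	MIPS_L2SHIFT	(MIPS_L3SHIFT + MIPS_PTSHIFT)
#define	MIPS_L1SHIFT	(MIPS_L2SHIFT + MIPS_PTSHIFT)
#define	PTMASK		((1 << MIPS_PTSHIFT) - 1)

int
main(void)
{
	uint64_t va = 0x0000000123456000ULL;

	printf("l1 %#llx l2 %#llx l3 %#llx off %#llx\n",
	    (unsigned long long)((va >> MIPS_L1SHIFT) & PTMASK),
	    (unsigned long long)((va >> MIPS_L2SHIFT) & PTMASK),
	    (unsigned long long)((va >> MIPS_L3SHIFT) & PTMASK),
	    (unsigned long long)(va & ((1 << PAGE_SHIFT) - 1)));
	return (0);
}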

==== //depot/projects/mips/sys/mips/include/pte.h#4 (text+ko) ====

@@ -100,10 +100,11 @@
  */
 
 #ifndef LOCORE
+#if	0/*DOCUMENTATION&OLD*/
 struct mips3_pte {
 #if BYTE_ORDER == BIG_ENDIAN
-unsigned int	pg_prot:2,		/* SW: access control */
-		pg_pfnum:24,		/* HW: core page frame number or 0 */
+unsigned int	pg_prot:3,		/* SW: access control */
+		pg_pfnum:23,		/* HW: core page frame number or 0 */
 		pg_attr:3,		/* HW: cache attribute */
 		pg_m:1,			/* HW: dirty bit */
 		pg_v:1,			/* HW: valid bit */
@@ -114,10 +115,11 @@
 		pg_v:1,			/* HW: valid bit */
 		pg_m:1,			/* HW: dirty bit */
 		pg_attr:3,		/* HW: cache attribute */
-		pg_pfnum:24,		/* HW: core page frame number or 0 */
-		pg_prot:2;		/* SW: access control */
+		pg_pfnum:23,		/* HW: core page frame number or 0 */
+		pg_prot:3;		/* SW: access control */
 #endif
 };
+#endif
 
 /*
 * Structure defining a TLB entry data set.
@@ -133,6 +135,7 @@
 
 #define MIPS3_PG_WIRED	0x80000000	/* SW */
 #define MIPS3_PG_RO	0x40000000	/* SW */
+#define	MIPS3_PG_M	0x20000000	/* SW */
 
 #define	MIPS3_PG_SVPN	0xfffff000	/* Software page no mask */
 #define	MIPS3_PG_HVPN	0xffffe000	/* Hardware page no mask */
@@ -202,10 +205,13 @@
 #ifndef LOCORE
 #include <machine/cpu.h>
 
+typedef unsigned long pt_entry_t;
+#if 0
 typedef union pt_entry {
 	unsigned int	 pt_entry;	/* for copying, etc. */
 	struct mips3_pte pt_mips3_pte;
 } pt_entry_t;
+#endif
 
 #define	mips_pg_nv_bit()	(MIPS1_PG_NV)	/* same on mips1 and mips3 */
 
@@ -227,18 +233,34 @@
 #define	mips_tlbpfn_to_paddr(x)		mips3_tlbpfn_to_paddr((vm_offset_t)(x))
 #define	mips_paddr_to_tlbpfn(x)		mips3_paddr_to_tlbpfn((x))
 
-#endif /* ! LOCORE */
+/*
+ * Address of current address space page table maps
+ */
+#ifdef _KERNEL
+/*
+ * PTmap is recursive pagemap at top of virtual address space.
+ * Within PTmap, the lev2 and lev1 page tables can be found.
+ */
+	/* lev3 page tables */
+#define	PTmap		((pt_entry_t*)VPTBASE)
+	/* lev2 page tables */
+#define	PTlev2		((pt_entry_t*)(PTmap+(PTLEV1I<<MIPS_L2SHIFT)))
+	/* lev1 page tables */
+#define	PTlev1		((pt_entry_t*)(PTlev2+(PTLEV1I<<MIPS_L2SHIFT)))
+	/* pte that maps lev1 page table */
+#define	PTlev1pte	((pt_entry_t*)(PTlev1+(PTLEV1I*sizeof(pt_entry_t))))
+#endif
 
-#if defined(_KERNEL) && !defined(LOCORE)
+#ifdef _KERNEL
 /*
- * Kernel virtual address to page table entry and visa versa.
+ * Virtual address to page table entry and
+ * to physical address.
+ * Note: this works recursively; vtopte of a pte will give
+ * the corresponding lev1 that in turn maps it.
  */
-#define	kvtopte(va) \
-	(kptemap + (((vm_offset_t)(va) - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT))
-#define	ptetokv(pte) \
-	((((pt_entry_t *)(pte) - kptemap) << PGSHIFT) + VM_MIN_KERNEL_ADDRESS)
+#define	vtopte(va)	(PTmap + (btop(va) \
+				  & ((1 << 3*MIPS_PTSHIFT)-1)))
+#endif /* _KERNEL */
 
-extern	pt_entry_t *kptemap;		/* kernel pte table */
-extern	vm_size_t kptemapsize;		/* number of pte's in Sysmap */
-#endif	/* defined(_KERNEL) && !defined(LOCORE) */
+#endif /* ! LOCORE */
 #endif /* __MIPS_PTE_H__ */
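
The hardware never sets a dirty bit in memory on MIPS, so the new
MIPS3_PG_M software bit lets the pmap track modification the usual
software way: leave pages write-protected until the first store
faults, then record the modification and grant write access.  A
sketch of that transition; the handler name is invented and the
hardware bit values are illustrative:

#include <stdio.h>

#define	MIPS3_PG_V	0x00000002	/* HW: valid (illustrative) */
#define	MIPS3_PG_D	0x00000004	/* HW: writable (illustrative) */
#define	MIPS3_PG_RO	0x40000000	/* SW: truly read-only */
#define	MIPS3_PG_M	0x20000000	/* SW: page has been modified */

typedef unsigned long pt_entry_t;

/* On a TLB Mod fault: grant write access unless the page is truly RO. */
static int
tlb_mod_fault(pt_entry_t *pte)
{
	if ((*pte & MIPS3_PG_V) == 0 || (*pte & MIPS3_PG_RO) != 0)
		return (-1);		/* genuine protection fault */
	*pte |= MIPS3_PG_M | MIPS3_PG_D; /* note modified, allow writes */
	/* Real code would also update the TLB entry and vm_page state. */
	return (0);
}

int
main(void)
{
	pt_entry_t pte = MIPS3_PG_V;

	printf("fault -> %d, pte now %#lx\n", tlb_mod_fault(&pte), pte);
	return (0);
}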

==== //depot/projects/mips/sys/mips/include/vmparam.h#8 (text+ko) ====

@@ -114,6 +114,12 @@
 #define	MAXSLP 		20
 
 /*
+ * MIPS provides a machine specific single page allocator through the use
+ * of KSEG0.
+ */
+#define UMA_MD_SMALL_ALLOC
+
+/*
  * Mach derived constants
  */
 
@@ -124,9 +130,10 @@
 #define VM_MIN_ADDRESS		((vm_offset_t)0x0000000000000000)
 #define VM_MAXUSER_ADDRESS	((vm_offset_t)0x0000010000000000)
 #define VM_MAX_ADDRESS		((vm_offset_t)0x0000010000000000)
-#define VM_MIN_KERNEL_ADDRESS	((vm_offset_t)0xFFFFFFFF80000000)
-#define VM_MAX_KERNEL_ADDRESS	((vm_offset_t)0xFFFFFFFFA0000000)
+#define VM_MIN_KERNEL_ADDRESS	((vm_offset_t)MIPS_XKSEG_START)
+#define VM_MAX_KERNEL_ADDRESS	((vm_offset_t)VPTBASE-1)
 #define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
+#define	VPTBASE			(MIPS_XKSEG_START+0xffffffff)
 
 /* virtual sizes (bytes) for various kernel submaps */
 #define	VM_KMEM_SIZE		(16*1024*1024)		/* XXX ??? */
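
UMA_MD_SMALL_ALLOC tells UMA that the machine layer can hand back
single pages directly, by returning direct-mapped (KSEG0-style)
addresses for physical pages instead of consuming kernel map entries.
A sketch of the idea with a simplified interface; this is not the
real uma_small_alloc() signature:

#include <stdint.h>
#include <stdio.h>

#define	MIPS_KSEG0_START	0xffffffff80000000ULL

/* Illustrative: direct-map a physical address into KSEG0. */
static void *
phys_to_kseg0(uint64_t pa)
{
	return ((void *)(uintptr_t)(MIPS_KSEG0_START + pa));
}

/* Simplified page allocator in the spirit of uma_small_alloc(). */
static void *
md_small_alloc(uint64_t (*page_alloc)(void))
{
	uint64_t pa = page_alloc();	/* grab one free physical page */

	if (pa == 0)
		return (NULL);
	return (phys_to_kseg0(pa));	/* no kernel map entry needed */
}

static uint64_t
fake_page_alloc(void)
{
	return (0x200000ULL);		/* pretend-free physical page */
}

int
main(void)
{
	printf("va %p\n", md_small_alloc(fake_page_alloc));
	return (0);
}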

==== //depot/projects/mips/sys/mips/mips/exception.S#2 (text+ko) ====

@@ -58,6 +58,44 @@
 	.set at
 VEND(ExceptionVector)
 
+LEAF(CacheVector)
+	.set noat
+
+	dsubu	sp, sp, TF_SIZE
+	dla	k0, 1f
+	j	exception_save_registers
+	move	k1, sp
+1:
+	/*
+	 * No turning back, and nothing we can do.  Just call into
+	 * trap and let it tell the user lovely things about how bad
+	 * their cache has been,
+	 */
+	mfc0	a1, MIPS_COP_0_CAUSE
+	dmfc0	a2, MIPS_COP_0_BAD_VADDR
+	jal	trap
+	move	a0, k1
+
+	jal	exception_restore_registers
+	daddu	sp, sp, TF_SIZE
+	eret
+	.set at
+VEND(CacheVector)
+
+LEAF(TLBMissVector)
+	.set noat
+	j ExceptionVector
+	nop
+	.set at
+VEND(TLBMissVector)
+
+LEAF(XTLBMissVector)
+	.set noat
+	j ExceptionVector
+	nop
+	.set at
+VEND(XTLBMissVector)
+
 /*
  * Restore registers from a trapframe pointed to in k1, returning to ra
  * that is passed in, and kept in k0.
@@ -91,7 +129,7 @@
 	/*
 	 * Brief interlude.
 	 */
-	mtc0	a1, MIPS_COP_0_EXC_PC
+	dmtc0	a1, MIPS_COP_0_EXC_PC
 	mthi	a0
 	mtlo	v1
 	mtc0	v0, MIPS_COP_0_STATUS
@@ -148,7 +186,7 @@
 	mfc0	v0, MIPS_COP_0_STATUS
 	mflo	v1
 	mfhi	a0
-	mfc0	a1, MIPS_COP_0_EXC_PC
+	dmfc0	a1, MIPS_COP_0_EXC_PC
 
 	sd	t0, TF_REG_T0(k1)
 	sd	t1, TF_REG_T1(k1)

==== //depot/projects/mips/sys/mips/mips/machdep.c#27 (text+ko) ====

@@ -192,13 +192,21 @@
 vm_offset_t kstack0;
 vm_paddr_t kstack0_phys;
 
+vm_size_t physsz;
+
 static void cpu_identify(void);
 void cpu_startup(void *);
 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
 
+
 void
 mips_init(void)
 {
+	physmem = btoc(physsz);
+
+	mips_vector_init();
+	pmap_bootstrap();
+
 	proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
 	proc0.p_uarea = (struct user *)uarea0;
 	proc0.p_stats = &proc0.p_uarea->u_stats;
@@ -567,6 +575,7 @@
 const mips_locore_jumpvec_t mips64_locore_vec =
 {
 	mips64_SetPID,
+	mips64_TBIA,
 	mips64_TBIAP,
 	mips64_TBIS,
 	mips64_TLBUpdate,
@@ -580,50 +589,39 @@
 	extern char ExceptionVector[], ExceptionVectorEnd[];
 
 	/* TLB miss handler address and end */
-	extern char mips64_TLBMiss[], mips64_TLBMissEnd[];
-	extern char mips64_XTLBMiss[], mips64_XTLBMissEnd[];
+	extern char TLBMissVector[], TLBMissVectorEnd[];
+	extern char XTLBMissVector[], XTLBMissVectorEnd[];
 
 	/* Cache error handler */
-	extern char mips64_cache[], mips64_cacheEnd[];
+	extern char CacheVector[], CacheVectorEnd[];
 
-#if 0
-	/* MIPS32/MIPS64 interrupt exception handler */
-	extern char mips64_intr[], mips64_intrEnd[];
-#endif
-
 	/*
 	 * Copy down exception vector code.
 	 */
 
-	if (mips64_TLBMissEnd - mips64_TLBMiss > 0x80)
+	if (TLBMissVectorEnd - TLBMissVector > 0x80)
 		panic("startup: UTLB vector code too large");
-	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips64_TLBMiss,
-	      mips64_TLBMissEnd - mips64_TLBMiss);
+	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, TLBMissVector,
+	      TLBMissVectorEnd - TLBMissVector);
 
-	if (mips64_XTLBMissEnd - mips64_XTLBMiss > 0x80)
+	if (XTLBMissVectorEnd - XTLBMissVector > 0x80)
 		panic("startup: XTLB vector code too large");
-	memcpy((void *)MIPS3_XTLB_MISS_EXC_VEC, mips64_XTLBMiss,
-	      mips64_XTLBMissEnd - mips64_XTLBMiss);
+	memcpy((void *)MIPS3_XTLB_MISS_EXC_VEC, XTLBMissVector,
+	      XTLBMissVectorEnd - XTLBMissVector);
 
-	if (mips64_cacheEnd - mips64_cache > 0x80)
+	if (CacheVectorEnd - CacheVector > 0x80)
 		panic("startup: Cache error vector code too large");
-	memcpy((void *)MIPS3_CACHE_ERR_EXC_VEC, mips64_cache,
-	      mips64_cacheEnd - mips64_cache);
+	memcpy((void *)MIPS3_CACHE_ERR_EXC_VEC, CacheVector,
+	      CacheVectorEnd - CacheVector);
 
 	if (ExceptionVectorEnd - ExceptionVector > 0x80)
 		panic("startup: General exception vector code too large");
 	memcpy((void *)MIPS3_GEN_EXC_VEC, ExceptionVector,
 	      ExceptionVectorEnd - ExceptionVector);
 
-#if 0	/* XXX - why doesn't mipsNN_intr() work? */
-	if (mips64_intrEnd - mips64_intr > 0x80)
-		panic("startup: interrupt exception vector code too large");
-	memcpy((void *)MIPS3_INTR_EXC_VEC, mips64_intr,
-	      mips64_intrEnd - mips64_intr);
-#else
+	/* XXX do a real interrupt vector for the mips32/64? */
 	memcpy((void *)MIPS3_INTR_EXC_VEC, ExceptionVector,
 	      ExceptionVectorEnd - ExceptionVector);
-#endif
 
 	/*
 	 * Copy locore-function vector.
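
The copy stanzas above all follow one pattern: check that the handler
fits in the 0x80-byte vector slot, then memcpy it into place.  A
sketch of that pattern factored into a helper; install_vector is an
invented name, and the kernel keeps the explicit stanzas:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy vector code [start, end) into a 0x80-byte vector slot. */
static void
install_vector(void *dst, const char *start, const char *end,
    const char *name)
{
	if (end - start > 0x80) {
		fprintf(stderr, "startup: %s vector code too large\n", name);
		abort();		/* stands in for panic() */
	}
	memcpy(dst, start, (size_t)(end - start));
}

int
main(void)
{
	static char slot[0x80];
	static const char handler[4];	/* four zero bytes: MIPS nops */

	install_vector(slot, handler, handler + sizeof(handler), "UTLB");
	printf("installed %zu bytes\n", sizeof(handler));
	return (0);
}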

==== //depot/projects/mips/sys/mips/mips/mips_subr.S#6 (text+ko) ====

@@ -152,27 +152,6 @@
 #endif
 
 /*
- * VECTOR
- *	exception vector entrypoint
- *	XXX: regmask should be used to generate .mask
- */
-#define VECTOR(x, regmask)		\
-	.ent	x;		\
-	EXPORT(x);			\
-
-#ifdef __STDC__
-#define VECTOR_END(x)			\
-	EXPORT(x ## End);		\
-	END(x)
-#else
-#define VECTOR_END(x)			\
-	EXPORT(x/**/End);		\
-	END(x)
-#endif
-
-#define	_VECTOR_END(x)	VECTOR_END(x)
-
-/*
  * XXX We need a cleaner way of handling the instruction hazards of
  * the various processors.  Here are the relevant rules for the QED 52XX:
  *	tlbw[ri]	-- two integer ops beforehand
@@ -194,1258 +173,11 @@
  * for those CPU, define COP0_SYNC as sync.p
  */
 
-
-/*
- *============================================================================
- *
- *  MIPS III ISA support, part 1: locore exception vectors.
- *  The following code is copied to the vector locations to which
- *  the CPU jumps in response to an exception or a TLB miss.
- *
- *============================================================================
- */
 	.set	noreorder
 	.set	mips64
 
-/*
- * TLB handling data.   'segbase' points to the base of the segment
- * table.   this is read and written by C code in mips_machdep.c.
- *
- * XXX: use linear mapped PTs at fixed VA in kseg2 in the future?
- */
 	.text
 
-
-/*
- *----------------------------------------------------------------------------
- *
- * mips3_TLBMiss --
- *
- *	Vector code for the TLB-miss exception vector 0x80000000
- *	on an r4000.
- *
- * This code is copied to the TLB exception vector address to
- * handle TLB translation misses.
- * NOTE: This code should be relocatable and max 32 instructions!!!
- *
- * Don't check for invalid pte's here. We load them as well and
- * let the processor trap to load the correct value after service.
- *----------------------------------------------------------------------------
- */
-VECTOR(MIPSX(TLBMiss), unknown)
-	.set	noat
-	mfc0	k0, MIPS_COP_0_BAD_VADDR	#00: k0=bad address
-	lui	k1, %hi(segbase)		#01: k1=hi of segbase
-	bltz	k0, 4f				#02: k0<0 -> 4f (kernel fault)
-	srl	k0, 20				#03: k0=seg offset (almost)
-	lw	k1, %lo(segbase)(k1)		#04: k1=segment tab base
-	andi	k0, k0, 0xffc			#05: k0=seg offset (mask 0x3)
-	addu	k1, k0, k1			#06: k1=seg entry address
-	lw	k1, 0(k1)			#07: k1=seg entry
-	mfc0	k0, MIPS_COP_0_BAD_VADDR	#08: k0=bad address (again)
-	beq	k1, zero, 5f			#09: ==0 -- no page table
-	srl	k0, 10				#0a: k0=VPN (aka va>>10)
-	andi	k0, k0, 0xff8			#0b: k0=page tab offset
-	addu	k1, k1, k0			#0c: k1=pte address
-	lw	k0, 0(k1)			#0d: k0=lo0 pte
-	lw	k1, 4(k1)			#0e: k1=lo1 pte
-	sll	k0, 2				#0f: chop top 2 bits (part 1a)
-	srl	k0, 2				#10: chop top 2 bits (part 1b)
-	mtc0	k0, MIPS_COP_0_TLB_LO0		#11: lo0 is loaded
-	sll	k1, 2				#12: chop top 2 bits (part 2a)
-	srl	k1, 2				#13: chop top 2 bits (part 2b)
-	mtc0	k1, MIPS_COP_0_TLB_LO1		#14: lo1 is loaded
-	nop					#15: standard nop
-	nop					#16: extra nop for QED5230
-	tlbwr					#17: write to tlb
-	nop					#18: standard nop
-	nop					#19: needed by R4000/4400
-	nop					#1a: needed by R4000/4400
-	eret					#1b: return from exception
-4:	j MIPSX(TLBMissException)		#1c: kernel exception
-	nop					#1d: branch delay slot
-5:	j	slowfault			#1e: no page table present
-	nop					#1f: branch delay slot
-	.set	at
-_VECTOR_END(MIPSX(TLBMiss))
-
-/*
- * mips3_XTLBMiss routine
- *
- *	Vector code for the XTLB-miss exception vector 0x80000080 on an r4000.
- *
- * This code is copied to the XTLB exception vector address to
- * handle TLB translation misses while in 64-bit mode.
- * NOTE: This code should be relocatable and max 32 instructions!!!
- *
- * Note that we do not support the full size of the PTEs, relying
- * on appropriate truncation/sign extension.
- *
- * Don't check for invalid pte's here. We load them as well and
- * let the processor trap to load the correct value after service.
- */
-VECTOR(MIPSX(XTLBMiss), unknown)
-	.set	noat
-	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#00: k0=bad address
-	lui	k1, %hi(segbase)		#01: k1=hi of segbase
-	bltz	k0, 4f				#02: k0<0 -> 4f (kernel fault)
-	srl	k0, 20				#03: k0=seg offset (almost)
-	lw	k1, %lo(segbase)(k1)		#04: k1=segment tab base
-	andi	k0, k0, 0xffc			#05: k0=seg offset (mask 0x3)
-	addu	k1, k0, k1			#06: k1=seg entry address
-	lw	k1, 0(k1)			#07: k1=seg entry
-	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#08: k0=bad address (again)
-	beq	k1, zero, 5f			#09: ==0 -- no page table
-	srl	k0, 10				#0a: k0=VPN (aka va>>10)
-	andi	k0, k0, 0xff8			#0b: k0=page tab offset
-	addu	k1, k1, k0			#0c: k1=pte address
-	lw	k0, 0(k1)			#0d: k0=lo0 pte
-	lw	k1, 4(k1)			#0e: k1=lo1 pte
-	sll	k0, 2				#0f: chop top 2 bits (part 1a)
-	srl	k0, 2				#10: chop top 2 bits (part 1b)
-	mtc0	k0, MIPS_COP_0_TLB_LO0		#11: lo0 is loaded
-	sll	k1, 2				#12: chop top 2 bits (part 2a)
-	srl	k1, 2				#13: chop top 2 bits (part 2b)
-	mtc0	k1, MIPS_COP_0_TLB_LO1		#14: lo1 is loaded
-	nop					#15: standard nop
-	nop					#16: extra nop for QED5230
-	tlbwr					#17: write to tlb
-	nop					#18: standard nop
-	nop					#19: needed by R4000/4400
-	nop					#1a: needed by R4000/4400
-	eret					#1b: return from exception
-4:	j MIPSX(TLBMissException)		#1c: kernel exception
-	nop					#1d: branch delay slot
-5:	j	slowfault			#1e: no page table present
-	nop					#1f: branch delay slot
-	.set	at
-_VECTOR_END(MIPSX(XTLBMiss))
-
-/*
- * Vector to real handler in KSEG1.
- */
-VECTOR(MIPSX(cache), unknown)
-	la	k0, MIPSX(cacheException)
-	li	k1, MIPS_PHYS_MASK
-	and	k0, k1
-	li	k1, MIPS_KSEG1_START
-	or	k0, k1
-	j	k0
-	nop
-_VECTOR_END(MIPSX(cache))
-
-/*
- * Handle MIPS32/MIPS64 style interrupt exception vector.
- */
-VECTOR(MIPSX(intr), unknown)
-	la	k0, MIPSX(KernIntr)
-	j	k0
-	nop
-_VECTOR_END(MIPSX(intr))
-
-/*----------------------------------------------------------------------------
- *
- * slowfault --
- *
- * Alternate entry point into the mips3_UserGenException or
- * or mips3_user_Kern_exception, when the ULTB miss handler couldn't
- * find a TLB entry.
- *
- * Find out what mode we came from and call the appropriate handler.
- *
- *----------------------------------------------------------------------------
- */
-
-/*
- * We couldn't find a TLB entry.
- * Find out what mode we came from and call the appropriate handler.
- */
-slowfault:
-	.set	noat
-	mfc0	k0, MIPS_COP_0_STATUS
-	nop
-	and	k0, k0, MIPS3_SR_KSU_USER
-	bne	k0, zero, MIPSX(UserGenException)
-	nop
-	.set	at
-/*
- * Fall though ...
- */
-
-/*
- * mips3_KernGenException
- *
- * Handle an exception from kernel mode.
- * Build trapframe on stack to hold interrupted kernel context, then
- * call trap() to process the condition.
- *
- * trapframe is pointed to by the 5th arg
- * and a dummy sixth argument is used to avoid alignment problems
- *	{
- *	register_t cf_args[4 + 1];
- *	register_t cf_pad;		(for 8 word alignment)
- *	register_t cf_sp;
- *	register_t cf_ra;
- *	mips_reg_t kf_regs[17];		- trapframe begins here
- * 	mips_reg_t kf_sr;		-
- * 	mips_reg_t kf_mullo;		-
- * 	mips_reg_t kf_mulhi;		-
- * 	mips_reg_t kf_epc;		- may be changed by trap() call
- * };
- */
-NESTED_NOPROFILE(MIPSX(KernGenException), KERNFRAME_SIZ, ra)
-	.set	noat
-	.mask	0x80000000, -4
-#if defined(DDB) || defined(KGDB)
-	la	k0, kdbaux
-	REG_S	s0, SF_REG_S0(k0)
-	REG_S	s1, SF_REG_S1(k0)
-	REG_S	s2, SF_REG_S2(k0)
-	REG_S	s3, SF_REG_S3(k0)
-	REG_S	s4, SF_REG_S4(k0)
-	REG_S	s5, SF_REG_S5(k0)
-	REG_S	s6, SF_REG_S6(k0)
-	REG_S	s7, SF_REG_S7(k0)
-	REG_S	sp, SF_REG_SP(k0)
-	REG_S	s8, SF_REG_S8(k0)
-	REG_S	gp, SF_REG_RA(k0)
-#endif
-/*
- * Save the relevant kernel registers onto the stack.
- * We don't need to save s0 - s8, sp and gp because
- * the compiler does it for us.
- */
-	subu	sp, sp, KERNFRAME_SIZ
-	REG_S	AT, TF_BASE+TF_REG_AST(sp)
-	REG_S	v0, TF_BASE+TF_REG_V0(sp)
-	REG_S	v1, TF_BASE+TF_REG_V1(sp)
-	mflo	v0
-	mfhi	v1
-	REG_S	a0, TF_BASE+TF_REG_A0(sp)
-	REG_S	a1, TF_BASE+TF_REG_A1(sp)
-	REG_S	a2, TF_BASE+TF_REG_A2(sp)
-	REG_S	a3, TF_BASE+TF_REG_A3(sp)
-	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
-	REG_S	t0, TF_BASE+TF_REG_T0(sp)
-	REG_S	t1, TF_BASE+TF_REG_T1(sp)
-	REG_S	t2, TF_BASE+TF_REG_T2(sp)
-	REG_S	t3, TF_BASE+TF_REG_T3(sp)
-	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
-	REG_S	ta0, TF_BASE+TF_REG_TA0(sp)
-	REG_S	ta1, TF_BASE+TF_REG_TA1(sp)
-	REG_S	ta2, TF_BASE+TF_REG_TA2(sp)
-	REG_S	ta3, TF_BASE+TF_REG_TA3(sp)
-	mfc0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
-	REG_S	t8, TF_BASE+TF_REG_T8(sp)
-	REG_S	t9, TF_BASE+TF_REG_T9(sp)
-	REG_S	ra, TF_BASE+TF_REG_RA(sp)
-	REG_S	a0, TF_BASE+TF_REG_SR(sp)
-	mfc0	a3, MIPS_COP_0_EXC_PC		# 4th arg is exception PC
-	REG_S	v0, TF_BASE+TF_REG_MULLO(sp)
-	REG_S	v1, TF_BASE+TF_REG_MULHI(sp)
-	REG_S	a3, TF_BASE+TF_REG_EPC(sp)
-	addu	v0, sp, TF_BASE
-	sw	v0, KERNFRAME_ARG5(sp)		# 5th arg is p. to trapframe
-#ifdef IPL_ICU_MASK
-	.set at
-	lw	v0, md_imask
-	sw	v0, TF_BASE+TF_PPL(sp)
-	nop
-	.set noat
-#endif
-/*
- * Call the trap handler.
- */
-#if defined(DDB) || defined(DEBUG) || defined(KGDB)
-	addu	v0, sp, KERNFRAME_SIZ
-	sw	v0, KERNFRAME_SP(sp)
-#endif
-	mtc0	zero, MIPS_COP_0_STATUS		# Set kernel no error level
-	COP0_SYNC
-	nop
-	nop
-	nop
-	PRINTF("OMG\n");
-	jal	trap				#
-	sw	a3, KERNFRAME_RA(sp)		# for debugging
-
-/*
- * Restore registers and return from the exception.
- */
-	mtc0	zero, MIPS_COP_0_STATUS		# Make sure int disabled
-	COP0_SYNC
-	nop					# 3 nop delay
-	nop
-	nop
-#ifdef IPL_ICU_MASK
-	.set at
-	lw	a0, TF_BASE+TF_PPL(sp)
-	sw	a0, md_imask
-	jal	md_imask_update
-	nop
-	.set noat
-#endif
-	REG_L	a0, TF_BASE+TF_REG_SR(sp)	# ??? why differs ???
-	REG_L	t0, TF_BASE+TF_REG_MULLO(sp)
-	REG_L	t1, TF_BASE+TF_REG_MULHI(sp)
-	REG_L	k0, TF_BASE+TF_REG_EPC(sp)	# might be changed inside trap
-	mtc0	a0, MIPS_COP_0_STATUS		# restore the SR, disable intrs
-	COP0_SYNC
-	mtlo	t0
-	mthi	t1
-	_MTC0	k0, MIPS_COP_0_EXC_PC		# set return address
-	COP0_SYNC
-	REG_L	AT, TF_BASE+TF_REG_AST(sp)
-	REG_L	v0, TF_BASE+TF_REG_V0(sp)
-	REG_L	v1, TF_BASE+TF_REG_V1(sp)
-	REG_L	a0, TF_BASE+TF_REG_A0(sp)
-	REG_L	a1, TF_BASE+TF_REG_A1(sp)
-	REG_L	a2, TF_BASE+TF_REG_A2(sp)
-	REG_L	a3, TF_BASE+TF_REG_A3(sp)
-	REG_L	t0, TF_BASE+TF_REG_T0(sp)
-	REG_L	t1, TF_BASE+TF_REG_T1(sp)
-	REG_L	t2, TF_BASE+TF_REG_T2(sp)
-	REG_L	t3, TF_BASE+TF_REG_T3(sp)
-	REG_L	ta0, TF_BASE+TF_REG_TA0(sp)
-	REG_L	ta1, TF_BASE+TF_REG_TA1(sp)
-	REG_L	ta2, TF_BASE+TF_REG_TA2(sp)
-	REG_L	ta3, TF_BASE+TF_REG_TA3(sp)
-	REG_L	t8, TF_BASE+TF_REG_T8(sp)
-	REG_L	t9, TF_BASE+TF_REG_T9(sp)
-	REG_L	ra, TF_BASE+TF_REG_RA(sp)
-	addu	sp, sp, KERNFRAME_SIZ
-#ifdef DDBnotyet
-	la	k0, kdbaux
-	REG_L	s0, SF_REG_S0(k0)
-	REG_L	s1, SF_REG_S1(k0)
-	REG_L	s2, SF_REG_S2(k0)
-	REG_L	s3, SF_REG_S3(k0)
-	REG_L	s4, SF_REG_S4(k0)
-	REG_L	s5, SF_REG_S5(k0)
-	REG_L	s6, SF_REG_S6(k0)
-	REG_L	s7, SF_REG_S7(k0)
-	REG_L	sp, SF_REG_SP(k0)
-	REG_L	s8, SF_REG_S8(k0)
-	REG_L	gp, SF_REG_RA(k0)
-#endif
-	eret					# return to interrupted point
-	.set	at
-END(MIPSX(KernGenException))
-
-/*
- * mipsN_UserGenException
- *
- * Handle an exception from user mode.
- * Save user context atop the kernel stack, then call trap() to process
- * the condition.  The context can be manipulated alternatively via
- * curlwp->p_md.md_regs.
- */
-NESTED_NOPROFILE(MIPSX(UserGenException), CALLFRAME_SIZ, ra)
-	.set	noat
-	.mask	0x80000000, -4
-/*
- * Save all of the registers except for the kernel temporaries in u_pcb.
- */
- 	ld	k1, pcpup
-	ld	k1, PC_CURPCB(k1)
-	#nop					# -slip-
-	addu	k1, k1, USPACE - FRAME_SIZ
-	REG_S	AT, FRAME_AST(k1)
-	REG_S	v0, FRAME_V0(k1)
-	REG_S	v1, FRAME_V1(k1)
-	mflo	v0
-	REG_S	a0, FRAME_A0(k1)
-	REG_S	a1, FRAME_A1(k1)
-	REG_S	a2, FRAME_A2(k1)
-	REG_S	a3, FRAME_A3(k1)
-	mfhi	v1
-	REG_S	t0, FRAME_T0(k1)
-	REG_S	t1, FRAME_T1(k1)
-	REG_S	t2, FRAME_T2(k1)
-	REG_S	t3, FRAME_T3(k1)
-	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
-	REG_S	ta0, FRAME_TA0(k1)
-	REG_S	ta1, FRAME_TA1(k1)
-	REG_S	ta2, FRAME_TA2(k1)
-	REG_S	ta3, FRAME_TA3(k1)
-	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
-	REG_S	s0, FRAME_S0(k1)
-	REG_S	s1, FRAME_S1(k1)
-	REG_S	s2, FRAME_S2(k1)
-	REG_S	s3, FRAME_S3(k1)
-	_MFC0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
-	REG_S	s4, FRAME_S4(k1)
-	REG_S	s5, FRAME_S5(k1)
-	REG_S	s6, FRAME_S6(k1)
-	REG_S	s7, FRAME_S7(k1)
-	_MFC0	a3, MIPS_COP_0_EXC_PC		# 4th arg is exception PC
-	REG_S	t8, FRAME_T8(k1)
-	REG_S	t9, FRAME_T9(k1)
-	REG_S	gp, FRAME_GP(k1)
-	REG_S	sp, FRAME_SP(k1)
-	REG_S	s8, FRAME_S8(k1)
-	REG_S	ra, FRAME_RA(k1)
-	REG_S	a0, FRAME_SR(k1)
-	REG_S	v0, FRAME_MULLO(k1)
-	REG_S	v1, FRAME_MULHI(k1)
-	REG_S	a3, FRAME_EPC(k1)
-#ifdef IPL_ICU_MASK
-	.set at
-	lw	t0, md_imask
-	sw	t0, FRAME_PPL(k1)
-	.set noat
-#endif
-	addu	sp, k1, -CALLFRAME_SIZ	# switch to kernel SP
-#ifdef __GP_SUPPORT__
-	la	gp, _gp		# switch to kernel GP
-#endif
-/*
- * Turn off fpu and enter kernel mode
- */
-	.set	at
-	and	t0, a0, ~(MIPS_SR_COP_1_BIT | MIPS_SR_EXL | MIPS_SR_KSU_MASK | MIPS_SR_INT_IE)
-	.set	noat
-/*
- * Call the trap handler.
- */
-	mtc0	t0, MIPS_COP_0_STATUS
-	COP0_SYNC
-	jal	trap
-	sw	a3, CALLFRAME_SIZ-4(sp)		# for debugging
-/*
- * Check pending asynchronous traps.
- */
- 	ld	t0, pcpup
- 	ld	t0, PC_CURTHREAD(t0)
-	lw	t0, TD_FLAGS(t0)
-	and	t0, TDF_ASTPENDING
-	beq	t0, zero, 1f
-	nop
-/*
- * We have pending asynchronous traps; all the state is already saved.
- */
-	jal	ast
-	lw	a0, CALLFRAME_SIZ + FRAME_EPC(sp)
-1:
-/*
- * Restore user registers and return.
- * First disable interrupts and set exception level.
- */
-	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupt
-	COP0_SYNC
-	nop					# 3 clock delay before
-	nop					# exceptions blocked
-	nop					# for R4X
-	li	v0, MIPS_SR_EXL
-	mtc0	v0, MIPS_COP_0_STATUS		# set exception level
-	COP0_SYNC
-	nop					# 3 nop delay
-	nop
-	nop

>>> TRUNCATED FOR MAIL (1000 lines) <<<


