Date:      Mon, 28 Apr 2003 16:40:52 -0700 (PDT)
From:      Peter Wemm <peter@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 30012 for review
Message-ID:  <200304282340.h3SNeq6M001079@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=30012

Change 30012 by peter@peter_overcee on 2003/04/28 16:40:47

	integrate i386_hammer branch.

Affected files ...

.. //depot/projects/hammer/sys/x86_64/include/pmap.h#19 integrate
.. //depot/projects/hammer/sys/x86_64/x86_64/machdep.c#87 edit
.. //depot/projects/hammer/sys/x86_64/x86_64/pmap.c#25 edit
.. //depot/projects/hammer/sys/x86_64/x86_64/sys_machdep.c#11 integrate

Differences ...

==== //depot/projects/hammer/sys/x86_64/include/pmap.h#19 (text+ko) ====

@@ -42,7 +42,7 @@
  *
  *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
  *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
- * $FreeBSD: src/sys/i386/include/pmap.h,v 1.98 2003/04/08 18:22:41 jake Exp $
+ * $FreeBSD: src/sys/i386/include/pmap.h,v 1.99 2003/04/28 20:35:36 jake Exp $
  */
 
 #ifndef _MACHINE_PMAP_H_
@@ -190,9 +190,33 @@
 
 #define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
 
+static __inline pt_entry_t
+pte_load(pt_entry_t *ptep)
+{
+	pt_entry_t r;
+
+	r = *ptep;
+	return (r);
+}
+
+static __inline pt_entry_t
+pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
+{
+	pt_entry_t r;
+
+	r = *ptep;
+	*ptep = pte;
+	return (r);
+}
+
 #define	pte_load_clear(pte)	atomic_readandclear_long(pte)
 
-#endif
+#define	pte_clear(ptep)		pte_load_store((ptep), (pt_entry_t)0ULL)
+#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)
+
+#define	pde_store(pdep, pde)	pte_store((pdep), (pde))
+
+#endif /* _KERNEL */
 
 /*
  * Pmap stuff
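
For reference, a minimal userland sketch of the accessor pattern added above: every PTE update funnels through pte_load_store(), so call sites write a whole new entry instead of poking bits through a raw pointer. The pt_entry_t typedef, the PG_* values and main() below are illustrative stand-ins, not the kernel definitions.

	/*
	 * Sketch of the pte accessor pattern; compiles and runs standalone.
	 * Everything here is a simplified stand-in for illustration only.
	 */
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pt_entry_t;

	#define	PG_V	0x001ULL	/* valid (illustrative value) */
	#define	PG_RW	0x002ULL	/* writable (illustrative value) */

	static inline pt_entry_t
	pte_load(pt_entry_t *ptep)
	{
		return (*ptep);
	}

	static inline pt_entry_t
	pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
	{
		pt_entry_t r;

		r = *ptep;		/* return the previous contents */
		*ptep = pte;		/* install the new entry */
		return (r);
	}

	#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)(pte))
	#define	pte_clear(ptep)		pte_load_store((ptep), (pt_entry_t)0ULL)

	int
	main(void)
	{
		pt_entry_t pte = 0;

		pte_store(&pte, 0x200000ULL | PG_RW | PG_V);	/* "map" */
		printf("pte = %#llx\n", (unsigned long long)pte_load(&pte));
		pte_clear(&pte);				/* "unmap" */
		printf("pte = %#llx\n", (unsigned long long)pte_load(&pte));
		return (0);
	}

Funnelling the stores through one inline means an atomic or otherwise specialised store could later be substituted without touching the call sites.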

==== //depot/projects/hammer/sys/x86_64/x86_64/machdep.c#87 (text+ko) ====

@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
- * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.562 2003/04/18 20:09:03 jhb Exp $
+ * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.563 2003/04/25 01:50:28 deischen Exp $
  */
 
 #include "opt_atalk.h"
@@ -105,8 +105,8 @@
 #endif
 #include <machine/tss.h>
 
-#include <x86_64/isa/icu.h>
-#include <x86_64/isa/intr_machdep.h>
+#include <amd64/isa/icu.h>
+#include <amd64/isa/intr_machdep.h>
 #include <isa/rtc.h>
 #include <sys/ptrace.h>
 #include <machine/sigframe.h>
@@ -546,7 +546,7 @@
 
 static char dblfault_stack[PAGE_SIZE] __aligned(16);
 
-struct x86_64tss common_tss;
+struct amd64tss common_tss;
 
 /* software prototypes -- in more palatable form */
 struct soft_segment_descriptor gdt_segs[] = {
@@ -607,7 +607,7 @@
 /* GPROC0_SEL	6 Proc 0 Tss Descriptor */
 {
 	0x0,			/* segment base address */
-	sizeof(struct x86_64tss)-1,/* length - all address space */
+	sizeof(struct amd64tss)-1,/* length - all address space */
 	SDT_SYSTSS,		/* segment type */
 	SEL_KPL,		/* segment descriptor priority level */
 	1,			/* segment descriptor present */
@@ -1454,7 +1454,7 @@
  * Get machine context.
  */
 int
-get_mcontext(struct thread *td, mcontext_t *mcp)
+get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
 {
 	struct trapframe *tp;
 
@@ -1475,9 +1475,14 @@
 	mcp->mc_rsi = tp->tf_rsi;
 	mcp->mc_rbp = tp->tf_rbp;
 	mcp->mc_rbx = tp->tf_rbx;
-	mcp->mc_rdx = tp->tf_rdx;
 	mcp->mc_rcx = tp->tf_rcx;
-	mcp->mc_rax = tp->tf_rax;
+	if (clear_ret != 0) {
+		mcp->mc_rax = 0;
+		mcp->mc_rdx = 0;
+	} else {
+		mcp->mc_rax = tp->tf_rax;
+		mcp->mc_rdx = tp->tf_rdx;
+	}
 	mcp->mc_rip = tp->tf_rip;
 	mcp->mc_cs = tp->tf_cs;
 	mcp->mc_rflags = tp->tf_rflags;
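
The get_mcontext() change adds a clear_ret argument; when it is non-zero the saved return registers (rax/rdx) are zeroed in the context rather than copied from the trapframe, presumably so a context captured for getcontext(2)-style use resumes as if the call returned 0. A compilable stand-alone sketch of just that conditional follows; the structures are simplified stand-ins, not the kernel's trapframe/mcontext_t.

	/* Illustrative stand-in for the clear_ret logic above. */
	#include <assert.h>
	#include <stdint.h>

	struct tf { uint64_t tf_rax, tf_rdx, tf_rcx; };
	struct mc { uint64_t mc_rax, mc_rdx, mc_rcx; };

	static void
	get_mcontext_sketch(const struct tf *tp, struct mc *mcp, int clear_ret)
	{
		mcp->mc_rcx = tp->tf_rcx;
		if (clear_ret != 0) {
			/* Hand back a context whose "return value" is 0. */
			mcp->mc_rax = 0;
			mcp->mc_rdx = 0;
		} else {
			mcp->mc_rax = tp->tf_rax;
			mcp->mc_rdx = tp->tf_rdx;
		}
	}

	int
	main(void)
	{
		struct tf tf = { .tf_rax = 42, .tf_rdx = 7, .tf_rcx = 1 };
		struct mc mc;

		get_mcontext_sketch(&tf, &mc, 1);
		assert(mc.mc_rax == 0 && mc.mc_rdx == 0 && mc.mc_rcx == 1);

		get_mcontext_sketch(&tf, &mc, 0);
		assert(mc.mc_rax == 42 && mc.mc_rdx == 7);
		return (0);
	}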

==== //depot/projects/hammer/sys/x86_64/x86_64/pmap.c#25 (text+ko) ====

@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
- * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.403 2003/04/03 23:44:35 jake Exp $
+ * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.405 2003/04/28 20:35:36 jake Exp $
  */
 /*-
  * Copyright (c) 2003 Networks Associates Technology, Inc.
@@ -532,7 +532,7 @@
 			*PMAP1 = newpf | PG_RW | PG_V;
 			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR1);
 		}
-		return PADDR1 + (x86_64_btop(va) & (NPTEPG - 1));
+		return PADDR1 + (amd64_btop(va) & (NPTEPG - 1));
 	}
 	return (0);
 }
@@ -582,7 +582,7 @@
 	pt_entry_t *pte;
 
 	pte = vtopte(va);
-	*pte = pa | PG_RW | PG_V | PG_G;
+	pte_store(pte, pa | PG_RW | PG_V | PG_G);
 }
 
 /*
@@ -595,7 +595,7 @@
 	pt_entry_t *pte;
 
 	pte = vtopte(va);
-	*pte = 0;
+	pte_clear(pte);
 }
 
 /*
@@ -917,7 +917,7 @@
 			 * Do an invltlb to make the invalidated mapping
 			 * take effect immediately.
 			 */
-			pteva = VM_MAXUSER_ADDRESS + x86_64_ptob(m->pindex);
+			pteva = VM_MAXUSER_ADDRESS + amd64_ptob(m->pindex);
 			pmap_invalidate_page(pmap, pteva);
 		}
 
@@ -1143,7 +1143,7 @@
 	 */
 	if ((m->flags & PG_ZERO) == 0) {
 		if (pmap_is_current(pmap)) {
-			pteva = VM_MAXUSER_ADDRESS + x86_64_ptob(ptepindex);
+			pteva = VM_MAXUSER_ADDRESS + amd64_ptob(ptepindex);
 			bzero((caddr_t) pteva, PAGE_SIZE);
 		} else {
 			pmap_zero_page(m);
@@ -1173,7 +1173,7 @@
 	/*
 	 * Get the page directory entry
 	 */
-	ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
+	ptepa = pmap->pm_pdir[ptepindex];
 
 	/*
 	 * This supports switching from a 2MB page to a
@@ -1298,6 +1298,7 @@
 	vm_paddr_t ptppaddr;
 	vm_page_t nkpg;
 	pd_entry_t newpdir;
+	pt_entry_t *pde;
 
 	s = splhigh();
 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
@@ -1333,7 +1334,8 @@
 
 		mtx_lock_spin(&allpmaps_lock);
 		LIST_FOREACH(pmap, &allpmaps, pm_list) {
-			*pmap_pde(pmap, kernel_vm_end) = newpdir;
+			pde = pmap_pde(pmap, kernel_vm_end);
+			pde_store(pde, newpdir);
 		}
 		mtx_unlock_spin(&allpmaps_lock);
 		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
@@ -1728,7 +1730,7 @@
 			pbits &= ~PG_RW;
 
 			if (pbits != *pte) {
-				*pte = pbits;
+				pte_store(pte, pbits);
 				anychanged = 1;
 			}
 		}
@@ -1837,7 +1839,7 @@
 
 		if ((prot & VM_PROT_WRITE) && (origpte & PG_V)) {
 			if ((origpte & PG_RW) == 0) {
-				*pte |= PG_RW;
+				pte_store(pte, origpte | PG_RW);
 				pmap_invalidate_page(pmap, va);
 			}
 			return;
@@ -1906,7 +1908,7 @@
 	 * to update the pte.
 	 */
 	if ((origpte & ~(PG_M|PG_A)) != newpte) {
-		*pte = newpte | PG_A;
+		pte_store(pte, newpte | PG_A);
 		/*if (origpte)*/ {
 			pmap_invalidate_page(pmap, va);
 		}
@@ -2010,9 +2012,9 @@
 	 * Now validate mapping with RO protection
 	 */
 	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
-		*pte = pa | PG_V | PG_U;
+		pte_store(pte, pa | PG_V | PG_U);
 	else
-		*pte = pa | PG_V | PG_U | PG_MANAGED;
+		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 
 	return mpte;
 }
@@ -2101,8 +2103,8 @@
 		pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
 		npdes = size >> PDRSHIFT;
 		for(i = 0; i < npdes; i++) {
-			pmap->pm_pdir[ptepindex] =
-			    ptepa | PG_U | PG_RW | PG_V | PG_PS;
+			pde_store(&pmap->pm_pdir[ptepindex],
+			    ptepa | PG_U | PG_RW | PG_V | PG_PS);
 			ptepa += NBPDR;
 			ptepindex += 1;
 		}
@@ -2110,7 +2112,7 @@
 		return;
 	}
 
-	psize = x86_64_btop(size);
+	psize = amd64_btop(size);
 
 	if ((object->type != OBJT_VNODE) ||
 	    ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
@@ -2158,7 +2160,7 @@
 			vm_page_busy(p);
 			vm_page_unlock_queues();
 			mpte = pmap_enter_quick(pmap, 
-				addr + x86_64_ptob(tmpidx), p, mpte);
+				addr + amd64_ptob(tmpidx), p, mpte);
 			vm_page_lock_queues();
 			vm_page_wakeup(p);
 		}
@@ -2584,7 +2586,7 @@
 
 		pv->pv_pmap->pm_stats.resident_count--;
 
-		*pte = 0;
+		pte_clear(pte);
 
 		/*
 		 * Update the vm_page_t clean and reference bits.
@@ -2699,9 +2701,9 @@
 					if (pbits & PG_M) {
 						vm_page_dirty(m);
 					}
-					*pte = pbits & ~(PG_M|PG_RW);
+					pte_store(pte, pbits & ~(PG_M|PG_RW));
 				} else {
-					*pte = pbits & ~bit;
+					pte_store(pte, pbits & ~bit);
 				}
 				pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
 			}
@@ -2746,6 +2748,7 @@
 {
 	register pv_entry_t pv, pvf, pvn;
 	pt_entry_t *pte;
+	pt_entry_t v;
 	int s;
 	int rtval = 0;
 
@@ -2770,9 +2773,8 @@
 
 			pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
 
-			if (pte && (*pte & PG_A)) {
-				*pte &= ~PG_A;
-
+			if (pte && ((v = pte_load(pte)) & PG_A) != 0) {
+				pte_store(pte, v & ~PG_A);
 				pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
 
 				rtval++;
@@ -2885,7 +2887,7 @@
 	size = roundup(offset + size, PAGE_SIZE);
 	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
 		pte = vtopte(tmpva);
-		*pte = 0;
+		pte_clear(pte);
 	}
 	pmap_invalidate_range(kernel_pmap, va, tmpva);
 	kmem_free(kernel_map, base, size);
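
Most of the pmap.c churn is mechanical: raw "*pte = ..." writes become pte_store()/pte_clear()/pde_store(), and the x86_64_btop/x86_64_ptob macros become amd64_btop/amd64_ptob. The one slightly different spot is pmap_ts_referenced(), where "*pte &= ~PG_A" becomes a load into a local followed by a store of the full new value. A self-contained sketch of that load/test/store shape; pt_entry_t, PG_A and invalidate_page() here are illustrative stand-ins.

	#include <assert.h>
	#include <stdint.h>

	typedef uint64_t pt_entry_t;
	#define	PG_A	0x020ULL	/* accessed bit (illustrative value) */

	static pt_entry_t pte_load(pt_entry_t *p) { return (*p); }
	static void pte_store(pt_entry_t *p, pt_entry_t v) { *p = v; }
	static void invalidate_page(uintptr_t va) { (void)va; /* TLB shootdown stub */ }

	/* Return 1 and clear PG_A if the mapping at 'va' had been referenced. */
	static int
	test_and_clear_referenced(pt_entry_t *pte, uintptr_t va)
	{
		pt_entry_t v;

		if (pte != NULL && ((v = pte_load(pte)) & PG_A) != 0) {
			pte_store(pte, v & ~PG_A);	/* one write of the new value */
			invalidate_page(va);
			return (1);
		}
		return (0);
	}

	int
	main(void)
	{
		pt_entry_t pte = 0x1000ULL | PG_A;

		assert(test_and_clear_referenced(&pte, 0x7f0000) == 1);
		assert((pte & PG_A) == 0);
		assert(test_and_clear_referenced(&pte, 0x7f0000) == 0);
		return (0);
	}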

==== //depot/projects/hammer/sys/x86_64/x86_64/sys_machdep.c#11 (text+ko) ====

@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
- * $FreeBSD: src/sys/i386/i386/sys_machdep.c,v 1.80 2003/04/11 14:45:07 davidxu Exp $
+ * $FreeBSD: src/sys/i386/i386/sys_machdep.c,v 1.81 2003/04/25 20:04:02 jhb Exp $
  *
  */
 
@@ -53,12 +53,14 @@
 	struct thread *td;
 	register struct sysarch_args *uap;
 {
-	int error = 0;
+	int error;
 
+	mtx_lock(&Giant);
 	switch(uap->op) {
 	default:
-		error = EOPNOTSUPP;
+		error = EINVAL;
 		break;
 	}
+	mtx_unlock(&Giant);
 	return (error);
 }
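
The sysarch() change takes Giant around the op dispatch and switches the unknown-op error from EOPNOTSUPP to EINVAL. A userland analogue of that shape, with a pthread mutex standing in for Giant (purely illustrative):

	#include <assert.h>
	#include <errno.h>
	#include <pthread.h>

	static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;

	static int
	sysarch_sketch(int op)
	{
		int error;

		pthread_mutex_lock(&giant);
		switch (op) {
		default:
			error = EINVAL;		/* unknown op: invalid argument */
			break;
		}
		pthread_mutex_unlock(&giant);
		return (error);
	}

	int
	main(void)
	{
		assert(sysarch_sketch(12345) == EINVAL);
		return (0);
	}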


