Date:      Thu, 11 Sep 2003 16:53:44 -0700 (PDT)
From:      Peter Wemm <peter@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 37948 for review
Message-ID:  <200309112353.h8BNri64062266@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=37948

Change 37948 by peter@peter_work on 2003/09/11 16:53:27

	Change pmap->pm_active to an int.  The MI cpumask is an int,
	so keeping pm_active as a long only creates extra work in
	places like cpu_switch and complicates the lazy TLB shootdown
	code.
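
	For context, here is a minimal user-space sketch (not the committed
	kernel code) of what the type change implies: with pm_active sized
	as a u_int it matches the MI cpumask width, so the per-CPU bit can
	be set and cleared with int-sized operations (atomic_set_int /
	atomic_clear_int in C, btsl/btrl in cpu_switch).  The struct and
	function names below are illustrative only.

#include <stdio.h>

typedef unsigned int u_int;		/* MI cpumask width */

struct pmap_sketch {
	u_int	pm_active;		/* one bit per CPU using this pmap */
};

/* Move this CPU's bit from the old pmap to the new one. */
static void
switch_pmap(struct pmap_sketch *oldpm, struct pmap_sketch *newpm, int cpuid)
{
	u_int mask = 1u << cpuid;	/* analogous to PCPU_GET(cpumask) */

	oldpm->pm_active &= ~mask;	/* kernel: atomic_clear_int / btrl */
	newpm->pm_active |= mask;	/* kernel: atomic_set_int / btsl */
}

int
main(void)
{
	struct pmap_sketch a = { 0x8 }, b = { 0 };

	switch_pmap(&a, &b, 3);
	printf("a=%#x b=%#x\n", a.pm_active, b.pm_active);
	return (0);
}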

Affected files ...

.. //depot/projects/hammer/sys/amd64/amd64/cpu_switch.S#9 edit
.. //depot/projects/hammer/sys/amd64/amd64/mpboot.s#3 edit
.. //depot/projects/hammer/sys/amd64/amd64/pmap.c#33 edit
.. //depot/projects/hammer/sys/amd64/include/pcpu.h#9 edit
.. //depot/projects/hammer/sys/amd64/include/pmap.h#19 edit

Differences ...

==== //depot/projects/hammer/sys/amd64/amd64/cpu_switch.S#9 (text+ko) ====

@@ -58,7 +58,6 @@
  * %rsi = newtd
  */
 ENTRY(cpu_throw)
-	xorq	%rax, %rax
 	movl	PCPU(CPUID), %eax
 	testq	%rdi,%rdi			/* no thread? */
 	jz	1f
@@ -68,7 +67,7 @@
 #ifdef SMP
 	lock
 #endif
-	btrq	%rax, VM_PMAP+PM_ACTIVE(%rdx)	/* clear old */
+	btrl	%eax, VM_PMAP+PM_ACTIVE(%rdx)	/* clear old */
 1:
 	movq	TD_PCB(%rsi),%rdx		/* newtd->td_proc */
 	movq	PCB_CR3(%rdx),%rdx
@@ -79,7 +78,7 @@
 #ifdef SMP
 	lock
 #endif
-	btsq	%rax, VM_PMAP+PM_ACTIVE(%rdx)	/* set new */
+	btsl	%eax, VM_PMAP+PM_ACTIVE(%rdx)	/* set new */
 	jmp	sw1
 
 /*
@@ -148,7 +147,6 @@
 	jz	badsw3				/* no, panic */
 #endif
 	movq	TD_PCB(%rsi),%r8
-	xorq	%rax, %rax
 	movl	PCPU(CPUID), %eax
 
 	/* switch address space */
@@ -161,7 +159,7 @@
 #ifdef SMP
 	lock
 #endif
-	btrq	%rax, VM_PMAP+PM_ACTIVE(%rdx)	/* clear old */
+	btrl	%eax, VM_PMAP+PM_ACTIVE(%rdx)	/* clear old */
 
 	/* Set bit in new pmap->pm_active */
 	movq	TD_PROC(%rsi),%rdx		/* newproc */
@@ -169,7 +167,7 @@
 #ifdef SMP
 	lock
 #endif
-	btsq	%rax, VM_PMAP+PM_ACTIVE(%rdx)	/* set new */
+	btsl	%eax, VM_PMAP+PM_ACTIVE(%rdx)	/* set new */
 
 sw1:
 	/*

==== //depot/projects/hammer/sys/amd64/amd64/mpboot.s#3 (text+ko) ====

@@ -42,8 +42,6 @@
 
 #include "assym.s"
 
-#define	R(x)	((x)-KERNBASE)
-
 /*
  * the APs enter here from their trampoline code (bootMP, below)
  */

==== //depot/projects/hammer/sys/amd64/amd64/pmap.c#33 (text+ko) ====

@@ -698,7 +698,7 @@
 	u_int other_cpus;
 
 	if (smp_started) {
-		if (!(read_eflags() & PSL_I))
+		if (!(read_rflags() & PSL_I))
 			panic("%s: interrupts disabled", __func__);
 		mtx_lock_spin(&smp_tlb_mtx);
 	} else
@@ -734,7 +734,7 @@
 	vm_offset_t addr;
 
 	if (smp_started) {
-		if (!(read_eflags() & PSL_I))
+		if (!(read_rflags() & PSL_I))
 			panic("%s: interrupts disabled", __func__);
 		mtx_lock_spin(&smp_tlb_mtx);
 	} else
@@ -772,7 +772,7 @@
 	u_int other_cpus;
 
 	if (smp_started) {
-		if (!(read_eflags() & PSL_I))
+		if (!(read_rflags() & PSL_I))
 			panic("%s: interrupts disabled", __func__);
 		mtx_lock_spin(&smp_tlb_mtx);
 	} else
@@ -1368,7 +1368,7 @@
  * trying to dispose of.  This can be a bit hairy.
  */
 static u_int *lazymask;
-static u_int lazyptd;
+static register_t lazyptd;
 static volatile u_int lazywait;
 
 void pmap_lazyfix_action(void);
@@ -1405,11 +1405,7 @@
 		spins = 50000000;
 		mask = mask & -mask;	/* Find least significant set bit */
 		mtx_lock_spin(&lazypmap_lock);
-#ifdef PAE
-		lazyptd = vtophys(pmap->pm_pdpt);
-#else
-		lazyptd = vtophys(pmap->pm_pdir);
-#endif
+		lazyptd = vtophys(pmap->pm_pml4);
 		if (mask == mymask) {
 			lazymask = &pmap->pm_active;
 			pmap_lazyfix_self(mymask);
@@ -1441,9 +1437,9 @@
 static void
 pmap_lazyfix(pmap_t pmap)
 {
-	u_int cr3;
+	u_long cr3;
 
-	cr3 = vtophys(pmap->pm_pdir);
+	cr3 = vtophys(pmap->pm_pml4);
 	if (cr3 == rcr3()) {
 		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 		pmap->pm_active &= ~(PCPU_GET(cpumask));
@@ -3117,8 +3113,8 @@
 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
 	oldpmap = PCPU_GET(curpmap);
 #ifdef SMP
-	atomic_clear_long(&oldpmap->pm_active, PCPU_GET(cpumask));
-	atomic_set_long(&pmap->pm_active, PCPU_GET(cpumask));
+	atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
+	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
 #else
 	oldpmap->pm_active &= ~PCPU_GET(cpumask);
 	pmap->pm_active |= PCPU_GET(cpumask);

==== //depot/projects/hammer/sys/amd64/include/pcpu.h#9 (text+ko) ====

@@ -41,7 +41,6 @@
 #define	PCPU_MD_FIELDS							\
 	struct	pcpu *pc_prvspace;	/* Self-reference */		\
 	register_t pc_scratch_rsp;	/* User %rsp in syscall */
-	XXXX add rest here
 	u_int	pc_apic_id;
 
 #if defined(lint)

==== //depot/projects/hammer/sys/amd64/include/pmap.h#19 (text+ko) ====

@@ -221,7 +221,8 @@
 	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
 	vm_object_t		pm_pteobj;	/* Container for pte's */
 	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
-	u_long			pm_active;	/* active on cpus */
+	u_int			pm_active;	/* active on cpus */
+	/* spare u_int here due to padding */
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
 	LIST_ENTRY(pmap) 	pm_list;	/* List of all pmaps */
 };
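
	A small aside on the "spare u_int here due to padding" comment: this
	is a sketch of my own (not part of the change), with illustrative
	field names, showing why shrinking pm_active from u_long to u_int
	frees 4 bytes on an LP64 platform such as amd64.  The following
	8-byte-aligned member forces the lone u_int to be padded out to 8
	bytes anyway.

#include <stdio.h>
#include <stddef.h>

struct stats_sketch {			/* stands in for struct pmap_statistics */
	long	resident_count;		/* 8 bytes, 8-byte aligned on LP64 */
};

struct pmap_sketch {
	unsigned int		pm_active;	/* 4 bytes */
	/* 4 bytes of padding here on amd64 */
	struct stats_sketch	pm_stats;	/* starts at offset 8 */
};

int
main(void)
{
	printf("offsetof(pm_stats) = %zu, sizeof = %zu\n",
	    offsetof(struct pmap_sketch, pm_stats),
	    sizeof(struct pmap_sketch));	/* expect 8 and 16 on amd64 */
	return (0);
}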


