Date:      Mon, 20 Jun 2005 21:14:02 GMT
From:      John Baldwin <jhb@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 78747 for review
Message-ID:  <200506202114.j5KLE2eL075011@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=78747

Change 78747 by jhb@jhb_slimer on 2005/06/20 21:13:22

	Fix compile errors.

Affected files ...

.. //depot/projects/smpng/sys/i386/include/atomic.h#34 edit
.. //depot/projects/smpng/sys/kern/kern_exit.c#96 edit
.. //depot/projects/smpng/sys/kern/kern_kse.c#21 edit
.. //depot/projects/smpng/sys/kern/kern_shutdown.c#58 edit

Differences ...

==== //depot/projects/smpng/sys/i386/include/atomic.h#34 (text+ko) ====

@@ -298,7 +298,6 @@
 #define	atomic_add_rel_long		atomic_add_long
 #define	atomic_subtract_acq_long	atomic_subtract_long
 #define	atomic_subtract_rel_long	atomic_subtract_long
-#define	atomic_cmpset_long		atomic_cmpset_int
 #define	atomic_cmpset_acq_long		atomic_cmpset_acq_int
 #define	atomic_cmpset_rel_long		atomic_cmpset_rel_int
 
@@ -358,14 +357,14 @@
 #define	atomic_readandclear_32	atomic_readandclear_int
 
 /* Operations on longs. */
-#define	atomic_set_long(p, v)		atomic_set_int((u_int *)(p), (v))
-#define	atomic_clear_long(p, v)		atomic_clear_int((u_int *)(p), (v))
-#define	atomic_add_long(p, v)		atomic_add_int((u_int *)(p), (v))
-#define	atomic_subtract_long(p, v)	atomic_subtract_int((u_int *)(p), (v))
-#define	atomic_cmpset_long(d, e, s)	atomic_cmpset_int((u_int *)(d), (e), (s))
-#define	atomic_load_acq_long(p)		atomic_load_acq_int((u_int *)(p))
-#define	atomic_store_rel_long(p, v)	atomic_store_rel_int((u_int *)(p), (v))
-#define	atomic_readandclear_long(p)	atomic_readandclear_int((u_int *)(p))
+#define	atomic_set_long(p, v)		atomic_set_int((volatile u_int *)(p), (v))
+#define	atomic_clear_long(p, v)		atomic_clear_int((volatile u_int *)(p), (v))
+#define	atomic_add_long(p, v)		atomic_add_int((volatile u_int *)(p), (v))
+#define	atomic_subtract_long(p, v)	atomic_subtract_int((volatile u_int *)(p), (v))
+#define	atomic_cmpset_long(d, e, s)	atomic_cmpset_int((volatile u_int *)(d), (e), (s))
+#define	atomic_load_acq_long(p)		atomic_load_acq_int((volatile u_int *)(p))
+#define	atomic_store_rel_long(p, v)	atomic_store_rel_int((volatile u_int *)(p), (v))
+#define	atomic_readandclear_long(p)	atomic_readandclear_int((volatile u_int *)(p))
 
 #if !defined(WANT_FUNCTIONS)
 static __inline int
@@ -435,24 +434,8 @@
 	return (result);
 }
 
-static __inline u_long
-atomic_readandclear_long(volatile u_long *addr)
-{
-	u_long result;
-
-	__asm __volatile (
-	"	xorl	%0,%0 ;		"
-	"	xchgl	%1,%0 ;		"
-	"# atomic_readandclear_int"
-	: "=&r" (result)		/* 0 (result) */
-	: "m" (*addr));			/* 1 (addr) */
-
-	return (result);
-}
-
 #else /* !__GNUCLIKE_ASM */
 
-extern u_long	atomic_readandclear_long(volatile u_long *);
 extern u_int	atomic_readandclear_int(volatile u_int *);
 
 #endif /* __GNUCLIKE_ASM */
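
A note on the atomic.h hunks above: the object-like atomic_cmpset_long alias is
dropped because the cast-based definition below now covers it, the old static
__inline atomic_readandclear_long() (and its extern fallback) goes away now that
the function-like macro of the same name supplies it, and the casts in the long
wrappers keep the volatile qualifier that the int variants declare on their
pointer argument.  A minimal usage sketch, assuming i386 where u_long and u_int
are both 32 bits wide; the example_* names are hypothetical and not part of the
change:

	#include <sys/types.h>
	#include <machine/atomic.h>

	static volatile u_long example_counter;	/* hypothetical counter */

	static u_long
	example_drain(void)
	{
		/*
		 * Expands to
		 * atomic_readandclear_int((volatile u_int *)&example_counter);
		 * the volatile in the macro's cast matches the
		 * "volatile u_int *" parameter of the int variant.
		 */
		return (atomic_readandclear_long(&example_counter));
	}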

==== //depot/projects/smpng/sys/kern/kern_exit.c#96 (text+ko) ====

@@ -505,8 +505,8 @@
 	wakeup(p->p_pptr);
 	
 	PROC_UNLOCK(p->p_pptr);
-	WITNESS_WARN(WARN_PANIC, &p->p_mtx, "process (pid %d) exiting",
-	    p->p_pid);
+	WITNESS_WARN(WARN_PANIC, &p->p_mtx.mtx_object,
+	    "process (pid %d) exiting", p->p_pid);
 	mtx_lock_spin(&sched_lock);
 	critical_exit();
 

==== //depot/projects/smpng/sys/kern/kern_kse.c#21 (text+ko) ====

@@ -1313,7 +1313,7 @@
 		PROC_LOCK(p);
 		if (kg->kg_upsleeps)
 			wakeup(&kg->kg_completed);
-		WITNESS_WARN(WARN_PANIC, &p->p_mtx,
+		WITNESS_WARN(WARN_PANIC, &p->p_mtx.mtx_object,
 		    "thread exiting in userret");
 		mtx_lock_spin(&sched_lock);
 		thread_stopped(p);
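
A note on the WITNESS_WARN() hunks in kern_exit.c and kern_kse.c above: the
macro's lock argument here is a struct lock_object *, not a struct mtx *, so
the mutex's embedded lock object has to be named explicitly.  A minimal sketch
of the pattern, with example_lock and example_check as hypothetical names:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	static struct mtx example_lock;		/* hypothetical mutex */

	static void
	example_check(void)
	{
		/* Pass the embedded lock_object, not the struct mtx itself. */
		WITNESS_WARN(WARN_PANIC, &example_lock.mtx_object,
		    "example_check: unexpected locks held");
	}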

==== //depot/projects/smpng/sys/kern/kern_shutdown.c#58 (text+ko) ====

@@ -474,7 +474,7 @@
 }
 
 #ifdef SMP
-static volatile uintptr_t panic_thread;
+static volatile void *panic_thread;
 #endif
 
 /*
@@ -492,7 +492,7 @@
 	va_list ap;
 	static char buf[256];
 #ifdef SMP
-	uintptr_t tid;
+	void *tid;
 
 	/*
 	 * We don't want multiple CPU's to panic at the same time, so we
@@ -500,7 +500,7 @@
 	 * panic_thread if we are spinning in case the panic on the first
 	 * CPU is canceled.
 	 */
-	tid = (uintptr_t)td;
+	tid = td;
 	if (panic_thread != tid)
 		while (atomic_cmpset_ptr(&panic_thread, 0, tid) == 0)
 			while (panic_thread != 0)
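
A note on the kern_shutdown.c hunks above: panic_thread and tid go back to
pointer types, apparently so that they line up with atomic_cmpset_ptr(), which
in this tree operates on pointer-valued operands through a volatile destination.
A minimal sketch of the same spin-to-acquire pattern; the example_* names are
hypothetical, and the atomic_cmpset_ptr() usage mirrors the hunk above rather
than any particular header:

	#include <sys/types.h>
	#include <machine/atomic.h>

	static volatile void *example_owner;	/* hypothetical owner slot */

	static void
	example_acquire(void *tid)
	{
		/*
		 * Spin until our pointer is installed.  Re-read example_owner
		 * in the inner loop so that a release by the current holder
		 * lets this CPU retry the compare-and-swap.
		 */
		if (example_owner != tid)
			while (atomic_cmpset_ptr(&example_owner, NULL, tid) == 0)
				while (example_owner != NULL)
					;	/* spin */
	}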


