Date:      Wed, 28 Aug 2019 19:40:57 +0000 (UTC)
From:      Mateusz Guzik <mjg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r351577 - head/sys/amd64/amd64
Message-ID:  <201908281940.x7SJevd4041816@repo.freebsd.org>

Author: mjg
Date: Wed Aug 28 19:40:57 2019
New Revision: 351577
URL: https://svnweb.freebsd.org/changeset/base/351577

Log:
  amd64: clean up cpu_switch.S
  
  - The LK macro (conditional on SMP for the lock prefix) is unused.
  - SETLK unnecessarily performs xchg: the obtained value is never used and
    the implicit lock prefix adds avoidable cost. The barrier it provides
    does not appear to be of any use (see the C sketch after the log).
  - The lock being waited for is almost never blocked, yet the wait loop
    starts with a pause. Move the pause out of the common case.
  
  Reviewed by:	kib
  Sponsored by:	The FreeBSD Foundation
  Differential Revision:	https://reviews.freebsd.org/D19563
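
  For context, a minimal C sketch of the difference the SETLK item refers to.
  This is illustrative only, not FreeBSD code; the function names and the use
  of C11 atomics are assumptions made for the example. On x86-64 an atomic
  exchange compiles to xchg, which carries an implicit lock prefix and full
  barrier, while a release store compiles to a plain mov, which is what the
  movq in the patch relies on under x86's TSO memory model.

        /* Illustrative sketch only; not kernel code. */
        #include <stdatomic.h>

        struct mtx;             /* opaque stand-in for the scheduler lock type */

        /* Old SETLK path (SCHED_ULE && SMP): xchgq -> implicitly locked exchange. */
        static inline void
        release_with_xchg(_Atomic(struct mtx *) *td_lockp, struct mtx *newlock)
        {
                /* The returned (previous) value is discarded, as in the old code. */
                (void)atomic_exchange(td_lockp, newlock);
        }

        /* New path: movq -> plain store, already a release on x86 TSO. */
        static inline void
        release_with_store(_Atomic(struct mtx *) *td_lockp, struct mtx *newlock)
        {
                atomic_store_explicit(td_lockp, newlock, memory_order_release);
        }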

Modified:
  head/sys/amd64/amd64/cpu_switch.S

Modified: head/sys/amd64/amd64/cpu_switch.S
==============================================================================
--- head/sys/amd64/amd64/cpu_switch.S	Wed Aug 28 19:28:27 2019	(r351576)
+++ head/sys/amd64/amd64/cpu_switch.S	Wed Aug 28 19:40:57 2019	(r351577)
@@ -45,18 +45,6 @@
 
 	.text
 
-#ifdef SMP
-#define LK	lock ;
-#else
-#define LK
-#endif
-
-#if defined(SCHED_ULE) && defined(SMP)
-#define	SETLK	xchgq
-#else
-#define	SETLK	movq
-#endif
-
 /*
  * cpu_throw()
  *
@@ -150,17 +138,15 @@ ctx_switch_xsave:
 	movq	%rdx,%r15
 	movq	%rsi,%rdi
 	callq	pmap_activate_sw
-	SETLK	%r15,TD_LOCK(%r13)		/* Release the old thread */
+	movq	%r15,TD_LOCK(%r13)		/* Release the old thread */
 sw1:
 	movq	TD_PCB(%r12),%r8
 #if defined(SCHED_ULE) && defined(SMP)
-	/* Wait for the new thread to become unblocked */
 	movq	$blocked_lock, %rdx
-1:
 	movq	TD_LOCK(%r12),%rcx
 	cmpq	%rcx, %rdx
-	pause
-	je	1b
+	je	sw1wait
+sw1cont:
 #endif
 	/*
 	 * At this point, we've switched address spaces and are ready
@@ -496,3 +482,14 @@ ENTRY(resumectx)
 	xorl	%eax,%eax
 	ret
 END(resumectx)
+
+/* Wait for the new thread to become unblocked */
+#if defined(SCHED_ULE) && defined(SMP)
+sw1wait:
+1:
+	pause
+	movq	TD_LOCK(%r12),%rcx
+	cmpq	%rcx, %rdx
+	je	1b
+	jmp	sw1cont
+#endif
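
As a reading aid, here is a minimal C sketch of the new control flow added
above. It is illustrative only: sw1_wait, td_lockp, and _mm_pause() are
assumptions for the example, not kernel API; blocked_lock mirrors the symbol
used in the diff. The common case does a single load and falls through, and
only the rare case where TD_LOCK still points at blocked_lock enters the
out-of-line loop that pauses between reloads.

        /* Illustrative sketch only; not kernel code. */
        #include <stdatomic.h>
        #include <immintrin.h>          /* _mm_pause(), the C spelling of pause */

        struct mtx;
        extern struct mtx blocked_lock; /* sentinel: thread still being switched out */

        static inline void
        sw1_wait(_Atomic(struct mtx *) *td_lockp)
        {
                /* Common case: already unblocked; no pause is executed. */
                if (atomic_load_explicit(td_lockp, memory_order_acquire) !=
                    &blocked_lock)
                        return;
                /* Rare case (out of line in the assembly): pause between reloads. */
                do {
                        _mm_pause();
                } while (atomic_load_explicit(td_lockp, memory_order_acquire) ==
                    &blocked_lock);
        }

This mirrors the sw1wait/sw1cont split in the assembly: the forward je is
rarely taken, so the hot path no longer pays for a pause on every context
switch.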


