Date:      Wed, 31 Oct 2012 18:07:18 +0000 (UTC)
From:      Attilio Rao <attilio@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r242402 - in head/sys: kern vm
Message-ID:  <201210311807.q9VI7IcX000993@svn.freebsd.org>

Author: attilio
Date: Wed Oct 31 18:07:18 2012
New Revision: 242402
URL: http://svn.freebsd.org/changeset/base/242402

Log:
  Rework the known mutexes to benefit from staying on their own
  cache line by using struct mtx_padalign, rather than frobbing the
  padding manually.
  
  The sole exceptions are the nvme and sfxge drivers, where the
  authors redefined CACHE_LINE_SIZE manually; they need to be
  analyzed and dealt with separately.
  
  Reviewed by:	jimharris, alc
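
  For reference, struct mtx_padalign mirrors the layout of struct mtx
  and is padded out by the compiler, roughly along these lines (a
  sketch; the authoritative definition lives in sys/sys/mutex.h):

	struct mtx_padalign {
		struct lock_object	lock_object;	/* Common lock state. */
		volatile uintptr_t	mtx_lock;	/* Owner and flags. */
	} __aligned(CACHE_LINE_SIZE);

  Because __aligned() also rounds sizeof() up to a multiple of the
  alignment, arrays of mtx_padalign place every lock on its own cache
  line, and a pointer to one can be cast to struct mtx * for the
  ordinary mtx(9) API.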

Modified:
  head/sys/kern/kern_timeout.c
  head/sys/kern/sched_ule.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/kern/kern_timeout.c
==============================================================================
--- head/sys/kern/kern_timeout.c	Wed Oct 31 17:12:12 2012	(r242401)
+++ head/sys/kern/kern_timeout.c	Wed Oct 31 18:07:18 2012	(r242402)
@@ -119,8 +119,8 @@ struct cc_mig_ent {
  *	when the callout should be served.
  */
 struct callout_cpu {
-	struct mtx		cc_lock;
-	struct cc_mig_ent	cc_migrating_entity __aligned(CACHE_LINE_SIZE);
+	struct mtx_padalign	cc_lock;
+	struct cc_mig_ent	cc_migrating_entity;
 	struct callout		*cc_callout;
 	struct callout_tailq	*cc_callwheel;
 	struct callout_list	cc_callfree;
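
Here the manual __aligned(CACHE_LINE_SIZE) on cc_migrating_entity only
existed to push it off cc_lock's cache line; the padded lock type now
guarantees that by itself.  A compile-time check along these lines
(illustrative only, not part of the commit) pins the invariant down:

	/*
	 * The padded lock must fill whole cache lines, so whatever
	 * member follows it starts on a fresh line.
	 */
	CTASSERT(sizeof(struct mtx_padalign) % CACHE_LINE_SIZE == 0);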

Modified: head/sys/kern/sched_ule.c
==============================================================================
--- head/sys/kern/sched_ule.c	Wed Oct 31 17:12:12 2012	(r242401)
+++ head/sys/kern/sched_ule.c	Wed Oct 31 18:07:18 2012	(r242402)
@@ -228,8 +228,7 @@ struct tdq {
 	 * tdq_lock is padded to avoid false sharing with tdq_load and
 	 * tdq_cpu_idle.
 	 */
-	struct mtx	tdq_lock;		/* run queue lock. */
-	char		pad[64 - sizeof(struct mtx)];
+	struct mtx_padalign tdq_lock;		/* run queue lock. */
 	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
 	volatile int	tdq_load;		/* Aggregate load. */
 	volatile int	tdq_cpu_idle;		/* cpu_idle() is active. */
@@ -292,7 +291,7 @@ static struct tdq	tdq_cpu;
 #define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
 #define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
 #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
-#define	TDQ_LOCKPTR(t)		(&(t)->tdq_lock)
+#define	TDQ_LOCKPTR(t)		((struct mtx *)(&(t)->tdq_lock))
 
 static void sched_priority(struct thread *);
 static void sched_thread_priority(struct thread *, u_char);
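
The cast added to TDQ_LOCKPTR() is what keeps the rest of the scheduler
compiling: several paths store or compare the tdq lock through plain
struct mtx pointers, most notably the td_lock hand-off.  A sketch of
such a consumer (hypothetical helper, not in the tree):

	static void
	tdq_assert_owns_thread(struct tdq *tdq, struct thread *td)
	{

		TDQ_LOCK_ASSERT(tdq, MA_OWNED);
		/* td_lock is a plain struct mtx * volatile, so this
		 * comparison relies on TDQ_LOCKPTR()'s cast. */
		KASSERT(td->td_lock == TDQ_LOCKPTR(tdq),
		    ("thread %p not locked by tdq %p", td, tdq));
	}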

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Wed Oct 31 17:12:12 2012	(r242401)
+++ head/sys/vm/vm_page.c	Wed Oct 31 18:07:18 2012	(r242402)
@@ -116,10 +116,10 @@ __FBSDID("$FreeBSD$");
  */
 
 struct vpgqueues vm_page_queues[PQ_COUNT];
-struct vpglocks vm_page_queue_lock;
-struct vpglocks vm_page_queue_free_lock;
+struct mtx_padalign vm_page_queue_mtx;
+struct mtx_padalign vm_page_queue_free_mtx;
 
-struct vpglocks	pa_lock[PA_LOCK_COUNT];
+struct mtx_padalign pa_lock[PA_LOCK_COUNT];
 
 vm_page_t vm_page_array;
 long vm_page_array_size;
@@ -298,7 +298,7 @@ vm_page_startup(vm_offset_t vaddr)
 	    MTX_RECURSE);
 	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
 	for (i = 0; i < PA_LOCK_COUNT; i++)
-		mtx_init(&pa_lock[i].data, "vm page", NULL, MTX_DEF);
+		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
 
 	/*
 	 * Initialize the queue headers for the hold queue, the active queue,
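
With struct vpglocks gone, mtx_init() takes the padded lock directly
and the PA_* macros supply the struct mtx * cast internally.  The
resulting usage, sketched for a page m of interest:

	vm_paddr_t pa;

	pa = VM_PAGE_TO_PHYS(m);
	PA_LOCK(pa);		/* expands to mtx_lock(PA_LOCKPTR(pa)) */
	/* ... operate on the page while holding its pa_lock ... */
	PA_UNLOCK(pa);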

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Wed Oct 31 17:12:12 2012	(r242401)
+++ head/sys/vm/vm_page.h	Wed Oct 31 18:07:18 2012	(r242402)
@@ -187,13 +187,8 @@ struct vpgqueues {
 
 extern struct vpgqueues vm_page_queues[PQ_COUNT];
 
-struct vpglocks {
-	struct mtx	data;
-	char		pad[CACHE_LINE_SIZE - sizeof(struct mtx)];
-} __aligned(CACHE_LINE_SIZE);
-
-extern struct vpglocks vm_page_queue_free_lock;
-extern struct vpglocks pa_lock[];
+extern struct mtx_padalign vm_page_queue_free_mtx;
+extern struct mtx_padalign pa_lock[];
 
 #if defined(__arm__)
 #define	PDRSHIFT	PDR_SHIFT
@@ -202,7 +197,7 @@ extern struct vpglocks pa_lock[];
 #endif
 
 #define	pa_index(pa)	((pa) >> PDRSHIFT)
-#define	PA_LOCKPTR(pa)	&pa_lock[pa_index((pa)) % PA_LOCK_COUNT].data
+#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
 #define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
 #define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
 #define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
@@ -235,8 +230,6 @@ extern struct vpglocks pa_lock[];
 #define	vm_page_lock_assert(m, a)	mtx_assert(vm_page_lockptr((m)), (a))
 #endif
 
-#define	vm_page_queue_free_mtx	vm_page_queue_free_lock.data
-
 /*
  * The vm_page's aflags are updated using atomic operations.  To set or clear
  * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
@@ -327,9 +320,8 @@ vm_page_t vm_phys_paddr_to_vm_page(vm_pa
 
 vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 
-extern struct vpglocks vm_page_queue_lock;
+extern struct mtx_padalign vm_page_queue_mtx;
 
-#define	vm_page_queue_mtx	vm_page_queue_lock.data
 #define vm_page_lock_queues()   mtx_lock(&vm_page_queue_mtx)
 #define vm_page_unlock_queues() mtx_unlock(&vm_page_queue_mtx)
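
Likewise, vm_page_queue_mtx is now a first-class object instead of an
alias for vm_page_queue_lock.data, so consumers of the page-queues lock
are untouched.  The usual pattern still reads, for example:

	vm_page_lock_queues();
	vm_page_requeue(m);	/* e.g. move m within its page queue */
	vm_page_unlock_queues();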
 


