Date:      Wed, 29 Jul 2020 19:38:50 +0000 (UTC)
From:      Mark Johnston <markj@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r363671 - head/sys/vm
Message-ID:  <202007291938.06TJcoeA046560@repo.freebsd.org>

Author: markj
Date: Wed Jul 29 19:38:49 2020
New Revision: 363671
URL: https://svnweb.freebsd.org/changeset/base/363671

Log:
  Remove the volatile qualifier from busy_lock.
  
  Use atomic(9) to load the lock state.  Some places were already doing
  this and others were not, so access was inconsistent.  In initialization
  code, the lock state is still initialized with plain stores.
  
  Reviewed by:	alc, kib
  Sponsored by:	The FreeBSD Foundation
  Differential Revision:	https://reviews.freebsd.org/D25861
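
The change standardizes the lock-free retry pattern used throughout
vm_page.c: read busy_lock once with an explicit atomic load (now wrapped by
the vm_page_busy_fetch() macro), then update it with a compare-and-swap that
refreshes the observed value when it fails, so the field itself no longer
needs a volatile qualifier.  A minimal userland analogue using C11 atomics
is sketched below; the names and bit encoding are illustrative only and are
not kernel code.

/*
 * Userland analogue (C11 atomics) of the access pattern above.
 * Illustrative names and encoding only; not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BUSY_EXCLUSIVE   0x1u   /* stand-in for an exclusive-busy bit */
#define BUSY_ONE_SHARER  0x2u   /* stand-in for one shared-busy reference */

struct page {
        atomic_uint busy_lock;  /* no volatile: every read is an explicit atomic load */
};

/* Counterpart of vm_page_busy_fetch(): an explicit atomic load. */
static inline unsigned int
page_busy_fetch(struct page *p)
{
        return (atomic_load_explicit(&p->busy_lock, memory_order_relaxed));
}

/* Try to take a shared-busy reference, retrying while the word changes. */
static bool
page_try_sbusy(struct page *p)
{
        unsigned int x;

        x = page_busy_fetch(p);
        for (;;) {
                if ((x & BUSY_EXCLUSIVE) != 0)
                        return (false);
                if (atomic_compare_exchange_weak_explicit(&p->busy_lock, &x,
                    x + BUSY_ONE_SHARER, memory_order_acquire,
                    memory_order_relaxed))
                        return (true);
                /* On failure, x was refreshed with the current value; retry. */
        }
}

int
main(void)
{
        struct page p;

        /* Non-atomic initialization, mirroring the plain stores kept in init code. */
        atomic_init(&p.busy_lock, 0);
        printf("sbusy: %d\n", page_try_sbusy(&p));
        return (0);
}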

Modified:
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Wed Jul 29 19:36:24 2020	(r363670)
+++ head/sys/vm/vm_page.c	Wed Jul 29 19:38:49 2020	(r363671)
@@ -908,7 +908,7 @@ vm_page_busy_downgrade(vm_page_t m)
 
 	vm_page_assert_xbusied(m);
 
-	x = m->busy_lock;
+	x = vm_page_busy_fetch(m);
 	for (;;) {
 		if (atomic_fcmpset_rel_int(&m->busy_lock,
 		    &x, VPB_SHARERS_WORD(1)))
@@ -931,7 +931,7 @@ vm_page_busy_tryupgrade(vm_page_t m)
 
 	vm_page_assert_sbusied(m);
 
-	x = m->busy_lock;
+	x = vm_page_busy_fetch(m);
 	ce = VPB_CURTHREAD_EXCLUSIVE;
 	for (;;) {
 		if (VPB_SHARERS(x) > 1)
@@ -955,7 +955,7 @@ vm_page_sbusied(vm_page_t m)
 {
 	u_int x;
 
-	x = m->busy_lock;
+	x = vm_page_busy_fetch(m);
 	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
 }
 
@@ -971,7 +971,7 @@ vm_page_sunbusy(vm_page_t m)
 
 	vm_page_assert_sbusied(m);
 
-	x = m->busy_lock;
+	x = vm_page_busy_fetch(m);
 	for (;;) {
 		KASSERT(x != VPB_FREED,
 		    ("vm_page_sunbusy: Unlocking freed page."));
@@ -1072,7 +1072,7 @@ _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_p
 
 	xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
 	sleepq_lock(m);
-	x = atomic_load_int(&m->busy_lock);
+	x = vm_page_busy_fetch(m);
 	do {
 		/*
 		 * If the page changes objects or becomes unlocked we can
@@ -1110,7 +1110,7 @@ vm_page_trysbusy(vm_page_t m)
 	u_int x;
 
 	obj = m->object;
-	x = m->busy_lock;
+	x = vm_page_busy_fetch(m);
 	for (;;) {
 		if ((x & VPB_BIT_SHARED) == 0)
 			return (0);
@@ -1146,7 +1146,7 @@ vm_page_tryxbusy(vm_page_t m)
 {
 	vm_object_t obj;
 
-        if (atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,
+        if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
             VPB_CURTHREAD_EXCLUSIVE) == 0)
 		return (0);
 
@@ -1354,7 +1354,7 @@ vm_page_readahead_finish(vm_page_t m)
 	 * have shown that deactivating the page is usually the best choice,
 	 * unless the page is wanted by another thread.
 	 */
-	if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
+	if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
 		vm_page_activate(m);
 	else
 		vm_page_deactivate(m);
@@ -1719,7 +1719,7 @@ vm_page_busy_release(vm_page_t m)
 {
 	u_int x;
 
-	x = atomic_load_int(&m->busy_lock);
+	x = vm_page_busy_fetch(m);
 	for (;;) {
 		if (x == VPB_FREED)
 			break;

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Wed Jul 29 19:36:24 2020	(r363670)
+++ head/sys/vm/vm_page.h	Wed Jul 29 19:38:49 2020	(r363671)
@@ -100,7 +100,8 @@
  *	field and is described in detail below.
  *
  *	The following annotations are possible:
- *	(A) the field is atomic and may require additional synchronization.
+ *	(A) the field must be accessed using atomic(9) and may require
+ *	    additional synchronization.
  *	(B) the page busy lock.
  *	(C) the field is immutable.
  *	(F) the per-domain lock for the free queues
@@ -243,8 +244,8 @@ struct vm_page {
 	vm_paddr_t phys_addr;		/* physical address of page (C) */
 	struct md_page md;		/* machine dependent stuff */
 	u_int ref_count;		/* page references (A) */
-	volatile u_int busy_lock;	/* busy owners lock */
-	union vm_page_astate a;		/* state accessed atomically */
+	u_int busy_lock;		/* busy owners lock (A) */
+	union vm_page_astate a;		/* state accessed atomically (A) */
 	uint8_t order;			/* index of the buddy queue (F) */
 	uint8_t pool;			/* vm_phys freepool index (F) */
 	uint8_t flags;			/* page PG_* flags (P) */
@@ -701,6 +702,8 @@ void vm_page_assert_locked_KBI(vm_page_t m, const char
 void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
 #endif
 
+#define	vm_page_busy_fetch(m)	atomic_load_int(&(m)->busy_lock)
+
 #define	vm_page_assert_busied(m)					\
 	KASSERT(vm_page_busied(m),					\
 	    ("vm_page_assert_busied: page %p not busy @ %s:%d", \
@@ -712,7 +715,7 @@ void vm_page_lock_assert_KBI(vm_page_t m, int a, const
 	    (m), __FILE__, __LINE__))
 
 #define	vm_page_assert_unbusied(m)					\
-	KASSERT((m->busy_lock & ~VPB_BIT_WAITERS) != 			\
+	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) !=		\
 	    VPB_CURTHREAD_EXCLUSIVE,					\
 	    ("vm_page_assert_xbusied: page %p busy_lock %#x owned"	\
             " by me @ %s:%d",						\
@@ -725,7 +728,7 @@ void vm_page_lock_assert_KBI(vm_page_t m, int a, const
 } while (0)
 #define	vm_page_assert_xbusied(m) do {					\
 	vm_page_assert_xbusied_unchecked(m);				\
-	KASSERT((m->busy_lock & ~VPB_BIT_WAITERS) == 			\
+	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) ==		\
 	    VPB_CURTHREAD_EXCLUSIVE,					\
 	    ("vm_page_assert_xbusied: page %p busy_lock %#x not owned"	\
             " by me @ %s:%d",						\
@@ -733,7 +736,7 @@ void vm_page_lock_assert_KBI(vm_page_t m, int a, const
 } while (0)
 
 #define	vm_page_busied(m)						\
-	((m)->busy_lock != VPB_UNBUSIED)
+	(vm_page_busy_fetch(m) != VPB_UNBUSIED)
 
 #define	vm_page_sbusy(m) do {						\
 	if (!vm_page_trysbusy(m))					\
@@ -742,10 +745,10 @@ void vm_page_lock_assert_KBI(vm_page_t m, int a, const
 } while (0)
 
 #define	vm_page_xbusied(m)						\
-	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVE) != 0)
+	((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)
 
 #define	vm_page_busy_freed(m)						\
-	((m)->busy_lock == VPB_FREED)
+	(vm_page_busy_fetch(m) == VPB_FREED)
 
 #define	vm_page_xbusy(m) do {						\
 	if (!vm_page_tryxbusy(m))					\
@@ -771,12 +774,17 @@ void vm_page_object_busy_assert(vm_page_t m);
 void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
 	vm_page_assert_pga_writeable(m, bits)
+/*
+ * Claim ownership of a page's xbusy state.  In non-INVARIANTS kernels this
+ * operation is a no-op since ownership is not tracked.  In particular
+ * this macro does not provide any synchronization with the previous owner.
+ */
 #define	vm_page_xbusy_claim(m) do {					\
 	u_int _busy_lock;						\
 									\
 	vm_page_assert_xbusied_unchecked((m));				\
 	do {								\
-		_busy_lock = atomic_load_int(&(m)->busy_lock);		\
+		_busy_lock = vm_page_busy_fetch(m);			\
 	} while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock,	\
 	    (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
 } while (0)
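
The new comment above vm_page_xbusy_claim() notes that the claim is an
unconditional takeover which preserves the flag bits but provides no
synchronization with the previous owner.  A self-contained C11 sketch of
that flag-preserving CAS loop follows; the constants are illustrative
stand-ins, not kernel definitions.

#include <stdatomic.h>
#include <stdio.h>

#define BIT_WAITERS  0x1u    /* stand-in for a flag bit that must survive the claim */
#define OWNER_SELF   0x100u  /* stand-in for the claiming thread's exclusive-owner value */

/*
 * Unconditionally take over an already-exclusively-busied word: keep any
 * flag bits, install the new owner.  As with vm_page_xbusy_claim(), no
 * synchronization with the previous owner is provided.
 */
static void
claim(atomic_uint *word)
{
        unsigned int x;

        x = atomic_load_explicit(word, memory_order_relaxed);
        while (!atomic_compare_exchange_weak_explicit(word, &x,
            (x & BIT_WAITERS) | OWNER_SELF, memory_order_relaxed,
            memory_order_relaxed))
                ;       /* x was refreshed on failure; retry */
}

int
main(void)
{
        atomic_uint word;

        atomic_init(&word, 0x200u | BIT_WAITERS); /* owned elsewhere, waiters pending */
        claim(&word);
        printf("word = %#x\n", atomic_load(&word)); /* 0x101: waiters kept, owner replaced */
        return (0);
}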


