Date:      Wed, 29 Mar 2023 07:52:31 GMT
From:      =?utf-8?Q?Roger=20Pau=20Monn=C3=A9?= <royger@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: 9f3be3a6ec8e - main - xen: switch to using core atomics for synchronization
Message-ID:  <202303290752.32T7qVDv052068@gitrepo.freebsd.org>

The branch main has been updated by royger:

URL: https://cgit.FreeBSD.org/src/commit/?id=9f3be3a6ec8ec6096e3d6c715695a6560aabbb80

commit 9f3be3a6ec8ec6096e3d6c715695a6560aabbb80
Author:     Elliott Mitchell <ehem+freebsd@m5p.com>
AuthorDate: 2022-01-24 01:19:16 +0000
Commit:     Roger Pau Monné <royger@FreeBSD.org>
CommitDate: 2023-03-29 07:51:42 +0000

    xen: switch to using core atomics for synchronization
    
    Now that the atomic macros are always genuinely atomic on x86, they can
    be used for synchronization with Xen.  A single-core VM isn't too
    unusual, but actual single-core hardware is uncommon; even a guest with
    one vCPU still shares memory with the hypervisor and with other domains
    running on other physical CPUs, so locked operations are required.
    
    Replace an open-coding of evtchn_clear_port() with the inline.
    
    Substantially inspired by work done by Julien Grall <julien@xen.org>,
    2014-01-13 17:40:58.
    
    Reviewed by: royger
    MFC after: 1 week
---
 sys/amd64/include/xen/synch_bitops.h  | 129 -------------------------------
 sys/dev/xen/evtchn/evtchn_dev.c       |   2 -
 sys/dev/xen/grant_table/grant_table.c |  34 ++++-----
 sys/i386/include/xen/synch_bitops.h   | 138 ----------------------------------
 sys/x86/xen/xen_intr.c                |   4 +-
 sys/xen/evtchn/evtchnvar.h            |  19 +++--
 6 files changed, 30 insertions(+), 296 deletions(-)
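
The deleted synch_bitops.h headers below implemented these bit operations
with hand-rolled lock-prefixed inline assembly; the replacement code spells
the same operations with FreeBSD's atomic(9) primitives on a long-indexed
bitmap.  A minimal sketch of the correspondence, using hypothetical helper
names and a locally defined BITS_PER_LONG rather than the tree's __LONG_BIT:

#include <sys/types.h>
#include <machine/atomic.h>

#define	BITS_PER_LONG	(sizeof(u_long) * 8)	/* local stand-in for __LONG_BIT */

/* Old: synch_set_bit(nr, map), i.e. "lock btsl". */
static inline void
bitmap_atomic_set(volatile u_long *map, u_int nr)
{
	atomic_set_long(&map[nr / BITS_PER_LONG], 1UL << (nr % BITS_PER_LONG));
}

/* Old: synch_test_and_set_bit(nr, map), i.e. "lock btsl; sbbl". */
static inline int
bitmap_atomic_test_and_set(volatile u_long *map, u_int nr)
{
	return (atomic_testandset_long(&map[nr / BITS_PER_LONG],
	    nr % BITS_PER_LONG));
}

This is the same word/bit decomposition as the EVTCHN_PTR()/EVTCHN_BIT()/
EVTCHN_MASK() macros introduced in evtchnvar.h further down.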

diff --git a/sys/amd64/include/xen/synch_bitops.h b/sys/amd64/include/xen/synch_bitops.h
deleted file mode 100644
index 746687aa91bd..000000000000
--- a/sys/amd64/include/xen/synch_bitops.h
+++ /dev/null
@@ -1,129 +0,0 @@
-#ifndef __XEN_SYNCH_BITOPS_H__
-#define __XEN_SYNCH_BITOPS_H__
-
-/*
- * Copyright 1992, Linus Torvalds.
- * Heavily modified to provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-
-#define ADDR (*(volatile long *) addr)
-
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ ( 
-        "lock btsl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btrl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btcl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-
-    __asm__ __volatile__ (
-        "lock btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-struct __synch_xchg_dummy { unsigned long a[100]; };
-#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))
-
-#define synch_cmpxchg(ptr, old, new) \
-((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-                                     (unsigned long)(old), \
-                                     (unsigned long)(new), \
-                                     sizeof(*(ptr))))
-
-static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__synch_xg(ptr)),
-				       "0"(old)
-				     : "memory");
-		return prev;
-	case 2:
-		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__synch_xg(ptr)),
-				       "0"(old)
-				     : "memory");
-		return prev;
-	case 4:
-		__asm__ __volatile__("lock; cmpxchgl %k1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__synch_xg(ptr)),
-				       "0"(old)
-				     : "memory");
-		return prev;
-	case 8:
-		__asm__ __volatile__("lock; cmpxchgq %1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__synch_xg(ptr)),
-				       "0"(old)
-				     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-{
-    return ((1UL << (nr & 31)) & 
-            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-    return oldbit;
-}
-
-#define synch_test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- synch_const_test_bit((nr),(addr)) : \
- synch_var_test_bit((nr),(addr)))
-
-#endif /* __XEN_SYNCH_BITOPS_H__ */
diff --git a/sys/dev/xen/evtchn/evtchn_dev.c b/sys/dev/xen/evtchn/evtchn_dev.c
index 34f481358008..a217120222e6 100644
--- a/sys/dev/xen/evtchn/evtchn_dev.c
+++ b/sys/dev/xen/evtchn/evtchn_dev.c
@@ -58,8 +58,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/filio.h>
 #include <sys/vnode.h>
 
-#include <machine/xen/synch_bitops.h>
-
 #include <xen/xen-os.h>
 #include <xen/evtchn.h>
 #include <xen/xen_intr.h>
diff --git a/sys/dev/xen/grant_table/grant_table.c b/sys/dev/xen/grant_table/grant_table.c
index fdbec8ac14e8..b5bbe04977a2 100644
--- a/sys/dev/xen/grant_table/grant_table.c
+++ b/sys/dev/xen/grant_table/grant_table.c
@@ -28,9 +28,6 @@ __FBSDID("$FreeBSD$");
 #include <machine/cpu.h>
 
 #include <xen/xen-os.h>
-#include <xen/hypervisor.h>
-#include <machine/xen/synch_bitops.h>
-
 #include <xen/hypervisor.h>
 #include <xen/gnttab.h>
 
@@ -182,18 +179,15 @@ gnttab_query_foreign_access(grant_ref_t ref)
 int
 gnttab_end_foreign_access_ref(grant_ref_t ref)
 {
-	uint16_t flags, nflags;
+	uint16_t flags;
 
-	nflags = shared[ref].flags;
-	do {
-		if ( (flags = nflags) & (GTF_reading|GTF_writing) ) {
-			printf("%s: WARNING: g.e. still in use!\n", __func__);
-			return (0);
-		}
-	} while ((nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) !=
-	       flags);
+	while (!((flags = atomic_load_16(&shared[ref].flags)) &
+	    (GTF_reading|GTF_writing)))
+		if (atomic_cmpset_16(&shared[ref].flags, flags, 0))
+			return (1);
 
-	return (1);
+	printf("%s: WARNING: g.e. still in use!\n", __func__);
+	return (0);
 }
 
 void
@@ -284,17 +278,21 @@ gnttab_end_foreign_transfer_ref(grant_ref_t ref)
 	/*
          * If a transfer is not even yet started, try to reclaim the grant
          * reference and return failure (== 0).
+	 *
+	 * NOTE: This is a loop since the atomic cmpset can fail multiple
+	 * times.  In normal operation it will be rare to execute more than
+	 * twice.  Attempting an attack would consume a great deal of
+	 * attacker resources and be unlikely to prolong the loop very much.
          */
-	while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
-		if ( synch_cmpxchg(&shared[ref].flags, flags, 0) == flags )
+	while (!((flags = atomic_load_16(&shared[ref].flags)) &
+	    GTF_transfer_committed))
+		if (atomic_cmpset_16(&shared[ref].flags, flags, 0))
 			return (0);
-		cpu_spinwait();
-	}
 
 	/* If a transfer is in progress then wait until it is completed. */
 	while (!(flags & GTF_transfer_completed)) {
-		flags = shared[ref].flags;
 		cpu_spinwait();
+		flags = atomic_load_16(&shared[ref].flags);
 	}
 
 	/* Read the frame number /after/ reading completion status. */
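
The NOTE added above describes the usual FreeBSD load/compare-and-set retry
pattern.  A standalone sketch of that pattern, with a hypothetical flags
constant and function name, assuming only atomic_load_16() and
atomic_cmpset_16() from machine/atomic.h (both used by the hunks above):

#include <sys/types.h>
#include <machine/atomic.h>

#define	FLAG_BUSY	0x0001		/* stand-in for GTF_reading|GTF_writing */

/* Returns 1 if the entry was reclaimed, 0 if it is still busy. */
static int
try_reclaim(volatile uint16_t *flagsp)
{
	uint16_t flags;

	while (((flags = atomic_load_16(flagsp)) & FLAG_BUSY) == 0) {
		/*
		 * The cmpset can fail if another CPU (or the hypervisor)
		 * updated *flagsp after the load; reload and retry.
		 */
		if (atomic_cmpset_16(flagsp, flags, 0))
			return (1);
	}
	return (0);
}
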
diff --git a/sys/i386/include/xen/synch_bitops.h b/sys/i386/include/xen/synch_bitops.h
deleted file mode 100644
index 696bc6fa43f4..000000000000
--- a/sys/i386/include/xen/synch_bitops.h
+++ /dev/null
@@ -1,138 +0,0 @@
-#ifndef __XEN_SYNCH_BITOPS_H__
-#define __XEN_SYNCH_BITOPS_H__
-
-/*
- * Copyright 1992, Linus Torvalds.
- * Heavily modified to provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-#define ADDR (*(volatile long *) addr)
-
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ ( 
-        "lock btsl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btrl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btcl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-
-    __asm__ __volatile__ (
-        "lock btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-struct __synch_xchg_dummy { unsigned long a[100]; };
-#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))
-
-#define synch_cmpxchg(ptr, old, new) \
-((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-                                     (unsigned long)(old), \
-                                     (unsigned long)(new), \
-                                     sizeof(*(ptr))))
-
-static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__synch_xg(ptr)),
-				       "0"(old)
-				     : "memory");
-		return prev;
-	case 2:
-		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__synch_xg(ptr)),
-				       "0"(old)
-				     : "memory");
-		return prev;
-#ifdef CONFIG_X86_64
-	case 4:
-		__asm__ __volatile__("lock; cmpxchgl %k1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__synch_xg(ptr)),
-				       "0"(old)
-				     : "memory");
-		return prev;
-	case 8:
-		__asm__ __volatile__("lock; cmpxchgq %1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__synch_xg(ptr)),
-				       "0"(old)
-				     : "memory");
-		return prev;
-#else
-	case 4:
-		__asm__ __volatile__("lock; cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__synch_xg(ptr)),
-				       "0"(old)
-				     : "memory");
-		return prev;
-#endif
-	}
-	return old;
-}
-
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-{
-    return ((1UL << (nr & 31)) & 
-            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-    return oldbit;
-}
-
-#define synch_test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- synch_const_test_bit((nr),(addr)) : \
- synch_var_test_bit((nr),(addr)))
-
-#endif /* __XEN_SYNCH_BITOPS_H__ */
diff --git a/sys/x86/xen/xen_intr.c b/sys/x86/xen/xen_intr.c
index cf4560b6b5fb..8d5562e21018 100644
--- a/sys/x86/xen/xen_intr.c
+++ b/sys/x86/xen/xen_intr.c
@@ -57,8 +57,6 @@ __FBSDID("$FreeBSD$");
 #include <machine/smp.h>
 #include <machine/stdarg.h>
 
-#include <machine/xen/synch_bitops.h>
-
 #include <xen/xen-os.h>
 #include <xen/hvm.h>
 #include <xen/hypervisor.h>
@@ -576,7 +574,7 @@ xen_intr_handle_upcall(struct trapframe *trap_frame)
 
 			/* process port */
 			port = (l1i * LONG_BIT) + l2i;
-			synch_clear_bit(port, &s->evtchn_pending[0]);
+			evtchn_clear_port(port);
 
 			isrc = xen_intr_port_to_isrc[port];
 			if (__predict_false(isrc == NULL))
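
For concreteness, a worked example of the port arithmetic in the upcall loop
above, using the EVTCHN_* macros defined in evtchnvar.h below and assuming
LONG_BIT == 64 (illustrative values only):

/* l1i = 1, l2i = 6        =>  port = 1 * 64 + 6 = 70                          */
/* EVTCHN_PTR(pending, 70) =>  HYPERVISOR_shared_info->evtchn_pending + 70/64,
 *                             i.e. &evtchn_pending[1]                         */
/* EVTCHN_MASK(70)         =>  1UL << (70 & 63) = 1UL << 6                     */
/* evtchn_clear_port(70)   =>  atomic_clear_long(&evtchn_pending[1], 1UL << 6) */
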
diff --git a/sys/xen/evtchn/evtchnvar.h b/sys/xen/evtchn/evtchnvar.h
index 1f78755115ac..d1438846594f 100644
--- a/sys/xen/evtchn/evtchnvar.h
+++ b/sys/xen/evtchn/evtchnvar.h
@@ -6,6 +6,7 @@
  * 
  * Copyright (c) 2004, K A Fraser
  * Copyright (c) 2012, Spectra Logic Corporation
+ * Copyright © 2022, Elliott Mitchell
  *
  * This file may be distributed separately from the Linux kernel, or
  * incorporated into other software packages, subject to the following license:
@@ -48,6 +49,12 @@ enum evtchn_type {
 /** Submit a port notification for delivery to a userland evtchn consumer */
 void evtchn_device_upcall(evtchn_port_t port);
 
+/* Macros for accessing event channel values */
+#define	EVTCHN_PTR(type, port) \
+	(HYPERVISOR_shared_info->evtchn_##type + ((port) / __LONG_BIT))
+#define	EVTCHN_BIT(port)	((port) & (__LONG_BIT - 1))
+#define	EVTCHN_MASK(port)	(1UL << EVTCHN_BIT(port))
+
 /**
  * Disable signal delivery for an event channel port, returning its
  * previous mask state.
@@ -59,8 +66,9 @@ void evtchn_device_upcall(evtchn_port_t port);
 static inline int
 evtchn_test_and_set_mask(evtchn_port_t port)
 {
-	shared_info_t *s = HYPERVISOR_shared_info;
-	return synch_test_and_set_bit(port, s->evtchn_mask);
+
+	return (atomic_testandset_long(EVTCHN_PTR(mask, port),
+	    EVTCHN_BIT(port)));
 }
 
 /**
@@ -71,8 +79,8 @@ evtchn_test_and_set_mask(evtchn_port_t port)
 static inline void 
 evtchn_clear_port(evtchn_port_t port)
 {
-	shared_info_t *s = HYPERVISOR_shared_info;
-	synch_clear_bit(port, &s->evtchn_pending[0]);
+
+	atomic_clear_long(EVTCHN_PTR(pending, port), EVTCHN_MASK(port));
 }
 
 /**
@@ -83,9 +91,8 @@ evtchn_clear_port(evtchn_port_t port)
 static inline void
 evtchn_mask_port(evtchn_port_t port)
 {
-	shared_info_t *s = HYPERVISOR_shared_info;
 
-	synch_set_bit(port, &s->evtchn_mask[0]);
+	atomic_set_long(EVTCHN_PTR(mask, port), EVTCHN_MASK(port));
 }
 
 /**
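
The rewritten inlines keep their original signatures, so existing callers
are unaffected.  A hedged usage sketch of a hypothetical consumer, assuming
only the two inlines shown above are in scope:

/*
 * Hypothetical helper (not part of the tree): temporarily suppress delivery
 * on a port and discard anything already pending, reporting whether the
 * port was masked beforehand.
 */
static inline int
evtchn_quiesce_port(evtchn_port_t port)
{
	int was_masked;

	was_masked = evtchn_test_and_set_mask(port);
	evtchn_clear_port(port);
	return (was_masked);
}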


