Date: Wed, 12 Feb 2020 11:17:45 +0000 (UTC)
From: Mateusz Guzik <mjg@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r357810 - in head/sys: kern sys
Message-ID: <202002121117.01CBHjuv086128@repo.freebsd.org>
Author: mjg
Date: Wed Feb 12 11:17:45 2020
New Revision: 357810
URL: https://svnweb.freebsd.org/changeset/base/357810

Log:
  vfs: switch to smp_rendezvous_cpus_retry for vfs_op_thread_enter/exit

  In particular on amd64 this eliminates an atomic op in the common case,
  trading it for IPIs in the uncommon case of catching CPUs executing the
  code while the filesystem is getting suspended or unmounted.

Modified:
  head/sys/kern/vfs_mount.c
  head/sys/kern/vfs_subr.c
  head/sys/sys/mount.h

Modified: head/sys/kern/vfs_mount.c
==============================================================================
--- head/sys/kern/vfs_mount.c   Wed Feb 12 11:17:18 2020        (r357809)
+++ head/sys/kern/vfs_mount.c   Wed Feb 12 11:17:45 2020        (r357810)
@@ -1441,16 +1441,7 @@ vfs_op_enter(struct mount *mp)
                 MNT_IUNLOCK(mp);
                 return;
         }
-        /*
-         * Paired with a fence in vfs_op_thread_enter(). See the comment
-         * above it for details.
-         */
-        atomic_thread_fence_seq_cst();
         vfs_op_barrier_wait(mp);
-        /*
-         * Paired with a fence in vfs_op_thread_exit().
-         */
-        atomic_thread_fence_acq();
         CPU_FOREACH(cpu) {
                 mp->mnt_ref +=
                     zpcpu_replace_cpu(mp->mnt_ref_pcpu, 0, cpu);
@@ -1484,20 +1475,52 @@ vfs_op_exit(struct mount *mp)
         MNT_IUNLOCK(mp);
 }
 
-/*
- * It is assumed the caller already posted at least an acquire barrier.
- */
+struct vfs_op_barrier_ipi {
+        struct mount *mp;
+        struct smp_rendezvous_cpus_retry_arg srcra;
+};
+
+static void
+vfs_op_action_func(void *arg)
+{
+        struct vfs_op_barrier_ipi *vfsopipi;
+        struct mount *mp;
+
+        vfsopipi = __containerof(arg, struct vfs_op_barrier_ipi, srcra);
+        mp = vfsopipi->mp;
+
+        if (!vfs_op_thread_entered(mp))
+                smp_rendezvous_cpus_done(arg);
+}
+
+static void
+vfs_op_wait_func(void *arg, int cpu)
+{
+        struct vfs_op_barrier_ipi *vfsopipi;
+        struct mount *mp;
+        int *in_op;
+
+        vfsopipi = __containerof(arg, struct vfs_op_barrier_ipi, srcra);
+        mp = vfsopipi->mp;
+
+        in_op = zpcpu_get_cpu(mp->mnt_thread_in_ops_pcpu, cpu);
+        while (atomic_load_int(in_op))
+                cpu_spinwait();
+}
+
 void
 vfs_op_barrier_wait(struct mount *mp)
 {
-        int *in_op;
-        int cpu;
+        struct vfs_op_barrier_ipi vfsopipi;
 
-        CPU_FOREACH(cpu) {
-                in_op = zpcpu_get_cpu(mp->mnt_thread_in_ops_pcpu, cpu);
-                while (atomic_load_int(in_op))
-                        cpu_spinwait();
-        }
+        vfsopipi.mp = mp;
+
+        smp_rendezvous_cpus_retry(all_cpus,
+            smp_no_rendezvous_barrier,
+            vfs_op_action_func,
+            smp_no_rendezvous_barrier,
+            vfs_op_wait_func,
+            &vfsopipi.srcra);
 }
 
 #ifdef DIAGNOSTIC

Modified: head/sys/kern/vfs_subr.c
==============================================================================
--- head/sys/kern/vfs_subr.c    Wed Feb 12 11:17:18 2020        (r357809)
+++ head/sys/kern/vfs_subr.c    Wed Feb 12 11:17:45 2020        (r357810)
@@ -6049,10 +6049,6 @@ restart:
         }
         MNT_IUNLOCK(mp);
         if (vp != NULL) {
-                /*
-                 * Paired with a fence in vfs_op_thread_exit().
-                 */
-                atomic_thread_fence_acq();
                 vfs_op_barrier_wait(mp);
                 vrele(vp);
         }

Modified: head/sys/sys/mount.h
==============================================================================
--- head/sys/sys/mount.h        Wed Feb 12 11:17:18 2020        (r357809)
+++ head/sys/sys/mount.h        Wed Feb 12 11:17:45 2020        (r357810)
@@ -983,14 +983,9 @@ enum mount_counter { MNT_COUNT_REF, MNT_COUNT_LOCKREF,
 int vfs_mount_fetch_counter(struct mount *, enum mount_counter);
 
 /*
- * We mark ourselves as entering the section and post a sequentially consistent
- * fence, meaning the store is completed before we get into the section and
- * mnt_vfs_ops is only read afterwards.
+ * Code transitioning mnt_vfs_ops to > 0 issues IPIs until it observes
+ * all CPUs not executing code enclosed by mnt_thread_in_ops_pcpu.
  *
- * Any thread transitioning the ops counter 0->1 does things in the opposite
- * order - first bumps the count, posts a sequentially consistent fence and
- * observes all CPUs not executing within the section.
- *
  * This provides an invariant that by the time the last CPU is observed not
  * executing, everyone else entering will see the counter > 0 and exit.
  *
@@ -1009,7 +1004,7 @@ int vfs_mount_fetch_counter(struct mount *, enum mount
         critical_enter(); \
         MPASS(!vfs_op_thread_entered(mp)); \
         zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 1); \
-        atomic_thread_fence_seq_cst(); \
+        __compiler_membar(); \
         if (__predict_false(mp->mnt_vfs_ops > 0)) { \
                 vfs_op_thread_exit(mp); \
                 _retval = false; \
@@ -1019,7 +1014,7 @@ int vfs_mount_fetch_counter(struct mount *, enum mount
 
 #define vfs_op_thread_exit(mp) do { \
         MPASS(vfs_op_thread_entered(mp)); \
-        atomic_thread_fence_rel(); \
+        __compiler_membar(); \
         zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 0); \
         critical_exit(); \
 } while (0)
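As an illustration of the trade-off described in the log message (no fence or atomic on the common path, a system-wide barrier on the rare path), here is a minimal userspace sketch of the same asymmetric-barrier idea. This is not FreeBSD code and not how the kernel implements it: it assumes Linux with the membarrier(2) syscall (MEMBARRIER_CMD_GLOBAL, kernel/headers 4.16 or newer) standing in for the smp_rendezvous_cpus_retry IPI broadcast, keeps one flag per thread slot instead of the per-CPU mnt_thread_in_ops_pcpu counters, and all names here (section_enter, section_exit, gate_close, gate, in_section, NSLOTS) are made up for illustration; error handling is omitted.

/* Minimal userspace sketch of the asymmetric barrier; see assumptions above. */
#define _GNU_SOURCE
#include <linux/membarrier.h>   /* MEMBARRIER_CMD_GLOBAL */
#include <stdatomic.h>
#include <stdbool.h>
#include <sys/syscall.h>        /* SYS_membarrier */
#include <unistd.h>             /* syscall() */

#define NSLOTS  128             /* one slot per thread; per-CPU in the kernel */

static _Atomic int in_section[NSLOTS];  /* plays the role of mnt_thread_in_ops_pcpu */
static _Atomic int gate;                /* plays the role of mnt_vfs_ops */

/*
 * Fast path, analogous to the new vfs_op_thread_enter(): mark the slot,
 * order the store against the gate check with a compiler-only barrier
 * (the kernel's __compiler_membar()), and back out if the gate is closed.
 */
static bool
section_enter(int slot)
{
        atomic_store_explicit(&in_section[slot], 1, memory_order_relaxed);
        atomic_signal_fence(memory_order_seq_cst);
        if (atomic_load_explicit(&gate, memory_order_relaxed) > 0) {
                atomic_signal_fence(memory_order_seq_cst);
                atomic_store_explicit(&in_section[slot], 0, memory_order_relaxed);
                return (false);         /* caller falls back to a locked path */
        }
        return (true);
}

/* Analogous to the new vfs_op_thread_exit(). */
static void
section_exit(int slot)
{
        atomic_signal_fence(memory_order_seq_cst);
        atomic_store_explicit(&in_section[slot], 0, memory_order_relaxed);
}

/*
 * Slow path, analogous to vfs_op_enter() + vfs_op_barrier_wait(): close the
 * gate, force a full memory barrier on every CPU running this process (the
 * commit does this with IPIs; membarrier(2) stands in for that here), then
 * wait until no slot is still marked.  A real implementation must check that
 * membarrier() is available and succeeded.
 */
static void
gate_close(void)
{
        atomic_fetch_add_explicit(&gate, 1, memory_order_seq_cst);
        syscall(SYS_membarrier, MEMBARRIER_CMD_GLOBAL, 0);
        for (int i = 0; i < NSLOTS; i++)
                while (atomic_load_explicit(&in_section[i], memory_order_relaxed))
                        ;       /* cpu_spinwait() in the kernel */
}

int
main(void)
{
        if (section_enter(0)) {
                /* fast-path work protected against gate_close() callers */
                section_exit(0);
        }
        gate_close();           /* e.g. before "suspending" in this analogue */
        return (0);
}

The shape mirrors the commit: section_enter()/section_exit() cost only a compiler barrier, like the reworked vfs_op_thread_enter()/vfs_op_thread_exit() macros, while gate_close() pays for one global barrier plus a scan of every slot, the userspace counterpart of vfs_op_enter() calling vfs_op_barrier_wait().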