Date:      Sat, 15 May 2021 20:55:28 GMT
From:      Mateusz Guzik <mjg@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: eec2e4ef7f96 - main - tmpfs: reimplement the mtime scan to use the lazy list
Message-ID:  <202105152055.14FKtSRD088287@gitrepo.freebsd.org>

The branch main has been updated by mjg:

URL: https://cgit.FreeBSD.org/src/commit/?id=eec2e4ef7f964d18fcec3dc2cdcd7530be490835

commit eec2e4ef7f964d18fcec3dc2cdcd7530be490835
Author:     Mateusz Guzik <mjg@FreeBSD.org>
AuthorDate: 2021-05-07 14:43:43 +0000
Commit:     Mateusz Guzik <mjg@FreeBSD.org>
CommitDate: 2021-05-15 20:48:45 +0000

    tmpfs: reimplement the mtime scan to use the lazy list
    
    Tested by:      pho
    Reviewed by:    kib, markj
    Differential Revision:  https://reviews.freebsd.org/D30065
---
 sys/fs/tmpfs/tmpfs.h        |   1 +
 sys/fs/tmpfs/tmpfs_subr.c   | 107 ++++++++++++++++++++++++++++++++++++++++++++
 sys/fs/tmpfs/tmpfs_vfsops.c |  67 ++++++++++++++-------------
 3 files changed, 143 insertions(+), 32 deletions(-)

diff --git a/sys/fs/tmpfs/tmpfs.h b/sys/fs/tmpfs/tmpfs.h
index bb777e29e3d0..99368d67aaaa 100644
--- a/sys/fs/tmpfs/tmpfs.h
+++ b/sys/fs/tmpfs/tmpfs.h
@@ -46,6 +46,7 @@ MALLOC_DECLARE(M_TMPFSNAME);
 #endif
 
 #define	OBJ_TMPFS	OBJ_PAGERPRIV1	/* has tmpfs vnode allocated */
+#define	OBJ_TMPFS_VREF	OBJ_PAGERPRIV2	/* vnode is referenced */
 
 /*
  * Internal representation of a tmpfs directory entry.
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 67eb12598e24..8b75c58d69a2 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -99,6 +99,92 @@ tmpfs_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
 	return (object);
 }
 
+/*
+ * Make sure tmpfs vnodes with writable mappings can be found on the lazy list.
+ *
+ * This allows for periodic mtime updates while only scanning vnodes which are
+ * plausibly dirty, see tmpfs_update_mtime_lazy.
+ */
+static void
+tmpfs_pager_writecount_recalc(vm_object_t object, vm_offset_t old,
+    vm_offset_t new)
+{
+	struct vnode *vp;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+
+	vp = object->un_pager.swp.swp_tmpfs;
+
+	/*
+	 * Forced unmount?
+	 */
+	if (vp == NULL) {
+		KASSERT((object->flags & OBJ_TMPFS_VREF) == 0,
+		    ("object %p with OBJ_TMPFS_VREF but without vnode", object));
+		VM_OBJECT_WUNLOCK(object);
+		return;
+	}
+
+	if (old == 0) {
+		VNASSERT((object->flags & OBJ_TMPFS_VREF) == 0, vp,
+		    ("object without writable mappings has a reference"));
+		VNPASS(vp->v_usecount > 0, vp);
+	} else {
+		VNASSERT((object->flags & OBJ_TMPFS_VREF) != 0, vp,
+		    ("object with writable mappings does not have a reference"));
+	}
+
+	if (old == new) {
+		VM_OBJECT_WUNLOCK(object);
+		return;
+	}
+
+	if (new == 0) {
+		vm_object_clear_flag(object, OBJ_TMPFS_VREF);
+		VM_OBJECT_WUNLOCK(object);
+		vrele(vp);
+	} else {
+		if ((object->flags & OBJ_TMPFS_VREF) == 0) {
+			vref(vp);
+			vlazy(vp);
+			vm_object_set_flag(object, OBJ_TMPFS_VREF);
+		}
+		VM_OBJECT_WUNLOCK(object);
+	}
+}
+
+static void
+tmpfs_pager_update_writecount(vm_object_t object, vm_offset_t start,
+    vm_offset_t end)
+{
+	vm_offset_t new, old;
+
+	VM_OBJECT_WLOCK(object);
+	KASSERT((object->flags & OBJ_ANON) == 0,
+	    ("%s: object %p with OBJ_ANON", __func__, object));
+	old = object->un_pager.swp.writemappings;
+	object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
+	new = object->un_pager.swp.writemappings;
+	tmpfs_pager_writecount_recalc(object, old, new);
+	VM_OBJECT_ASSERT_UNLOCKED(object);
+}
+
+static void
+tmpfs_pager_release_writecount(vm_object_t object, vm_offset_t start,
+    vm_offset_t end)
+{
+	vm_offset_t new, old;
+
+	VM_OBJECT_WLOCK(object);
+	KASSERT((object->flags & OBJ_ANON) == 0,
+	    ("%s: object %p with OBJ_ANON", __func__, object));
+	old = object->un_pager.swp.writemappings;
+	object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
+	new = object->un_pager.swp.writemappings;
+	tmpfs_pager_writecount_recalc(object, old, new);
+	VM_OBJECT_ASSERT_UNLOCKED(object);
+}
+
 static void
 tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
 {
@@ -131,6 +217,8 @@ struct pagerops tmpfs_pager_ops = {
 	.pgo_kvme_type = KVME_TYPE_VNODE,
 	.pgo_alloc = tmpfs_pager_alloc,
 	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
+	.pgo_update_writecount = tmpfs_pager_update_writecount,
+	.pgo_release_writecount = tmpfs_pager_release_writecount,
 	.pgo_mightbedirty = vm_object_mightbedirty_,
 	.pgo_getvp = tmpfs_pager_getvp,
 };
@@ -643,6 +731,7 @@ tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
 void
 tmpfs_destroy_vobject(struct vnode *vp, vm_object_t obj)
 {
+	bool want_vrele;
 
 	ASSERT_VOP_ELOCKED(vp, "tmpfs_destroy_vobject");
 	if (vp->v_type != VREG || obj == NULL)
@@ -650,12 +739,24 @@ tmpfs_destroy_vobject(struct vnode *vp, vm_object_t obj)
 
 	VM_OBJECT_WLOCK(obj);
 	VI_LOCK(vp);
+	/*
+	 * May be going through forced unmount.
+	 */
+	want_vrele = false;
+	if ((obj->flags & OBJ_TMPFS_VREF) != 0) {
+		vm_object_clear_flag(obj, OBJ_TMPFS_VREF);
+		want_vrele = true;
+	}
+
 	vm_object_clear_flag(obj, OBJ_TMPFS);
 	obj->un_pager.swp.swp_tmpfs = NULL;
 	if (vp->v_writecount < 0)
 		vp->v_writecount = 0;
 	VI_UNLOCK(vp);
 	VM_OBJECT_WUNLOCK(obj);
+	if (want_vrele) {
+		vrele(vp);
+	}
 }
 
 /*
@@ -792,6 +893,12 @@ loop:
 	case VREG:
 		object = node->tn_reg.tn_aobj;
 		VM_OBJECT_WLOCK(object);
+		KASSERT((object->flags & OBJ_TMPFS_VREF) == 0,
+		    ("%s: object %p with OBJ_TMPFS_VREF but without vnode",
+		    __func__, object));
+		KASSERT(object->un_pager.swp.writemappings == 0,
+		    ("%s: object %p has writemappings",
+		    __func__, object));
 		VI_LOCK(vp);
 		KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
 		vp->v_object = object;
diff --git a/sys/fs/tmpfs/tmpfs_vfsops.c b/sys/fs/tmpfs/tmpfs_vfsops.c
index 4f29b5dfc6f0..7dffb9027946 100644
--- a/sys/fs/tmpfs/tmpfs_vfsops.c
+++ b/sys/fs/tmpfs/tmpfs_vfsops.c
@@ -99,18 +99,38 @@ static const char *tmpfs_updateopts[] = {
 	"from", "export", "nomtime", "size", NULL
 };
 
-/*
- * Handle updates of time from writes to mmaped regions, if allowed.
- * Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since
- * unmap of the tmpfs-backed vnode does not call vinactive(), due to
- * vm object type is basically OBJT_SWAP.  If lazy, only handle
- * delayed update of mtime due to the writes to mapped files.
- */
+static int
+tmpfs_update_mtime_lazy_filter(struct vnode *vp, void *arg)
+{
+	struct vm_object *obj;
+
+	if (vp->v_type != VREG)
+		return (0);
+
+	obj = atomic_load_ptr(&vp->v_object);
+	if (obj == NULL)
+		return (0);
+
+	return (vm_object_mightbedirty_(obj));
+}
+
 static void
-tmpfs_update_mtime(struct mount *mp, bool lazy)
+tmpfs_update_mtime_lazy(struct mount *mp)
+{
+	struct vnode *vp, *mvp;
+
+	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, tmpfs_update_mtime_lazy_filter, NULL) {
+		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
+			continue;
+		tmpfs_check_mtime(vp);
+		vput(vp);
+	}
+}
+
+static void
+tmpfs_update_mtime_all(struct mount *mp)
 {
 	struct vnode *vp, *mvp;
-	struct vm_object *obj;
 
 	if (VFS_TO_TMPFS(mp)->tm_nomtime)
 		return;
@@ -119,28 +139,11 @@ tmpfs_update_mtime(struct mount *mp, bool lazy)
 			VI_UNLOCK(vp);
 			continue;
 		}
-		obj = vp->v_object;
-		MPASS(obj->type == tmpfs_pager_type);
-		MPASS((obj->flags & OBJ_TMPFS) != 0);
-
-		/*
-		 * In lazy case, do unlocked read, avoid taking vnode
-		 * lock if not needed.  Lost update will be handled on
-		 * the next call.
-		 * For non-lazy case, we must flush all pending
-		 * metadata changes now.
-		 */
-		if (!lazy || obj->generation != obj->cleangeneration) {
-			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
-				continue;
-			tmpfs_check_mtime(vp);
-			if (!lazy)
-				tmpfs_update(vp);
-			vput(vp);
-		} else {
-			VI_UNLOCK(vp);
+		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
 			continue;
-		}
+		tmpfs_check_mtime(vp);
+		tmpfs_update(vp);
+		vput(vp);
 	}
 }
 
@@ -300,7 +303,7 @@ tmpfs_rw_to_ro(struct mount *mp)
 	MNT_IUNLOCK(mp);
 	for (;;) {
 		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
-		tmpfs_update_mtime(mp, false);
+		tmpfs_update_mtime_all(mp);
 		error = vflush(mp, 0, flags, curthread);
 		if (error != 0) {
 			VFS_TO_TMPFS(mp)->tm_ronly = 0;
@@ -653,7 +656,7 @@ tmpfs_sync(struct mount *mp, int waitfor)
 		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
 		MNT_IUNLOCK(mp);
 	} else if (waitfor == MNT_LAZY) {
-		tmpfs_update_mtime(mp, true);
+		tmpfs_update_mtime_lazy(mp);
 	}
 	return (0);
 }
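
The heart of the change is the pair of pager callbacks added in
tmpfs_subr.c.  When an object's writable-mapping count transitions away
from zero, tmpfs_pager_writecount_recalc() takes a vnode reference and
puts the vnode on the per-mount lazy list via vlazy(); when the count
drops back to zero, the reference is released with vrele().  This is
what makes the lazy scan sufficient: any vnode whose pages could be
dirtied through a mapping is guaranteed to be held and findable on the
lazy list.  The standalone C sketch below models only that transition
logic; the names (struct obj, wc_add, wc_sub) are invented for
illustration and are not part of the commit.

/*
 * Illustrative userspace model, not kernel code: "held" stands in for
 * the OBJ_TMPFS_VREF flag, and the hold/drop printfs mark where the
 * kernel calls vref()+vlazy() and vrele() respectively.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct obj {
	size_t	writemappings;	/* bytes covered by writable mappings */
	bool	held;		/* models OBJ_TMPFS_VREF */
};

/* Models tmpfs_pager_update_writecount(): a writable mapping appears. */
static void
wc_add(struct obj *o, size_t len)
{
	size_t old;

	old = o->writemappings;
	o->writemappings += len;
	if (old == 0 && o->writemappings != 0 && !o->held) {
		o->held = true;		/* kernel: vref(vp); vlazy(vp); */
		printf("0 -> %zu: hold vnode, place on lazy list\n",
		    o->writemappings);
	}
}

/* Models tmpfs_pager_release_writecount(): a writable mapping goes away. */
static void
wc_sub(struct obj *o, size_t len)
{

	assert(o->writemappings >= len);
	o->writemappings -= len;
	if (o->writemappings == 0 && o->held) {
		o->held = false;	/* kernel: vrele(vp); */
		printf("-> 0: drop the vnode reference\n");
	}
}

int
main(void)
{
	struct obj o = { 0, false };

	wc_add(&o, 4096);	/* first writable mapping: hold */
	wc_add(&o, 8192);	/* second mapping: no transition */
	wc_sub(&o, 8192);
	wc_sub(&o, 4096);	/* last unmap: drop */
	assert(!o.held);
	return (0);
}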

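On the consumer side, tmpfs_update_mtime_lazy() walks only the lazy
list and applies an unlocked filter, tmpfs_update_mtime_lazy_filter(),
before taking any vnode lock, so clean vnodes cost a couple of loads
instead of a vget()/vput() round trip.  A minimal userspace model of
that filter-then-lock pattern follows; struct lnode and the list walk
are invented stand-ins for the kernel's MNT_VNODE_FOREACH_LAZY()
iteration.

/*
 * Illustrative model only: the filter mirrors the shape of
 * tmpfs_update_mtime_lazy_filter() (an unlocked "might this be
 * dirty?" check), while the printf marks where the kernel would
 * vget() the vnode and call tmpfs_check_mtime().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct lnode {
	const char	*name;
	bool		mightbedirty;	/* models vm_object_mightbedirty_() */
	struct lnode	*next;
};

/* Unlocked predicate: skip vnodes that cannot have a pending mtime. */
static bool
lazy_filter(const struct lnode *n)
{

	return (n->mightbedirty);
}

static void
update_mtime_lazy(struct lnode *head)
{
	struct lnode *n;

	for (n = head; n != NULL; n = n->next) {
		if (!lazy_filter(n))
			continue;	/* cheap skip, no lock taken */
		/* kernel: vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) */
		printf("tmpfs_check_mtime(%s)\n", n->name);
		/* kernel: vput(vp) */
	}
}

int
main(void)
{
	struct lnode clean = { "clean-file", false, NULL };
	struct lnode dirty = { "dirty-file", true, &clean };

	update_mtime_lazy(&dirty);	/* only dirty-file is visited */
	return (0);
}

As in the comment the commit removes from tmpfs_vfsops.c, a lost update
from the unlocked check is benign: the write leaves the object dirty
and the next lazy scan will observe it.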

