Skip site navigation (1)Skip section navigation (2)
Date:      Tue, 16 Jun 2015 00:29:31 +0300
From:      Gleb Smirnoff <glebius@FreeBSD.org>
To:        Alan Cox <alc@rice.edu>, Konstantin Belousov <kostikbel@gmail.com>
Cc:        arch@FreeBSD.org
Subject:   Step 2 Was: more strict KPI for vm_pager_get_pages()
Message-ID:  <20150615212931.GG73119@glebius.int.ru>

next in thread | raw e-mail | index | archive | help

--t5NgoZwlhlUmGr82
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline

  Hi!

  This is step 2 of the "more strict pager KPI" patch:

o Uninline vm_pager_get_pages() into vm_pager.c.
o Keep all KASSERTs that enforce the KPI of pagers and of their
  consumers in one place: vm_pager_get_pages().
o Keep the code that zeroes out partially valid pages in one
  place in vm_pager_get_pages().

-- 
Totus tuus, Glebius.

--t5NgoZwlhlUmGr82
Content-Type: text/x-diff; charset=us-ascii
Content-Disposition: attachment; filename="pagers.step.2.diff"

Index: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
===================================================================
--- sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	(revision 284409)
+++ sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	(working copy)
@@ -5734,8 +5734,6 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int c
 	object = mreq->object;
 	error = 0;
 
-	KASSERT(vp->v_object == object, ("mismatching object"));
-
 	if (pcount > 1 && zp->z_blksz > PAGESIZE) {
 		startoff = rounddown(IDX_TO_OFF(mreq->pindex), zp->z_blksz);
 		reqstart = OFF_TO_IDX(round_page(startoff));
@@ -5771,14 +5769,6 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int c
 		vm_page_unlock(m[i]);
 	}
 
-	if (mreq->valid && reqsize == 1) {
-		if (mreq->valid != VM_PAGE_BITS_ALL)
-			vm_page_zero_invalid(mreq, TRUE);
-		zfs_vmobject_wunlock(object);
-		ZFS_EXIT(zfsvfs);
-		return (zfs_vm_pagerret_ok);
-	}
-
 	PCPU_INC(cnt.v_vnodein);
 	PCPU_ADD(cnt.v_vnodepgsin, reqsize);
 
Index: sys/fs/fuse/fuse_vnops.c
===================================================================
--- sys/fs/fuse/fuse_vnops.c	(revision 284409)
+++ sys/fs/fuse/fuse_vnops.c	(working copy)
@@ -1761,29 +1761,6 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
 	npages = btoc(count);
 
 	/*
-	 * If the requested page is partially valid, just return it and
-	 * allow the pager to zero-out the blanks.  Partially valid pages
-	 * can only occur at the file EOF.
-	 */
-
-	VM_OBJECT_WLOCK(vp->v_object);
-	fuse_vm_page_lock_queues();
-	if (pages[ap->a_reqpage]->valid != 0) {
-		for (i = 0; i < npages; ++i) {
-			if (i != ap->a_reqpage) {
-				fuse_vm_page_lock(pages[i]);
-				vm_page_free(pages[i]);
-				fuse_vm_page_unlock(pages[i]);
-			}
-		}
-		fuse_vm_page_unlock_queues();
-		VM_OBJECT_WUNLOCK(vp->v_object);
-		return 0;
-	}
-	fuse_vm_page_unlock_queues();
-	VM_OBJECT_WUNLOCK(vp->v_object);
-
-	/*
 	 * We use only the kva address for the buffer, but this is extremely
 	 * convienient and fast.
 	 */
Index: sys/fs/nfsclient/nfs_clbio.c
===================================================================
--- sys/fs/nfsclient/nfs_clbio.c	(revision 284409)
+++ sys/fs/nfsclient/nfs_clbio.c	(working copy)
@@ -129,23 +129,6 @@ ncl_getpages(struct vop_getpages_args *ap)
 	npages = btoc(count);
 
 	/*
-	 * Since the caller has busied the requested page, that page's valid
-	 * field will not be changed by other threads.
-	 */
-	vm_page_assert_xbusied(pages[ap->a_reqpage]);
-
-	/*
-	 * If the requested page is partially valid, just return it and
-	 * allow the pager to zero-out the blanks.  Partially valid pages
-	 * can only occur at the file EOF.
-	 */
-	if (pages[ap->a_reqpage]->valid != 0) {
-		vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages,
-		    FALSE);
-		return (VM_PAGER_OK);
-	}
-
-	/*
 	 * We use only the kva address for the buffer, but this is extremely
 	 * convienient and fast.
 	 */
Index: sys/fs/smbfs/smbfs_io.c
===================================================================
--- sys/fs/smbfs/smbfs_io.c	(revision 284409)
+++ sys/fs/smbfs/smbfs_io.c	(working copy)
@@ -436,7 +436,7 @@ smbfs_getpages(ap)
 	struct smbnode *np;
 	struct smb_cred *scred;
 	vm_object_t object;
-	vm_page_t *pages, m;
+	vm_page_t *pages;
 
 	vp = ap->a_vp;
 	if ((object = vp->v_object) == NULL) {
@@ -453,27 +453,6 @@ smbfs_getpages(ap)
 	npages = btoc(count);
 	reqpage = ap->a_reqpage;
 
-	/*
-	 * If the requested page is partially valid, just return it and
-	 * allow the pager to zero-out the blanks.  Partially valid pages
-	 * can only occur at the file EOF.
-	 */
-	m = pages[reqpage];
-
-	VM_OBJECT_WLOCK(object);
-	if (m->valid != 0) {
-		for (i = 0; i < npages; ++i) {
-			if (i != reqpage) {
-				vm_page_lock(pages[i]);
-				vm_page_free(pages[i]);
-				vm_page_unlock(pages[i]);
-			}
-		}
-		VM_OBJECT_WUNLOCK(object);
-		return 0;
-	}
-	VM_OBJECT_WUNLOCK(object);
-
 	scred = smbfs_malloc_scred();
 	smb_makescred(scred, td, cred);
 
Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c	(revision 284409)
+++ sys/vm/swap_pager.c	(working copy)
@@ -1118,10 +1118,6 @@ swap_pager_getpages(vm_object_t object, vm_page_t
 
 	mreq = m[reqpage];
 
-	KASSERT(mreq->object == object,
-	    ("swap_pager_getpages: object mismatch %p/%p",
-	    object, mreq->object));
-
 	/*
 	 * Calculate range to retrieve.  The pages have already been assigned
 	 * their swapblks.  We require a *contiguous* range but we know it to
Index: sys/vm/vm_pager.c
===================================================================
--- sys/vm/vm_pager.c	(revision 284409)
+++ sys/vm/vm_pager.c	(working copy)
@@ -251,7 +251,90 @@ vm_pager_deallocate(object)
 }
 
 /*
- * vm_pager_get_pages() - inline, see vm/vm_pager.h
+ * Retrieve pages from the VM system in order to map them into an object
+ * ( or into VM space somewhere ).  If the pagein was successful, we
+ * must fully validate it.
+ */
+int
+vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int reqpage)
+{
+	int r;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(count > 0, ("%s: 0 count", __func__));
+
+	/*
+	 * If the request page is partially valid, just return it and zero-out
+	 * the blanks.  Partially valid pages can only occur at the file EOF.
+	 */
+	if (m[reqpage]->valid != 0) {
+		vm_page_zero_invalid(m[reqpage], TRUE);
+		vm_pager_free_nonreq(object, m, reqpage, count, TRUE);
+		return (VM_PAGER_OK);
+	}
+
+#ifdef INVARIANTS
+	/*
+	 * All pages must be busied, not mapped, not valid, not dirty
+	 * and belong to the proper object.
+	 */
+	for (int i = 0; i < count; i++) {
+		vm_page_assert_xbusied(m[i]);
+		KASSERT(!pmap_page_is_mapped(m[i]),
+		    ("%s: page %p is mapped", __func__, m[i]));
+		KASSERT(m[i]->valid == 0,
+		    ("%s: request for a valid page %p", __func__, m[i]));
+		KASSERT(m[i]->dirty == 0,
+		    ("%s: page %p is dirty", __func__, m[i]));
+		KASSERT(m[i]->object == object,
+		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
+	}
+#endif
+
+	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
+	if (r != VM_PAGER_OK)
+		return (r);
+
+	/*
+	 * If pager has replaced the page, assert that it had
+	 * updated the array.
+	 */
+	KASSERT(m[reqpage] == vm_page_lookup(object, m[reqpage]->pindex),
+	    ("%s: mismatch page %p pindex %ju", __func__,
+	    m[reqpage], (uintmax_t)m[reqpage]->pindex));
+	/*
+	 * Pager didn't fill up entire page.  Zero out
+	 * partially filled data.
+	 */
+	if (m[reqpage]->valid != VM_PAGE_BITS_ALL)
+		vm_page_zero_invalid(m[reqpage], TRUE);
+
+	return (VM_PAGER_OK);
+}
+
+int
+vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
+    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
+{
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(count > 0, ("%s: 0 count", __func__));
+
+	/*
+	 * If the requested page is partially valid, just return it and
+	 * zero-out the blanks.  Partially valid pages can only occur at
+	 * the file EOF.
+	 */
+	if (m[reqpage]->valid != 0) {
+		vm_page_zero_invalid(m[reqpage], TRUE);
+		iodone(arg, m, reqpage, 0);
+		return (VM_PAGER_OK);
+	}
+
+	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
+	    count, reqpage, iodone, arg));
+}
+
+/*
  * vm_pager_put_pages() - inline, see vm/vm_pager.h
  * vm_pager_has_page() - inline, see vm/vm_pager.h
  */
Index: sys/vm/vm_pager.h
===================================================================
--- sys/vm/vm_pager.h	(revision 284409)
+++ sys/vm/vm_pager.h	(working copy)
@@ -106,9 +106,9 @@ vm_object_t vm_pager_allocate(objtype_t, void *, v
     vm_ooffset_t, struct ucred *);
 void vm_pager_bufferinit(void);
 void vm_pager_deallocate(vm_object_t);
-static __inline int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
-static inline int vm_pager_get_pages_async(vm_object_t, vm_page_t *, int,
-    int, pgo_getpages_iodone_t, void *);
+int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
+int vm_pager_get_pages_async(vm_object_t, vm_page_t *, int, int,
+    pgo_getpages_iodone_t, void *);
 static __inline boolean_t vm_pager_has_page(vm_object_t, vm_pindex_t, int *, int *);
 void vm_pager_init(void);
 vm_object_t vm_pager_object_lookup(struct pagerlst *, void *);
@@ -115,40 +115,6 @@ vm_object_t vm_pager_object_lookup(struct pagerlst
 void vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
     int npages, boolean_t object_locked);
 
-/*
- *	vm_page_get_pages:
- *
- *	Retrieve pages from the VM system in order to map them into an object
- *	( or into VM space somewhere ).  If the pagein was successful, we
- *	must fully validate it.
- */
-static __inline int
-vm_pager_get_pages(
-	vm_object_t object,
-	vm_page_t *m,
-	int count,
-	int reqpage
-) {
-	int r;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
-	if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
-		vm_page_zero_invalid(m[reqpage], TRUE);
-	}
-	return (r);
-}
-
-static inline int
-vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
-    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
-{
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
-	    count, reqpage, iodone, arg));
-}
-
 static __inline void
 vm_pager_put_pages(
 	vm_object_t object,
Index: sys/vm/vnode_pager.c
===================================================================
--- sys/vm/vnode_pager.c	(revision 284409)
+++ sys/vm/vnode_pager.c	(working copy)
@@ -84,8 +84,6 @@ static int vnode_pager_addr(struct vnode *vp, vm_o
 static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
 static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
 static void vnode_pager_dealloc(vm_object_t);
-static int vnode_pager_local_getpages0(struct vnode *, vm_page_t *, int, int,
-    vop_getpages_iodone_t, void *);
 static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
 static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int,
     vop_getpages_iodone_t, void *);
@@ -714,7 +712,7 @@ int
 vnode_pager_local_getpages(struct vop_getpages_args *ap)
 {
 
-	return (vnode_pager_local_getpages0(ap->a_vp, ap->a_m, ap->a_count,
+	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
 	    ap->a_reqpage, NULL, NULL));
 }
 
@@ -722,42 +720,10 @@ int
 vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
 {
 
-	return (vnode_pager_local_getpages0(ap->a_vp, ap->a_m, ap->a_count,
+	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
 	    ap->a_reqpage, ap->a_iodone, ap->a_arg));
 }
 
-static int
-vnode_pager_local_getpages0(struct vnode *vp, vm_page_t *m, int bytecount,
-    int reqpage, vop_getpages_iodone_t iodone, void *arg)
-{
-	vm_page_t mreq;
-
-	mreq = m[reqpage];
-
-	/*
-	 * Since the caller has busied the requested page, that page's valid
-	 * field will not be changed by other threads.
-	 */
-	vm_page_assert_xbusied(mreq);
-
-	/*
-	 * The requested page has valid blocks.  Invalid part can only
-	 * exist at the end of file, and the page is made fully valid
-	 * by zeroing in vm_pager_get_pages().  Free non-requested
-	 * pages, since no i/o is done to read its content.
-	 */
-	if (mreq->valid != 0) {
-		vm_pager_free_nonreq(mreq->object, m, reqpage,
-		    round_page(bytecount) / PAGE_SIZE, FALSE);
-		if (iodone != NULL)
-			iodone(arg, m, reqpage, 0);
-		return (VM_PAGER_OK);
-	}
-
-	return (vnode_pager_generic_getpages(vp, m, bytecount, reqpage,
-	    iodone, arg));
-}
-
 /*
  * This is now called from local media FS's to operate against their
  * own vnodes if they fail to implement VOP_GETPAGES.

--t5NgoZwlhlUmGr82--



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?20150615212931.GG73119>