Date:      Wed, 17 Jun 2015 17:28:50 +0300
From:      Gleb Smirnoff <glebius@FreeBSD.org>
To:        Konstantin Belousov <kostikbel@gmail.com>
Cc:        Alan Cox <alc@rice.edu>, arch@FreeBSD.org
Subject:   Re: Step 2 Was: more strict KPI for vm_pager_get_pages()
Message-ID:  <20150617142850.GA73119@glebius.int.ru>
In-Reply-To: <20150617134538.GY2080@kib.kiev.ua>
References:  <20150615212931.GG73119@glebius.int.ru> <20150617101444.GW73119@glebius.int.ru> <20150617134538.GY2080@kib.kiev.ua>

  Konstantin,

On Wed, Jun 17, 2015 at 04:45:38PM +0300, Konstantin Belousov wrote:
K> > +++ sys/vm/vm_pager.c	(working copy)
K> > @@ -251,7 +251,88 @@ vm_pager_deallocate(object)
K> >  }
K> >  
K> >  /*
K> > - * vm_pager_get_pages() - inline, see vm/vm_pager.h
K> > + * Retrieve pages from the VM system in order to map them into an object
K> > + * ( or into VM space somewhere ).  If the pagein was successful, we
K> > + * must fully validate it.
K> > + */
K> > +int
K> > +vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int reqpage)
K> > +{
K> > +	int r;
K> > +
K> > +	VM_OBJECT_ASSERT_WLOCKED(object);
K> > +	KASSERT(count > 0, ("%s: 0 count", __func__));
K> > +
K> > +#ifdef INVARIANTS
K> > +	/*
K> > +	 * All pages must be busied, not mapped, not valid (save the last one),
K> > +	 * not dirty and belong to the proper object.
K> > +	 */
K> > +	for (int i = 0 ; i < count; i++) {
K> > +		vm_page_assert_xbusied(m[i]);
K> > +		KASSERT(!pmap_page_is_mapped(m[i]),
K> > +		    ("%s: page %p is mapped", __func__, m[i]));
K> > +		KASSERT(m[i]->valid == 0 || i == count - 1,
K> > +		    ("%s: request for a valid page %p", __func__, m[i]));
K> Are you sure that e.g. the requested page cannot be partially valid ?
K> I am surprised.

All save the last one are always invalid. This passed stress2 for me and for pho@.
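
To illustrate the contract from the caller side: the pages handed in are
expected to be freshly allocated, i.e. exclusively busied, unmapped, clean
and invalid, which is exactly what the new assertions check.  A simplified
sketch, not taken from the patch or from any in-tree caller ("pindex" is a
placeholder, and NULL returns from vm_page_alloc() are not handled):

	vm_page_t ma[4];
	int i, rv;

	VM_OBJECT_WLOCK(object);
	for (i = 0; i < 4; i++)
		ma[i] = vm_page_alloc(object, pindex + i, VM_ALLOC_NORMAL);
	rv = vm_pager_get_pages(object, ma, 4, 0);
	if (rv == VM_PAGER_OK) {
		/*
		 * Only the requested page ma[0] is guaranteed to be
		 * fully valid and still xbusied here; the pager may
		 * have freed the rest of the run.
		 */
	}
	VM_OBJECT_WUNLOCK(object);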

K> > +		KASSERT(m[i]->dirty == 0,
K> > +		    ("%s: page %p is dirty", __func__, m[i]));
K> > +		KASSERT(m[i]->object == object,
K> > +		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
K> > +	}
K> > +#endif
K> Create a helper function with the loop, and use it, instead of copying
K> the block twice.

Done.

K> > +
K> > +	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
K> > +	if (r != VM_PAGER_OK)
K> > +		return (r);
K> > +
K> > +	/*
K> > +	 * If pager has replaced the page, assert that it had
K> > +	 * updated the array.
K> > +	 */
K> > +	KASSERT(m[reqpage] == vm_page_lookup(object, m[reqpage]->pindex),
K> > +	    ("%s: mismatch page %p pindex %ju", __func__,
K> > +	    m[reqpage], (uintmax_t )m[reqpage]->pindex - 1));
K> Why -1 ?
K> Need an empty line after the assert, to delineate the code which is
K> commented about.

That's an artifact of a future patch. Fixed.
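
To spell out what that assertion guards: a pager that substitutes a new
page for the requested one must insert the substitute at the same pindex
and store it back into the array.  Roughly, a hypothetical sketch (error
handling omitted; "newpage" must already be exclusively busied to satisfy
the busy assertion that follows):

	vm_pindex_t pindex;

	pindex = m[reqpage]->pindex;
	vm_page_lock(m[reqpage]);
	vm_page_free(m[reqpage]);
	vm_page_unlock(m[reqpage]);
	/* vm_page_insert() may fail with ENOMEM; not handled here. */
	vm_page_insert(newpage, object, pindex);
	m[reqpage] = newpage;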

K> Also, you may assert that the requested page is still busied.

Done.
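
For completeness, the async variant keeps the same entry assertions and
takes a completion callback on top of them.  A minimal hypothetical
consumer, assuming the pgo_getpages_iodone_t signature from vm_pager.h,
void (*)(void *arg, vm_page_t *m, int count, int error):

	static void
	example_iodone(void *arg, vm_page_t *ma, int count, int error)
	{

		/* Invoked by the pager once the read completes. */
		if (error == 0)
			wakeup(arg);	/* "arg" as a wait channel is hypothetical */
	}

	rv = vm_pager_get_pages_async(object, ma, 4, 0,
	    example_iodone, &token);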

Updated patch attached.

-- 
Totus tuus, Glebius.

[attachment: pagers.step.2.diff]

Index: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
===================================================================
--- sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	(revision 284504)
+++ sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	(working copy)
@@ -5734,8 +5734,6 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int c
 	object = mreq->object;
 	error = 0;
 
-	KASSERT(vp->v_object == object, ("mismatching object"));
-
 	if (pcount > 1 && zp->z_blksz > PAGESIZE) {
 		startoff = rounddown(IDX_TO_OFF(mreq->pindex), zp->z_blksz);
 		reqstart = OFF_TO_IDX(round_page(startoff));
Index: sys/fs/nfsclient/nfs_clbio.c
===================================================================
--- sys/fs/nfsclient/nfs_clbio.c	(revision 284504)
+++ sys/fs/nfsclient/nfs_clbio.c	(working copy)
@@ -129,12 +129,6 @@ ncl_getpages(struct vop_getpages_args *ap)
 	npages = btoc(count);
 
 	/*
-	 * Since the caller has busied the requested page, that page's valid
-	 * field will not be changed by other threads.
-	 */
-	vm_page_assert_xbusied(pages[ap->a_reqpage]);
-
-	/*
 	 * If the requested page is partially valid, just return it and
 	 * allow the pager to zero-out the blanks.  Partially valid pages
 	 * can only occur at the file EOF.
Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c	(revision 284504)
+++ sys/vm/swap_pager.c	(working copy)
@@ -1118,10 +1118,6 @@ swap_pager_getpages(vm_object_t object, vm_page_t
 
 	mreq = m[reqpage];
 
-	KASSERT(mreq->object == object,
-	    ("swap_pager_getpages: object mismatch %p/%p",
-	    object, mreq->object));
-
 	/*
 	 * Calculate range to retrieve.  The pages have already been assigned
 	 * their swapblks.  We require a *contiguous* range but we know it to
Index: sys/vm/vm_pager.c
===================================================================
--- sys/vm/vm_pager.c	(revision 284504)
+++ sys/vm/vm_pager.c	(working copy)
@@ -250,8 +250,79 @@ vm_pager_deallocate(object)
 	(*pagertab[object->type]->pgo_dealloc) (object);
 }
 
+static void
+vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
+{
+#ifdef INVARIANTS
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(count > 0, ("%s: 0 count", __func__));
+	/*
+	 * All pages must be busied, not mapped, not valid (save the last one),
+	 * not dirty and belong to the proper object.
+	 */
+	for (int i = 0; i < count; i++) {
+		vm_page_assert_xbusied(m[i]);
+		KASSERT(!pmap_page_is_mapped(m[i]),
+		    ("%s: page %p is mapped", __func__, m[i]));
+		KASSERT(m[i]->valid == 0 || i == count - 1,
+		    ("%s: request for a valid page %p", __func__, m[i]));
+		KASSERT(m[i]->dirty == 0,
+		    ("%s: page %p is dirty", __func__, m[i]));
+		KASSERT(m[i]->object == object,
+		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
+	}
+#endif
+}
+
 /*
- * vm_pager_get_pages() - inline, see vm/vm_pager.h
+ * Retrieve pages from the VM system in order to map them into an object
+ * ( or into VM space somewhere ).  If the pagein was successful, we
+ * must fully validate it.
+ */
+int
+vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int reqpage)
+{
+	int r;
+
+	vm_pager_assert_in(object, m, count);
+
+	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
+	if (r != VM_PAGER_OK)
+		return (r);
+
+	/*
+	 * If the pager has replaced the page, assert that it has
+	 * updated the array.  Also assert that the page is still
+	 * busied.
+	 */
+	KASSERT(m[reqpage] == vm_page_lookup(object, m[reqpage]->pindex),
+	    ("%s: mismatch page %p pindex %ju", __func__,
+	    m[reqpage], (uintmax_t)m[reqpage]->pindex));
+	vm_page_assert_xbusied(m[reqpage]);
+
+	/*
+	 * If the pager didn't fill the entire page, zero out
+	 * the invalid portions.
+	 */
+	if (m[reqpage]->valid != VM_PAGE_BITS_ALL)
+		vm_page_zero_invalid(m[reqpage], TRUE);
+
+	return (VM_PAGER_OK);
+}
+
+int
+vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
+    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
+{
+
+	vm_pager_assert_in(object, m, count);
+
+	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
+	    count, reqpage, iodone, arg));
+}
+
+/*
  * vm_pager_put_pages() - inline, see vm/vm_pager.h
  * vm_pager_has_page() - inline, see vm/vm_pager.h
  */
Index: sys/vm/vm_pager.h
===================================================================
--- sys/vm/vm_pager.h	(revision 284504)
+++ sys/vm/vm_pager.h	(working copy)
@@ -106,9 +106,9 @@ vm_object_t vm_pager_allocate(objtype_t, void *, v
     vm_ooffset_t, struct ucred *);
 void vm_pager_bufferinit(void);
 void vm_pager_deallocate(vm_object_t);
-static __inline int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
-static inline int vm_pager_get_pages_async(vm_object_t, vm_page_t *, int,
-    int, pgo_getpages_iodone_t, void *);
+int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
+int vm_pager_get_pages_async(vm_object_t, vm_page_t *, int, int,
+    pgo_getpages_iodone_t, void *);
 static __inline boolean_t vm_pager_has_page(vm_object_t, vm_pindex_t, int *, int *);
 void vm_pager_init(void);
 vm_object_t vm_pager_object_lookup(struct pagerlst *, void *);
@@ -115,40 +115,6 @@ vm_object_t vm_pager_object_lookup(struct pagerlst
 void vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
     int npages, boolean_t object_locked);
 
-/*
- *	vm_page_get_pages:
- *
- *	Retrieve pages from the VM system in order to map them into an object
- *	( or into VM space somewhere ).  If the pagein was successful, we
- *	must fully validate it.
- */
-static __inline int
-vm_pager_get_pages(
-	vm_object_t object,
-	vm_page_t *m,
-	int count,
-	int reqpage
-) {
-	int r;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
-	if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
-		vm_page_zero_invalid(m[reqpage], TRUE);
-	}
-	return (r);
-}
-
-static inline int
-vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
-    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
-{
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
-	    count, reqpage, iodone, arg));
-}
-
 static __inline void
 vm_pager_put_pages(
 	vm_object_t object,
