Date: Wed, 6 May 2015 14:45:49 +0300 From: Gleb Smirnoff <glebius@FreeBSD.org> To: kib@FreeBSD.org, alc@FreeBSD.org Cc: arch@FreeBSD.org Subject: Re: more strict KPI for vm_pager_get_pages() Message-ID: <20150506114549.GS34544@FreeBSD.org> In-Reply-To: <20150430142408.GS546@nginx.com> References: <20150430142408.GS546@nginx.com>
next in thread | previous in thread | raw e-mail | index | archive | help
--xaMk4Io5JJdpkLEb
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
Hi!
I'm splitting the patch into a series. This is step 1:
Pagers are responsible for updating the array of pages
in case they replace pages in an object. All array entries
must be valid if the pager returns VM_PAGER_OK.
Note: the only pager that replaces pages is sg_pager, and
it does that correctly.
--
Totus tuus, Glebius.
--xaMk4Io5JJdpkLEb
Content-Type: text/x-diff; charset=us-ascii
Content-Disposition: attachment; filename="vm_pagers.step_1.diff"
Index: sys/dev/drm2/i915/i915_gem.c
===================================================================
--- sys/dev/drm2/i915/i915_gem.c (revision 282533)
+++ sys/dev/drm2/i915/i915_gem.c (working copy)
@@ -3175,9 +3175,6 @@ i915_gem_wire_page(vm_object_t object, vm_pindex_t
if (m->valid != VM_PAGE_BITS_ALL) {
if (vm_pager_has_page(object, pindex, NULL, NULL)) {
rv = vm_pager_get_pages(object, &m, 1, 0);
- m = vm_page_lookup(object, pindex);
- if (m == NULL)
- return (NULL);
if (rv != VM_PAGER_OK) {
vm_page_lock(m);
vm_page_free(m);
Index: sys/fs/tmpfs/tmpfs_subr.c
===================================================================
--- sys/fs/tmpfs/tmpfs_subr.c (revision 282533)
+++ sys/fs/tmpfs/tmpfs_subr.c (working copy)
@@ -1320,7 +1320,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize,
struct tmpfs_mount *tmp;
struct tmpfs_node *node;
vm_object_t uobj;
- vm_page_t m, ma[1];
+ vm_page_t m;
vm_pindex_t idx, newpages, oldpages;
off_t oldsize;
int base, rv;
@@ -1367,11 +1367,9 @@ retry:
VM_WAIT;
VM_OBJECT_WLOCK(uobj);
goto retry;
- } else if (m->valid != VM_PAGE_BITS_ALL) {
- ma[0] = m;
- rv = vm_pager_get_pages(uobj, ma, 1, 0);
- m = vm_page_lookup(uobj, idx);
- } else
+ } else if (m->valid != VM_PAGE_BITS_ALL)
+ rv = vm_pager_get_pages(uobj, &m, 1, 0);
+ else
/* A cached page was reactivated. */
rv = VM_PAGER_OK;
vm_page_lock(m);
Index: sys/kern/kern_exec.c
===================================================================
--- sys/kern/kern_exec.c (revision 282533)
+++ sys/kern/kern_exec.c (working copy)
@@ -955,13 +955,10 @@ exec_map_first_page(imgp)
}
initial_pagein = i;
rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
- ma[0] = vm_page_lookup(object, 0);
- if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) {
- if (ma[0] != NULL) {
- vm_page_lock(ma[0]);
- vm_page_free(ma[0]);
- vm_page_unlock(ma[0]);
- }
+ if (rv != VM_PAGER_OK) {
+ vm_page_lock(ma[0]);
+ vm_page_free(ma[0]);
+ vm_page_unlock(ma[0]);
VM_OBJECT_WUNLOCK(object);
return (EIO);
}
Index: sys/kern/uipc_shm.c
===================================================================
--- sys/kern/uipc_shm.c (revision 282533)
+++ sys/kern/uipc_shm.c (working copy)
@@ -187,14 +187,6 @@ uiomove_object_page(vm_object_t obj, size_t len, s
if (m->valid != VM_PAGE_BITS_ALL) {
if (vm_pager_has_page(obj, idx, NULL, NULL)) {
rv = vm_pager_get_pages(obj, &m, 1, 0);
- m = vm_page_lookup(obj, idx);
- if (m == NULL) {
- printf(
- "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
- obj, idx, rv);
- VM_OBJECT_WUNLOCK(obj);
- return (EIO);
- }
if (rv != VM_PAGER_OK) {
printf(
"uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
@@ -421,7 +413,7 @@ static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
vm_object_t object;
- vm_page_t m, ma[1];
+ vm_page_t m;
vm_pindex_t idx, nobjsize;
vm_ooffset_t delta;
int base, rv;
@@ -463,12 +455,10 @@ retry:
VM_WAIT;
VM_OBJECT_WLOCK(object);
goto retry;
- } else if (m->valid != VM_PAGE_BITS_ALL) {
- ma[0] = m;
- rv = vm_pager_get_pages(object, ma, 1,
+ } else if (m->valid != VM_PAGE_BITS_ALL)
+ rv = vm_pager_get_pages(object, &m, 1,
0);
- m = vm_page_lookup(object, idx);
- } else
+ else
/* A cached page was reactivated. */
rv = VM_PAGER_OK;
vm_page_lock(m);
Index: sys/kern/uipc_syscalls.c
===================================================================
--- sys/kern/uipc_syscalls.c (revision 282533)
+++ sys/kern/uipc_syscalls.c (working copy)
@@ -2026,10 +2026,7 @@ sendfile_readpage(vm_object_t obj, struct vnode *v
if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
rv = vm_pager_get_pages(obj, &m, 1, 0);
SFSTAT_INC(sf_iocnt);
- m = vm_page_lookup(obj, pindex);
- if (m == NULL)
- error = EIO;
- else if (rv != VM_PAGER_OK) {
+ if (rv != VM_PAGER_OK) {
vm_page_lock(m);
vm_page_free(m);
vm_page_unlock(m);
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c (revision 282533)
+++ sys/vm/vm_fault.c (working copy)
@@ -680,18 +680,8 @@ vnode_locked:
* Found the page. Leave it busy while we play
* with it.
*/
-
- /*
- * Relookup in case pager changed page. Pager
- * is responsible for disposition of old page
- * if moved.
- */
- fs.m = vm_page_lookup(fs.object, fs.pindex);
- if (!fs.m) {
- unlock_and_deallocate(&fs);
- goto RetryFault;
- }
-
+ /* Pager could have changed the page. */
+ fs.m = marray[reqpage];
hardfault++;
break; /* break to PAGE HAS BEEN FOUND */
}
Index: sys/vm/vm_glue.c
===================================================================
--- sys/vm/vm_glue.c (revision 282533)
+++ sys/vm/vm_glue.c (working copy)
@@ -230,7 +230,7 @@ vsunlock(void *addr, size_t len)
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
- vm_page_t m, ma[1];
+ vm_page_t m;
vm_pindex_t pindex;
int rv;
@@ -238,11 +238,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset
pindex = OFF_TO_IDX(offset);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
if (m->valid != VM_PAGE_BITS_ALL) {
- ma[0] = m;
- rv = vm_pager_get_pages(object, ma, 1, 0);
- m = vm_page_lookup(object, pindex);
- if (m == NULL)
- goto out;
+ rv = vm_pager_get_pages(object, &m, 1, 0);
if (rv != VM_PAGER_OK) {
vm_page_lock(m);
vm_page_free(m);
@@ -571,7 +567,7 @@ vm_thread_swapin(struct thread *td)
{
vm_object_t ksobj;
vm_page_t ma[KSTACK_MAX_PAGES];
- int i, j, k, pages, rv;
+ int i, j, pages, rv;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
@@ -594,8 +590,6 @@ vm_thread_swapin(struct thread *td)
panic("vm_thread_swapin: cannot get kstack for proc: %d",
td->td_proc->p_pid);
vm_object_pip_wakeup(ksobj);
- for (k = i; k < j; k++)
- ma[k] = vm_page_lookup(ksobj, k);
vm_page_xunbusy(ma[i]);
} else if (vm_page_xbusied(ma[i]))
vm_page_xunbusy(ma[i]);
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c (revision 282533)
+++ sys/vm/vm_object.c (working copy)
@@ -2042,7 +2042,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex
boolean_t
vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
- vm_page_t m, ma[1];
+ vm_page_t m;
vm_pindex_t pindex;
int rv;
@@ -2050,11 +2050,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t
for (pindex = start; pindex < end; pindex++) {
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
if (m->valid != VM_PAGE_BITS_ALL) {
- ma[0] = m;
- rv = vm_pager_get_pages(object, ma, 1, 0);
- m = vm_page_lookup(object, pindex);
- if (m == NULL)
- break;
+ rv = vm_pager_get_pages(object, &m, 1, 0);
if (rv != VM_PAGER_OK) {
vm_page_lock(m);
vm_page_free(m);
--xaMk4Io5JJdpkLEb--
Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?20150506114549.GS34544>
