Date: Mon, 21 Jun 2010 15:58:25 +0300
From: Kostik Belousov <kostikbel@gmail.com>
To: fs@freebsd.org
Cc: alc@freebsd.org, pho@freebsd.org
Subject: Tmpfs elimination of double-copy
Message-ID: <20100621125825.GG13238@deviant.kiev.zoral.com.ua>
Hi,

Below is a patch that eliminates the second copy of the data kept by tmpfs
when a file is mapped. It also removes potential deadlocks caused by tmpfs
doing copyin/copyout while a page is busy. The patch may also fix the known
issue with sendfile(2) on a tmpfs file, but I have not verified this.
The patch essentially consists of three parts:
- move the vnp_size member from the type-discriminated union in struct
  vm_object into the structure proper;
- teach the VM not to choke when the object held in a struct vnode's
  v_object is a default or swap object instead of a vnode object;
- use the swap object that keeps the data of a tmpfs VREG file as the
  vnode's v_object as well.
Peter Holm helped me with the patch; apparently we survive fsx and stress2.
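For reference, here is a trivial userland check of the read(2)/write(2) vs.
mmap(2) coherency that the shared-object approach has to preserve. It is only
an illustration, not part of the patch or of the fsx/stress2 runs, and it
simply assumes that /tmp is a tmpfs mount; the file name is arbitrary.

#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/tmp/tmpfs_coherency.tst";	/* assumes tmpfs on /tmp */
	size_t pgsz;
	char buf[4];
	char *p;
	int fd;

	pgsz = (size_t)getpagesize();
	fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd == -1)
		err(1, "open");
	if (ftruncate(fd, (off_t)pgsz) == -1)
		err(1, "ftruncate");
	p = mmap(NULL, pgsz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	/* Data stored with write(2) must show up in the existing mapping. */
	if (pwrite(fd, "abcd", 4, 0) != 4)
		err(1, "pwrite");
	if (memcmp(p, "abcd", 4) != 0)
		errx(1, "write(2) not visible through the mapping");

	/* Data stored through the mapping must show up via read(2). */
	memcpy(p, "wxyz", 4);
	if (pread(fd, buf, 4, 0) != 4)
		err(1, "pread");
	if (memcmp(buf, "wxyz", 4) != 0)
		errx(1, "mapped store not visible to read(2)");

	if (munmap(p, pgsz) == -1)
		err(1, "munmap");
	close(fd);
	unlink(path);
	return (0);
}

With the patch, both paths go through the single swap object instead of
copying between the vnode object and the tmpfs backing object.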
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index adeabfb..0cfe0d9 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -339,7 +339,7 @@ again:
 
if (vm_page_sleep_if_busy(m, FALSE, "zfsmwb"))
goto again;
- fsize = obj->un_pager.vnp.vnp_size;
+ fsize = obj->vnp_size;
vm_page_busy(m);
vm_page_lock_queues();
vm_page_undirty(m);
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index b6c5cfe..7297f5a 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -379,13 +379,17 @@ loop:
/* FALLTHROUGH */
case VLNK:
/* FALLTHROUGH */
- case VREG:
- /* FALLTHROUGH */
case VSOCK:
break;
case VFIFO:
vp->v_op = &tmpfs_fifoop_entries;
break;
+ case VREG:
+ VI_LOCK(vp);
+ KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
+ vp->v_object = node->tn_reg.tn_aobj;
+ VI_UNLOCK(vp);
+ break;
case VDIR:
MPASS(node->tn_dir.tn_parent != NULL);
if (node->tn_dir.tn_parent == node)
@@ -396,7 +400,6 @@ loop:
panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
}
 
- vnode_pager_setsize(vp, node->tn_size);
error = insmntque(vp, mp);
if (error)
vp = NULL;
@@ -849,11 +852,13 @@ tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
int
tmpfs_reg_resize(struct vnode *vp, off_t newsize)
{
- int error;
- size_t newpages, oldpages;
struct tmpfs_mount *tmp;
struct tmpfs_node *node;
+ vm_object_t uobj;
+ vm_page_t m;
off_t oldsize;
+ size_t newpages, oldpages, zerolen;
+ int error;
 
MPASS(vp->v_type == VREG);
MPASS(newsize >= 0);
@@ -883,41 +888,38 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize)
TMPFS_UNLOCK(tmp);
 
node->tn_size = newsize;
- vnode_pager_setsize(vp, newsize);
+ uobj = node->tn_reg.tn_aobj;
+ VM_OBJECT_LOCK(uobj);
if (newsize < oldsize) {
- size_t zerolen = round_page(newsize) - newsize;
- vm_object_t uobj = node->tn_reg.tn_aobj;
- vm_page_t m;
-
/*
* free "backing store"
*/
- VM_OBJECT_LOCK(uobj);
if (newpages < oldpages) {
- swap_pager_freespace(uobj,
- newpages, oldpages - newpages);
- vm_object_page_remove(uobj,
- OFF_TO_IDX(newsize + PAGE_MASK), 0, FALSE);
+ swap_pager_freespace(uobj, newpages, oldpages -
+ newpages);
+ vm_object_page_remove(uobj, OFF_TO_IDX(newsize +
+ PAGE_MASK), 0, FALSE);
}
 
/*
* zero out the truncated part of the last page.
*/
-
+ zerolen = round_page(newsize) - newsize;
if (zerolen > 0) {
m = vm_page_grab(uobj, OFF_TO_IDX(newsize),
VM_ALLOC_NOBUSY | VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
pmap_zero_page_area(m, PAGE_SIZE - zerolen,
zerolen);
}
- VM_OBJECT_UNLOCK(uobj);
-
}
+ uobj->size = newpages;
+ uobj->vnp_size = newsize;
+ VM_OBJECT_UNLOCK(uobj);
 
error = 0;
 
out:
- return error;
+ return (error);
}
 
/* --------------------------------------------------------------------- */
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index 88e0939..97d3cc7 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -433,7 +433,6 @@ tmpfs_setattr(struct vop_setattr_args *v)
return error;
}
 
-/* --------------------------------------------------------------------- */
static int
tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
vm_offset_t offset, size_t tlen, struct uio *uio)
@@ -449,12 +448,14 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
error = vm_pager_get_pages(tobj, &m, 1, 0);
if (error != 0) {
+ vm_page_wakeup(m);
printf("tmpfs get pages from pager error [read]\n");
goto out;
}
} else
vm_page_zero_invalid(m, TRUE);
}
+ vm_page_wakeup(m);
VM_OBJECT_UNLOCK(tobj);
error = uiomove_fromphys(&m, offset, tlen, uio);
VM_OBJECT_LOCK(tobj);
@@ -462,124 +463,26 @@ out:
vm_page_lock(m);
vm_page_unwire(m, TRUE);
vm_page_unlock(m);
- vm_page_wakeup(m);
vm_object_pip_subtract(tobj, 1);
VM_OBJECT_UNLOCK(tobj);
 
return (error);
}
 
-static __inline int
-tmpfs_nocacheread_buf(vm_object_t tobj, vm_pindex_t idx,
- vm_offset_t offset, size_t tlen, void *buf)
-{
- struct uio uio;
- struct iovec iov;
-
- uio.uio_iovcnt = 1;
- uio.uio_iov = &iov;
- iov.iov_base = buf;
- iov.iov_len = tlen;
-
- uio.uio_offset = 0;
- uio.uio_resid = tlen;
- uio.uio_rw = UIO_READ;
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_td = curthread;
-
- return (tmpfs_nocacheread(tobj, idx, offset, tlen, &uio));
-}
-
-static int
-tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
-{
- struct sf_buf *sf;
- vm_pindex_t idx;
- vm_page_t m;
- vm_offset_t offset;
- off_t addr;
- size_t tlen;
- char *ma;
- int error;
-
- addr = uio->uio_offset;
- idx = OFF_TO_IDX(addr);
- offset = addr & PAGE_MASK;
- tlen = MIN(PAGE_SIZE - offset, len);
-
- if ((vobj == NULL) ||
- (vobj->resident_page_count == 0 && vobj->cache == NULL))
- goto nocache;
-
- VM_OBJECT_LOCK(vobj);
-lookupvpg:
- if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
- vm_page_is_valid(m, offset, tlen)) {
- if ((m->oflags & VPO_BUSY) != 0) {
- /*
- * Reference the page before unlocking and sleeping so
- * that the page daemon is less likely to reclaim it.
- */
- vm_page_lock_queues();
- vm_page_flag_set(m, PG_REFERENCED);
- vm_page_sleep(m, "tmfsmr");
- goto lookupvpg;
- }
- vm_page_busy(m);
- VM_OBJECT_UNLOCK(vobj);
- error = uiomove_fromphys(&m, offset, tlen, uio);
- VM_OBJECT_LOCK(vobj);
- vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(vobj);
- return (error);
- } else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
- if ((m->oflags & VPO_BUSY) != 0) {
- /*
- * Reference the page before unlocking and sleeping so
- * that the page daemon is less likely to reclaim it.
- */
- vm_page_lock_queues();
- vm_page_flag_set(m, PG_REFERENCED);
- vm_page_sleep(m, "tmfsmr");
- goto lookupvpg;
- }
- vm_page_busy(m);
- VM_OBJECT_UNLOCK(vobj);
- sched_pin();
- sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
- ma = (char *)sf_buf_kva(sf);
- error = tmpfs_nocacheread_buf(tobj, idx, offset, tlen,
- ma + offset);
- if (error == 0) {
- uio->uio_offset += tlen;
- uio->uio_resid -= tlen;
- }
- sf_buf_free(sf);
- sched_unpin();
- VM_OBJECT_LOCK(vobj);
- vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(vobj);
- return (error);
- }
- VM_OBJECT_UNLOCK(vobj);
-nocache:
- error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);
-
- return (error);
-}
-
static int
tmpfs_read(struct vop_read_args *v)
{
struct vnode *vp = v->a_vp;
struct uio *uio = v->a_uio;
-
struct tmpfs_node *node;
vm_object_t uobj;
size_t len;
int resid;
-
int error = 0;
+ vm_pindex_t idx;
+ vm_offset_t offset;
+ off_t addr;
+ size_t tlen;
 
node = VP_TO_TMPFS_NODE(vp);
 
@@ -603,7 +506,11 @@ tmpfs_read(struct vop_read_args *v)
len = MIN(node->tn_size - uio->uio_offset, resid);
if (len == 0)
break;
- error = tmpfs_mappedread(vp->v_object, uobj, len, uio);
+ addr = uio->uio_offset;
+ idx = OFF_TO_IDX(addr);
+ offset = addr & PAGE_MASK;
+ tlen = MIN(PAGE_SIZE - offset, len);
+ error = tmpfs_nocacheread(uobj, idx, offset, tlen, uio);
if ((error != 0) || (resid == uio->uio_resid))
break;
}
@@ -616,10 +523,10 @@ out:
/* --------------------------------------------------------------------- */
 
static int
-tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
+tmpfs_mappedwrite(vm_object_t tobj, size_t len, struct uio *uio)
{
vm_pindex_t idx;
- vm_page_t vpg, tpg;
+ vm_page_t tpg;
vm_offset_t offset;
off_t addr;
size_t tlen;
@@ -632,37 +539,6 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
offset = addr & PAGE_MASK;
tlen = MIN(PAGE_SIZE - offset, len);
 
- if ((vobj == NULL) ||
- (vobj->resident_page_count == 0 && vobj->cache == NULL)) {
- vpg = NULL;
- goto nocache;
- }
-
- VM_OBJECT_LOCK(vobj);
-lookupvpg:
- if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
- vm_page_is_valid(vpg, offset, tlen)) {
- if ((vpg->oflags & VPO_BUSY) != 0) {
- /*
- * Reference the page before unlocking and sleeping so
- * that the page daemon is less likely to reclaim it.
- */
- vm_page_lock_queues();
- vm_page_flag_set(vpg, PG_REFERENCED);
- vm_page_sleep(vpg, "tmfsmw");
- goto lookupvpg;
- }
- vm_page_busy(vpg);
- vm_page_undirty(vpg);
- VM_OBJECT_UNLOCK(vobj);
- error = uiomove_fromphys(&vpg, offset, tlen, uio);
- } else {
- if (__predict_false(vobj->cache != NULL))
- vm_page_cache_free(vobj, idx, idx + 1);
- VM_OBJECT_UNLOCK(vobj);
- vpg = NULL;
- }
-nocache:
VM_OBJECT_LOCK(tobj);
vm_object_pip_add(tobj, 1);
tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
@@ -671,23 +547,18 @@ nocache:
if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
error = vm_pager_get_pages(tobj, &tpg, 1, 0);
if (error != 0) {
+ vm_page_wakeup(tpg);
printf("tmpfs get pages from pager error [write]\n");
goto out;
}
} else
vm_page_zero_invalid(tpg, TRUE);
}
+ vm_page_wakeup(tpg);
VM_OBJECT_UNLOCK(tobj);
- if (vpg == NULL)
- error = uiomove_fromphys(&tpg, offset, tlen, uio);
- else {
- KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
- pmap_copy_page(vpg, tpg);
- }
+ error = uiomove_fromphys(&tpg, offset, tlen, uio);
VM_OBJECT_LOCK(tobj);
out:
- if (vobj != NULL)
- VM_OBJECT_LOCK(vobj);
if (error == 0) {
KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
("parts of tpg invalid"));
@@ -696,11 +567,6 @@ out:
vm_page_lock(tpg);
vm_page_unwire(tpg, TRUE);
vm_page_unlock(tpg);
- vm_page_wakeup(tpg);
- if (vpg != NULL)
- vm_page_wakeup(vpg);
- if (vobj != NULL)
- VM_OBJECT_UNLOCK(vobj);
vm_object_pip_subtract(tobj, 1);
VM_OBJECT_UNLOCK(tobj);
 
@@ -759,7 +625,7 @@ tmpfs_write(struct vop_write_args *v)
len = MIN(node->tn_size - uio->uio_offset, resid);
if (len == 0)
break;
- error = tmpfs_mappedwrite(vp->v_object, uobj, len, uio);
+ error = tmpfs_mappedwrite(uobj, len, uio);
if ((error != 0) || (resid == uio->uio_resid))
break;
}
@@ -1425,7 +1291,7 @@ tmpfs_reclaim(struct vop_reclaim_args *v)
node = VP_TO_TMPFS_NODE(vp);
tmp = VFS_TO_TMPFS(vp->v_mount);
 
- vnode_destroy_vobject(vp);
+ vp->v_object = NULL;
cache_purge(vp);
 
TMPFS_NODE_LOCK(node);
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index c48e0f5..754092f 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -447,7 +447,7 @@ __elfN(load_section)(struct vmspace *vmspace,
* While I'm here, might as well check for something else that
* is invalid: filsz cannot be greater than memsz.
*/
- if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
+ if ((off_t)filsz + offset > object->vnp_size ||
filsz > memsz) {
uprintf("elf_load_section: truncated ELF file\n");
return (ENOEXEC);
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index adcb852..ee80b3e 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -2033,12 +2033,12 @@ retry_space:
*/
pgoff = (vm_offset_t)(off & PAGE_MASK);
xfsize = omin(PAGE_SIZE - pgoff,
- obj->un_pager.vnp.vnp_size - uap->offset -
+ obj->vnp_size - uap->offset -
fsbytes - loopbytes);
if (uap->nbytes)
rem = (uap->nbytes - fsbytes - loopbytes);
else
- rem = obj->un_pager.vnp.vnp_size -
+ rem = obj->vnp_size -
uap->offset - fsbytes - loopbytes;
xfsize = omin(rem, xfsize);
xfsize = omin(space - loopbytes, xfsize);
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 3d72123..ff06892 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1222,7 +1222,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
error = EINVAL;
goto done;
}
- if (obj->handle != vp) {
+ if (obj->type == OBJT_VNODE && obj->handle != vp) {
vput(vp);
vp = (struct vnode*)obj->handle;
vget(vp, LK_SHARED, td);
@@ -1261,7 +1261,14 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
objsize = round_page(va.va_size);
if (va.va_nlink == 0)
flags |= MAP_NOSYNC;
- obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, td->td_ucred);
+ if (obj->type == OBJT_VNODE)
+ obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
+ td->td_ucred);
+ else {
+ KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
("wrong object type"));
+ vm_object_reference(obj);
+ }
if (obj == NULL) {
error = ENOMEM;
goto done;
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 6a9f129..0120d32 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -106,15 +106,6 @@ struct vm_object {
void *handle;
union {
/*
- * VNode pager
- *
- * vnp_size - current size of file
- */
- struct {
- off_t vnp_size;
- } vnp;
-
- /*
* Device pager
*
* devp_pglist - list of allocated pages
@@ -145,6 +136,7 @@ struct vm_object {
} un_pager;
struct uidinfo *uip;
vm_ooffset_t charge;
+ off_t vnp_size; /* current size of file for vnode pager */
};
 
/*
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index f497d41..a1cfc01 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -212,8 +212,7 @@ retry:
msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
}
 
- if (vp->v_usecount == 0)
- panic("vnode_pager_alloc: no vnode reference");
+ KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));
 
if (object == NULL) {
/*
@@ -221,7 +220,7 @@ retry:
*/
object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));
 
- object->un_pager.vnp.vnp_size = size;
+ object->vnp_size = size;
 
object->handle = handle;
VI_LOCK(vp);
@@ -301,7 +300,7 @@ vnode_pager_haspage(object, pindex, before, after)
* If the offset is beyond end of file we do
* not have the page.
*/
- if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
+ if (IDX_TO_OFF(pindex) >= object->vnp_size)
return FALSE;
 
bsize = vp->v_mount->mnt_stat.f_iosize;
@@ -333,9 +332,8 @@ vnode_pager_haspage(object, pindex, before, after)
*after *= pagesperblock;
numafter = pagesperblock - (poff + 1);
if (IDX_TO_OFF(pindex + numafter) >
- object->un_pager.vnp.vnp_size) {
- numafter =
- OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
+ object->vnp_size) {
+ numafter = OFF_TO_IDX(object->vnp_size) -
pindex;
}
*after += numafter;
@@ -369,11 +367,11 @@ vnode_pager_setsize(vp, nsize)
vm_page_t m;
vm_pindex_t nobjsize;
 
- if ((object = vp->v_object) == NULL)
+ if ((object = vp->v_object) == NULL || object->type != OBJT_VNODE)
return;
/* ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
VM_OBJECT_LOCK(object);
- if (nsize == object->un_pager.vnp.vnp_size) {
+ if (nsize == object->vnp_size) {
/*
* Hasn't changed size
*/
@@ -381,7 +379,7 @@ vnode_pager_setsize(vp, nsize)
return;
}
nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
- if (nsize < object->un_pager.vnp.vnp_size) {
+ if (nsize < object->vnp_size) {
/*
* File has shrunk. Toss any cached pages beyond the new EOF.
*/
@@ -436,7 +434,7 @@ vnode_pager_setsize(vp, nsize)
nobjsize);
}
}
- object->un_pager.vnp.vnp_size = nsize;
+ object->vnp_size = nsize;
object->size = nobjsize;
VM_OBJECT_UNLOCK(object);
}
@@ -513,7 +511,7 @@ vnode_pager_input_smlfs(object, m)
continue;
 
address = IDX_TO_OFF(m->pindex) + i * bsize;
- if (address >= object->un_pager.vnp.vnp_size) {
+ if (address >= object->vnp_size) {
fileaddr = -1;
} else {
error = vnode_pager_addr(vp, address, &fileaddr, NULL);
@@ -590,12 +588,12 @@ vnode_pager_input_old(object, m)
/*
* Return failure if beyond current EOF
*/
- if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
+ if (IDX_TO_OFF(m->pindex) >= object->vnp_size) {
return VM_PAGER_BAD;
} else {
size = PAGE_SIZE;
- if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
- size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
+ if (IDX_TO_OFF(m->pindex) + size > object->vnp_size)
+ size = object->vnp_size - IDX_TO_OFF(m->pindex);
vp = object->handle;
VM_OBJECT_UNLOCK(object);
 
@@ -815,13 +813,13 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
}
if (firstaddr == -1) {
VM_OBJECT_LOCK(object);
- if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
+ if (i == reqpage && foff < object->vnp_size) {
panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
(intmax_t)firstaddr, (uintmax_t)(foff >> 32),
(uintmax_t)foff,
(uintmax_t)
- (object->un_pager.vnp.vnp_size >> 32),
- (uintmax_t)object->un_pager.vnp.vnp_size);
+ (object->vnp_size >> 32),
+ (uintmax_t)object->vnp_size);
}
vm_page_lock(m[i]);
vm_page_free(m[i]);
@@ -876,8 +874,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
size = count * PAGE_SIZE;
KASSERT(count > 0, ("zero count"));
- if ((foff + size) > object->un_pager.vnp.vnp_size)
- size = object->un_pager.vnp.vnp_size - foff;
+ if ((foff + size) > object->vnp_size)
+ size = object->vnp_size - foff;
KASSERT(size > 0, ("zero size"));
 
/*
@@ -944,7 +942,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
nextoff = tfoff + PAGE_SIZE;
mt = m[i];
 
- if (nextoff <= object->un_pager.vnp.vnp_size) {
+ if (nextoff <= object->vnp_size) {
/*
* Read filled up entire page.
*/
@@ -964,9 +962,9 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* read.
*/
vm_page_set_valid(mt, 0,
- object->un_pager.vnp.vnp_size - tfoff);
+ object->vnp_size - tfoff);
KASSERT((mt->dirty & vm_page_bits(0,
- object->un_pager.vnp.vnp_size - tfoff)) == 0,
+ object->vnp_size - tfoff)) == 0,
("vnode_pager_generic_getpages: page %p is dirty",
mt));
}
@@ -1116,11 +1114,11 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
* this will screw up bogus page replacement.
*/
VM_OBJECT_LOCK(object);
- if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
- if (object->un_pager.vnp.vnp_size > poffset) {
+ if (maxsize + poffset > object->vnp_size) {
+ if (object->vnp_size > poffset) {
int pgoff;
 
- maxsize = object->un_pager.vnp.vnp_size - poffset;
+ maxsize = object->vnp_size - poffset;
ncount = btoc(maxsize);
if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
/*
