Date: Thu, 19 Feb 2009 08:25:29 +0000 (UTC)
From: Alan Cox <alc@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-user@freebsd.org
Subject: svn commit: r188811 - in user/alc/pagelock/sys: amd64/amd64 kern vm
Message-ID: <200902190825.n1J8PTYF090171@svn.freebsd.org>
Author: alc
Date: Thu Feb 19 08:25:29 2009
New Revision: 188811
URL: http://svn.freebsd.org/changeset/base/188811

Log:
  Replace PG_WRITEABLE by VPO_WRITEABLE.  Eliminate unnecessary page queues
  locking.

Modified:
  user/alc/pagelock/sys/amd64/amd64/pmap.c
  user/alc/pagelock/sys/kern/vfs_bio.c
  user/alc/pagelock/sys/vm/vm_page.c
  user/alc/pagelock/sys/vm/vm_page.h
  user/alc/pagelock/sys/vm/vm_pageout.c

Modified: user/alc/pagelock/sys/amd64/amd64/pmap.c
==============================================================================
--- user/alc/pagelock/sys/amd64/amd64/pmap.c	Thu Feb 19 06:25:29 2009	(r188810)
+++ user/alc/pagelock/sys/amd64/amd64/pmap.c	Thu Feb 19 08:25:29 2009	(r188811)
@@ -1471,11 +1471,9 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 		if (flags & M_WAITOK) {
 			PMAP_UNLOCK(pmap);
-			vm_page_unlock_queues();
 			vm_object_unlock_all(object);
 			VM_WAIT;
 			vm_object_lock_all(object);
-			vm_page_lock_queues();
 			PMAP_LOCK(pmap);
 		}
 
@@ -1907,7 +1905,7 @@ pmap_collect(pmap_t locked_pmap, struct
 			if (TAILQ_EMPTY(&m->md.pv_list)) {
 				pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 				if (TAILQ_EMPTY(&pvh->pv_list))
-					vm_page_flag_clear(m, PG_WRITEABLE);
+					m->oflags &= ~VPO_WRITEABLE;	/* XXX */
 			}
 			free_pv_entry(pmap, pv);
 			if (pmap != locked_pmap)
@@ -1949,7 +1947,8 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv
 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 	dump_drop_page(m->phys_addr);
-	vm_page_unwire(m, 0);
+	m->wire_count--;
+	atomic_subtract_int(&cnt.v_wire_count, 1);
 	vm_page_free(m);
 }
 
@@ -2164,7 +2163,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 	if (TAILQ_EMPTY(&m->md.pv_list)) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		if (TAILQ_EMPTY(&pvh->pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			m->oflags &= ~VPO_WRITEABLE;
 	}
 }
 
@@ -2375,8 +2374,10 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t
 				m->oflags |= VPO_REFERENCED;
 			}
 			if (TAILQ_EMPTY(&m->md.pv_list) &&
-			    TAILQ_EMPTY(&pvh->pv_list))
-				vm_page_flag_clear(m, PG_WRITEABLE);
+			    TAILQ_EMPTY(&pvh->pv_list)) {
+				VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+				m->oflags &= ~VPO_WRITEABLE;
+			}
 		}
 	}
 	if (pmap == kernel_pmap) {
@@ -2473,7 +2474,6 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 
 	anyvalid = 0;
 
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 
 	/*
@@ -2576,7 +2576,6 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 out:
 	if (anyvalid)
 		pmap_invalidate_all(pmap);
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 	pmap_free_zero_pages(free);
 }
@@ -2644,7 +2643,7 @@ pmap_remove_all(vm_page_t m)
 		free_pv_entry(pmap, pv);
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	m->oflags &= ~VPO_WRITEABLE;
 }
 
 /*
@@ -2967,7 +2966,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 
 	mpte = NULL;
 
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 
 	/*
@@ -3065,7 +3063,8 @@ validate:
 	newpte = (pt_entry_t)(pa | PG_V);
 	if ((prot & VM_PROT_WRITE) != 0) {
 		newpte |= PG_RW;
-		vm_page_flag_set(m, PG_WRITEABLE);
+		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+		m->oflags |= VPO_WRITEABLE;
 	}
 	if ((prot & VM_PROT_EXECUTE) == 0)
 		newpte |= pg_nx;
@@ -3117,7 +3116,6 @@ validate:
 	    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pde(pmap, pde, va);
 
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 }
 
@@ -3913,7 +3911,7 @@ restart:
 			if (TAILQ_EMPTY(&pvh->pv_list)) {
 				for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 					if (TAILQ_EMPTY(&mt->md.pv_list))
-						vm_page_flag_clear(mt, PG_WRITEABLE);
+						mt->oflags &= ~VPO_WRITEABLE;
 			}
 			mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 			if (mpte != NULL) {
@@ -3932,7 +3930,7 @@ restart:
 				if (TAILQ_EMPTY(&m->md.pv_list)) {
 					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 					if (TAILQ_EMPTY(&pvh->pv_list))
-						vm_page_flag_clear(m, PG_WRITEABLE);
+						m->oflags &= ~VPO_WRITEABLE;
 				}
 				pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
 			}
@@ -3949,7 +3947,8 @@ restart:
 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 		dump_drop_page(m->phys_addr);
-		vm_page_unwire(m, 0);
+		m->wire_count--;
+		atomic_subtract_int(&cnt.v_wire_count, 1);
 		vm_page_free(m);
 	}
 }
@@ -4041,7 +4040,7 @@ pmap_remove_write(vm_page_t m)
 	vm_offset_t va;
 
 	if ((m->flags & PG_FICTITIOUS) != 0 ||
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->oflags & VPO_WRITEABLE) == 0)
 		return;
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -4073,7 +4072,7 @@ retry:
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	m->oflags &= ~VPO_WRITEABLE;
 }
 
 /*

Modified: user/alc/pagelock/sys/kern/vfs_bio.c
==============================================================================
--- user/alc/pagelock/sys/kern/vfs_bio.c	Thu Feb 19 06:25:29 2009	(r188810)
+++ user/alc/pagelock/sys/kern/vfs_bio.c	Thu Feb 19 08:25:29 2009	(r188811)
@@ -1287,9 +1287,7 @@ brelse(struct buf *bp)
 				    (PAGE_SIZE - poffset) : resid;
 
 				KASSERT(presid >= 0, ("brelse: extra page"));
-				vm_page_lock_queues();
 				vm_page_set_invalid(m, poffset, presid);
-				vm_page_unlock_queues();
 				if (had_bogus)
 					printf("avoided corruption bug in bogus_page/brelse code\n");
 			}
@@ -3393,7 +3391,6 @@ retry:
 			goto retry;
 	}
 	bogus = 0;
-	vm_page_lock_queues();
 	for (i = 0; i < bp->b_npages; i++) {
 		m = bp->b_pages[i];
 
@@ -3426,7 +3423,6 @@ retry:
 		}
 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 	}
-	vm_page_unlock_queues();
 	VM_OBJECT_UNLOCK(obj);
 	if (bogus)
 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),

Modified: user/alc/pagelock/sys/vm/vm_page.c
==============================================================================
--- user/alc/pagelock/sys/vm/vm_page.c	Thu Feb 19 06:25:29 2009	(r188810)
+++ user/alc/pagelock/sys/vm/vm_page.c	Thu Feb 19 08:25:29 2009	(r188811)
@@ -699,7 +699,7 @@ vm_page_insert(vm_page_t m, vm_object_t
 	 * Since we are inserting a new and possibly dirty page,
 	 * update the object's OBJ_MIGHTBEDIRTY flag.
 	 */
-	if (m->flags & PG_WRITEABLE)
+	if (m->oflags & VPO_WRITEABLE)
 		vm_object_set_writeable_dirty(object);
 }
 
@@ -1949,7 +1949,6 @@ vm_page_set_invalid(vm_page_t m, int bas
 
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	bits = vm_page_bits(base, size);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
 		pmap_remove_all(m);
 	m->valid &= ~bits;

Modified: user/alc/pagelock/sys/vm/vm_page.h
==============================================================================
--- user/alc/pagelock/sys/vm/vm_page.h	Thu Feb 19 06:25:29 2009	(r188810)
+++ user/alc/pagelock/sys/vm/vm_page.h	Thu Feb 19 08:25:29 2009	(r188811)
@@ -146,6 +146,7 @@ struct vm_page {
  */
 #define	VPO_BUSY	0x0001	/* page is in transit */
 #define	VPO_WANTED	0x0002	/* someone is waiting for page */
+#define	VPO_WRITEABLE	0x0010	/* page is mapped writeable */
 #define	VPO_REFERENCED	0x0080	/* page has been referenced */
 #define	VPO_CLEANCHK	0x0100	/* page will be checked for cleaning */
 #define	VPO_SWAPINPROG	0x0200	/* swap I/O in progress on page */
@@ -191,7 +192,6 @@ extern struct mtx vm_page_queue_free_mtx
 #define	PG_FREE		0x0002	/* page is free */
 #define	PG_WINATCFLS	0x0004	/* flush dirty page on inactive q */
 #define	PG_FICTITIOUS	0x0008	/* physical page doesn't exist (O) */
-#define	PG_WRITEABLE	0x0010	/* page is mapped writeable */
 #define	PG_ZERO		0x0040	/* page is zeroed */
 #define	PG_UNMANAGED	0x0800	/* No PV management for page */
 #define	PG_MARKER	0x1000	/* special queue marker page */

Modified: user/alc/pagelock/sys/vm/vm_pageout.c
==============================================================================
--- user/alc/pagelock/sys/vm/vm_pageout.c	Thu Feb 19 06:25:29 2009	(r188810)
+++ user/alc/pagelock/sys/vm/vm_pageout.c	Thu Feb 19 08:25:29 2009	(r188811)
@@ -449,7 +449,7 @@ vm_pageout_flush(vm_page_t *mc, int coun
 		vm_page_t mt = mc[i];
 
 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
-		    (mt->flags & PG_WRITEABLE) == 0,
+		    (mt->oflags & VPO_WRITEABLE) == 0,
 		    ("vm_pageout_flush: page %p is not write protected", mt));
 		switch (pageout_status[i]) {
 		case VM_PAGER_OK:
@@ -840,7 +840,7 @@ rescan0:
 			 * to the page, removing all access will be cheaper
 			 * overall.
 			 */
-			if ((m->flags & PG_WRITEABLE) != 0)
+			if ((m->oflags & VPO_WRITEABLE) != 0)
 				pmap_remove_all(m);
 		} else {
 			vm_page_dirty(m);
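
For readers following the pagelock branch from outside the kernel tree, the sketch below is a minimal user-space model of the locking change, not FreeBSD code: the writeable bit moves from the flags field, serialized by the global page queues lock, to the oflags field, serialized by the owning VM object's lock, so paths such as pmap_enter() and pmap_remove() no longer need the global lock merely to maintain that bit.  All names in the model (model_page, model_object, queue_lock, MODEL_VPO_WRITEABLE) are invented for illustration; it builds with any C compiler plus -pthread.

/*
 * Minimal user-space model of the r188811 locking change -- NOT kernel code.
 * "Before": the writeable bit lives in a field guarded by one global lock
 * (standing in for the page queues lock).  "After": the bit lives in a
 * per-object-locked field, so unrelated objects stop contending on the
 * global lock just to track write mappings.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define	MODEL_VPO_WRITEABLE	0x0010	/* illustrative value only */

struct model_object {
	pthread_mutex_t	lock;		/* stands in for the VM object lock */
};

struct model_page {
	struct model_object *object;	/* owning object */
	unsigned int	oflags;		/* bits guarded by object->lock */
};

/* Global lock, the analogue of vm_page_queue_mtx. */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Old style: every update of the bit funnels through the global lock. */
static void
set_writeable_old(struct model_page *m)
{
	pthread_mutex_lock(&queue_lock);
	m->oflags |= MODEL_VPO_WRITEABLE;
	pthread_mutex_unlock(&queue_lock);
}

/*
 * New style: the caller already holds the owning object's lock (the kernel
 * change asserts this with VM_OBJECT_LOCK_ASSERT), so no extra lock is taken.
 */
static void
set_writeable_new(struct model_page *m)
{
	m->oflags |= MODEL_VPO_WRITEABLE;
}

static struct model_object obj = { .lock = PTHREAD_MUTEX_INITIALIZER };

int
main(void)
{
	struct model_page m = { .object = &obj, .oflags = 0 };

	set_writeable_old(&m);
	assert((m.oflags & MODEL_VPO_WRITEABLE) != 0);
	m.oflags = 0;

	pthread_mutex_lock(&obj.lock);	/* object lock held, as in pmap_enter() */
	set_writeable_new(&m);
	pthread_mutex_unlock(&obj.lock);
	assert((m.oflags & MODEL_VPO_WRITEABLE) != 0);

	printf("oflags = %#x\n", m.oflags);
	return (0);
}

Note also, from the vm_page.h hunk, that VPO_WRITEABLE keeps the numeric value 0x0010 that PG_WRITEABLE gives up, even though the two bits live in separately locked fields.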