Date: Tue, 27 Apr 2010 05:39:13 +0000 (UTC)
From: Kip Macy <kmacy@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-user@freebsd.org
Subject: svn commit: r207264 - in user/kmacy/head_page_lock_incr/sys: amd64/amd64 amd64/include arm/arm arm/include dev/drm i386/i386 i386/include kern mips/include mips/mips net powerpc/aim powerpc/booke p...
Message-ID: <201004270539.o3R5dD0S014618@svn.freebsd.org>
Author: kmacy
Date: Tue Apr 27 05:39:13 2010
New Revision: 207264
URL: http://svn.freebsd.org/changeset/base/207264

Log:
  As it stands now, with the exception of hold_count, every vm_page
  modification acquires both the page queue lock and the appropriate
  page lock.  The assumption is that over time we can incrementally
  wean ourselves off the page queue mutex where possible.  The next
  field to rely strictly on the page lock will be wire_count.

  This currently appears to work on normal compile workloads as well
  as stress2's swap test.

Modified:
  user/kmacy/head_page_lock_incr/sys/amd64/amd64/pmap.c
  user/kmacy/head_page_lock_incr/sys/amd64/include/pmap.h
  user/kmacy/head_page_lock_incr/sys/amd64/include/vmparam.h
  user/kmacy/head_page_lock_incr/sys/arm/arm/pmap.c
  user/kmacy/head_page_lock_incr/sys/arm/include/pmap.h
  user/kmacy/head_page_lock_incr/sys/dev/drm/via_dmablit.c
  user/kmacy/head_page_lock_incr/sys/i386/i386/pmap.c
  user/kmacy/head_page_lock_incr/sys/i386/include/pmap.h
  user/kmacy/head_page_lock_incr/sys/kern/kern_exec.c
  user/kmacy/head_page_lock_incr/sys/kern/subr_witness.c
  user/kmacy/head_page_lock_incr/sys/kern/sys_pipe.c
  user/kmacy/head_page_lock_incr/sys/kern/sys_process.c
  user/kmacy/head_page_lock_incr/sys/kern/uipc_cow.c
  user/kmacy/head_page_lock_incr/sys/kern/vfs_bio.c
  user/kmacy/head_page_lock_incr/sys/mips/include/pmap.h
  user/kmacy/head_page_lock_incr/sys/mips/mips/pmap.c
  user/kmacy/head_page_lock_incr/sys/net/bpf_zerocopy.c
  user/kmacy/head_page_lock_incr/sys/powerpc/aim/mmu_oea.c
  user/kmacy/head_page_lock_incr/sys/powerpc/aim/mmu_oea64.c
  user/kmacy/head_page_lock_incr/sys/powerpc/booke/pmap.c
  user/kmacy/head_page_lock_incr/sys/powerpc/include/pmap.h
  user/kmacy/head_page_lock_incr/sys/sparc64/include/pmap.h
  user/kmacy/head_page_lock_incr/sys/sparc64/sparc64/pmap.c
  user/kmacy/head_page_lock_incr/sys/sun4v/include/pmap.h
  user/kmacy/head_page_lock_incr/sys/sun4v/sun4v/pmap.c
  user/kmacy/head_page_lock_incr/sys/vm/device_pager.c
  user/kmacy/head_page_lock_incr/sys/vm/sg_pager.c
  user/kmacy/head_page_lock_incr/sys/vm/swap_pager.c
  user/kmacy/head_page_lock_incr/sys/vm/uma_core.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_contig.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_fault.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_glue.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_kern.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_mmap.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_object.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_page.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_page.h
  user/kmacy/head_page_lock_incr/sys/vm/vm_pageout.c
  user/kmacy/head_page_lock_incr/sys/vm/vm_param.h
  user/kmacy/head_page_lock_incr/sys/vm/vnode_pager.c

Modified: user/kmacy/head_page_lock_incr/sys/amd64/amd64/pmap.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/amd64/amd64/pmap.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/amd64/amd64/pmap.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -793,7 +793,6 @@ static u_long pmap_pdpe_demotions;
 SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
     &pmap_pdpe_demotions, 0, "1GB page demotions");
 
-
 /***************************************************
  * Low level helper routines.....
 ***************************************************/

@@ -1201,14 +1200,19 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 	pd_entry_t pde, *pdep;
 	pt_entry_t pte;
 	vm_page_t m;
+	vm_paddr_t pa;
 
+	pa = 0;
 	m = NULL;
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
+retry:
 	pdep = pmap_pde(pmap, va);
 	if (pdep != NULL && (pde = *pdep)) {
 		if (pde & PG_PS) {
 			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
+				if (vm_page_pa_tryrelock(pmap, (pde & PG_PS_FRAME) |
+				    (va & PDRMASK), &pa))
+					goto retry;
 				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 				    (va & PDRMASK));
 				vm_page_hold(m);
@@ -1217,12 +1221,14 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 			pte = *pmap_pde_to_pte(pdep, va);
 			if ((pte & PG_V) &&
 			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
+				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, &pa))
+					goto retry;
 				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 				vm_page_hold(m);
 			}
 		}
 	}
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }
@@ -3143,9 +3149,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	 * In the case that a page table page is not
 	 * resident, we are creating it here.
 	 */
-	if (va < VM_MAXUSER_ADDRESS) {
+	if (va < VM_MAXUSER_ADDRESS)
 		mpte = pmap_allocpte(pmap, va, M_WAITOK);
-	}
 
 	pde = pmap_pde(pmap, va);
 	if (pde != NULL && (*pde & PG_V) != 0) {
@@ -3393,7 +3398,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
 		    mpte);
 		m = TAILQ_NEXT(m, listq);
 	}
- 	PMAP_UNLOCK(pmap);
+	PMAP_UNLOCK(pmap);
 }
 
 /*

Modified: user/kmacy/head_page_lock_incr/sys/amd64/include/pmap.h
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/amd64/include/pmap.h	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/amd64/include/pmap.h	Tue Apr 27 05:39:13 2010	(r207264)
@@ -245,6 +245,8 @@ struct pmap {
 	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
 	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
 	u_int			pm_active;	/* active on cpus */
+	uint32_t		pm_gen_count;	/* generation count (pmap lock dropped) */
+	u_int			pm_retries;
 	/* spare u_int here due to padding */
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
 	vm_page_t		pm_root;	/* spare page table pages */

Modified: user/kmacy/head_page_lock_incr/sys/amd64/include/vmparam.h
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/amd64/include/vmparam.h	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/amd64/include/vmparam.h	Tue Apr 27 05:39:13 2010	(r207264)
@@ -145,6 +145,10 @@
 #define	VM_LEVEL_0_ORDER	9
 #endif
 
+#ifdef	SMP
+#define	PA_LOCK_COUNT	256
+#endif
+
 /*
  * Virtual addresses of things.  Derived from the page directory and
  * page table indexes from pmap.h for precision.
Modified: user/kmacy/head_page_lock_incr/sys/arm/arm/pmap.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/arm/arm/pmap.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/arm/arm/pmap.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -3740,13 +3740,14 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 	struct l2_dtable *l2;
 	pd_entry_t l1pd;
 	pt_entry_t *ptep, pte;
-	vm_paddr_t pa;
+	vm_paddr_t pa, paddr;
 	vm_page_t m = NULL;
 	u_int l1idx;
 	l1idx = L1_IDX(va);
+	paddr = 0;
 
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
+retry:
 	l1pd = pmap->pm_l1->l1_kva[l1idx];
 	if (l1pte_section_p(l1pd)) {
 		/*
@@ -3758,6 +3759,8 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
 		else
 			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
+		if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
+			goto retry;
 		if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
 			m = PHYS_TO_VM_PAGE(pa);
 			vm_page_hold(m);
@@ -3774,7 +3777,6 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 
 		if (l2 == NULL ||
 		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
 			PMAP_UNLOCK(pmap);
-			vm_page_unlock_queues();
 			return (NULL);
 		}
@@ -3783,7 +3785,6 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 		if (pte == 0) {
 			PMAP_UNLOCK(pmap);
-			vm_page_unlock_queues();
 			return (NULL);
 		}
 		if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
@@ -3796,13 +3797,15 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 				pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
 				break;
 			}
+			if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
+				goto retry;
 			m = PHYS_TO_VM_PAGE(pa);
 			vm_page_hold(m);
 		}
 	}
 	PMAP_UNLOCK(pmap);
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(paddr);
 	return (m);
 }

Modified: user/kmacy/head_page_lock_incr/sys/arm/include/pmap.h
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/arm/include/pmap.h	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/arm/include/pmap.h	Tue Apr 27 05:39:13 2010	(r207264)
@@ -134,6 +134,8 @@ struct pmap {
 	struct l1_ttable	*pm_l1;
 	struct l2_dtable	*pm_l2[L2_SIZE];
 	pd_entry_t		*pm_pdir;	/* KVA of page directory */
+	uint32_t		pm_gen_count;	/* generation count (pmap lock dropped) */
+	u_int			pm_retries;
 	int			pm_active;	/* active on cpus */
 	struct pmap_statistics	pm_stats;	/* pmap statictics */
 	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */

Modified: user/kmacy/head_page_lock_incr/sys/dev/drm/via_dmablit.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/dev/drm/via_dmablit.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/dev/drm/via_dmablit.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -248,10 +248,12 @@ via_lock_all_dma_pages(drm_via_sg_info_t
 		    (vm_offset_t)xfer->mem_addr + IDX_TO_OFF(i), VM_PROT_RW);
 		if (m == NULL)
 			break;
+		vm_page_lock(m);
 		vm_page_lock_queues();
 		vm_page_wire(m);
 		vm_page_unhold(m);
 		vm_page_unlock_queues();
+		vm_page_unlock(m);
 		vsg->pages[i] = m;
 	}
 	vsg->state = dr_via_pages_locked;

Modified: user/kmacy/head_page_lock_incr/sys/i386/i386/pmap.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/i386/i386/pmap.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/i386/i386/pmap.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -1346,14 +1346,19 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 	pd_entry_t pde;
 	pt_entry_t pte;
 	vm_page_t m;
+	vm_paddr_t pa;
 
+	pa = 0;
 	m = NULL;
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
+retry:
 	pde = *pmap_pde(pmap, va);
 	if (pde != 0) {
 		if (pde & PG_PS) {
 			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
+				if (vm_page_pa_tryrelock(pmap, (pde & PG_PS_FRAME) |
+				    (va & PDRMASK), &pa))
+					goto retry;
 				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 				    (va & PDRMASK));
 				vm_page_hold(m);
@@ -1363,13 +1368,15 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 			pte = *pmap_pte_quick(pmap, va);
 			if (pte != 0 &&
 			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
+				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, &pa))
+					goto retry;
 				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 				vm_page_hold(m);
 			}
 			sched_unpin();
 		}
 	}
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: user/kmacy/head_page_lock_incr/sys/i386/include/pmap.h
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/i386/include/pmap.h	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/i386/include/pmap.h	Tue Apr 27 05:39:13 2010	(r207264)
@@ -420,11 +420,14 @@ struct pmap {
 	u_int			pm_active;	/* active on cpus */
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
 	LIST_ENTRY(pmap) 	pm_list;	/* List of all pmaps */
+	uint32_t		pm_gen_count;	/* generation count (pmap lock dropped) */
+	u_int			pm_retries;
 #ifdef PAE
 	pdpt_entry_t		*pm_pdpt;	/* KVA of page director pointer table */
 #endif
 
 	vm_page_t		pm_root;	/* spare page table pages */
+
 };
 
 typedef struct pmap	*pmap_t;

Modified: user/kmacy/head_page_lock_incr/sys/kern/kern_exec.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/kern/kern_exec.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/kern/kern_exec.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -957,9 +957,9 @@ exec_map_first_page(imgp)
 			return (EIO);
 		}
 	}
-	vm_page_lock_queues();
+	vm_page_lock(ma[0]);
 	vm_page_hold(ma[0]);
-	vm_page_unlock_queues();
+	vm_page_unlock(ma[0]);
 	vm_page_wakeup(ma[0]);
 	VM_OBJECT_UNLOCK(object);
 
@@ -979,9 +979,9 @@ exec_unmap_first_page(imgp)
 		m = sf_buf_page(imgp->firstpage);
 		sf_buf_free(imgp->firstpage);
 		imgp->firstpage = NULL;
-		vm_page_lock_queues();
+		vm_page_lock(m);
 		vm_page_unhold(m);
-		vm_page_unlock_queues();
+		vm_page_unlock(m);
 	}
 }
 

Modified: user/kmacy/head_page_lock_incr/sys/kern/subr_witness.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/kern/subr_witness.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/kern/subr_witness.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -597,6 +597,15 @@ static struct witness_order_list_entry o
 	{ "cdev", &lock_class_mtx_sleep },
 	{ NULL, NULL },
 	/*
+	 * VM
+	 *
+	 */
+	{ "vm object", &lock_class_mtx_sleep },
+	{ "page lock", &lock_class_mtx_sleep },
+	{ "vm page queue mutex", &lock_class_mtx_sleep },
+	{ "pmap", &lock_class_mtx_sleep },
+	{ NULL, NULL },
+	/*
 	 * kqueue/VFS interaction
 	 */
 	{ "kqueue", &lock_class_mtx_sleep },

Modified: user/kmacy/head_page_lock_incr/sys/kern/sys_pipe.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/kern/sys_pipe.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/kern/sys_pipe.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -773,10 +773,12 @@ pipe_build_write_buffer(wpipe, uio)
 	 */
 race:
 	if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
-		vm_page_lock_queues();
-		for (j = 0; j < i; j++)
+
+		for (j = 0; j < i; j++) {
+			vm_page_lock(wpipe->pipe_map.ms[j]);
 			vm_page_unhold(wpipe->pipe_map.ms[j]);
-		vm_page_unlock_queues();
+			vm_page_unlock(wpipe->pipe_map.ms[j]);
+		}
 		return (EFAULT);
 	}
 	wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
@@ -816,11 +818,11 @@ pipe_destroy_write_buffer(wpipe)
 	int i;
 
 	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
-	vm_page_lock_queues();
 	for (i = 0; i < wpipe->pipe_map.npages; i++) {
+		vm_page_lock(wpipe->pipe_map.ms[i]);
 		vm_page_unhold(wpipe->pipe_map.ms[i]);
+		vm_page_unlock(wpipe->pipe_map.ms[i]);
 	}
-	vm_page_unlock_queues();
 	wpipe->pipe_map.npages = 0;
 }
 

Modified: user/kmacy/head_page_lock_incr/sys/kern/sys_process.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/kern/sys_process.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/kern/sys_process.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -349,9 +349,9 @@ proc_rwmem(struct proc *p, struct uio *u
 		/*
 		 * Release the page.
 		 */
-		vm_page_lock_queues();
+		vm_page_lock(m);
 		vm_page_unhold(m);
-		vm_page_unlock_queues();
+		vm_page_unlock(m);
 
 	} while (error == 0 && uio->uio_resid > 0);
 

Modified: user/kmacy/head_page_lock_incr/sys/kern/uipc_cow.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/kern/uipc_cow.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/kern/uipc_cow.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -128,10 +128,12 @@ socow_setup(struct mbuf *m0, struct uio
 	/*
 	 * set up COW
 	 */
+	vm_page_lock(pp);
 	vm_page_lock_queues();
 	if (vm_page_cowsetup(pp) != 0) {
 		vm_page_unhold(pp);
 		vm_page_unlock_queues();
+		vm_page_unlock(pp);
 		return (0);
 	}
 
@@ -141,7 +143,7 @@ socow_setup(struct mbuf *m0, struct uio
 	vm_page_wire(pp);
 	vm_page_unhold(pp);
 	vm_page_unlock_queues();
-
+	vm_page_unlock(pp);
 	/*
 	 * Allocate an sf buf
 	 */

Modified: user/kmacy/head_page_lock_incr/sys/kern/vfs_bio.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/kern/vfs_bio.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/kern/vfs_bio.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -3860,12 +3860,12 @@ vmapbuf(struct buf *bp)
 retry:
 		if (vm_fault_quick(addr >= bp->b_data ?
			addr : bp->b_data, prot) < 0) {
-			vm_page_lock_queues();
 			for (i = 0; i < pidx; ++i) {
+				vm_page_lock(bp->b_pages[i]);
 				vm_page_unhold(bp->b_pages[i]);
+				vm_page_unlock(bp->b_pages[i]);
 				bp->b_pages[i] = NULL;
 			}
-			vm_page_unlock_queues();
 			return(-1);
 		}
 		m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot);
@@ -3896,11 +3896,12 @@ vunmapbuf(struct buf *bp)
 
 	npages = bp->b_npages;
 	pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
		     npages);
-	vm_page_lock_queues();
-	for (pidx = 0; pidx < npages; pidx++)
+	for (pidx = 0; pidx < npages; pidx++) {
+		vm_page_lock(bp->b_pages[pidx]);
 		vm_page_unhold(bp->b_pages[pidx]);
-	vm_page_unlock_queues();
-
+		vm_page_unlock(bp->b_pages[pidx]);
+	}
+
 	bp->b_data = bp->b_saveaddr;
 }
 

Modified: user/kmacy/head_page_lock_incr/sys/mips/include/pmap.h
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/mips/include/pmap.h	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/mips/include/pmap.h	Tue Apr 27 05:39:13 2010	(r207264)
@@ -88,6 +88,8 @@ struct pmap {
 	pd_entry_t *pm_segtab;	/* KVA of segment table */
 	TAILQ_HEAD(, pv_entry) pm_pvlist;	/* list of mappings in
 						 * pmap */
+	uint32_t pm_gen_count;	/* generation count (pmap lock dropped) */
+	u_int pm_retries;
 	int pm_active;		/* active on cpus */
 	struct {
 		u_int32_t asid:ASID_BITS;	/* TLB address space tag */

Modified: user/kmacy/head_page_lock_incr/sys/mips/mips/pmap.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/mips/mips/pmap.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/mips/mips/pmap.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -147,7 +147,6 @@ unsigned pmap_max_asid;	/* max ASID sup
 
 #define	PMAP_ASID_RESERVED	0
 
-
 vm_offset_t kernel_vm_end;
 
 static struct tlb tlbstash[MAXCPU][MIPS_MAX_TLB_ENTRIES];
@@ -710,18 +709,22 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 {
 	pt_entry_t pte;
 	vm_page_t m;
+	vm_paddr_t pa;
 
 	m = NULL;
-	vm_page_lock_queues();
+	pa = 0;
 	PMAP_LOCK(pmap);
-
+retry:
 	pte = *pmap_pte(pmap, va);
 	if (pte != 0 && pmap_pte_v(&pte) &&
 	    ((pte & PTE_RW) || (prot & VM_PROT_WRITE) == 0)) {
+		if (vm_page_pa_tryrelock(pmap, mips_tlbpfn_to_paddr(pte), &pa))
+			goto retry;
+
 		m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pte));
 		vm_page_hold(m);
 	}
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: user/kmacy/head_page_lock_incr/sys/net/bpf_zerocopy.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/net/bpf_zerocopy.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/net/bpf_zerocopy.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -168,10 +168,12 @@ zbuf_sfbuf_get(struct vm_map *map, vm_of
 	    VM_PROT_WRITE);
 	if (pp == NULL)
 		return (NULL);
+	vm_page_lock(pp);
 	vm_page_lock_queues();
 	vm_page_wire(pp);
 	vm_page_unhold(pp);
 	vm_page_unlock_queues();
+	vm_page_unlock(pp);
 	sf = sf_buf_alloc(pp, SFB_NOWAIT);
 	if (sf == NULL) {
 		zbuf_page_free(pp);

Modified: user/kmacy/head_page_lock_incr/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/powerpc/aim/mmu_oea.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/powerpc/aim/mmu_oea.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -1241,18 +1241,22 @@ moea_extract_and_hold(mmu_t mmu, pmap_t
 {
 	struct pvo_entry *pvo;
 	vm_page_t m;
-
+	vm_paddr_t pa;
+
 	m = NULL;
-	vm_page_lock_queues();
+	pa = 0;
 	PMAP_LOCK(pmap);
+retry:
 	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
 	if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
 	    ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
 	     (prot & VM_PROT_WRITE) == 0)) {
+		if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa))
+			goto retry;
 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
 		vm_page_hold(m);
 	}
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: user/kmacy/head_page_lock_incr/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/powerpc/aim/mmu_oea64.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/powerpc/aim/mmu_oea64.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -1374,18 +1374,22 @@ moea64_extract_and_hold(mmu_t mmu, pmap_
 {
 	struct pvo_entry *pvo;
 	vm_page_t m;
+	vm_paddr_t pa;
 
 	m = NULL;
-	vm_page_lock_queues();
+	pa = 0;
 	PMAP_LOCK(pmap);
 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
 	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
 	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
 	     (prot & VM_PROT_WRITE) == 0)) {
+		if (vm_page_pa_tryrelock(pmap,
+		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
+			goto retry;
 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
 		vm_page_hold(m);
 	}
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: user/kmacy/head_page_lock_incr/sys/powerpc/booke/pmap.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/powerpc/booke/pmap.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/powerpc/booke/pmap.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -2034,11 +2034,12 @@ mmu_booke_extract_and_hold(mmu_t mmu, pm
 	pte_t *pte;
 	vm_page_t m;
 	uint32_t pte_wbit;
-
+	vm_paddr_t pa;
+
 	m = NULL;
-	vm_page_lock_queues();
+	pa = 0;
 	PMAP_LOCK(pmap);
-
+retry:
 	pte = pte_find(mmu, pmap, va);
 	if ((pte != NULL) && PTE_ISVALID(pte)) {
 		if (pmap == kernel_pmap)
@@ -2047,12 +2048,14 @@ mmu_booke_extract_and_hold(mmu_t mmu, pm
 			pte_wbit = PTE_UW;
 
 		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
+			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
+				goto retry;
 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
 			vm_page_hold(m);
 		}
 	}
 
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: user/kmacy/head_page_lock_incr/sys/powerpc/include/pmap.h
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/powerpc/include/pmap.h	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/powerpc/include/pmap.h	Tue Apr 27 05:39:13 2010	(r207264)
@@ -88,6 +88,8 @@ struct pmap {
 	struct	mtx	pm_mtx;
 	u_int		pm_sr[16];
 	u_int		pm_active;
+	uint32_t	pm_gen_count;	/* generation count (pmap lock dropped) */
+	u_int		pm_retries;
 	u_int		pm_context;
 
 	struct pmap	*pmap_phys;

Modified: user/kmacy/head_page_lock_incr/sys/sparc64/include/pmap.h
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/sparc64/include/pmap.h	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/sparc64/include/pmap.h	Tue Apr 27 05:39:13 2010	(r207264)
@@ -62,6 +62,8 @@ struct pmap {
 	struct	tte *pm_tsb;
 	vm_object_t	pm_tsb_obj;
 	u_int	pm_active;
+	uint32_t	pm_gen_count;	/* generation count (pmap lock dropped) */
+	u_int	pm_retries;
 	u_int	pm_context[MAXCPU];
 	struct	pmap_statistics pm_stats;
 };

Modified: user/kmacy/head_page_lock_incr/sys/sparc64/sparc64/pmap.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/sparc64/sparc64/pmap.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/sparc64/sparc64/pmap.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -694,13 +694,17 @@ pmap_extract_and_hold(pmap_t pm, vm_offs
 {
 	struct tte *tp;
 	vm_page_t m;
+	vm_paddr_t pa;
 
 	m = NULL;
-	vm_page_lock_queues();
+	pa = 0;
+	PMAP_LOCK(pm);
+retry:
 	if (pm == kernel_pmap) {
 		if (va >= VM_MIN_DIRECT_ADDRESS) {
 			tp = NULL;
 			m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS(va));
+			(void)vm_page_pa_tryrelock(pmap, TLB_DIRECT_TO_PHYS(va), &pa);
 			vm_page_hold(m);
 		} else {
 			tp = tsb_kvtotte(va);
@@ -708,17 +712,17 @@ pmap_extract_and_hold(pmap_t pm, vm_offs
 				tp = NULL;
 		}
 	} else {
-		PMAP_LOCK(pm);
 		tp = tsb_tte_lookup(pm, va);
 	}
 	if (tp != NULL && ((tp->tte_data & TD_SW) ||
 	    (prot & VM_PROT_WRITE) == 0)) {
+		if (vm_page_pa_tryrelock(pmap, TTE_GET_PA(tp), &pa))
+			goto retry;
 		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
 		vm_page_hold(m);
 	}
-	vm_page_unlock_queues();
-	if (pm != kernel_pmap)
-		PMAP_UNLOCK(pm);
+	PA_UNLOCK_COND(pa);
+	PMAP_UNLOCK(pm);
 	return (m);
 }

Modified: user/kmacy/head_page_lock_incr/sys/sun4v/include/pmap.h
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/sun4v/include/pmap.h	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/sun4v/include/pmap.h	Tue Apr 27 05:39:13 2010	(r207264)
@@ -75,6 +75,8 @@ struct pmap {
 	struct tte_hash	*pm_hash;
 	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
 	struct hv_tsb_info	pm_tsb;
+	uint32_t	pm_gen_count;	/* generation count (pmap lock dropped) */
+	u_int		pm_retries;
 	pmap_cpumask_t	pm_active;	/* mask of cpus currently using pmap */
 	pmap_cpumask_t	pm_tlbactive;	/* mask of cpus that have used this pmap */
 	struct pmap_statistics	pm_stats;

Modified: user/kmacy/head_page_lock_incr/sys/sun4v/sun4v/pmap.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/sun4v/sun4v/pmap.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/sun4v/sun4v/pmap.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -1275,17 +1275,21 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 {
 	tte_t tte_data;
 	vm_page_t m;
+	vm_paddr_t pa;
 
 	m = NULL;
-	vm_page_lock_queues();
+	pa = 0;
 	PMAP_LOCK(pmap);
+retry:
 	tte_data = tte_hash_lookup(pmap->pm_hash, va);
 	if (tte_data != 0 &&
 	    ((tte_data & VTD_SW_W) || (prot & VM_PROT_WRITE) == 0)) {
+		if (vm_page_pa_tryrelock(pmap, TTE_GET_PA(tte_data), &pa))
+			goto retry;
 		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
 		vm_page_hold(m);
 	}
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 
 	return (m);

Modified: user/kmacy/head_page_lock_incr/sys/vm/device_pager.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/device_pager.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/vm/device_pager.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -251,12 +251,16 @@ dev_pager_getpages(object, m, count, req
 			VM_OBJECT_LOCK(object);
 			dev_pager_updatefake(page, paddr, memattr);
 			if (count > 1) {
-				vm_page_lock_queues();
+
 				for (i = 0; i < count; i++) {
-					if (i != reqpage)
+					if (i != reqpage) {
+						vm_page_lock(m[i]);
+						vm_page_lock_queues();
 						vm_page_free(m[i]);
+						vm_page_unlock_queues();
+						vm_page_unlock(m[i]);
+					}
 				}
-				vm_page_unlock_queues();
 			}
 		} else {
 			/*
@@ -266,10 +270,13 @@ dev_pager_getpages(object, m, count, req
 			page = dev_pager_getfake(paddr, memattr);
 			VM_OBJECT_LOCK(object);
 			TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq);
-			vm_page_lock_queues();
-			for (i = 0; i < count; i++)
+			for (i = 0; i < count; i++) {
+				vm_page_lock(m[i]);
+				vm_page_lock_queues();
 				vm_page_free(m[i]);
-			vm_page_unlock_queues();
+				vm_page_unlock_queues();
+				vm_page_unlock(m[i]);
+			}
 			vm_page_insert(page, object, offset);
 			m[reqpage] = page;
 		}

Modified: user/kmacy/head_page_lock_incr/sys/vm/sg_pager.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/sg_pager.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/vm/sg_pager.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -198,10 +198,13 @@ sg_pager_getpages(vm_object_t object, vm
 	TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq);
 
 	/* Free the original pages and insert this fake page into the object. */
-	vm_page_lock_queues();
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
+		vm_page_lock(m[i]);
+		vm_page_lock_queues();
 		vm_page_free(m[i]);
-	vm_page_unlock_queues();
+		vm_page_unlock_queues();
+		vm_page_unlock(m[i]);
+	}
 	vm_page_insert(page, object, offset);
 	m[reqpage] = page;
 	page->valid = VM_PAGE_BITS_ALL;

Modified: user/kmacy/head_page_lock_incr/sys/vm/swap_pager.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/swap_pager.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/vm/swap_pager.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -1130,10 +1130,16 @@ swap_pager_getpages(vm_object_t object,
 		int k;
 
 		vm_page_lock_queues();
-		for (k = 0; k < i; ++k)
+		for (k = 0; k < i; ++k) {
+			vm_page_lock(m[k]);
 			vm_page_free(m[k]);
-		for (k = j; k < count; ++k)
+			vm_page_unlock(m[k]);
+		}
+		for (k = j; k < count; ++k) {
+			vm_page_lock(m[k]);
 			vm_page_free(m[k]);
+			vm_page_unlock(m[k]);
+		}
 		vm_page_unlock_queues();
 	}
 
@@ -1489,7 +1495,7 @@ swp_pager_async_iodone(struct buf *bp)
 		object = bp->b_pages[0]->object;
 		VM_OBJECT_LOCK(object);
 	}
-	vm_page_lock_queues();
+
 	/*
 	 * cleanup pages.  If an error occurs writing to swap, we are in
 	 *  very serious trouble.  If it happens to be a disk error, though,
@@ -1501,6 +1507,8 @@ swp_pager_async_iodone(struct buf *bp)
 	for (i = 0; i < bp->b_npages; ++i) {
 		vm_page_t m = bp->b_pages[i];
 
+		vm_page_lock(m);
+		vm_page_lock_queues();
 		m->oflags &= ~VPO_SWAPINPROG;
 
 		if (bp->b_ioflags & BIO_ERROR) {
@@ -1597,8 +1605,9 @@ swp_pager_async_iodone(struct buf *bp)
 			if (vm_page_count_severe())
 				vm_page_try_to_cache(m);
 		}
+		vm_page_unlock_queues();
+		vm_page_unlock(m);
 	}
-	vm_page_unlock_queues();
 
 	/*
 	 * adjust pip.  NOTE: the original parent may still have its own
@@ -1694,10 +1703,12 @@ swp_pager_force_pagein(vm_object_t objec
 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
 	if (m->valid == VM_PAGE_BITS_ALL) {
 		vm_object_pip_subtract(object, 1);
+		vm_page_lock(m);
 		vm_page_lock_queues();
 		vm_page_activate(m);
 		vm_page_dirty(m);
 		vm_page_unlock_queues();
+		vm_page_unlock(m);
 		vm_page_wakeup(m);
 		vm_pager_page_unswapped(m);
 		return;
@@ -1706,10 +1717,12 @@ swp_pager_force_pagein(vm_object_t objec
 	if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK)
 		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
 	vm_object_pip_subtract(object, 1);
+	vm_page_lock(m);
 	vm_page_lock_queues();
 	vm_page_dirty(m);
 	vm_page_dontneed(m);
 	vm_page_unlock_queues();
+	vm_page_unlock(m);
 	vm_page_wakeup(m);
 	vm_pager_page_unswapped(m);
 }

Modified: user/kmacy/head_page_lock_incr/sys/vm/uma_core.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/uma_core.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/vm/uma_core.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -1022,10 +1022,12 @@ obj_alloc(uma_zone_t zone, int bytes, u_
 		while (pages != startpages) {
 			pages--;
 			p = TAILQ_LAST(&object->memq, pglist);
+			vm_page_lock(p);
 			vm_page_lock_queues();
 			vm_page_unwire(p, 0);
 			vm_page_free(p);
 			vm_page_unlock_queues();
+			vm_page_unlock(p);
 		}
 		retkva = 0;
 		goto done;

Modified: user/kmacy/head_page_lock_incr/sys/vm/vm_contig.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/vm_contig.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/vm/vm_contig.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -257,9 +257,11 @@ retry:
 			i -= PAGE_SIZE;
 			m = vm_page_lookup(object,
 			    OFF_TO_IDX(offset + i));
+			vm_page_lock(m);
 			vm_page_lock_queues();
 			vm_page_free(m);
 			vm_page_unlock_queues();
+			vm_page_unlock(m);
 		}
 		VM_OBJECT_UNLOCK(object);
 		vm_map_delete(map, addr, addr + size);

Modified: user/kmacy/head_page_lock_incr/sys/vm/vm_fault.c
==============================================================================
--- user/kmacy/head_page_lock_incr/sys/vm/vm_fault.c	Tue Apr 27 05:38:26 2010	(r207263)
+++ user/kmacy/head_page_lock_incr/sys/vm/vm_fault.c	Tue Apr 27 05:39:13 2010	(r207264)
@@ -137,9 +137,11 @@ release_page(struct faultstate *fs)
 {
 
 	vm_page_wakeup(fs->m);
+	vm_page_lock(fs->m);
 	vm_page_lock_queues();
 	vm_page_deactivate(fs->m);
 	vm_page_unlock_queues();
+	vm_page_unlock(fs->m);
 	fs->m = NULL;
 }
 
@@ -161,9 +163,11 @@ unlock_and_deallocate(struct faultstate
 	VM_OBJECT_UNLOCK(fs->object);
 	if (fs->object != fs->first_object) {
 		VM_OBJECT_LOCK(fs->first_object);
+		vm_page_lock(fs->first_m);
 		vm_page_lock_queues();
 		vm_page_free(fs->first_m);
 		vm_page_unlock_queues();
+		vm_page_unlock(fs->first_m);
 		vm_object_pip_wakeup(fs->first_object);
 		VM_OBJECT_UNLOCK(fs->first_object);
 		fs->first_m = NULL;
@@ -305,12 +309,14 @@ RetryFault:;
 		 * removes the page from the backing object,
 		 * which is not what we want.
 		 */
+		vm_page_lock(fs.m);
 		vm_page_lock_queues();
 		if ((fs.m->cow) &&
 		    (fault_type & VM_PROT_WRITE) &&
 		    (fs.object == fs.first_object)) {
 			vm_page_cowfault(fs.m);
 			vm_page_unlock_queues();
+			vm_page_unlock(fs.m);
 			unlock_and_deallocate(&fs);
 			goto RetryFault;
 		}
@@ -333,12 +339,15 @@ RetryFault:;
 		 */
 		if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) {
 			vm_page_unlock_queues();
+			vm_page_unlock(fs.m);
 			VM_OBJECT_UNLOCK(fs.object);
 			if (fs.object != fs.first_object) {
 				VM_OBJECT_LOCK(fs.first_object);
+				vm_page_lock(fs.first_m);
 				vm_page_lock_queues();
 				vm_page_free(fs.first_m);
 				vm_page_unlock_queues();
+				vm_page_unlock(fs.first_m);
 				vm_object_pip_wakeup(fs.first_object);
 				VM_OBJECT_UNLOCK(fs.first_object);
 				fs.first_m = NULL;
@@ -358,6 +367,7 @@ RetryFault:;
 		}
 		vm_pageq_remove(fs.m);
 		vm_page_unlock_queues();
+		vm_page_unlock(fs.m);
 
 		/*
 		 * Mark page busy for other processes, and the
@@ -481,17 +491,25 @@ readrest:
 					continue;
 				if (!are_queues_locked) {
 					are_queues_locked = TRUE;
+					vm_page_lock(mt);
+					vm_page_lock_queues();
+				} else {
+					vm_page_unlock_queues();
+					vm_page_lock(mt);
 					vm_page_lock_queues();
 				}
 				if (mt->hold_count ||
-				    mt->wire_count)
+				    mt->wire_count) {
+					vm_page_unlock(mt);
 					continue;
+				}
 				pmap_remove_all(mt);
 				if (mt->dirty) {
 					vm_page_deactivate(mt);
 				} else {
 					vm_page_cache(mt);
 				}
+				vm_page_unlock(mt);
 			}
 			if (are_queues_locked)
 				vm_page_unlock_queues();
@@ -623,17 +641,21 @@ vnode_locked:
 			 */
 			if (((fs.map != kernel_map) &&
 			    (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
+				vm_page_lock(fs.m);
 				vm_page_lock_queues();
 				vm_page_free(fs.m);
 				vm_page_unlock_queues();
+				vm_page_unlock(fs.m);
 				fs.m = NULL;
 				unlock_and_deallocate(&fs);
 				return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
 			}
 			if (fs.object != fs.first_object) {
+				vm_page_lock(fs.m);
 				vm_page_lock_queues();
 				vm_page_free(fs.m);
 				vm_page_unlock_queues();
+				vm_page_unlock(fs.m);
 				fs.m = NULL;
 				/*
 				 * XXX - we cannot just fall out at this
@@ -746,18 +768,24 @@ vnode_locked:
 			 * We don't chase down the shadow chain
 			 */
 		    fs.object == fs.first_object->backing_object) {
+			vm_page_lock(fs.first_m);
 			vm_page_lock_queues();
 			/*
 			 * get rid of the unnecessary page
 			 */
 			vm_page_free(fs.first_m);
+			vm_page_unlock_queues();
+			vm_page_unlock(fs.first_m);
 			/*
 			 * grab the page and put it into the
 			 * process'es object.  The page is
 			 * automatically made dirty.
 			 */
+			vm_page_lock(fs.m);
+			vm_page_lock_queues();
 			vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
 			vm_page_unlock_queues();
+			vm_page_unlock(fs.m);
 			vm_page_busy(fs.m);
 			fs.first_m = fs.m;
 			fs.m = NULL;
@@ -770,10 +798,17 @@ vnode_locked:
 			fs.first_m->valid = VM_PAGE_BITS_ALL;
 			if (wired && (fault_flags &
 			    VM_FAULT_CHANGE_WIRING) == 0) {
+				vm_page_lock(fs.first_m);
 				vm_page_lock_queues();
 				vm_page_wire(fs.first_m);
+				vm_page_unlock_queues();
+				vm_page_unlock(fs.first_m);
+
+				vm_page_lock(fs.m);
+				vm_page_lock_queues();
 				vm_page_unwire(fs.m, FALSE);
 				vm_page_unlock_queues();
+				vm_page_unlock(fs.m);
 			}
 			/*
 			 * We no longer need the old page or object.
@@ -923,6 +958,7 @@ vnode_locked:
 	if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
 		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
 	VM_OBJECT_LOCK(fs.object);
+	vm_page_lock(fs.m);
 	vm_page_lock_queues();
 	vm_page_flag_set(fs.m, PG_REFERENCED);
 
@@ -939,6 +975,7 @@ vnode_locked:
 		vm_page_activate(fs.m);
 	}
 	vm_page_unlock_queues();
+	vm_page_unlock(fs.m);
 	vm_page_wakeup(fs.m);
 
 	/*
@@ -1015,9 +1052,11 @@ vm_fault_prefault(pmap_t pmap, vm_offset
 		}
 		if (m->valid == VM_PAGE_BITS_ALL &&
 		    (m->flags & PG_FICTITIOUS) == 0) {
+			vm_page_lock(m);
 			vm_page_lock_queues();
 			pmap_enter_quick(pmap, addr, m, entry->protection);
 			vm_page_unlock_queues();
+			vm_page_unlock(m);
 		}
 		VM_OBJECT_UNLOCK(lobject);
 	}
@@ -1093,9 +1132,11 @@ vm_fault_unwire(vm_map_t map, vm_offset_
 		if (pa != 0) {
 			pmap_change_wiring(pmap, va, FALSE);
 			if (!fictitious) {
+				vm_page_lock(PHYS_TO_VM_PAGE(pa));
 				vm_page_lock_queues();
 				vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
 				vm_page_unlock_queues();
+				vm_page_unlock(PHYS_TO_VM_PAGE(pa));
 			}
 		}
 	}
@@ -1238,13 +1279,26 @@ vm_fault_copy_entry(vm_map_t dst_map, vm
 	 * Mark it no longer busy, and put it on the active list.
 	 */
 	VM_OBJECT_LOCK(dst_object);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
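
A note on the recurring change in every pmap_extract_and_hold() above: the
function now holds the pmap lock, then calls vm_page_pa_tryrelock() on the
physical address it just looked up, and jumps back to the retry: label when
that call returns nonzero, finishing with PA_UNLOCK_COND() instead of
vm_page_unlock_queues().  The subr_witness.c hunk records why: the new lock
order places "page lock" before "pmap", so code that already holds a pmap
lock cannot simply block on a page lock without a lock-order reversal.  The
sketch below is an illustrative userland model of that protocol, not FreeBSD
kernel code; the fake_pmap/fake_pa_tryrelock names are hypothetical and
pthread mutexes stand in for the pmap lock and the per-physical-address page
locks that PA_LOCK_COUNT sizes in the diff.

	#include <pthread.h>
	#include <stdio.h>

	struct fake_pmap {
		pthread_mutex_t lock;       /* stands in for PMAP_LOCK() */
		unsigned        gen_count;  /* stands in for pm_gen_count */
		unsigned        retries;    /* stands in for pm_retries */
	};

	/* One page-lock bucket; the real code hashes the physical address. */
	static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct fake_pmap pm = { .lock = PTHREAD_MUTEX_INITIALIZER };

	/*
	 * Try to take the page lock while the pmap lock is held.  If that
	 * would block, back out: drop the pmap lock, take the page lock,
	 * re-take the pmap lock in the sanctioned order, bump the
	 * generation count, and tell the caller its lookup may be stale.
	 */
	static int
	fake_pa_tryrelock(struct fake_pmap *p)
	{
		if (pthread_mutex_trylock(&page_lock) == 0)
			return (0);             /* got it without dropping */
		p->retries++;
		pthread_mutex_unlock(&p->lock);
		pthread_mutex_lock(&page_lock);
		pthread_mutex_lock(&p->lock);
		p->gen_count++;                 /* earlier lookup is stale */
		return (1);
	}

	int
	main(void)
	{
		pthread_mutex_lock(&pm.lock);
	retry:
		/* ... translate va -> pa while pm.lock is held ... */
		if (fake_pa_tryrelock(&pm))
			goto retry;             /* pmap lock was dropped; redo lookup */
		/* ... hold the page: both locks are held at this point ... */
		pthread_mutex_unlock(&page_lock);   /* PA_UNLOCK_COND() analogue */
		pthread_mutex_unlock(&pm.lock);
		printf("retries=%u gen_count=%u\n", pm.retries, pm.gen_count);
		return (0);
	}

In the single-threaded demo the trylock always succeeds, so the retry path
never runs; under contention the counters show how often the pmap lock had
to be dropped, which is what pm_retries and pm_gen_count are for in the
struct pmap additions above.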