Date: Sat, 10 Aug 2013 17:36:42 +0000 (UTC)
From: Konstantin Belousov <kib@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r254182 - in head/sys: amd64/amd64 dev/drm2/ttm dev/virtio/balloon i386/i386 vm
Message-ID: <201308101736.r7AHagff000995@svn.freebsd.org>
Author: kib
Date: Sat Aug 10 17:36:42 2013
New Revision: 254182
URL: http://svnweb.freebsd.org/changeset/base/254182

Log:
  Different consumers of struct vm_page abuse the pageq member to keep
  additional information when the page is guaranteed not to belong to a
  paging queue.  Usually this results in a lot of type casts, which make
  reasoning about the correctness of the code harder.  Sometimes m->object
  is used instead of pageq, which could cause real and confusing bugs if a
  non-NULL m->object is leaked.  See r141955 and r253140 for examples.

  Change the pageq member into a union containing explicitly-typed
  members.  Use them instead of type-punning or abusing m->object in the
  x86 pmaps, uma and vm_page_alloc_contig().

  Requested and reviewed by:	alc
  Sponsored by:	The FreeBSD Foundation

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/dev/drm2/ttm/ttm_page_alloc.c
  head/sys/dev/virtio/balloon/virtio_balloon.c
  head/sys/i386/i386/pmap.c
  head/sys/vm/device_pager.c
  head/sys/vm/memguard.c
  head/sys/vm/sg_pager.c
  head/sys/vm/uma_core.c
  head/sys/vm/uma_int.h
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_pageout.c
  head/sys/vm/vm_phys.c
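[Editor's note: the vm_page.h hunk that actually defines the new union falls
past the truncation point of this archived mail.  The sketch below is
reconstructed from the accessors the diff uses (plinks.q, plinks.s.ss,
plinks.s.pv, plinks.memguard.p and plinks.memguard.v) and from the
struct spglist type it passes around; the comments and exact layout are an
editor's guess, not the committed text.]

  #include <sys/types.h>
  #include <sys/queue.h>

  struct vm_page;

  /* Reconstructed shape of the union that replaces the old pageq member. */
  union vm_page_links {
  	TAILQ_ENTRY(vm_page) q;		/* paging queue or free list */
  	struct {
  		SLIST_ENTRY(vm_page) ss;	/* private, short-lived lists */
  		void *pv;			/* private consumer pointer */
  	} s;
  	struct {
  		u_long p;			/* MemGuard stored size */
  		u_long v;			/* MemGuard stored size */
  	} memguard;
  };

  /* The diff also relies on an SLIST head type for lists of vm_page. */
  SLIST_HEAD(spglist, vm_page);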
Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Sat Aug 10 16:23:29 2013	(r254181)
+++ head/sys/amd64/amd64/pmap.c	Sat Aug 10 17:36:42 2013	(r254182)
@@ -295,13 +295,12 @@ static boolean_t pmap_protect_pde(pmap_t
     vm_prot_t prot);
 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
-    vm_page_t *free, struct rwlock **lockp);
-static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
-    vm_offset_t sva, pd_entry_t ptepde, vm_page_t *free,
-    struct rwlock **lockp);
+    struct spglist *free, struct rwlock **lockp);
+static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
+    pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
 static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
-    vm_page_t *free);
+    struct spglist *free);
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
     vm_page_t m, struct rwlock **lockp);
 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
@@ -316,8 +315,8 @@ static vm_page_t pmap_allocpte(pmap_t pm
     struct rwlock **lockp);
 
 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
-    vm_page_t *free);
-static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *);
+    struct spglist *free);
+static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
 
 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
 
 /*
@@ -1490,14 +1489,12 @@ pmap_qremove(vm_offset_t sva, int count)
  * Page table page management routines.....
  ***************************************************/
 static __inline void
-pmap_free_zero_pages(vm_page_t free)
+pmap_free_zero_pages(struct spglist *free)
 {
 	vm_page_t m;
 
-	while (free != NULL) {
-		m = free;
-		free = (void *)m->object;
-		m->object = NULL;
+	while ((m = SLIST_FIRST(free)) != NULL) {
+		SLIST_REMOVE_HEAD(free, plinks.s.ss);
 		/* Preserve the page's PG_ZERO setting. */
 		vm_page_free_toq(m);
 	}
@@ -1509,15 +1506,15 @@ pmap_free_zero_pages(vm_page_t free)
  * physical memory manager after the TLB has been updated.
  */
 static __inline void
-pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
+pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
+    boolean_t set_PG_ZERO)
 {
 
 	if (set_PG_ZERO)
 		m->flags |= PG_ZERO;
 	else
 		m->flags &= ~PG_ZERO;
-	m->object = (void *)*free;
-	*free = m;
+	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
 }
 
 /*
@@ -1567,7 +1564,7 @@ pmap_remove_pt_page(pmap_t pmap, vm_page
  * page table page was unmapped and FALSE otherwise.
 */
 static inline boolean_t
-pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
+pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
 {
 
 	--m->wire_count;
@@ -1579,7 +1576,7 @@ pmap_unwire_ptp(pmap_t pmap, vm_offset_t
 }
 
 static void
-_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
+_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
 {
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1637,7 +1634,8 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_
  * conditionally free the page, and manage the hold/wire counts.
 */
 static int
-pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free)
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
+    struct spglist *free)
 {
 	vm_page_t mpte;
 
@@ -2123,7 +2121,8 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
 	pt_entry_t *pte, tpte;
 	pv_entry_t pv;
 	vm_offset_t va;
-	vm_page_t free, m, m_pc;
+	vm_page_t m, m_pc;
+	struct spglist free;
 	uint64_t inuse;
 	int bit, field, freed;
 
@@ -2131,10 +2130,11 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
 	pmap = NULL;
-	free = m_pc = NULL;
+	m_pc = NULL;
+	SLIST_INIT(&free);
 	TAILQ_INIT(&new_tail);
 	mtx_lock(&pv_chunks_mutex);
-	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && free == NULL) {
+	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) {
 		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
 		mtx_unlock(&pv_chunks_mutex);
 		if (pmap != pc->pc_pmap) {
@@ -2193,7 +2193,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
 				}
 			}
 			pc->pc_map[field] |= 1UL << bit;
-			pmap_unuse_pt(pmap, va, *pde, &free);
+			pmap_unuse_pt(pmap, va, *pde, &free);
 			freed++;
 		}
 	}
@@ -2233,15 +2233,14 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
 		if (pmap != locked_pmap)
 			PMAP_UNLOCK(pmap);
 	}
-	if (m_pc == NULL && free != NULL) {
-		m_pc = free;
-		free = (void *)m_pc->object;
-		m_pc->object = NULL;
+	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
+		m_pc = SLIST_FIRST(&free);
+		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
 		/* Recycle a freed page table page. */
 		m_pc->wire_count = 1;
 		atomic_add_int(&cnt.v_wire_count, 1);
 	}
-	pmap_free_zero_pages(free);
+	pmap_free_zero_pages(&free);
 	return (m_pc);
 }
 
@@ -2690,7 +2689,8 @@ pmap_demote_pde_locked(pmap_t pmap, pd_e
 	pd_entry_t newpde, oldpde;
 	pt_entry_t *firstpte, newpte;
 	vm_paddr_t mptepa;
-	vm_page_t free, mpte;
+	vm_page_t mpte;
+	struct spglist free;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	oldpde = *pde;
@@ -2720,11 +2720,11 @@ pmap_demote_pde_locked(pmap_t pmap, pd_e
 	    pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
 	    DMAP_MAX_ADDRESS ?
 	    VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ |
 	    VM_ALLOC_WIRED)) == NULL) {
-		free = NULL;
+		SLIST_INIT(&free);
 		pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free,
 		    lockp);
 		pmap_invalidate_page(pmap, trunc_2mpage(va));
-		pmap_free_zero_pages(free);
+		pmap_free_zero_pages(&free);
 		CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
 		    " in pmap %p", va, pmap);
 		return (FALSE);
@@ -2845,7 +2845,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_e
 */
 static int
 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
-    vm_page_t *free, struct rwlock **lockp)
+    struct spglist *free, struct rwlock **lockp)
 {
 	struct md_page *pvh;
 	pd_entry_t oldpde;
@@ -2904,7 +2904,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t
 */
 static int
 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
-    pd_entry_t ptepde, vm_page_t *free, struct rwlock **lockp)
+    pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
 {
 	struct md_page *pvh;
 	pt_entry_t oldpte;
@@ -2937,7 +2937,8 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t
 * Remove a single page from a process address space
 */
 static void
-pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, vm_page_t *free)
+pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
+    struct spglist *free)
 {
 	struct rwlock *lock;
 	pt_entry_t *pte;
@@ -2970,7 +2971,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 	pdp_entry_t *pdpe;
 	pd_entry_t ptpaddr, *pde;
 	pt_entry_t *pte;
-	vm_page_t free = NULL;
+	struct spglist free;
 	int anyvalid;
 
 	/*
@@ -2980,6 +2981,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 		return;
 
 	anyvalid = 0;
+	SLIST_INIT(&free);
 
 	rw_rlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
@@ -3098,7 +3100,7 @@ out:
 		pmap_invalidate_all(pmap);
 	rw_runlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(free);
+	pmap_free_zero_pages(&free);
 }
 
 /*
@@ -3123,11 +3125,11 @@ pmap_remove_all(vm_page_t m)
 	pt_entry_t *pte, tpte;
 	pd_entry_t *pde;
 	vm_offset_t va;
-	vm_page_t free;
+	struct spglist free;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
-	free = NULL;
+	SLIST_INIT(&free);
 	rw_wlock(&pvh_global_lock);
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
@@ -3169,7 +3171,7 @@ small_mappings:
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	rw_wunlock(&pvh_global_lock);
-	pmap_free_zero_pages(free);
+	pmap_free_zero_pages(&free);
 }
 
 /*
@@ -3692,7 +3694,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t
     struct rwlock **lockp)
 {
 	pd_entry_t *pde, newpde;
-	vm_page_t free, mpde;
+	vm_page_t mpde;
+	struct spglist free;
 
 	rw_assert(&pvh_global_lock, RA_LOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3721,10 +3724,10 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t
 		 */
 		if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m),
 		    lockp)) {
-			free = NULL;
+			SLIST_INIT(&free);
 			if (pmap_unwire_ptp(pmap, va, mpde, &free)) {
 				pmap_invalidate_page(pmap, va);
-				pmap_free_zero_pages(free);
+				pmap_free_zero_pages(&free);
 			}
 			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 			    " in pmap %p", va, pmap);
@@ -3827,7 +3830,7 @@ static vm_page_t
 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
 {
-	vm_page_t free;
+	struct spglist free;
 	pt_entry_t *pte;
 	vm_paddr_t pa;
 
@@ -3898,10 +3901,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
 		if (mpte != NULL) {
-			free = NULL;
+			SLIST_INIT(&free);
 			if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
 				pmap_invalidate_page(pmap, va);
-				pmap_free_zero_pages(free);
+				pmap_free_zero_pages(&free);
 			}
 			mpte = NULL;
 		}
@@ -4096,7 +4099,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
     vm_offset_t src_addr)
 {
 	struct rwlock *lock;
-	vm_page_t free;
+	struct spglist free;
 	vm_offset_t addr;
 	vm_offset_t end_addr = src_addr + len;
 	vm_offset_t va_next;
@@ -4204,13 +4207,13 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 					*dst_pte = ptetemp & ~(PG_W | PG_M |
 					    PG_A);
 					pmap_resident_count_inc(dst_pmap, 1);
-				} else {
-					free = NULL;
+				} else {
+					SLIST_INIT(&free);
 					if (pmap_unwire_ptp(dst_pmap, addr,
 					    dstmpte, &free)) {
-						pmap_invalidate_page(dst_pmap,
-						    addr);
-						pmap_free_zero_pages(free);
+						pmap_invalidate_page(dst_pmap,
+						    addr);
+						pmap_free_zero_pages(&free);
 					}
 					goto out;
 				}
@@ -4227,10 +4230,10 @@ out:
 	rw_runlock(&pvh_global_lock);
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
-}
+}
 
 /*
- *	pmap_zero_page zeros the specified hardware page by mapping
+ *	pmap_zero_page zeros the specified hardware page by mapping
 *	the page into KVM and using bzero to clear its contents.
 */
 void
@@ -4445,7 +4448,7 @@ pmap_remove_pages(pmap_t pmap)
 {
 	pd_entry_t ptepde;
 	pt_entry_t *pte, tpte;
-	vm_page_t free = NULL;
+	struct spglist free;
 	vm_page_t m, mpte, mt;
 	pv_entry_t pv;
 	struct md_page *pvh;
@@ -4462,6 +4465,7 @@ pmap_remove_pages(pmap_t pmap)
 		return;
 	}
 	lock = NULL;
+	SLIST_INIT(&free);
 	rw_rlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
@@ -4597,7 +4601,7 @@ pmap_remove_pages(pmap_t pmap)
 		pmap_invalidate_all(pmap);
 	rw_runlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(free);
+	pmap_free_zero_pages(&free);
 }
 
 /*

Modified: head/sys/dev/drm2/ttm/ttm_page_alloc.c
==============================================================================
--- head/sys/dev/drm2/ttm/ttm_page_alloc.c	Sat Aug 10 16:23:29 2013	(r254181)
+++ head/sys/dev/drm2/ttm/ttm_page_alloc.c	Sat Aug 10 17:36:42 2013	(r254182)
@@ -330,7 +330,7 @@ static int ttm_page_pool_free(struct ttm
 restart:
 	mtx_lock(&pool->lock);
-	TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
+	TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
 		if (freed_pages >= npages_to_free)
 			break;
 
@@ -338,7 +338,7 @@ restart:
 		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
 		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
 			/* remove range of pages from the pool */
-			TAILQ_REMOVE(&pool->list, p, pageq);
+			TAILQ_REMOVE(&pool->list, p, plinks.q);
 			ttm_pool_update_free_locked(pool, freed_pages);
 			/**
@@ -373,7 +373,7 @@ restart:
 	/* remove range of pages from the pool */
 	if (freed_pages) {
-		TAILQ_REMOVE(&pool->list, p, pageq);
+		TAILQ_REMOVE(&pool->list, p, plinks.q);
 		ttm_pool_update_free_locked(pool, freed_pages);
 		nr_free -= freed_pages;
@@ -470,7 +470,7 @@ static void ttm_handle_caching_state_fai
 	unsigned i;
 	/* Failed pages have to be freed */
 	for (i = 0; i < cpages; ++i) {
-		TAILQ_REMOVE(pages, failed_pages[i], pageq);
+		TAILQ_REMOVE(pages, failed_pages[i], plinks.q);
 		ttm_vm_page_free(failed_pages[i]);
 	}
 }
@@ -545,7 +545,7 @@ static int ttm_alloc_new_pages(struct pg
 			}
 		}
 
-		TAILQ_INSERT_HEAD(pages, p, pageq);
+		TAILQ_INSERT_HEAD(pages, p, plinks.q);
 	}
 
 	if (cpages) {
@@ -600,16 +600,16 @@ static void ttm_page_pool_fill_locked(st
 		mtx_lock(&pool->lock);
 		if (!r) {
-			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
+			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
 			++pool->nrefills;
 			pool->npages += alloc_size;
 		} else {
 			printf("[TTM] Failed to fill pool (%p)\n", pool);
 			/* If we have any pages left put them to the pool.
 			 */
-			TAILQ_FOREACH(p, &pool->list, pageq) {
+			TAILQ_FOREACH(p, &pool->list, plinks.q) {
 				++cpages;
 			}
-			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
+			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
 			pool->npages += cpages;
 		}
 
@@ -636,15 +636,15 @@ static unsigned ttm_page_pool_get_pages(
 		if (count >= pool->npages) {
 			/* take all pages from the pool */
-			TAILQ_CONCAT(pages, &pool->list, pageq);
+			TAILQ_CONCAT(pages, &pool->list, plinks.q);
 			count -= pool->npages;
 			pool->npages = 0;
 			goto out;
 		}
 		for (i = 0; i < count; i++) {
 			p = TAILQ_FIRST(&pool->list);
-			TAILQ_REMOVE(&pool->list, p, pageq);
-			TAILQ_INSERT_TAIL(pages, p, pageq);
+			TAILQ_REMOVE(&pool->list, p, plinks.q);
+			TAILQ_INSERT_TAIL(pages, p, plinks.q);
 		}
 		pool->npages -= count;
 		count = 0;
@@ -674,7 +674,7 @@ static void ttm_put_pages(vm_page_t *pag
 		mtx_lock(&pool->lock);
 		for (i = 0; i < npages; i++) {
 			if (pages[i]) {
-				TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
+				TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q);
 				pages[i] = NULL;
 				pool->npages++;
 			}
@@ -735,13 +735,13 @@ static int ttm_get_pages(vm_page_t *page
 	TAILQ_INIT(&plist);
 	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
 	count = 0;
-	TAILQ_FOREACH(p, &plist, pageq) {
+	TAILQ_FOREACH(p, &plist, plinks.q) {
 		pages[count++] = p;
 	}
 
 	/* clear the pages coming from the pool if requested */
 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-		TAILQ_FOREACH(p, &plist, pageq) {
+		TAILQ_FOREACH(p, &plist, plinks.q) {
 			pmap_zero_page(p);
 		}
 	}
@@ -754,7 +754,7 @@ static int ttm_get_pages(vm_page_t *page
 		TAILQ_INIT(&plist);
 		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
 		    npages);
-		TAILQ_FOREACH(p, &plist, pageq) {
+		TAILQ_FOREACH(p, &plist, plinks.q) {
 			pages[count++] = p;
 		}
 		if (r) {

Modified: head/sys/dev/virtio/balloon/virtio_balloon.c
==============================================================================
--- head/sys/dev/virtio/balloon/virtio_balloon.c	Sat Aug 10 16:23:29 2013	(r254181)
+++ head/sys/dev/virtio/balloon/virtio_balloon.c	Sat Aug 10 17:36:42 2013	(r254182)
@@ -334,7 +334,7 @@ vtballoon_inflate(struct vtballoon_softc
 		KASSERT(m->queue == PQ_NONE,
 		    ("%s: allocated page %p on queue", __func__, m));
-		TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, pageq);
+		TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, plinks.q);
 	}
 
 	if (i > 0)
@@ -362,8 +362,8 @@ vtballoon_deflate(struct vtballoon_softc
 		sc->vtballoon_page_frames[i] =
 		    VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
 
-		TAILQ_REMOVE(&sc->vtballoon_pages, m, pageq);
-		TAILQ_INSERT_TAIL(&free_pages, m, pageq);
+		TAILQ_REMOVE(&sc->vtballoon_pages, m, plinks.q);
+		TAILQ_INSERT_TAIL(&free_pages, m, plinks.q);
 	}
 
 	if (i > 0) {
@@ -371,7 +371,7 @@ vtballoon_deflate(struct vtballoon_softc
 		vtballoon_send_page_frames(sc, vq, i);
 
 		while ((m = TAILQ_FIRST(&free_pages)) != NULL) {
-			TAILQ_REMOVE(&free_pages, m, pageq);
+			TAILQ_REMOVE(&free_pages, m, plinks.q);
 			vtballoon_free_page(sc, m);
 		}
 	}

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Sat Aug 10 16:23:29 2013	(r254181)
+++ head/sys/i386/i386/pmap.c	Sat Aug 10 17:36:42 2013	(r254182)
@@ -317,12 +317,12 @@ static boolean_t pmap_protect_pde(pmap_t
     vm_prot_t prot);
 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
-    vm_page_t *free);
+    struct spglist *free);
 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
-    vm_page_t *free);
+    struct spglist *free);
 static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
-    vm_page_t *free);
+    struct spglist *free);
 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
     vm_offset_t va);
 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
@@ -335,10 +335,10 @@ static void pmap_update_pde_invalidate(v
 
 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
 static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
-static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
+static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
 static void pmap_pte_release(pt_entry_t *pte);
-static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
+static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
 #ifdef PAE
 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags,
     int wait);
 #endif
@@ -1568,14 +1568,12 @@ pmap_qremove(vm_offset_t sva, int count)
  * Page table page management routines.....
  ***************************************************/
 static __inline void
-pmap_free_zero_pages(vm_page_t free)
+pmap_free_zero_pages(struct spglist *free)
 {
 	vm_page_t m;
 
-	while (free != NULL) {
-		m = free;
-		free = (void *)m->object;
-		m->object = NULL;
+	while ((m = SLIST_FIRST(free)) != NULL) {
+		SLIST_REMOVE_HEAD(free, plinks.s.ss);
 		/* Preserve the page's PG_ZERO setting. */
 		vm_page_free_toq(m);
 	}
@@ -1587,15 +1585,15 @@ pmap_free_zero_pages(vm_page_t free)
  * physical memory manager after the TLB has been updated.
 */
 static __inline void
-pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
+pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
+    boolean_t set_PG_ZERO)
 {
 
 	if (set_PG_ZERO)
 		m->flags |= PG_ZERO;
 	else
 		m->flags &= ~PG_ZERO;
-	m->object = (void *)*free;
-	*free = m;
+	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
 }
 
 /*
@@ -1645,7 +1643,7 @@ pmap_remove_pt_page(pmap_t pmap, vm_page
 * page table page was unmapped and FALSE otherwise.
 */
 static inline boolean_t
-pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
+pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
 {
 
 	--m->wire_count;
@@ -1657,7 +1655,7 @@ pmap_unwire_ptp(pmap_t pmap, vm_page_t m
 }
 
 static void
-_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
+_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
 {
 	vm_offset_t pteva;
 
@@ -1693,7 +1691,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_page_t
 * conditionally free the page, and manage the hold/wire counts.
 */
 static int
-pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
 {
 	pd_entry_t ptepde;
 	vm_page_t mpte;
 
@@ -2193,16 +2191,18 @@ pmap_pv_reclaim(pmap_t locked_pmap)
 	pt_entry_t *pte, tpte;
 	pv_entry_t pv;
 	vm_offset_t va;
-	vm_page_t free, m, m_pc;
+	vm_page_t m, m_pc;
+	struct spglist free;
 	uint32_t inuse;
 	int bit, field, freed;
 
 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
 	pmap = NULL;
-	free = m_pc = NULL;
+	m_pc = NULL;
+	SLIST_INIT(&free);
 	TAILQ_INIT(&newtail);
 	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 ||
-	    free == NULL)) {
+	    SLIST_EMPTY(&free))) {
 		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
 		if (pmap != pc->pc_pmap) {
 			if (pmap != NULL) {
@@ -2307,15 +2307,14 @@ out:
 		if (pmap != locked_pmap)
 			PMAP_UNLOCK(pmap);
 	}
-	if (m_pc == NULL && pv_vafree != 0 && free != NULL) {
-		m_pc = free;
-		free = (void *)m_pc->object;
-		m_pc->object = NULL;
+	if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) {
+		m_pc = SLIST_FIRST(&free);
+		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
 		/* Recycle a freed page table page. */
 		m_pc->wire_count = 1;
 		atomic_add_int(&cnt.v_wire_count, 1);
 	}
-	pmap_free_zero_pages(free);
+	pmap_free_zero_pages(&free);
 	return (m_pc);
 }
 
@@ -2636,7 +2635,8 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t
 	pd_entry_t newpde, oldpde;
 	pt_entry_t *firstpte, newpte;
 	vm_paddr_t mptepa;
-	vm_page_t free, mpte;
+	vm_page_t mpte;
+	struct spglist free;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	oldpde = *pde;
@@ -2658,10 +2658,10 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t
 	if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
 	    va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
 	    VM_ALLOC_WIRED)) == NULL) {
-		free = NULL;
+		SLIST_INIT(&free);
 		pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
 		pmap_invalidate_page(pmap, trunc_4mpage(va));
-		pmap_free_zero_pages(free);
+		pmap_free_zero_pages(&free);
 		CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
 		    " in pmap %p", va, pmap);
 		return (FALSE);
@@ -2814,7 +2814,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_e
 */
 static void
 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
-    vm_page_t *free)
+    struct spglist *free)
 {
 	struct md_page *pvh;
 	pd_entry_t oldpde;
@@ -2870,7 +2870,8 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t
 * pmap_remove_pte: do the things to unmap a page in a process
 */
 static int
-pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
+pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
+    struct spglist *free)
 {
 	pt_entry_t oldpte;
 	vm_page_t m;
@@ -2904,7 +2905,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t
 * Remove a single page from a process address space
 */
 static void
-pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
+pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
 {
 	pt_entry_t *pte;
 
@@ -2929,7 +2930,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 	vm_offset_t pdnxt;
 	pd_entry_t ptpaddr;
 	pt_entry_t *pte;
-	vm_page_t free = NULL;
+	struct spglist free;
 	int anyvalid;
 
 	/*
@@ -2939,6 +2940,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 		return;
 
 	anyvalid = 0;
+	SLIST_INIT(&free);
 
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -3031,7 +3033,7 @@ out:
 		pmap_invalidate_all(pmap);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(free);
+	pmap_free_zero_pages(&free);
 }
 
 /*
@@ -3056,11 +3058,11 @@ pmap_remove_all(vm_page_t m)
 	pt_entry_t *pte, tpte;
 	pd_entry_t *pde;
 	vm_offset_t va;
-	vm_page_t free;
+	struct spglist free;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
-	free = NULL;
+	SLIST_INIT(&free);
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
@@ -3105,7 +3107,7 @@ small_mappings:
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
-	pmap_free_zero_pages(free);
+	pmap_free_zero_pages(&free);
 }
 
 /*
@@ -3769,7 +3771,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 {
 	pt_entry_t *pte;
 	vm_paddr_t pa;
-	vm_page_t free;
+	struct spglist free;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
@@ -3838,10 +3840,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
 	    !pmap_try_insert_pv_entry(pmap, va, m)) {
 		if (mpte != NULL) {
-			free = NULL;
+			SLIST_INIT(&free);
 			if (pmap_unwire_ptp(pmap, mpte, &free)) {
 				pmap_invalidate_page(pmap, va);
-				pmap_free_zero_pages(free);
+				pmap_free_zero_pages(&free);
 			}
 
 			mpte = NULL;
@@ -4024,7 +4026,7 @@ void
 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
     vm_offset_t src_addr)
 {
-	vm_page_t free;
+	struct spglist free;
 	vm_offset_t addr;
 	vm_offset_t end_addr = src_addr + len;
 	vm_offset_t pdnxt;
@@ -4107,12 +4109,12 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 					    PG_A);
 					dst_pmap->pm_stats.resident_count++;
 				} else {
-					free = NULL;
+					SLIST_INIT(&free);
 					if (pmap_unwire_ptp(dst_pmap,
 					    dstmpte, &free)) {
 						pmap_invalidate_page(dst_pmap,
 						    addr);
-						pmap_free_zero_pages(free);
+						pmap_free_zero_pages(&free);
 					}
 					goto out;
 				}
@@ -4419,11 +4421,11 @@ void
 pmap_remove_pages(pmap_t pmap)
 {
 	pt_entry_t *pte, tpte;
-	vm_page_t free = NULL;
 	vm_page_t m, mpte, mt;
 	pv_entry_t pv;
 	struct md_page *pvh;
 	struct pv_chunk *pc, *npc;
+	struct spglist free;
 	int field, idx;
 	int32_t bit;
 	uint32_t inuse, bitmask;
@@ -4433,6 +4435,7 @@ pmap_remove_pages(pmap_t pmap)
 		printf("warning: pmap_remove_pages called with non-current pmap\n");
 		return;
 	}
+	SLIST_INIT(&free);
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	sched_pin();
@@ -4541,7 +4544,7 @@ pmap_remove_pages(pmap_t pmap)
 		pmap_invalidate_all(pmap);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(free);
+	pmap_free_zero_pages(&free);
 }
 
 /*

Modified: head/sys/vm/device_pager.c
==============================================================================
--- head/sys/vm/device_pager.c	Sat Aug 10 16:23:29 2013	(r254181)
+++ head/sys/vm/device_pager.c	Sat Aug 10 17:36:42 2013	(r254182)
@@ -226,7 +226,7 @@ dev_pager_free_page(vm_object_t object,
 	KASSERT((object->type == OBJT_DEVICE &&
 	    (m->oflags & VPO_UNMANAGED) != 0),
 	    ("Managed device or page obj %p m %p", object, m));
-	TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
+	TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, plinks.q);
 	vm_page_putfake(m);
 }
 
@@ -281,7 +281,7 @@ dev_pager_getpages(vm_object_t object, v
 		    ("Wrong page type %p %p", ma[reqpage], object));
 		if (object->type == OBJT_DEVICE) {
 			TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
-			    ma[reqpage], pageq);
+			    ma[reqpage], plinks.q);
 		}
 	}

Modified: head/sys/vm/memguard.c
==============================================================================
--- head/sys/vm/memguard.c	Sat Aug 10 16:23:29 2013	(r254181)
+++ head/sys/vm/memguard.c	Sat Aug 10 17:36:42 2013	(r254182)
@@ -261,7 +261,7 @@ v2sizep(vm_offset_t va)
 	p = PHYS_TO_VM_PAGE(pa);
 	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
 	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
-	return ((u_long *)&p->pageq.tqe_next);
+	return (&p->plinks.memguard.p);
 }
 
 static u_long *
@@ -276,7 +276,7 @@ v2sizev(vm_offset_t va)
 	p = PHYS_TO_VM_PAGE(pa);
 	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
 	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
-	return ((u_long *)&p->pageq.tqe_prev);
+	return (&p->plinks.memguard.v);
 }
 
 /*

Modified: head/sys/vm/sg_pager.c
==============================================================================
--- head/sys/vm/sg_pager.c	Sat Aug 10 16:23:29 2013	(r254181)
+++ head/sys/vm/sg_pager.c	Sat Aug 10 17:36:42 2013	(r254182)
@@ -124,7 +124,7 @@ sg_pager_dealloc(vm_object_t object)
 	 * Free up our fake pages.
 	 */
 	while ((m = TAILQ_FIRST(&object->un_pager.sgp.sgp_pglist)) != 0) {
-		TAILQ_REMOVE(&object->un_pager.sgp.sgp_pglist, m, pageq);
+		TAILQ_REMOVE(&object->un_pager.sgp.sgp_pglist, m, plinks.q);
 		vm_page_putfake(m);
 	}
 
@@ -182,7 +182,7 @@ sg_pager_getpages(vm_object_t object, vm
 	/* Construct a new fake page. */
 	page = vm_page_getfake(paddr, memattr);
 	VM_OBJECT_WLOCK(object);
-	TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq);
+	TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, plinks.q);
 
 	/* Free the original pages and insert this fake page into the object. */
 	for (i = 0; i < count; i++) {

Modified: head/sys/vm/uma_core.c
==============================================================================
--- head/sys/vm/uma_core.c	Sat Aug 10 16:23:29 2013	(r254181)
+++ head/sys/vm/uma_core.c	Sat Aug 10 17:36:42 2013	(r254182)
@@ -718,18 +718,6 @@ keg_free_slab(uma_keg_t keg, uma_slab_t
 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
 			    keg->uk_size);
 	}
-	if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
-		vm_object_t obj;
-
-		if (flags & UMA_SLAB_KMEM)
-			obj = kmem_object;
-		else if (flags & UMA_SLAB_KERNEL)
-			obj = kernel_object;
-		else
-			obj = NULL;
-		for (i = 0; i < keg->uk_ppera; i++)
-			vsetobj((vm_offset_t)mem + (i * PAGE_SIZE), obj);
-	}
 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
 #ifdef UMA_DEBUG
@@ -3112,7 +3100,7 @@ uma_large_malloc(int size, int wait)
 void
 uma_large_free(uma_slab_t slab)
 {
-	vsetobj((vm_offset_t)slab->us_data, kmem_object);
+
 	page_free(slab->us_data, slab->us_size, slab->us_flags);
 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
 }

Modified: head/sys/vm/uma_int.h
==============================================================================
--- head/sys/vm/uma_int.h	Sat Aug 10 16:23:29 2013	(r254181)
+++ head/sys/vm/uma_int.h	Sat Aug 10 17:36:42 2013	(r254182)
@@ -407,7 +407,7 @@ vtoslab(vm_offset_t va)
 	uma_slab_t slab;
 
 	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
-	slab = (uma_slab_t )p->object;
+	slab = (uma_slab_t )p->plinks.s.pv;
 	if (p->flags & PG_SLAB)
 		return (slab);
@@ -421,20 +421,10 @@ vsetslab(vm_offset_t va, uma_slab_t slab
 	vm_page_t p;
 
 	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
-	p->object = (vm_object_t)slab;
+	p->plinks.s.pv = slab;
 	p->flags |= PG_SLAB;
 }
 
-static __inline void
-vsetobj(vm_offset_t va, vm_object_t obj)
-{
-	vm_page_t p;
-
-	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
-	p->object = obj;
-	p->flags &= ~PG_SLAB;
-}
-
 /*
 * The following two functions may be defined by architecture specific code
 * if they can provide more effecient allocation functions.  This is useful

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Sat Aug 10 16:23:29 2013	(r254181)
+++ head/sys/vm/vm_page.c	Sat Aug 10 17:36:42 2013	(r254182)
@@ -1643,6 +1643,16 @@ vm_page_alloc(vm_object_t object, vm_pin
 	return (m);
 }
 
+static void
+vm_page_alloc_contig_vdrop(struct spglist *lst)
+{
+
+	while (!SLIST_EMPTY(lst)) {
+		vdrop((struct vnode *)SLIST_FIRST(lst)->plinks.s.pv);
+		SLIST_REMOVE_HEAD(lst, plinks.s.ss);
+	}
+}
+
 /*
 *	vm_page_alloc_contig:
 *
@@ -1687,7 +1697,8 @@ vm_page_alloc_contig(vm_object_t object,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
 	struct vnode *drop;
-	vm_page_t deferred_vdrop_list, m, m_tmp, m_ret;
+	struct spglist deferred_vdrop_list;
+	vm_page_t m, m_tmp, m_ret;
 	u_int flags, oflags;
 	int req_class;
 
@@ -1712,7 +1723,7 @@ vm_page_alloc_contig(vm_object_t object,
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 
-	deferred_vdrop_list = NULL;
+	SLIST_INIT(&deferred_vdrop_list);
 	mtx_lock(&vm_page_queue_free_mtx);
 	if (cnt.v_free_count + cnt.v_cache_count >= npages +
 	    cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
@@ -1744,9 +1755,9 @@ retry:
 				 * page list, "pageq" can be safely abused to
 				 * construct a short-lived list of vnodes.
 				 */
-				m->pageq.tqe_prev = (void *)drop;
-				m->pageq.tqe_next = deferred_vdrop_list;
-				deferred_vdrop_list = m;
+				m->plinks.s.pv = drop;
+				SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
+				    plinks.s.ss);
 			}
 		} else {

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
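[Editor's note: stripped of pmap detail, the essence of the change is that the
old code chained delayed-free pages through casts of m->object, while the new
code threads them through a type-safe SLIST entry inside the plinks union.
The stand-alone sketch below demonstrates the new pattern in userland; the toy
struct page, its id field and main() are illustrative only, while the SLIST
calls mirror pmap_add_delayed_free_list() and pmap_free_zero_pages() from the
diff above.]

  #include <sys/queue.h>
  #include <stdio.h>

  /* Toy stand-in for struct vm_page with a plinks-style union. */
  struct page {
  	union {
  		SLIST_ENTRY(page) ss;	/* delayed-free list linkage */
  		void *pv;		/* private pointer use */
  	} plinks;
  	int id;
  };

  SLIST_HEAD(spglist, page);

  int
  main(void)
  {
  	struct spglist free;
  	struct page pages[3], *m;
  	int i;

  	SLIST_INIT(&free);

  	/* pmap_add_delayed_free_list(): push pages, no casts needed. */
  	for (i = 0; i < 3; i++) {
  		pages[i].id = i;
  		SLIST_INSERT_HEAD(&free, &pages[i], plinks.ss);
  	}

  	/* pmap_free_zero_pages(): drain the list after the TLB update. */
  	while ((m = SLIST_FIRST(&free)) != NULL) {
  		SLIST_REMOVE_HEAD(&free, plinks.ss);
  		printf("freeing page %d\n", m->id);
  	}
  	return (0);
  }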