Date: Sat, 16 Jun 2012 16:15:22 GMT
From: John Baldwin <jhb@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 212958 for review
Message-ID: <201206161615.q5GGFMpm061173@skunkworks.freebsd.org>
http://p4web.freebsd.org/@@212958?ac=10

Change 212958 by jhb@jhb_jhbbsd on 2012/06/16 16:14:19

        Debugging and test hacks for the problem of write(2) buffers
        reclaiming cache pages instead of free pages.

Affected files ...

.. //depot/projects/fadvise/sys/vm/vm_phys.c#6 edit

Differences ...

==== //depot/projects/fadvise/sys/vm/vm_phys.c#6 (text+ko) ====

@@ -128,6 +128,15 @@
 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
     int order);
 
+static int vm_phys_uncached;
+SYSCTL_INT(_vm, OID_AUTO, phys_uncached, CTLFLAG_RD, &vm_phys_uncached, 0, "");
+static int vm_phys_uc_alloc_pages;
+SYSCTL_INT(_vm, OID_AUTO, phys_uc_alloc_pages, CTLFLAG_RD,
+    &vm_phys_uc_alloc_pages, 0, "");
+static int vm_phys_uc_free_pages;
+SYSCTL_INT(_vm, OID_AUTO, phys_uc_free_pages, CTLFLAG_RD,
+    &vm_phys_uc_free_pages, 0, "");
+
 /*
  * Outputs the state of the physical memory allocator, specifically,
  * the amount of physical memory in each free list.
@@ -495,12 +504,21 @@
                                 TAILQ_REMOVE(&alt[oind].pl, m, pageq);
                                 alt[oind].lcnt--;
                                 m->order = VM_NFREEORDER;
+                                if (m->pool == VM_FREEPOOL_CACHE &&
+                                    pool != VM_FREEPOOL_CACHE)
+                                        vm_phys_uc_alloc_pages++;
                                 vm_phys_set_pool(pool, m, oind);
                                 vm_phys_split_pages(m, oind, fl, order);
                                 return (m);
                         }
                 }
         }
+
+        /*
+         * XXX: If we get here, do deferred merging of cache pages
+         * with pages from another pool to satisfy the request and
+         * try again.  This may be quite hard to do.
+         */
         return (NULL);
 }
 
@@ -681,8 +699,30 @@
                         TAILQ_REMOVE(&fl[order].pl, m_buddy, pageq);
                         fl[order].lcnt--;
                         m_buddy->order = VM_NFREEORDER;
-                        if (m_buddy->pool != m->pool)
+                        if (m_buddy->pool != m->pool) {
+#if 1
+#if 1
+                                if (m_buddy->pool == VM_FREEPOOL_CACHE ||
+                                    m->pool == VM_FREEPOOL_CACHE)
+                                        break;
+#endif
+                                if (m_buddy->pool == VM_FREEPOOL_CACHE)
+                                        vm_phys_uc_free_pages++;
                                 vm_phys_set_pool(m->pool, m_buddy, order);
+#else
+                                if (m_buddy->pool < m->pool) {
+                                        if (m_buddy->pool == VM_FREEPOOL_CACHE)
+                                                vm_phys_uc_free_pages++;
+                                        vm_phys_set_pool(m->pool, m_buddy,
+                                            order);
+                                } else {
+                                        if (m->pool == VM_FREEPOOL_CACHE)
+                                                vm_phys_uc_free_pages++;
+                                        vm_phys_set_pool(m_buddy->pool, m,
+                                            order);
+                                }
+#endif
+                        }
                         order++;
                         pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
                         m = &seg->first_page[atop(pa - seg->start)];
@@ -743,8 +783,12 @@
 {
         vm_page_t m_tmp;
 
-        for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
+        for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++) {
+                if (m_tmp->pool == VM_FREEPOOL_CACHE &&
+                    pool != VM_FREEPOOL_CACHE)
+                        vm_phys_uncached++;
                 m_tmp->pool = pool;
+        }
 }
 
 /*
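For watching the counters while testing, a small userland sketch like the
one below could be used.  It is not part of the change itself and assumes a
kernel built with this diff, so that the vm.phys_uncached,
vm.phys_uc_alloc_pages and vm.phys_uc_free_pages OIDs actually exist.

/*
 * Poll the debug counters added by this change once per second.
 * Hypothetical test helper, not part of the patch.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

static int
read_counter(const char *name)
{
        int val;
        size_t len;

        /* All three counters are plain ints exported read-only. */
        len = sizeof(val);
        if (sysctlbyname(name, &val, &len, NULL, 0) == -1)
                err(1, "sysctlbyname(%s)", name);
        return (val);
}

int
main(void)
{

        for (;;) {
                printf("uncached %d uc_alloc %d uc_free %d\n",
                    read_counter("vm.phys_uncached"),
                    read_counter("vm.phys_uc_alloc_pages"),
                    read_counter("vm.phys_uc_free_pages"));
                sleep(1);
        }
}

On a patched kernel the same numbers can of course be read directly with
sysctl(8), e.g. "sysctl vm.phys_uncached vm.phys_uc_alloc_pages
vm.phys_uc_free_pages".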
