From owner-svn-src-user@freebsd.org  Tue Mar 13 03:53:34 2018
Return-Path: <owner-svn-src-user@freebsd.org>
Delivered-To: svn-src-user@mailman.ysv.freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2610:1c1:1:606c::19:1])
 by mailman.ysv.freebsd.org (Postfix) with ESMTP id 74044B7DFED
 for <svn-src-user@mailman.ysv.freebsd.org>;
 Tue, 13 Mar 2018 03:53:34 +0000 (UTC)
 (envelope-from jeff@FreeBSD.org)
Received: from mxrelay.nyi.freebsd.org (mxrelay.nyi.freebsd.org
 [IPv6:2610:1c1:1:606c::19:3])
 (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
 (Client CN "mxrelay.nyi.freebsd.org",
 Issuer "Let's Encrypt Authority X3" (verified OK))
 by mx1.freebsd.org (Postfix) with ESMTPS id 2551886AD7;
 Tue, 13 Mar 2018 03:53:34 +0000 (UTC)
 (envelope-from jeff@FreeBSD.org)
Received: from repo.freebsd.org (repo.freebsd.org
 [IPv6:2610:1c1:1:6068::e6a:0])
 (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
 (Client did not present a certificate)
 by mxrelay.nyi.freebsd.org (Postfix) with ESMTPS id 2047D14F99;
 Tue, 13 Mar 2018 03:53:34 +0000 (UTC)
 (envelope-from jeff@FreeBSD.org)
Received: from repo.freebsd.org ([127.0.1.37])
 by repo.freebsd.org (8.15.2/8.15.2) with ESMTP id w2D3rYK6029675;
 Tue, 13 Mar 2018 03:53:34 GMT (envelope-from jeff@FreeBSD.org)
Received: (from jeff@localhost)
 by repo.freebsd.org (8.15.2/8.15.2/Submit) id w2D3rXIc029673;
 Tue, 13 Mar 2018 03:53:33 GMT (envelope-from jeff@FreeBSD.org)
Message-Id: <201803130353.w2D3rXIc029673@repo.freebsd.org>
X-Authentication-Warning: repo.freebsd.org: jeff set sender to
 jeff@FreeBSD.org using -f
From: Jeff Roberson <jeff@FreeBSD.org>
Date: Tue, 13 Mar 2018 03:53:33 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-user@freebsd.org
Subject: svn commit: r330827 - user/jeff/numa/sys/vm
X-SVN-Group: user
X-SVN-Commit-Author: jeff
X-SVN-Commit-Paths: user/jeff/numa/sys/vm
X-SVN-Commit-Revision: 330827
X-SVN-Commit-Repository: base
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
X-BeenThere: svn-src-user@freebsd.org
X-Mailman-Version: 2.1.25
Precedence: list
List-Id: "SVN commit messages for the experimental &quot;user&quot;
 src tree" <svn-src-user.freebsd.org>
List-Unsubscribe: <https://lists.freebsd.org/mailman/options/svn-src-user>,
 <mailto:svn-src-user-request@freebsd.org?subject=unsubscribe>
List-Archive: <http://lists.freebsd.org/pipermail/svn-src-user/>
List-Post: <mailto:svn-src-user@freebsd.org>
List-Help: <mailto:svn-src-user-request@freebsd.org?subject=help>
List-Subscribe: <https://lists.freebsd.org/mailman/listinfo/svn-src-user>,
 <mailto:svn-src-user-request@freebsd.org?subject=subscribe>
X-List-Received-Date: Tue, 13 Mar 2018 03:53:34 -0000

Author: jeff
Date: Tue Mar 13 03:53:33 2018
New Revision: 330827
URL: https://svnweb.freebsd.org/changeset/base/330827

Log:
  Fix a smattering of bugs in my lockless free_count work and a recent
  merge: an overflow bug in vm_domain_allocate(), some double counting
  of frees, and a locking bug in reclaim_run that may also exist in
  CURRENT.
  
  Reported by:	pho

Modified:
  user/jeff/numa/sys/vm/vm_page.c
  user/jeff/numa/sys/vm/vm_reserv.c

Modified: user/jeff/numa/sys/vm/vm_page.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.c	Tue Mar 13 03:02:09 2018	(r330826)
+++ user/jeff/numa/sys/vm/vm_page.c	Tue Mar 13 03:53:33 2018	(r330827)
@@ -184,7 +184,7 @@ static uma_zone_t fakepg_zone;
 static void vm_page_alloc_check(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(uint8_t queue, vm_page_t m);
-static void vm_page_free_phys(struct vm_domain *vmd, vm_page_t m);
+static int vm_page_free_phys(struct vm_domain *vmd, vm_page_t m);
 static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
     vm_pindex_t pindex, vm_page_t mpred);
@@ -1733,11 +1733,12 @@ vm_domain_allocate(struct vm_domain *vmd, int req, int
 	/*
 	 * Attempt to reserve the pages.  Fail if we're below the limit.
 	 */
+	limit += npages;
 	do {
 		old = vmd->vmd_free_count;
-		new = old - npages;
-		if (new < limit)
+		if (old < limit)
 			return (0);
+		new = old - npages;
 	} while (atomic_cmpset_int(&vmd->vmd_free_count, old, new) == 0);
 
 	/* Wake the page daemon if we've crossed the threshold. */
@@ -2233,7 +2234,7 @@ vm_page_release(void *arg, void **store, int cnt)
 		vm_phys_free_pages(m, 0);
 	}
 	vm_domain_free_unlock(vmd);
-	vm_domain_freecnt_inc(vmd, i);
+	vm_domain_freecnt_inc(vmd, cnt);
 }
 
 #define	VPSC_ANY	0	/* No restrictions. */
@@ -2584,7 +2585,12 @@ retry:
 					    m->pindex, m);
 					m->valid = 0;
 					vm_page_undirty(m);
-
+					m->oflags = VPO_UNMANAGED;
+#if VM_NRESERVLEVEL > 0
+					if (!vm_reserv_free_page(m))
+#endif
+						SLIST_INSERT_HEAD(&free, m,
+						    plinks.s.ss);
 					/*
 					 * The new page must be deactivated
 					 * before the object is unlocked.
@@ -2597,19 +2603,20 @@ retry:
 					vm_page_remove(m);
 					KASSERT(m->dirty == 0,
 					    ("page %p is dirty", m));
-				}
+					m->oflags = VPO_UNMANAGED;
 #if VM_NRESERVLEVEL > 0
-				if (!vm_reserv_free_page(m))
+					if (!vm_reserv_free_page(m))
 #endif
-					SLIST_INSERT_HEAD(&free, m,
-					    plinks.s.ss);
+						SLIST_INSERT_HEAD(&free, m,
+						    plinks.s.ss);
+				}
 			} else
 				error = EBUSY;
 unlock:
 			VM_OBJECT_WUNLOCK(object);
 		} else {
 			MPASS(vm_phys_domain(m) == domain);
-			vm_page_lock(m);
+			/* XXX order unsynchronized? */
 			order = m->order;
 			if (order < VM_NFREEORDER) {
 				/*
@@ -2626,7 +2633,6 @@ unlock:
 			else if (vm_reserv_is_page_free(m))
 				order = 0;
 #endif
-			vm_page_unlock(m);
 			if (order == VM_NFREEORDER)
 				error = EINVAL;
 		}
@@ -3284,16 +3290,18 @@ vm_page_free_prep(vm_page_t m, bool pagequeue_locked)
  * queues.  This is the last step to free a page.  The caller is
  * responsible for adjusting the free page count.
  */
-static void
+static int
 vm_page_free_phys(struct vm_domain *vmd, vm_page_t m)
 {
 
 	vm_domain_free_assert_locked(vmd);
 
 #if VM_NRESERVLEVEL > 0
-	if (!vm_reserv_free_page(m))
+	if (vm_reserv_free_page(m))
+		return (0);
 #endif
-		vm_phys_free_pages(m, 0);
+	vm_phys_free_pages(m, 0);
+	return (1);
 }
 
 void
@@ -3317,8 +3325,7 @@ vm_page_free_phys_pglist(struct pglist *tq)
 			vmd = vm_pagequeue_domain(m);
 			vm_domain_free_lock(vmd);
 		}
-		vm_page_free_phys(vmd, m);
-		cnt++;
+		cnt += vm_page_free_phys(vmd, m);
 	}
 	if (vmd != NULL) {
 		vm_domain_free_unlock(vmd);
@@ -3339,14 +3346,16 @@ void
 vm_page_free_toq(vm_page_t m)
 {
 	struct vm_domain *vmd;
+	int freed;
 
 	if (!vm_page_free_prep(m, false))
 		return;
 	vmd = vm_pagequeue_domain(m);
 	vm_domain_free_lock(vmd);
-	vm_page_free_phys(vmd, m);
+	freed = vm_page_free_phys(vmd, m);
 	vm_domain_free_unlock(vmd);
-	vm_domain_freecnt_inc(vmd, 1);
+	if (freed)
+		vm_domain_freecnt_inc(vmd, 1);
 }
 
 /*
@@ -3374,11 +3383,8 @@ vm_page_free_pages_toq(struct spglist *free, bool upda
 	while ((m = SLIST_FIRST(free)) != NULL) {
 		count++;
 		SLIST_REMOVE_HEAD(free, plinks.s.ss);
-		/* XXX batch locks. */
-		vm_page_lock(m);
 		if (vm_page_free_prep(m, false))
 			TAILQ_INSERT_TAIL(&pgl, m, listq);
-		vm_page_unlock(m);
 	}
 
 	vm_page_free_phys_pglist(&pgl);

Modified: user/jeff/numa/sys/vm/vm_reserv.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_reserv.c	Tue Mar 13 03:02:09 2018	(r330826)
+++ user/jeff/numa/sys/vm/vm_reserv.c	Tue Mar 13 03:53:33 2018	(r330827)
@@ -455,20 +455,8 @@ vm_reserv_depopulate(vm_reserv_t rv, int index)
 static __inline vm_reserv_t
 vm_reserv_from_page(vm_page_t m)
 {
-	vm_reserv_t rv;
 
-	rv = &vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT];
-#if 0
-	if (rv->pages == NULL)
-		panic("vm_reserv_from_page: Bad reservation %p page %p phys %p segind %d start %p end %p first page %p domain %d\n",
-		    rv, m, (void *)m->phys_addr, m->segind,
-		    (void *)vm_phys_segs[m->segind].start,
-		    (void *)vm_phys_segs[m->segind].end,
-		    vm_phys_segs[m->segind].first_page,
-		    vm_phys_segs[m->segind].domain);
-#endif
-
-	return (rv);
+	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
 }
 
 /*
@@ -790,14 +778,14 @@ vm_reserv_alloc_contig(int req, vm_object_t object, vm
 	 */
 	m = NULL;
 	vmd = VM_DOMAIN(domain);
-	if (vm_domain_allocate(vmd, req, allocpages)) {
+	if (vm_domain_allocate(vmd, req, npages)) {
 		vm_domain_free_lock(vmd);
 		m = vm_phys_alloc_contig(domain, allocpages, low, high,
 		    ulmax(alignment, VM_LEVEL_0_SIZE),
 		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
 		vm_domain_free_unlock(vmd);
 		if (m == NULL) {
-			vm_domain_freecnt_inc(vmd, allocpages);
+			vm_domain_freecnt_inc(vmd, npages);
 			return (NULL);
 		}
 	} else