Date:      Fri, 23 Aug 2019 19:49:29 +0000 (UTC)
From:      Mark Johnston <markj@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r351436 - head/sys/vm
Message-ID:  <201908231949.x7NJnTTE022549@repo.freebsd.org>

Author: markj
Date: Fri Aug 23 19:49:29 2019
New Revision: 351436
URL: https://svnweb.freebsd.org/changeset/base/351436

Log:
  Make vm_pqbatch_submit_page() externally visible.
  
  It will become useful for the page daemon to be able to directly create
  a batch queue entry for a page without modifying the page structure.
  Rename vm_pqbatch_submit_page() to vm_page_pqbatch_submit() to keep the
  namespace consistent.  No functional change intended.
  
  Reviewed by:	alc, kib
  MFC after:	1 week
  Sponsored by:	Netflix
  Differential Revision:	https://reviews.freebsd.org/D21369

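A minimal sketch of how a caller such as the page daemon might hand a page to
the per-CPU batch queues through the newly exported vm_page_pqbatch_submit(),
modeled on the vm_page_dequeue_deferred() pattern visible in the diff below;
the helper name example_defer_dequeue() is hypothetical and not part of this
commit.

	static void
	example_defer_dequeue(vm_page_t m)
	{
		uint8_t queue;

		vm_page_assert_locked(m);

		/* Nothing to do if the page is not on a page queue. */
		if ((queue = vm_page_queue(m)) == PQ_NONE)
			return;
		/* Mark the page for dequeue and submit a batch queue entry. */
		vm_page_aflag_set(m, PGA_DEQUEUE);
		vm_page_pqbatch_submit(m, queue);
	}
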
Modified:
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_swapout.c

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Fri Aug 23 19:40:10 2019	(r351435)
+++ head/sys/vm/vm_page.c	Fri Aug 23 19:49:29 2019	(r351436)
@@ -3130,8 +3130,8 @@ vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_
 	vm_batchqueue_init(bq);
 }
 
-static void
-vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
+void
+vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
 {
 	struct vm_batchqueue *bq;
 	struct vm_pagequeue *pq;
@@ -3181,14 +3181,14 @@ vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
 }
 
 /*
- *	vm_page_drain_pqbatch:		[ internal use only ]
+ *	vm_page_pqbatch_drain:		[ internal use only ]
  *
  *	Force all per-CPU page queue batch queues to be drained.  This is
  *	intended for use in severe memory shortages, to ensure that pages
  *	do not remain stuck in the batch queues.
  */
 void
-vm_page_drain_pqbatch(void)
+vm_page_pqbatch_drain(void)
 {
 	struct thread *td;
 	struct vm_domain *vmd;
@@ -3253,7 +3253,7 @@ vm_page_dequeue_deferred(vm_page_t m)
 	if ((queue = vm_page_queue(m)) == PQ_NONE)
 		return;
 	vm_page_aflag_set(m, PGA_DEQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
@@ -3277,7 +3277,7 @@ vm_page_dequeue_deferred_free(vm_page_t m)
 	if ((queue = m->queue) == PQ_NONE)
 		return;
 	vm_page_aflag_set(m, PGA_DEQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
@@ -3352,7 +3352,7 @@ vm_page_enqueue(vm_page_t m, uint8_t queue)
 	m->queue = queue;
 	if ((m->aflags & PGA_REQUEUE) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
@@ -3372,7 +3372,7 @@ vm_page_requeue(vm_page_t m)
 
 	if ((m->aflags & PGA_REQUEUE) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE);
-	vm_pqbatch_submit_page(m, atomic_load_8(&m->queue));
+	vm_page_pqbatch_submit(m, atomic_load_8(&m->queue));
 }
 
 /*
@@ -3700,7 +3700,7 @@ vm_page_deactivate_noreuse(vm_page_t m)
 	}
 	if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
-	vm_pqbatch_submit_page(m, PQ_INACTIVE);
+	vm_page_pqbatch_submit(m, PQ_INACTIVE);
 }
 
 /*

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Fri Aug 23 19:40:10 2019	(r351435)
+++ head/sys/vm/vm_page.h	Fri Aug 23 19:49:29 2019	(r351436)
@@ -542,7 +542,6 @@ void vm_page_deactivate(vm_page_t);
 void vm_page_deactivate_noreuse(vm_page_t);
 void vm_page_dequeue(vm_page_t m);
 void vm_page_dequeue_deferred(vm_page_t m);
-void vm_page_drain_pqbatch(void);
 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
 bool vm_page_free_prep(vm_page_t m);
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
@@ -552,6 +551,8 @@ void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
+void vm_page_pqbatch_drain(void);
+void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
 vm_page_t vm_page_prev(vm_page_t m);
 bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
 void vm_page_putfake(vm_page_t m);

Modified: head/sys/vm/vm_swapout.c
==============================================================================
--- head/sys/vm/vm_swapout.c	Fri Aug 23 19:40:10 2019	(r351435)
+++ head/sys/vm/vm_swapout.c	Fri Aug 23 19:49:29 2019	(r351436)
@@ -409,7 +409,7 @@ vm_daemon(void)
 			 * avoidance measure.
 			 */
 			if ((swapout_flags & VM_SWAP_NORMAL) != 0)
-				vm_page_drain_pqbatch();
+				vm_page_pqbatch_drain();
 			swapout_procs(swapout_flags);
 		}
 


