Date:      Mon, 2 May 2016 20:16:29 +0000 (UTC)
From:      "Pedro F. Giffuni" <pfg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r298940 - head/sys/vm
Message-ID:  <201605022016.u42KGTcC026691@repo.freebsd.org>

Author: pfg
Date: Mon May  2 20:16:29 2016
New Revision: 298940
URL: https://svnweb.freebsd.org/changeset/base/298940

Log:
  sys/vm: minor spelling fixes in comments.
  
  No functional change.

Modified:
  head/sys/vm/swap_pager.c
  head/sys/vm/uma_core.c
  head/sys/vm/uma_int.h
  head/sys/vm/vm_glue.c
  head/sys/vm/vm_map.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_pageout.c
  head/sys/vm/vnode_pager.c

Modified: head/sys/vm/swap_pager.c
==============================================================================
--- head/sys/vm/swap_pager.c	Mon May  2 19:56:48 2016	(r298939)
+++ head/sys/vm/swap_pager.c	Mon May  2 20:16:29 2016	(r298940)
@@ -965,7 +965,7 @@ swap_pager_copy(vm_object_t srcobject, v
 	/*
 	 * Free left over swap blocks in source.
 	 *
-	 * We have to revert the type to OBJT_DEFAULT so we do not accidently
+	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
 	 * double-remove the object from the swap queues.
 	 */
 	if (destroysource) {
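
The hunk above touches a comment describing a genuine teardown hazard: once the
swap-specific cleanup has run, the source object's type must be reverted to
OBJT_DEFAULT so the generic deallocation path does not remove it from the swap
queues a second time. A minimal sketch of the pattern, using hypothetical names
rather than the kernel's own types:

    /*
     * Sketch only (hypothetical names): clear the type tag after the
     * type-specific cleanup so the shared destroy path cannot redo it.
     */
    enum obj_type { OBJ_DEFAULT, OBJ_SWAP };

    struct obj {
            enum obj_type type;
    };

    static void
    swap_queue_remove(struct obj *o)
    {
            /* placeholder: unlink o from the swap queue */
            (void)o;
    }

    static void
    obj_destroy(struct obj *o)
    {
            if (o->type == OBJ_SWAP)        /* shared path checks the tag */
                    swap_queue_remove(o);
            /* ... free o ... */
    }

    static void
    strip_swap_blocks(struct obj *src)
    {
            swap_queue_remove(src);         /* swap cleanup happens here */
            src->type = OBJ_DEFAULT;        /* keep obj_destroy() from redoing it */
            obj_destroy(src);
    }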
@@ -2623,7 +2623,7 @@ swapongeom_ev(void *arg, int flags)
 	cp->flags |=  G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
 	g_attach(cp, pp);
 	/*
-	 * XXX: Everytime you think you can improve the margin for
+	 * XXX: Every time you think you can improve the margin for
 	 * footshooting, somebody depends on the ability to do so:
 	 * savecore(8) wants to write to our swapdev so we cannot
 	 * set an exclusive count :-(

Modified: head/sys/vm/uma_core.c
==============================================================================
--- head/sys/vm/uma_core.c	Mon May  2 19:56:48 2016	(r298939)
+++ head/sys/vm/uma_core.c	Mon May  2 20:16:29 2016	(r298940)
@@ -31,7 +31,7 @@
  *
  * This allocator is intended to replace the multitude of similar object caches
  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
- * effecient.  A primary design goal is to return unused memory to the rest of
+ * efficient.  A primary design goal is to return unused memory to the rest of
  * the system.  This will make the system as a whole more flexible due to the
  * ability to move memory to subsystems which most need it instead of leaving
  * pools of reserved memory unused.
@@ -531,7 +531,7 @@ zone_timeout(uma_zone_t zone)
  *	hash  A new hash structure with the old hash size in uh_hashsize
  *
  * Returns:
- *	1 on sucess and 0 on failure.
+ *	1 on success and 0 on failure.
  */
 static int
 hash_alloc(struct uma_hash *hash)
@@ -2257,7 +2257,7 @@ zalloc_start:
 
 	/*
 	 * Now lets just fill a bucket and put it on the free list.  If that
-	 * works we'll restart the allocation from the begining and it
+	 * works we'll restart the allocation from the beginning and it
 	 * will use the just filled bucket.
 	 */
 	bucket = zone_alloc_bucket(zone, udata, flags);
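
The block comment fixed in the first hunk describes UMA's design goals; for
readers unfamiliar with the allocator, here is a minimal consumer-side sketch
of the uma(9) interface (zone creation, allocation, free), assuming the
standard uma_zcreate()/uma_zalloc()/uma_zfree() API and a hypothetical
"struct foo":

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>
    #include <vm/uma.h>

    struct foo {                    /* hypothetical fixed-size object */
            int     state;
            char    buf[64];
    };

    static uma_zone_t foo_zone;

    static void
    foo_zone_init(void)
    {
            /* One zone per object type; UMA caches freed items per CPU. */
            foo_zone = uma_zcreate("foo", sizeof(struct foo),
                NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    }

    static struct foo *
    foo_alloc(void)
    {
            return (uma_zalloc(foo_zone, M_WAITOK | M_ZERO));
    }

    static void
    foo_free(struct foo *fp)
    {
            uma_zfree(foo_zone, fp);
    }

The ctor/dtor/init/fini hooks (passed as NULL here) let a zone cache
partially constructed items, which is where much of the efficiency the
comment mentions comes from.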

Modified: head/sys/vm/uma_int.h
==============================================================================
--- head/sys/vm/uma_int.h	Mon May  2 19:56:48 2016	(r298939)
+++ head/sys/vm/uma_int.h	Mon May  2 20:16:29 2016	(r298940)
@@ -415,7 +415,7 @@ vsetslab(vm_offset_t va, uma_slab_t slab
 
 /*
  * The following two functions may be defined by architecture specific code
- * if they can provide more effecient allocation functions.  This is useful
+ * if they can provide more efficient allocation functions.  This is useful
  * for using direct mapped addresses.
  */
 void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
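
For architectures with a direct map, the functions named here let UMA skip a
pmap_enter() on every slab allocation. A rough sketch in the spirit of the
amd64 implementation of this era, assuming PHYS_TO_DMAP() and the
vm_page_alloc() interface of the time; the real per-architecture versions
differ in details such as zeroing and dump bookkeeping:

    #include <sys/param.h>
    #include <sys/malloc.h>
    #include <vm/vm.h>
    #include <vm/vm_extern.h>
    #include <vm/vm_page.h>
    #include <vm/uma.h>
    #include <vm/uma_int.h>
    #include <machine/vmparam.h>

    void *
    uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
    {
            vm_page_t m;

            *flags = UMA_SLAB_PRIV;
            m = vm_page_alloc(NULL, 0,
                malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
            if (m == NULL)
                    return (NULL);
            /* No pmap_enter(): the direct map already covers the page. */
            return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
    }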

Modified: head/sys/vm/vm_glue.c
==============================================================================
--- head/sys/vm/vm_glue.c	Mon May  2 19:56:48 2016	(r298939)
+++ head/sys/vm/vm_glue.c	Mon May  2 20:16:29 2016	(r298940)
@@ -149,7 +149,7 @@ kernacc(addr, len, rw)
  * the associated vm_map_entry range.  It does not determine whether the
  * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
- * used in conjuction with this call.
+ * used in conjunction with this call.
  */
 int
 useracc(addr, len, rw)
@@ -665,7 +665,7 @@ vm_forkproc(td, p2, td2, vm2, flags)
 }
 
 /*
- * Called after process has been wait(2)'ed apon and is being reaped.
+ * Called after process has been wait(2)'ed upon and is being reaped.
  * The idea is to reclaim resources that we could not reclaim while
  * the process was still executing.
  */
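
The first hunk's comment is worth underlining: useracc() only inspects the
vm_map_entry protections, so a successful check does not guarantee the pages
are resident or that an access will succeed. The actual transfer still has to
go through a fault-handling primitive such as copyin(). A hedged sketch of the
intended pairing (kernel-side; error handling abbreviated):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/errno.h>
    #include <vm/vm.h>
    #include <vm/vm_extern.h>

    static int
    fetch_user_buf(void *uaddr, void *kbuf, size_t len)
    {
            /* Advisory: does the map entry permit reading this range? */
            if (!useracc(uaddr, (int)len, VM_PROT_READ))
                    return (EFAULT);
            /* The copy itself may still fault; copyin() reports that. */
            return (copyin(uaddr, kbuf, len));
    }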

Modified: head/sys/vm/vm_map.c
==============================================================================
--- head/sys/vm/vm_map.c	Mon May  2 19:56:48 2016	(r298939)
+++ head/sys/vm/vm_map.c	Mon May  2 20:16:29 2016	(r298940)
@@ -3519,7 +3519,7 @@ vm_map_stack_locked(vm_map_t map, vm_off
 		return (KERN_NO_SPACE);
 
 	/*
-	 * If we can't accomodate max_ssize in the current mapping, no go.
+	 * If we can't accommodate max_ssize in the current mapping, no go.
 	 * However, we need to be aware that subsequent user mappings might
 	 * map into the space we have reserved for stack, and currently this
 	 * space is not protected.
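
This function backs the grow-down stack mappings that userland requests via
mmap(2)'s MAP_STACK flag, where max_ssize is the reservation being checked. A
small userland example of such a request (FreeBSD-specific flag; error
handling kept minimal):

    #include <sys/mman.h>
    #include <err.h>
    #include <stddef.h>
    #include <stdio.h>

    int
    main(void)
    {
            size_t max_ssize = 1024 * 1024;         /* 1 MB reservation */
            char *base;

            /* MAP_STACK reserves the range but materializes pages only
             * as the stack grows down from the top of the region. */
            base = mmap(NULL, max_ssize, PROT_READ | PROT_WRITE,
                MAP_STACK | MAP_ANON, -1, 0);
            if (base == MAP_FAILED)
                    err(1, "mmap");
            base[max_ssize - 1] = 1;        /* touch near the top */
            printf("stack region at %p\n", (void *)base);
            return (0);
    }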

Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c	Mon May  2 19:56:48 2016	(r298939)
+++ head/sys/vm/vm_object.c	Mon May  2 20:16:29 2016	(r298940)
@@ -2111,7 +2111,7 @@ vm_object_coalesce(vm_object_t prev_obje
 
 		/*
 		 * If prev_object was charged, then this mapping,
-		 * althought not charged now, may become writable
+		 * although not charged now, may become writable
 		 * later. Non-NULL cred in the object would prevent
 		 * swap reservation during enabling of the write
 		 * access, so reserve swap now. Failed reservation
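
The comment's reasoning generalizes: charge (reserve) backing store when the
mapping is created, because the later write-enable point is a bad place to
discover an out-of-swap condition. A sketch of the idea with hypothetical
helpers, not the kernel's API:

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct region {
            size_t charged;         /* swap charged against this region */
    };

    static bool
    reserve_swap(size_t len)
    {
            /* placeholder for real accounting against the swap total */
            (void)len;
            return (true);
    }

    static int
    region_map(struct region *r, size_t len, bool may_become_writable)
    {
            /* Reserve now: failing here is easy to report to the caller;
             * failing later at the write-enable point would not be. */
            if (may_become_writable && !reserve_swap(len))
                    return (ENOMEM);
            r->charged = may_become_writable ? len : 0;
            /* ... install the (currently read-only) mapping ... */
            return (0);
    }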

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Mon May  2 19:56:48 2016	(r298939)
+++ head/sys/vm/vm_page.h	Mon May  2 20:16:29 2016	(r298940)
@@ -141,7 +141,7 @@ struct vm_page {
 	vm_object_t object;		/* which object am I in (O,P) */
 	vm_pindex_t pindex;		/* offset into object (O,P) */
 	vm_paddr_t phys_addr;		/* physical address of page */
-	struct md_page md;		/* machine dependant stuff */
+	struct md_page md;		/* machine dependent stuff */
 	u_int wire_count;		/* wired down maps refs (P) */
 	volatile u_int busy_lock;	/* busy owners lock */
 	uint16_t hold_count;		/* page hold count (P) */

Modified: head/sys/vm/vm_pageout.c
==============================================================================
--- head/sys/vm/vm_pageout.c	Mon May  2 19:56:48 2016	(r298939)
+++ head/sys/vm/vm_pageout.c	Mon May  2 20:16:29 2016	(r298940)
@@ -447,7 +447,7 @@ more:
 		++pageout_count;
 		++ib;
 		/*
-		 * alignment boundry, stop here and switch directions.  Do
+		 * alignment boundary, stop here and switch directions.  Do
 		 * not clear ib.
 		 */
 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
@@ -477,7 +477,7 @@ more:
 
 	/*
 	 * If we exhausted our forward scan, continue with the reverse scan
-	 * when possible, even past a page boundry.  This catches boundry
+	 * when possible, even past a page boundary.  This catches boundary
 	 * conditions.
 	 */
 	if (ib && pageout_count < vm_pageout_page_count)
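
Both hunks in this file concern the same clustering loop: pages around the
target are gathered by scanning backward until a vm_pageout_page_count-aligned
boundary and then forward, so clusters tile the object predictably. A
self-contained sketch of the index arithmetic (hypothetical names; page
eligibility checks elided):

    #include <stdio.h>

    #define CLUSTER 8       /* analogous to vm_pageout_page_count */

    /*
     * Collect indices around "pindex" into cluster[], scanning backward
     * first and stopping at a CLUSTER-aligned boundary, then forward.
     * Returns the count.
     */
    static int
    cluster_around(unsigned long pindex, unsigned long cluster[CLUSTER])
    {
            unsigned long ib = 1, is = 1;
            int n = 0;

            cluster[n++] = pindex;
            /* Backward leg: stop at an alignment boundary. */
            while (n < CLUSTER && pindex >= ib) {
                    cluster[n++] = pindex - ib;
                    if ((pindex - ib) % CLUSTER == 0)
                            break;          /* hit the boundary */
                    ib++;
            }
            /* Forward leg: fill whatever room is left. */
            while (n < CLUSTER) {
                    cluster[n++] = pindex + is;
                    is++;
            }
            return (n);
    }

    int
    main(void)
    {
            unsigned long c[CLUSTER];
            int i, n = cluster_around(13, c);

            for (i = 0; i < n; i++)
                    printf("%lu ", c[i]);   /* 13 12 11 10 9 8 14 15 */
            printf("\n");
            return (0);
    }

The second comment covers the complementary case: if the forward leg comes up
short, the real loop resumes the backward scan even past the boundary so that
boundary-straddling runs are still collected.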

Modified: head/sys/vm/vnode_pager.c
==============================================================================
--- head/sys/vm/vnode_pager.c	Mon May  2 19:56:48 2016	(r298939)
+++ head/sys/vm/vnode_pager.c	Mon May  2 20:16:29 2016	(r298940)
@@ -819,7 +819,7 @@ vnode_pager_generic_getpages(struct vnod
 
 	/*
 	 * A sparse file can be encountered only for a single page request,
-	 * which may not be preceeded by call to vm_pager_haspage().
+	 * which may not be preceded by call to vm_pager_haspage().
 	 */
 	if (bp->b_blkno == -1) {
 		KASSERT(count == 1,
@@ -1139,7 +1139,7 @@ vnode_pager_putpages(vm_object_t object,
  * own vnodes if they fail to implement VOP_PUTPAGES.
  *
  * This is typically called indirectly via the pageout daemon and
- * clustering has already typically occured, so in general we ask the
+ * clustering has already typically occurred, so in general we ask the
  * underlying filesystem to write the data out asynchronously rather
  * then delayed.
  */
@@ -1182,7 +1182,7 @@ vnode_pager_generic_putpages(struct vnod
 
 	/*
 	 * If the page-aligned write is larger then the actual file we
-	 * have to invalidate pages occuring beyond the file EOF.  However,
+	 * have to invalidate pages occurring beyond the file EOF.  However,
 	 * there is an edge case where a file may not be page-aligned where
 	 * the last page is partially invalid.  In this case the filesystem
 	 * may not properly clear the dirty bits for the entire page (which
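
The edge case in this last comment is easiest to see with the page arithmetic
written out. A small self-contained illustration (hypothetical sizes;
trunc_page/round_page mimic the kernel macros):

    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define trunc_page(x)   ((x) & ~(PAGE_SIZE - 1))
    #define round_page(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int
    main(void)
    {
            unsigned long fsize = 10000;            /* file ends mid-page */
            unsigned long wend = round_page(fsize); /* aligned write end */

            /* Bytes of the last page beyond EOF must be invalidated
             * when the page-aligned write extends past the file. */
            printf("file size            %lu\n", fsize);
            printf("aligned write end    %lu\n", wend);
            printf("valid in last page   %lu\n", fsize - trunc_page(fsize));
            printf("invalid past EOF     %lu\n", wend - fsize);
            return (0);
    }

With a 10000-byte file the aligned write ends at 12288, so 2288 bytes of the
final page lie beyond EOF and must be invalidated rather than written back as
file data.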


