Skip site navigation (1)Skip section navigation (2)
Date:      Tue, 21 Jun 2011 20:30:38 +0000
From:      "Bjoern A. Zeeb" <bz@FreeBSD.org>
To:        Alan Cox <alc@FreeBSD.org>
Cc:        svn-src-head@freebsd.org, svn-src-all@freebsd.org, src-committers@freebsd.org
Subject:   Re: svn commit: r223307 - head/sys/vm
Message-ID:  <BBC34F79-FFA7-4A05-83B3-DE17E0AB14D0@FreeBSD.org>
In-Reply-To: <201106191913.p5JJDOqJ006272@svn.freebsd.org>
References:  <201106191913.p5JJDOqJ006272@svn.freebsd.org>

next in thread | previous in thread | raw e-mail | index | archive | help
On Jun 19, 2011, at 7:13 PM, Alan Cox wrote:

Hi Alan,

> Author: alc
> Date: Sun Jun 19 19:13:24 2011
> New Revision: 223307
> URL: http://svn.freebsd.org/changeset/base/223307
>
> Log:
>  Precisely document the synchronization rules for the page's dirty field.
>  (Saying that the lock on the object that the page belongs to must be held
>  only represents one aspect of the rules.)
>
>  Eliminate the use of the page queues lock for atomically performing read-
>  modify-write operations on the dirty field when the underlying architecture
>  supports atomic operations on char and short types.
>
>  Document the fact that 32KB pages aren't really supported.
>

Contrary to the tinderbox, I'd like to point out that all mips kernels
built by universe are broken with an SVN HEAD from earlier today.  Could
you please check and see if you can fix it?  The errors I get are:

vm_page.o: In function `vm_page_clear_dirty':
/sys/vm/vm_page.c:(.text+0x18d0): undefined reference to `atomic_clear_8'
/sys/vm/vm_page.c:(.text+0x18d0): relocation truncated to fit: R_MIPS_26 against `atomic_clear_8'
vm_page.o: In function `vm_page_set_validclean':
/sys/vm/vm_page.c:(.text+0x38f0): undefined reference to `atomic_clear_8'
/sys/vm/vm_page.c:(.text+0x38f0): relocation truncated to fit: R_MIPS_26 against `atomic_clear_8'

Thanks a lot!
/bz


>  Reviewed by:	attilio, kib
>
> Modified:
>  head/sys/vm/vm_fault.c
>  head/sys/vm/vm_page.c
>  head/sys/vm/vm_page.h
>
> Modified: head/sys/vm/vm_fault.c
> ==============================================================================
> --- head/sys/vm/vm_fault.c	Sun Jun 19 18:34:49 2011	(r223306)
> +++ head/sys/vm/vm_fault.c	Sun Jun 19 19:13:24 2011	(r223307)
> @@ -1089,10 +1089,20 @@ vm_fault_quick_hold_pages(vm_map_t map,
> 			 * caller's changes may go unnoticed because they are
> 			 * performed through an unmanaged mapping or by a DMA
> 			 * operation.
> +			 *
> +			 * The object lock is not held here.  Therefore, like
> +			 * a pmap operation, the page queues lock may be
> +			 * required in order to call vm_page_dirty().  See
> +			 * vm_page_clear_dirty_mask().
> 			 */
> +#if defined(__amd64__) || defined(__i386__) || defined(__ia64__) || \
> +    defined(__mips__)
> +			vm_page_dirty(*mp);
> +#else
> 			vm_page_lock_queues();
> 			vm_page_dirty(*mp);
> 			vm_page_unlock_queues();
> +#endif
> 		}
> 	}
> 	if (pmap_failed) {
>
> Modified: head/sys/vm/vm_page.c
> ==============================================================================
> --- head/sys/vm/vm_page.c	Sun Jun 19 18:34:49 2011	(r223306)
> +++ head/sys/vm/vm_page.c	Sun Jun 19 19:13:24 2011	(r223307)
> @@ -729,7 +729,12 @@ vm_page_sleep(vm_page_t m, const char *m
> /*
>  *	vm_page_dirty:
>  *
> - *	make page all dirty
> + *	Set all bits in the page's dirty field.
> + *
> + *	The object containing the specified page must be locked if the call is
> + *	made from the machine-independent layer.  If, however, the call is
> + *	made from the pmap layer, then the page queues lock may be required.
> + *	See vm_page_clear_dirty_mask().
>  */
> void
> vm_page_dirty(vm_page_t m)
> @@ -2325,15 +2330,41 @@ vm_page_clear_dirty_mask(vm_page_t m, in
> 	/*
> 	 * If the object is locked and the page is neither VPO_BUSY nor
> 	 * PG_WRITEABLE, then the page's dirty field cannot possibly be
> -	 * modified by a concurrent pmap operation.
> +	 * set by a concurrent pmap operation.
> 	 */
> 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
> 	if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0)
> 		m->dirty &= ~pagebits;
> 	else {
> +#if defined(__amd64__) || defined(__i386__) || defined(__ia64__) || \
> +    defined(__mips__)
> +		/*
> +		 * On the aforementioned architectures, the page queues lock
> +		 * is not required by the following read-modify-write
> +		 * operation.  The combination of the object's lock and an
> +		 * atomic operation suffice.  Moreover, the pmap layer on
> +		 * these architectures can call vm_page_dirty() without
> +		 * holding the page queues lock.
> +		 */
> +#if PAGE_SIZE == 4096
> +		atomic_clear_char(&m->dirty, pagebits);
> +#elif PAGE_SIZE == 8192
> +		atomic_clear_short(&m->dirty, pagebits);
> +#elif PAGE_SIZE == 16384
> +		atomic_clear_int(&m->dirty, pagebits);
> +#else
> +#error "PAGE_SIZE is not supported."
> +#endif
> +#else
> +		/*
> +		 * Otherwise, the page queues lock is required to ensure that
> +		 * a concurrent pmap operation does not set the page's dirty
> +		 * field during the following read-modify-write operation.
> +		 */
> 		vm_page_lock_queues();
> 		m->dirty &= ~pagebits;
> 		vm_page_unlock_queues();
> +#endif
> 	}
> }
>
>
> Modified: head/sys/vm/vm_page.h
> ==============================================================================
> --- head/sys/vm/vm_page.h	Sun Jun 19 18:34:49 2011	(r223306)
> +++ head/sys/vm/vm_page.h	Sun Jun 19 19:13:24 2011	(r223307)
> @@ -89,10 +89,26 @@
>  *	and offset to which this page belongs (for pageout),
>  *	and sundry status bits.
>  *
> - *	Fields in this structure are locked either by the lock on the
> - *	object that the page belongs to (O), its corresponding page lock (P),
> - *	or by the lock on the page queues (Q).
> - *
> + *	In general, operations on this structure's mutable fields are
> + *	synchronized using either one of or a combination of the lock on the
> + *	object that the page belongs to (O), the pool lock for the page (P),
> + *	or the lock for either the free or paging queues (Q).  If a field is
> + *	annotated below with two of these locks, then holding either lock is
> + *	sufficient for read access, but both locks are required for write
> + *	access.
> + *
> + *	In contrast, the synchronization of accesses to the page's dirty field
> + *	is machine dependent (M).  In the machine-independent layer, the lock
> + *	on the object that the page belongs to must be held in order to
> + *	operate on the field.  However, the pmap layer is permitted to set
> + *	all bits within the field without holding that lock.  Therefore, if
> + *	the underlying architecture does not support atomic read-modify-write
> + *	operations on the field's type, then the machine-independent layer
> + *	must also hold the page queues lock when performing read-modify-write
> + *	operations and the pmap layer must hold the page queues lock when
> + *	setting the field.  In the machine-independent layer, the
> + *	implementation of read-modify-write operations on the field is
> + *	encapsulated in vm_page_clear_dirty_mask().
>  */
>
> TAILQ_HEAD(pglist, vm_page);
> @@ -120,18 +136,19 @@ struct vm_page {
> 	u_char	busy;			/* page busy count (O) */
> 	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
> 	/* so, on normal X86 kernels, they must be at least 8 bits wide */
> +	/* In reality, support for 32KB pages is not fully implemented. */
> #if PAGE_SIZE == 4096
> 	u_char	valid;			/* map of valid DEV_BSIZE chunks (O) */
> -	u_char	dirty;			/* map of dirty DEV_BSIZE chunks (O) */
> +	u_char	dirty;			/* map of dirty DEV_BSIZE chunks (M) */
> #elif PAGE_SIZE == 8192
> 	u_short	valid;			/* map of valid DEV_BSIZE chunks (O) */
> -	u_short	dirty;			/* map of dirty DEV_BSIZE chunks (O) */
> +	u_short	dirty;			/* map of dirty DEV_BSIZE chunks (M) */
> #elif PAGE_SIZE == 16384
> 	u_int valid;			/* map of valid DEV_BSIZE chunks (O) */
> -	u_int dirty;			/* map of dirty DEV_BSIZE chunks (O) */
> +	u_int dirty;			/* map of dirty DEV_BSIZE chunks (M) */
> #elif PAGE_SIZE == 32768
> 	u_long valid;			/* map of valid DEV_BSIZE chunks (O) */
> -	u_long dirty;			/* map of dirty DEV_BSIZE chunks (O) */
> +	u_long dirty;			/* map of dirty DEV_BSIZE chunks (M) */
> #endif
> };
>

-- 
Bjoern A. Zeeb                                 You have to have visions!
         Stop bit received. Insert coin for new address family.




Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?BBC34F79-FFA7-4A05-83B3-DE17E0AB14D0>