Skip site navigation (1) | Skip section navigation (2)
Date:      Thu, 23 Jun 2011 10:36:06 +0800
From:      Adrian Chadd <adrian@freebsd.org>
To:        Alan Cox <alc@freebsd.org>
Cc:        svn-src-head@freebsd.org, svn-src-all@freebsd.org, src-committers@freebsd.org
Subject:   Re: svn commit: r223307 - head/sys/vm
Message-ID:  <BANLkTi=Ep2BSLmRd2xe9NNznvMDuUy1C0w@mail.gmail.com>
In-Reply-To: <201106191913.p5JJDOqJ006272@svn.freebsd.org>
References:  <201106191913.p5JJDOqJ006272@svn.freebsd.org>

index | next in thread | previous in thread | raw e-mail

Can this commit please be reverted whilst the kinks are worked out for MIPS?

I'm currently knee deep in MIPS related hackery and I'd rather stay on
an unpatched -HEAD so dogfood can be properly self-consumed.
Thanks,


Adrian

On 20 June 2011 03:13, Alan Cox <alc@freebsd.org> wrote:
> Author: alc
> Date: Sun Jun 19 19:13:24 2011
> New Revision: 223307
> URL: http://svn.freebsd.org/changeset/base/223307
>
> Log:
>  Precisely document the synchronization rules for the page's dirty field.
>  (Saying that the lock on the object that the page belongs to must be held
>  only represents one aspect of the rules.)
>
>  Eliminate the use of the page queues lock for atomically performing read-
>  modify-write operations on the dirty field when the underlying architecture
>  supports atomic operations on char and short types.
>
>  Document the fact that 32KB pages aren't really supported.
>
>  Reviewed by:  attilio, kib
>
> Modified:
>  head/sys/vm/vm_fault.c
>  head/sys/vm/vm_page.c
>  head/sys/vm/vm_page.h
>
> Modified: head/sys/vm/vm_fault.c
> ==============================================================================
> --- head/sys/vm/vm_fault.c      Sun Jun 19 18:34:49 2011        (r223306)
> +++ head/sys/vm/vm_fault.c      Sun Jun 19 19:13:24 2011        (r223307)
> @@ -1089,10 +1089,20 @@ vm_fault_quick_hold_pages(vm_map_t map,
>                         * caller's changes may go unnoticed because they are
>                         * performed through an unmanaged mapping or by a DMA
>                         * operation.
> +                        *
> +                        * The object lock is not held here.  Therefore, like
> +                        * a pmap operation, the page queues lock may be
> +                        * required in order to call vm_page_dirty().  See
> +                        * vm_page_clear_dirty_mask().
>                         */
> +#if defined(__amd64__) || defined(__i386__) || defined(__ia64__) || \
> +    defined(__mips__)
> +                       vm_page_dirty(*mp);
> +#else
>                        vm_page_lock_queues();
>                        vm_page_dirty(*mp);
>                        vm_page_unlock_queues();
> +#endif
>                }
>        }
>        if (pmap_failed) {
>
> Modified: head/sys/vm/vm_page.c
> ==============================================================================
> --- head/sys/vm/vm_page.c       Sun Jun 19 18:34:49 2011        (r223306)
> +++ head/sys/vm/vm_page.c       Sun Jun 19 19:13:24 2011        (r223307)
> @@ -729,7 +729,12 @@ vm_page_sleep(vm_page_t m, const char *m
>  /*
>  *     vm_page_dirty:
>  *
> - *     make page all dirty
> + *     Set all bits in the page's dirty field.
> + *
> + *     The object containing the specified page must be locked if the call is
> + *     made from the machine-independent layer.  If, however, the call is
> + *     made from the pmap layer, then the page queues lock may be required.
> + *     See vm_page_clear_dirty_mask().
>  */
>  void
>  vm_page_dirty(vm_page_t m)
> @@ -2325,15 +2330,41 @@ vm_page_clear_dirty_mask(vm_page_t m, in
>        /*
>         * If the object is locked and the page is neither VPO_BUSY nor
>         * PG_WRITEABLE, then the page's dirty field cannot possibly be
> -        * modified by a concurrent pmap operation.
> +        * set by a concurrent pmap operation.
>         */
>        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
>        if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0)
>                m->dirty &= ~pagebits;
>        else {
> +#if defined(__amd64__) || defined(__i386__) || defined(__ia64__) || \
> +    defined(__mips__)
> +               /*
> +                * On the aforementioned architectures, the page queues lock
> +                * is not required by the following read-modify-write
> +                * operation.  The combination of the object's lock and an
> +                * atomic operation suffices.  Moreover, the pmap layer on
> +                * these architectures can call vm_page_dirty() without
> +                * holding the page queues lock.
> +                */
> +#if PAGE_SIZE == 4096
> +               atomic_clear_char(&m->dirty, pagebits);
> +#elif PAGE_SIZE == 8192
> +               atomic_clear_short(&m->dirty, pagebits);
> +#elif PAGE_SIZE == 16384
> +               atomic_clear_int(&m->dirty, pagebits);
> +#else
> +#error "PAGE_SIZE is not supported."
> +#endif
> +#else
> +               /*
> +                * Otherwise, the page queues lock is required to ensure that
> +                * a concurrent pmap operation does not set the page's dirty
> +                * field during the following read-modify-write operation.
> +                */
>                vm_page_lock_queues();
>                m->dirty &= ~pagebits;
>                vm_page_unlock_queues();
> +#endif
>        }
>  }
>
>
> Modified: head/sys/vm/vm_page.h
> ==============================================================================
> --- head/sys/vm/vm_page.h       Sun Jun 19 18:34:49 2011        (r223306)
> +++ head/sys/vm/vm_page.h       Sun Jun 19 19:13:24 2011        (r223307)
> @@ -89,10 +89,26 @@
>  *     and offset to which this page belongs (for pageout),
>  *     and sundry status bits.
>  *
> - *     Fields in this structure are locked either by the lock on the
> - *     object that the page belongs to (O), its corresponding page lock (P),
> - *     or by the lock on the page queues (Q).
> - *
> + *     In general, operations on this structure's mutable fields are
> + *     synchronized using either one of or a combination of the lock on the
> + *     object that the page belongs to (O), the pool lock for the page (P),
> + *     or the lock for either the free or paging queues (Q).  If a field is
> + *     annotated below with two of these locks, then holding either lock is
> + *     sufficient for read access, but both locks are required for write
> + *     access.
> + *
> + *     In contrast, the synchronization of accesses to the page's dirty field
> + *     is machine dependent (M).  In the machine-independent layer, the lock
> + *     on the object that the page belongs to must be held in order to
> + *     operate on the field.  However, the pmap layer is permitted to set
> + *     all bits within the field without holding that lock.  Therefore, if
> + *     the underlying architecture does not support atomic read-modify-write
> + *     operations on the field's type, then the machine-independent layer
> + *     must also hold the page queues lock when performing read-modify-write
> + *     operations and the pmap layer must hold the page queues lock when
> + *     setting the field.  In the machine-independent layer, the
> + *     implementation of read-modify-write operations on the field is
> + *     encapsulated in vm_page_clear_dirty_mask().
>  */
>
>  TAILQ_HEAD(pglist, vm_page);
> @@ -120,18 +136,19 @@ struct vm_page {
>        u_char  busy;                   /* page busy count (O) */
>        /* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
>        /* so, on normal X86 kernels, they must be at least 8 bits wide */
> +       /* In reality, support for 32KB pages is not fully implemented. */
>  #if PAGE_SIZE == 4096
>        u_char  valid;                  /* map of valid DEV_BSIZE chunks (O) */
> -       u_char  dirty;                  /* map of dirty DEV_BSIZE chunks (O) */
> +       u_char  dirty;                  /* map of dirty DEV_BSIZE chunks (M) */
>  #elif PAGE_SIZE == 8192
>        u_short valid;                  /* map of valid DEV_BSIZE chunks (O) */
> -       u_short dirty;                  /* map of dirty DEV_BSIZE chunks (O) */
> +       u_short dirty;                  /* map of dirty DEV_BSIZE chunks (M) */
>  #elif PAGE_SIZE == 16384
>        u_int valid;                    /* map of valid DEV_BSIZE chunks (O) */
> -       u_int dirty;                    /* map of dirty DEV_BSIZE chunks (O) */
> +       u_int dirty;                    /* map of dirty DEV_BSIZE chunks (M) */
>  #elif PAGE_SIZE == 32768
>        u_long valid;                   /* map of valid DEV_BSIZE chunks (O) */
> -       u_long dirty;                   /* map of dirty DEV_BSIZE chunks (O) */
> +       u_long dirty;                   /* map of dirty DEV_BSIZE chunks (M) */
>  #endif
>  };
>
>


help

Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?BANLkTi=Ep2BSLmRd2xe9NNznvMDuUy1C0w>