From owner-svn-src-all@FreeBSD.ORG  Wed Oct  8 16:48:06 2014
From: Konstantin Belousov <kib@FreeBSD.org>
Date: Wed, 8 Oct 2014 16:48:04 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
        svn-src-head@freebsd.org
Message-Id: <201410081648.s98Gm4mn087390@svn.freebsd.org>
Subject: svn commit: r272761 - in head/sys: amd64/amd64 amd64/include
        dev/drm2/i915 i386/i386 i386/include

Author: kib
Date: Wed Oct  8 16:48:03 2014
New Revision: 272761
URL: https://svnweb.freebsd.org/changeset/base/272761

Log:
  Add an argument to the x86 pmap_invalidate_cache_range() to request
  forced invalidation of the cache range regardless of the presence of
  the self-snoop feature.  In some modes, some recent Intel GPUs are
  not coherent with the CPU caches, and dirty lines in the CPU cache
  must be flushed before the pages are transferred to the GPU domain.
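  As an illustration of the new interface (a hypothetical sketch, not
  code from this commit: "gtt_va" is an invented variable, and the
  callers converted below all pass FALSE), a driver handing a
  CPU-dirtied page to a non-coherent GPU would pass TRUE so the flush
  runs even on CPUs that advertise self-snoop:

        /*
         * Write back dirty CPU cache lines covering the page that
         * backs a GPU mapping.  TRUE bypasses the CPUID_SS ("Self
         * Snoop") shortcut, forcing the per-line flush (or the
         * global cache writeback fallback) to run; on this forced
         * path the range does not have to be page-aligned.
         */
        pmap_invalidate_cache_range(gtt_va, gtt_va + PAGE_SIZE, TRUE);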
  Reviewed by:	alc (previous version)
  Tested by:	pho (amd64)
  Sponsored by:	The FreeBSD Foundation
  MFC after:	1 week

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/amd64/include/pmap.h
  head/sys/dev/drm2/i915/intel_ringbuffer.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/i386/vm_machdep.c
  head/sys/i386/include/pmap.h

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Wed Oct  8 16:35:57 2014	(r272760)
+++ head/sys/amd64/amd64/pmap.c	Wed Oct  8 16:48:03 2014	(r272761)
@@ -1710,16 +1710,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t
 #define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)
 
 void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
 {
 
-	KASSERT((sva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: sva not page-aligned"));
-	KASSERT((eva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	if (force) {
+		sva &= ~(vm_offset_t)cpu_clflush_line_size;
+	} else {
+		KASSERT((sva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: sva not page-aligned"));
+		KASSERT((eva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	}
 
-	if (cpu_feature & CPUID_SS)
-		; /* If "Self Snoop" is supported, do nothing. */
+	if ((cpu_feature & CPUID_SS) != 0 && !force)
+		; /* If "Self Snoop" is supported and allowed, do nothing. */
 	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 
@@ -6222,7 +6226,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-	pmap_invalidate_cache_range(va, va + tmpsize);
+	pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
 	return ((void *)(va + offset));
 }
 
@@ -6558,7 +6562,7 @@ pmap_change_attr_locked(vm_offset_t va, 
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
-		pmap_invalidate_cache_range(base, tmpva);
+		pmap_invalidate_cache_range(base, tmpva, FALSE);
 	}
 	return (error);
 }

Modified: head/sys/amd64/include/pmap.h
==============================================================================
--- head/sys/amd64/include/pmap.h	Wed Oct  8 16:35:57 2014	(r272760)
+++ head/sys/amd64/include/pmap.h	Wed Oct  8 16:48:03 2014	(r272761)
@@ -394,7 +394,8 @@ void	pmap_invalidate_range(pmap_t, vm_of
 void	pmap_invalidate_all(pmap_t);
 void	pmap_invalidate_cache(void);
 void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+	    boolean_t force);
 void	pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
 #endif /* _KERNEL */

Modified: head/sys/dev/drm2/i915/intel_ringbuffer.c
==============================================================================
--- head/sys/dev/drm2/i915/intel_ringbuffer.c	Wed Oct  8 16:35:57 2014	(r272760)
+++ head/sys/dev/drm2/i915/intel_ringbuffer.c	Wed Oct  8 16:48:03 2014	(r272761)
@@ -366,7 +366,7 @@ init_pipe_control(struct intel_ring_buff
 		goto err_unpin;
 	pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
 	pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
-	    (vm_offset_t)pc->cpu_page + PAGE_SIZE);
+	    (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE);
 
 	pc->obj = obj;
 	ring->private = pc;
@@ -1014,7 +1014,7 @@ static int init_status_page(struct intel
 	pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0], 1);
 	pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
-	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
+	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE);
 
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Wed Oct  8 16:35:57 2014	(r272760)
+++ head/sys/i386/i386/pmap.c	Wed Oct  8 16:48:03 2014	(r272761)
@@ -1172,16 +1172,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t
 #define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)
 
 void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
 {
 
-	KASSERT((sva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: sva not page-aligned"));
-	KASSERT((eva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	if (force) {
+		sva &= ~(vm_offset_t)cpu_clflush_line_size;
+	} else {
+		KASSERT((sva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: sva not page-aligned"));
+		KASSERT((eva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	}
 
-	if (cpu_feature & CPUID_SS)
-		; /* If "Self Snoop" is supported, do nothing. */
+	if ((cpu_feature & CPUID_SS) != 0 && !force)
+		; /* If "Self Snoop" is supported and allowed, do nothing. */
 	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 
@@ -5164,7 +5168,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-	pmap_invalidate_cache_range(va, va + size);
+	pmap_invalidate_cache_range(va, va + size, FALSE);
 	return ((void *)(va + offset));
 }
 
@@ -5370,7 +5374,7 @@ pmap_change_attr(vm_offset_t va, vm_size
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
-		pmap_invalidate_cache_range(base, tmpva);
+		pmap_invalidate_cache_range(base, tmpva, FALSE);
 	}
 	return (0);
 }

Modified: head/sys/i386/i386/vm_machdep.c
==============================================================================
--- head/sys/i386/i386/vm_machdep.c	Wed Oct  8 16:35:57 2014	(r272760)
+++ head/sys/i386/i386/vm_machdep.c	Wed Oct  8 16:48:03 2014	(r272761)
@@ -813,7 +813,7 @@ sf_buf_invalidate(struct sf_buf *sf)
 	 * settings are recalculated.
 	 */
 	pmap_qenter(sf->kva, &m, 1);
-	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE);
+	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE, FALSE);
 }
 
 /*

Modified: head/sys/i386/include/pmap.h
==============================================================================
--- head/sys/i386/include/pmap.h	Wed Oct  8 16:35:57 2014	(r272760)
+++ head/sys/i386/include/pmap.h	Wed Oct  8 16:48:03 2014	(r272761)
@@ -458,7 +458,8 @@ void	pmap_invalidate_range(pmap_t, vm_of
 void	pmap_invalidate_all(pmap_t);
 void	pmap_invalidate_cache(void);
 void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+	    boolean_t force);
 #endif /* _KERNEL */
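
For reference, a simplified sketch of the control flow that results in
pmap_invalidate_cache_range() after this change.  It is a
reconstruction, not verbatim kernel code: the CLFLUSH loop and the
global-writeback fallback are context the hunks above do not show, and
the sketch rounds sva down with the conventional (line size - 1) mask,
whereas the committed hunks mask with cpu_clflush_line_size itself.

	void
	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
	    boolean_t force)
	{

		if (force) {
			/* Forced ranges may be unaligned; round the
			   start down to a cache-line boundary. */
			sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
		} else {
			KASSERT((sva & PAGE_MASK) == 0,
			    ("pmap_invalidate_cache_range: sva not page-aligned"));
			KASSERT((eva & PAGE_MASK) == 0,
			    ("pmap_invalidate_cache_range: eva not page-aligned"));
		}

		if ((cpu_feature & CPUID_SS) != 0 && !force)
			;	/* The CPU snoops its own caches. */
		else if ((cpu_feature & CPUID_CLFSH) != 0 &&
		    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
			/* Fence so prior stores are included in the
			   write-back, then flush line by line. */
			mfence();
			for (; sva < eva; sva += cpu_clflush_line_size)
				clflush(sva);
			mfence();
		} else {
			/* No CLFLUSH, or the range exceeds the 2MB
			   threshold: write back and invalidate the
			   entire cache. */
			pmap_invalidate_cache();
		}
	}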