From: Konstantin Belousov <kib@FreeBSD.org>
Date: Sun, 18 Jan 2015 09:49:33 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
    svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject: svn commit: r277315 - stable/10/sys/x86/iommu
Message-Id: <201501180949.t0I9nXHl023048@svn.freebsd.org>

Author: kib
Date: Sun Jan 18 09:49:32 2015
New Revision: 277315
URL: https://svnweb.freebsd.org/changeset/base/277315

Log:
  MFC r277023:
  Avoid excessive flushing, and perform the necessary flushing that was
  previously missed, in the IOMMU page table update code.
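In outline: the old dmar_unmap_pgtbl() flushed the whole mapped page
table page whenever the unit was not cache-coherent, which over-flushed
on unmaps that wrote nothing, while some entry stores were not reliably
flushed at all.  The change attaches a targeted flush to each entry
store instead.  A minimal sketch of the resulting discipline (names as
in the diff below; the sequencing is illustrative, not the committed
code):

	/*
	 * Store the entry, then push just that entry to RAM so the
	 * IOMMU page walk sees it.  The flush helpers are no-ops when
	 * the unit snoops CPU caches (DMAR_IS_COHERENT).
	 */
	dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(m) | DMAR_PTE_R |
	    DMAR_PTE_W);
	dmar_flush_pte_to_ram(ctx->dmar, pte);
	...
	dmar_unmap_pgtbl(sf);	/* no longer flushes the whole page */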
Modified:
  stable/10/sys/x86/iommu/intel_ctx.c
  stable/10/sys/x86/iommu/intel_dmar.h
  stable/10/sys/x86/iommu/intel_idpgtbl.c
  stable/10/sys/x86/iommu/intel_utils.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/x86/iommu/intel_ctx.c
==============================================================================
--- stable/10/sys/x86/iommu/intel_ctx.c	Sun Jan 18 07:08:06 2015	(r277314)
+++ stable/10/sys/x86/iommu/intel_ctx.c	Sun Jan 18 09:49:32 2015	(r277315)
@@ -96,7 +96,8 @@ dmar_ensure_ctx_page(struct dmar_unit *d
 	re += bus;
 	dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
 	    VM_PAGE_TO_PHYS(ctxm)));
-	dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
+	dmar_flush_root_to_ram(dmar, re);
+	dmar_unmap_pgtbl(sf);
 	TD_PINNED_ASSERT;
 }
 
@@ -153,6 +154,7 @@ ctx_id_entry_init(struct dmar_ctx *ctx,
 		    (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
 		    DMAR_CTX1_P);
 	}
+	dmar_flush_ctx_to_ram(unit, ctxp);
 }
 
 static int
@@ -358,7 +360,7 @@ dmar_get_ctx(struct dmar_unit *dmar, dev
 		ctx->domain = alloc_unrl(dmar->domids);
 		if (ctx->domain == -1) {
 			DMAR_UNLOCK(dmar);
-			dmar_unmap_pgtbl(sf, true);
+			dmar_unmap_pgtbl(sf);
 			dmar_ctx_dtr(ctx, true, true);
 			TD_PINNED_ASSERT;
 			return (NULL);
@@ -383,7 +385,7 @@ dmar_get_ctx(struct dmar_unit *dmar, dev
 		} else {
 			dmar_ctx_dtr(ctx1, true, true);
 		}
-		dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
+		dmar_unmap_pgtbl(sf);
 	}
 	ctx->refs++;
 	if ((ctx->flags & DMAR_CTX_RMRR) != 0)
@@ -474,7 +476,7 @@ dmar_free_ctx_locked(struct dmar_unit *d
 	if (ctx->refs > 1) {
 		ctx->refs--;
 		DMAR_UNLOCK(dmar);
-		dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
+		dmar_unmap_pgtbl(sf);
 		TD_PINNED_ASSERT;
 		return;
 	}
@@ -490,6 +492,7 @@ dmar_free_ctx_locked(struct dmar_unit *d
 	 */
 	dmar_pte_clear(&ctxp->ctx1);
 	ctxp->ctx2 = 0;
+	dmar_flush_ctx_to_ram(dmar, ctxp);
 	dmar_inv_ctx_glob(dmar);
 	if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
 		if (dmar->qi_enabled)
@@ -507,7 +510,7 @@ dmar_free_ctx_locked(struct dmar_unit *d
 	taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task);
 	KASSERT(TAILQ_EMPTY(&ctx->unload_entries),
 	    ("unfinished unloads %p", ctx));
-	dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
+	dmar_unmap_pgtbl(sf);
 	free_unr(dmar->domids, ctx->domain);
 	dmar_ctx_dtr(ctx, true, true);
 	TD_PINNED_ASSERT;

Modified: stable/10/sys/x86/iommu/intel_dmar.h
==============================================================================
--- stable/10/sys/x86/iommu/intel_dmar.h	Sun Jan 18 07:08:06 2015	(r277314)
+++ stable/10/sys/x86/iommu/intel_dmar.h	Sun Jan 18 09:49:32 2015	(r277315)
@@ -230,11 +230,14 @@ struct vm_page *dmar_pgalloc(vm_object_t
 void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
 void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
     struct sf_buf **sf);
-void dmar_unmap_pgtbl(struct sf_buf *sf, bool coherent);
+void dmar_unmap_pgtbl(struct sf_buf *sf);
 int dmar_load_root_entry_ptr(struct dmar_unit *unit);
 int dmar_inv_ctx_glob(struct dmar_unit *unit);
 int dmar_inv_iotlb_glob(struct dmar_unit *unit);
 int dmar_flush_write_bufs(struct dmar_unit *unit);
+void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
+void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
+void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
 int dmar_enable_translation(struct dmar_unit *unit);
 int dmar_disable_translation(struct dmar_unit *unit);
 bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);

Modified: stable/10/sys/x86/iommu/intel_idpgtbl.c
==============================================================================
--- stable/10/sys/x86/iommu/intel_idpgtbl.c	Sun Jan 18 07:08:06 2015	(r277314)
+++ stable/10/sys/x86/iommu/intel_idpgtbl.c	Sun Jan 18 09:49:32 2015	(r277315)
@@ -146,7 +146,7 @@ ctx_idmap_nextlvl(struct idpgtbl *tbl, i
 		}
 	}
 	/* ctx_get_idmap_pgtbl flushes CPU cache if needed. */
-	dmar_unmap_pgtbl(sf, true);
+	dmar_unmap_pgtbl(sf);
 	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
 }
 
@@ -361,7 +361,7 @@ ctx_pgtbl_map_pte(struct dmar_ctx *ctx,
 		pte = (dmar_pte_t *)sf_buf_kva(*sf);
 	} else {
 		if (*sf != NULL)
-			dmar_unmap_pgtbl(*sf, DMAR_IS_COHERENT(ctx->dmar));
+			dmar_unmap_pgtbl(*sf);
 		*idxp = idx;
 retry:
 		pte = dmar_map_pgtbl(ctx->pgtbl_obj, idx, flags, sf);
@@ -397,9 +397,10 @@ retry:
 			}
 			dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
 			    VM_PAGE_TO_PHYS(m));
+			dmar_flush_pte_to_ram(ctx->dmar, ptep);
 			sf_buf_page(sfp)->wire_count += 1;
 			m->wire_count--;
-			dmar_unmap_pgtbl(sfp, DMAR_IS_COHERENT(ctx->dmar));
+			dmar_unmap_pgtbl(sfp);
 			/* Only executed once. */
 			goto retry;
 		}
@@ -467,20 +468,19 @@ ctx_map_buf_locked(struct dmar_ctx *ctx,
 		if (pte == NULL) {
 			KASSERT((flags & DMAR_PGF_WAITOK) == 0,
 			    ("failed waitable pte alloc %p", ctx));
-			if (sf != NULL) {
-				dmar_unmap_pgtbl(sf,
-				    DMAR_IS_COHERENT(ctx->dmar));
-			}
+			if (sf != NULL)
+				dmar_unmap_pgtbl(sf);
 			ctx_unmap_buf_locked(ctx, base1, base - base1, flags);
 			TD_PINNED_ASSERT;
 			return (ENOMEM);
 		}
 		dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
 		    (superpage ? DMAR_PTE_SP : 0));
+		dmar_flush_pte_to_ram(ctx->dmar, pte);
 		sf_buf_page(sf)->wire_count += 1;
 	}
 	if (sf != NULL)
-		dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(ctx->dmar));
+		dmar_unmap_pgtbl(sf);
 	TD_PINNED_ASSERT;
 	return (0);
 }
@@ -567,9 +567,10 @@ ctx_unmap_clear_pte(struct dmar_ctx *ctx
 	vm_page_t m;
 
 	dmar_pte_clear(&pte->pte);
+	dmar_flush_pte_to_ram(ctx->dmar, pte);
 	m = sf_buf_page(*sf);
 	if (free_sf) {
-		dmar_unmap_pgtbl(*sf, DMAR_IS_COHERENT(ctx->dmar));
+		dmar_unmap_pgtbl(*sf);
 		*sf = NULL;
 	}
 	m->wire_count--;
@@ -651,7 +652,7 @@ ctx_unmap_buf_locked(struct dmar_ctx *ct
 		    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
 	}
 	if (sf != NULL)
-		dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(ctx->dmar));
+		dmar_unmap_pgtbl(sf);
 	/*
 	 * See 11.1 Write Buffer Flushing for an explanation why RWBF
 	 * can be ignored there.

Modified: stable/10/sys/x86/iommu/intel_utils.c
==============================================================================
--- stable/10/sys/x86/iommu/intel_utils.c	Sun Jan 18 07:08:06 2015	(r277314)
+++ stable/10/sys/x86/iommu/intel_utils.c	Sun Jan 18 09:49:32 2015	(r277315)
@@ -351,20 +351,46 @@ dmar_map_pgtbl(vm_object_t obj, vm_pinde
 }
 
 void
-dmar_unmap_pgtbl(struct sf_buf *sf, bool coherent)
+dmar_unmap_pgtbl(struct sf_buf *sf)
 {
-	vm_page_t m;
 
-	m = sf_buf_page(sf);
 	sf_buf_free(sf);
 	sched_unpin();
+}
+
+static void
+dmar_flush_transl_to_ram(struct dmar_unit *unit, void *dst, size_t sz)
+{
 
+	if (DMAR_IS_COHERENT(unit))
+		return;
 	/*
 	 * If DMAR does not snoop paging structures accesses, flush
 	 * CPU cache to memory.
 	 */
-	if (!coherent)
-		pmap_invalidate_cache_pages(&m, 1);
+	pmap_invalidate_cache_range((uintptr_t)dst, (uintptr_t)dst + sz,
+	    TRUE);
 }
+
+void
+dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst)
+{
+
+	dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
+}
+
+void
+dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst)
+{
+
+	dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
+}
+
+void
+dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst)
+{
+
+	dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
+}
 
 /*
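A note on the shape of the new helpers: dmar_flush_transl_to_ram() is
type-agnostic, taking (void *, size_t), so each translation structure
gets a thin typed wrapper that flushes exactly sizeof(*dst) bytes
instead of a whole page.  The TRUE (force) argument to
pmap_invalidate_cache_range() makes the flush happen even on CPUs with
self-snoop, which keeps caches coherent only for CPU accesses, not for
the DMAR's own page walks.  Extending the scheme to another descriptor
type would be a wrapper in the same mold; a sketch (dmar_foo_entry_t
and dmar_flush_foo_to_ram are hypothetical names, not part of this
commit):

	/* Flush one hypothetical descriptor to RAM, iff not coherent. */
	void
	dmar_flush_foo_to_ram(struct dmar_unit *unit, dmar_foo_entry_t *dst)
	{

		dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
	}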