From owner-svn-src-all@freebsd.org Mon Nov 2 19:56:16 2020 Return-Path: Delivered-To: svn-src-all@mailman.nyi.freebsd.org Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2610:1c1:1:606c::19:1]) by mailman.nyi.freebsd.org (Postfix) with ESMTP id 551134599B5; Mon, 2 Nov 2020 19:56:16 +0000 (UTC) (envelope-from br@FreeBSD.org) Received: from mxrelay.nyi.freebsd.org (mxrelay.nyi.freebsd.org [IPv6:2610:1c1:1:606c::19:3]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest SHA256 client-signature RSA-PSS (4096 bits) client-digest SHA256) (Client CN "mxrelay.nyi.freebsd.org", Issuer "Let's Encrypt Authority X3" (verified OK)) by mx1.freebsd.org (Postfix) with ESMTPS id 4CQ3ZS1Ys7z3gBR; Mon, 2 Nov 2020 19:56:16 +0000 (UTC) (envelope-from br@FreeBSD.org) Received: from repo.freebsd.org (repo.freebsd.org [IPv6:2610:1c1:1:6068::e6a:0]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (Client did not present a certificate) by mxrelay.nyi.freebsd.org (Postfix) with ESMTPS id 18EF8EC17; Mon, 2 Nov 2020 19:56:16 +0000 (UTC) (envelope-from br@FreeBSD.org) Received: from repo.freebsd.org ([127.0.1.37]) by repo.freebsd.org (8.15.2/8.15.2) with ESMTP id 0A2JuFGV041905; Mon, 2 Nov 2020 19:56:15 GMT (envelope-from br@FreeBSD.org) Received: (from br@localhost) by repo.freebsd.org (8.15.2/8.15.2/Submit) id 0A2JuFdm041903; Mon, 2 Nov 2020 19:56:15 GMT (envelope-from br@FreeBSD.org) Message-Id: <202011021956.0A2JuFdm041903@repo.freebsd.org> X-Authentication-Warning: repo.freebsd.org: br set sender to br@FreeBSD.org using -f From: Ruslan Bukin Date: Mon, 2 Nov 2020 19:56:15 +0000 (UTC) To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org Subject: svn commit: r367282 - in head/sys/arm64: arm64 include X-SVN-Group: head X-SVN-Commit-Author: br X-SVN-Commit-Paths: in head/sys/arm64: arm64 include X-SVN-Commit-Revision: 367282 X-SVN-Commit-Repository: base 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-BeenThere: svn-src-all@freebsd.org X-Mailman-Version: 2.1.33 Precedence: list List-Id: "SVN commit messages for the entire src tree \(except for " user" and " projects" \)" List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Mon, 02 Nov 2020 19:56:16 -0000 Author: br Date: Mon Nov 2 19:56:15 2020 New Revision: 367282 URL: https://svnweb.freebsd.org/changeset/base/367282 Log: Add routines for ARM System MMU (SMMU) pmap management. Reviewed by: markj Discussed with: kib Sponsored by: DARPA, Innovate UK Differential Revision: https://reviews.freebsd.org/D26877 Modified: head/sys/arm64/arm64/pmap.c head/sys/arm64/include/pmap.h Modified: head/sys/arm64/arm64/pmap.c ============================================================================== --- head/sys/arm64/arm64/pmap.c Mon Nov 2 19:20:06 2020 (r367281) +++ head/sys/arm64/arm64/pmap.c Mon Nov 2 19:56:15 2020 (r367282) @@ -3605,6 +3605,184 @@ restart: } /* + * Add a single SMMU entry. This function does not sleep. + */ +int +pmap_senter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, + vm_prot_t prot, u_int flags) +{ + pd_entry_t *pde; + pt_entry_t new_l3, orig_l3; + pt_entry_t *l3; + vm_page_t mpte; + int lvl; + int rv; + + PMAP_ASSERT_STAGE1(pmap); + KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space")); + + va = trunc_page(va); + new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | + ATTR_S1_IDX(VM_MEMATTR_DEVICE) | L3_PAGE); + if ((prot & VM_PROT_WRITE) == 0) + new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO); + new_l3 |= ATTR_S1_XN; /* Execute never. */ + new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER); + new_l3 |= ATTR_S1_nG; /* Non global. */ + + CTR2(KTR_PMAP, "pmap_senter: %.16lx -> %.16lx", va, pa); + + PMAP_LOCK(pmap); + + /* + * In the case that a page table page is not + * resident, we are creating it here. 
+ */ +retry: + pde = pmap_pde(pmap, va, &lvl); + if (pde != NULL && lvl == 2) { + l3 = pmap_l2_to_l3(pde, va); + } else { + mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), NULL); + if (mpte == NULL) { + CTR0(KTR_PMAP, "pmap_enter: mpte == NULL"); + rv = KERN_RESOURCE_SHORTAGE; + goto out; + } + goto retry; + } + + orig_l3 = pmap_load(l3); + KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid")); + + /* New mapping */ + pmap_store(l3, new_l3); + pmap_resident_count_inc(pmap, 1); + dsb(ishst); + + rv = KERN_SUCCESS; +out: + PMAP_UNLOCK(pmap); + + return (rv); +} + +/* + * Remove a single SMMU entry. + */ +int +pmap_sremove(pmap_t pmap, vm_offset_t va) +{ + pt_entry_t *pte; + int lvl; + int rc; + + PMAP_LOCK(pmap); + + pte = pmap_pte(pmap, va, &lvl); + KASSERT(lvl == 3, + ("Invalid SMMU pagetable level: %d != 3", lvl)); + + if (pte != NULL) { + pmap_resident_count_dec(pmap, 1); + pmap_clear(pte); + rc = KERN_SUCCESS; + } else + rc = KERN_FAILURE; + + PMAP_UNLOCK(pmap); + + return (rc); +} + +/* + * Remove all the allocated L1, L2 pages from SMMU pmap. + * All the L3 entries must be cleared in advance, otherwise + * this function panics. 
+ */ +void +pmap_sremove_pages(pmap_t pmap) +{ + pd_entry_t l0e, *l1, l1e, *l2, l2e; + pt_entry_t *l3, l3e; + vm_page_t m, m0, m1; + vm_offset_t sva; + vm_paddr_t pa; + vm_paddr_t pa0; + vm_paddr_t pa1; + int i, j, k, l; + + PMAP_LOCK(pmap); + + for (sva = VM_MINUSER_ADDRESS, i = pmap_l0_index(sva); + (i < Ln_ENTRIES && sva < VM_MAXUSER_ADDRESS); i++) { + l0e = pmap->pm_l0[i]; + if ((l0e & ATTR_DESCR_VALID) == 0) { + sva += L0_SIZE; + continue; + } + pa0 = l0e & ~ATTR_MASK; + m0 = PHYS_TO_VM_PAGE(pa0); + l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0); + + for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) { + l1e = l1[j]; + if ((l1e & ATTR_DESCR_VALID) == 0) { + sva += L1_SIZE; + continue; + } + if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) { + sva += L1_SIZE; + continue; + } + pa1 = l1e & ~ATTR_MASK; + m1 = PHYS_TO_VM_PAGE(pa1); + l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1); + + for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) { + l2e = l2[k]; + if ((l2e & ATTR_DESCR_VALID) == 0) { + sva += L2_SIZE; + continue; + } + pa = l2e & ~ATTR_MASK; + m = PHYS_TO_VM_PAGE(pa); + l3 = (pt_entry_t *)PHYS_TO_DMAP(pa); + + for (l = pmap_l3_index(sva); l < Ln_ENTRIES; + l++, sva += L3_SIZE) { + l3e = l3[l]; + if ((l3e & ATTR_DESCR_VALID) == 0) + continue; + panic("%s: l3e found for va %jx\n", + __func__, sva); + } + + vm_page_unwire_noq(m1); + vm_page_unwire_noq(m); + pmap_resident_count_dec(pmap, 1); + vm_page_free(m); + pmap_clear(&l2[k]); + } + + vm_page_unwire_noq(m0); + pmap_resident_count_dec(pmap, 1); + vm_page_free(m1); + pmap_clear(&l1[j]); + } + + pmap_resident_count_dec(pmap, 1); + vm_page_free(m0); + pmap_clear(&pmap->pm_l0[i]); + } + + KASSERT(pmap->pm_stats.resident_count == 0, + ("Invalid resident count %jd", pmap->pm_stats.resident_count)); + + PMAP_UNLOCK(pmap); +} + +/* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. 
Modified: head/sys/arm64/include/pmap.h ============================================================================== --- head/sys/arm64/include/pmap.h Mon Nov 2 19:20:06 2020 (r367281) +++ head/sys/arm64/include/pmap.h Mon Nov 2 19:56:15 2020 (r367282) @@ -187,6 +187,12 @@ bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t * int pmap_fault(pmap_t, uint64_t, uint64_t); +/* System MMU (SMMU). */ +int pmap_senter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, vm_prot_t prot, + u_int flags); +int pmap_sremove(pmap_t pmap, vm_offset_t va); +void pmap_sremove_pages(pmap_t pmap); + struct pcb *pmap_switch(struct thread *, struct thread *); extern void (*pmap_clean_stage2_tlbi)(void);