Date: Thu, 3 Jan 2019 16:21:45 +0000 (UTC)
From: Mark Johnston <markj@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r342734 - head/sys/riscv/riscv
Message-ID: <201901031621.x03GLjE4042237@repo.freebsd.org>
Author: markj
Date: Thu Jan 3 16:21:44 2019
New Revision: 342734
URL: https://svnweb.freebsd.org/changeset/base/342734

Log:
  Fix some issues with the riscv pmap_protect() implementation.

  - Handle VM_PROT_EXECUTE.
  - Clear PTE_D and mark the page dirty when removing write access from
    a mapping.
  - Atomically clear PTE_W to avoid clobbering a hardware PTE update.

  Reviewed by:	jhb, kib
  MFC after:	1 week
  Sponsored by:	The FreeBSD Foundation
  Differential Revision:	https://reviews.freebsd.org/D18719

Modified:
  head/sys/riscv/riscv/pmap.c

Modified: head/sys/riscv/riscv/pmap.c
==============================================================================
--- head/sys/riscv/riscv/pmap.c	Thu Jan 3 16:19:32 2019	(r342733)
+++ head/sys/riscv/riscv/pmap.c	Thu Jan 3 16:21:44 2019	(r342734)
@@ -1853,22 +1853,28 @@ pmap_remove_all(vm_page_t m)
 void
 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 {
-	vm_offset_t va_next;
 	pd_entry_t *l1, *l2;
-	pt_entry_t *l3p, l3;
-	pt_entry_t entry;
+	pt_entry_t *l3, l3e, mask;
+	vm_page_t m;
+	vm_offset_t va_next;
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 		pmap_remove(pmap, sva, eva);
 		return;
 	}
 
-	if ((prot & VM_PROT_WRITE) == VM_PROT_WRITE)
+	if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
+	    (VM_PROT_WRITE | VM_PROT_EXECUTE))
 		return;
 
+	mask = 0;
+	if ((prot & VM_PROT_WRITE) == 0)
+		mask |= PTE_W | PTE_D;
+	if ((prot & VM_PROT_EXECUTE) == 0)
+		mask |= PTE_X;
+
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
-
 		l1 = pmap_l1(pmap, sva);
 		if (pmap_load(l1) == 0) {
 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
@@ -1882,26 +1888,30 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t
 			va_next = eva;
 
 		l2 = pmap_l1_to_l2(l1, sva);
-		if (l2 == NULL)
+		if (l2 == NULL || pmap_load(l2) == 0)
 			continue;
-		if (pmap_load(l2) == 0)
-			continue;
 		if ((pmap_load(l2) & PTE_RX) != 0)
 			continue;
 
 		if (va_next > eva)
 			va_next = eva;
 
-		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
+		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
 		    sva += L3_SIZE) {
-			l3 = pmap_load(l3p);
-			if ((l3 & PTE_V) != 0) {
-				entry = pmap_load(l3p);
-				entry &= ~PTE_W;
-				pmap_store(l3p, entry);
-				/* XXX: Use pmap_invalidate_range */
-				pmap_invalidate_page(pmap, sva);
+			l3e = pmap_load(l3);
+retry:
+			if ((l3e & PTE_V) == 0)
+				continue;
+			if ((prot & VM_PROT_WRITE) == 0 &&
+			    (l3e & (PTE_SW_MANAGED | PTE_D)) ==
+			    (PTE_SW_MANAGED | PTE_D)) {
+				m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(l3e));
+				vm_page_dirty(m);
 			}
+			if (!atomic_fcmpset_long(l3, &l3e, l3e & ~mask))
+				goto retry;
+			/* XXX: Use pmap_invalidate_range */
+			pmap_invalidate_page(pmap, sva);
 		}
 	}
 	PMAP_UNLOCK(pmap);
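The key change above is replacing the blind pmap_store() with an
atomic_fcmpset_long() loop: the hardware page-table walker may set the
accessed/dirty bits in a PTE between the pmap_load() and the store, and a
plain read-modify-write would silently discard that update.  The sketch
below shows the same retry pattern as standalone C11 code; the PTE_*
constants, the pte word, and pte_clear_bits() are illustrative stand-ins
rather than the kernel definitions, and
atomic_compare_exchange_weak_explicit() takes the place of the kernel's
atomic_fcmpset_long(), which likewise refreshes the caller's copy of the
old value when the compare fails (which is why the kernel code can simply
goto retry without reloading the PTE).

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative PTE bit values for this sketch only, not the RISC-V ones. */
#define PTE_V	0x01UL	/* valid */
#define PTE_W	0x04UL	/* writable */
#define PTE_X	0x08UL	/* executable */
#define PTE_A	0x40UL	/* accessed; may be set by the hardware walker */
#define PTE_D	0x80UL	/* dirty; may be set by the hardware walker */

static _Atomic uint64_t pte = PTE_V | PTE_W | PTE_X;

/*
 * Clear the bits in "mask" without losing a concurrent A/D update: if the
 * CAS fails, "old" is refreshed with the value actually in the PTE and the
 * loop retries, mirroring the goto retry in pmap_protect().  The old value
 * is returned so the caller can inspect PTE_D, as the kernel does before
 * calling vm_page_dirty().
 */
static uint64_t
pte_clear_bits(_Atomic uint64_t *p, uint64_t mask)
{
	uint64_t old;

	old = atomic_load_explicit(p, memory_order_relaxed);
	while (!atomic_compare_exchange_weak_explicit(p, &old, old & ~mask,
	    memory_order_acq_rel, memory_order_relaxed))
		;	/* "old" was reloaded by the failed CAS; retry. */
	return (old);
}

int
main(void)
{
	uint64_t prev;

	/* Pretend the hardware has since marked the page accessed+dirty. */
	atomic_fetch_or_explicit(&pte, PTE_A | PTE_D, memory_order_relaxed);

	/* Revoke write and execute access, as pmap_protect() does above. */
	prev = pte_clear_bits(&pte, PTE_W | PTE_D | PTE_X);

	/* PTE_A survives in the final value; PTE_D is visible in "prev". */
	printf("pte was %#" PRIx64 ", is now %#" PRIx64 "\n", prev,
	    atomic_load_explicit(&pte, memory_order_relaxed));
	return (0);
}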