Date: Tue, 2 Dec 2003 17:15:34 -0800 (PST)
From: Peter Wemm <peter@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 43330 for review
Message-ID: <200312030115.hB31FYFt091378@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=43330

Change 43330 by peter@peter_overcee on 2003/12/02 17:14:53

	do away with protection_codes array, this is more efficient
	anyway.

Affected files ...

.. //depot/projects/hammer/sys/amd64/amd64/pmap.c#48 edit

Differences ...

==== //depot/projects/hammer/sys/amd64/amd64/pmap.c#48 (text+ko) ====

@@ -158,13 +158,6 @@
 #define PMAP_INLINE
 #endif
 
-/*
- * Given a map and a machine independent protection code,
- * convert to a vax protection code.
- */
-#define pte_prot(m, p)	(protection_codes[p])
-static pt_entry_t protection_codes[8];
-
 struct pmap kernel_pmap_store;
 LIST_HEAD(pmaplist, pmap);
 static struct pmaplist allpmaps;
@@ -219,7 +212,6 @@
 
 static PMAP_INLINE void	free_pv_entry(pv_entry_t pv);
 static pv_entry_t get_pv_entry(void);
-static void	amd64_protection_init(void);
 static void	pmap_clear_ptes(vm_page_t m, int bit)
     __always_inline;
 
@@ -477,11 +469,6 @@
 	load_cr3(KPML4phys);
 
 	/*
-	 * Initialize protection array.
-	 */
-	amd64_protection_init();
-
-	/*
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
 	kernel_pmap->pm_pml4 = (pdp_entry_t *) (KERNBASE + KPML4phys);
@@ -2095,8 +2082,13 @@
 	/*
 	 * Now validate mapping with desired protection/wiring.
 	 */
-	newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) | PG_V);
-
+	newpte = (pt_entry_t)(pa | PG_V);
+	if ((prot & VM_PROT_WRITE) != 0)
+		newpte |= PG_RW;
+#ifdef PG_NX
+	if ((prot & VM_PROT_EXECUTE) == 0)
+		newpte |= PG_NX;
+#endif
 	if (wired)
 		newpte |= PG_W;
 	if (va < VM_MAXUSER_ADDRESS)
@@ -2845,40 +2837,6 @@
  * Miscellaneous support routines follow
  */
 
-static void
-amd64_protection_init()
-{
-	register long *kp, prot;
-
-#if 0
-#define PG_NX (1ul << 63)
-#else
-#define PG_NX 0
-#endif
-
-	kp = protection_codes;
-	for (prot = 0; prot < 8; prot++) {
-		switch (prot) {
-		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
-		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
-			*kp++ = PG_NX;
-			break;
-		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
-		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
-			*kp++ = 0;
-			break;
-		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
-		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
-			*kp++ = PG_RW | PG_NX;
-			break;
-		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
-		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
-			*kp++ = PG_RW;
-			break;
-		}
-	}
-}
-
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space.  Return a pointer to where it is mapped.  This
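
For context, the net effect of the change is that pmap_enter() now derives the
PG_RW and PG_NX bits directly from the VM_PROT_* flags instead of indexing the
8-entry protection_codes[] table. The following is a minimal userland sketch of
that mapping, not kernel code: the VM_PROT_* and PG_* values below are
simplified stand-ins for the real <vm/vm.h> and <machine/pmap.h> definitions,
and it assumes the NX bit is in use (the diff compiles that part out when
PG_NX is not defined).

    /*
     * Standalone sketch: for each of the 8 VM_PROT combinations, compute
     * the PTE protection bits the way the new pmap_enter() code does.
     * Constants are stand-ins for the kernel headers.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define VM_PROT_READ    0x01
    #define VM_PROT_WRITE   0x02
    #define VM_PROT_EXECUTE 0x04

    #define PG_RW   ((uint64_t)1 << 1)
    #define PG_NX   ((uint64_t)1 << 63)   /* assumes NX is enabled */

    int
    main(void)
    {
            int prot;

            for (prot = 0; prot < 8; prot++) {
                    uint64_t newpte = 0;

                    /* Direct computation, as in the new code. */
                    if ((prot & VM_PROT_WRITE) != 0)
                            newpte |= PG_RW;
                    if ((prot & VM_PROT_EXECUTE) == 0)
                            newpte |= PG_NX;

                    printf("prot %d -> pte bits %#jx\n", prot,
                        (uintmax_t)newpte);
            }
            return (0);
    }

Two inline flag tests replace a load from a statically initialized lookup
table, and the #ifdef in the diff lets the NX handling disappear entirely on
configurations where PG_NX is not defined.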
