Date: Sat, 20 Jan 2018 12:19:02 +0000 (UTC)
From: Konstantin Belousov <kib@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r328192 - head/sys/vm
Message-ID: <201801201219.w0KCJ2mE093323@repo.freebsd.org>
Author: kib
Date: Sat Jan 20 12:19:02 2018
New Revision: 328192

URL: https://svnweb.freebsd.org/changeset/base/328192

Log:
  Assign map->header values to avoid boundary checks.

  In several places, entry start and end fields are checked, after
  excluding the possibility that the entry is map->header.  By assigning
  max and min values to the start and end fields of map->header in
  vm_map_init, the explicit map->header checks become unnecessary.

  Submitted by:	Doug Moore <dougm@rice.edu>
  Reviewed by:	alc, kib, markj (previous version)
  Tested by:	pho (previous version)
  MFC after:	1 week
  Differential Revision:	https://reviews.freebsd.org/D13735

Modified:
  head/sys/vm/vm_map.c
  head/sys/vm/vm_map.h
  head/sys/vm/vm_mmap.c

Modified: head/sys/vm/vm_map.c
==============================================================================
--- head/sys/vm/vm_map.c	Sat Jan 20 11:21:22 2018	(r328191)
+++ head/sys/vm/vm_map.c	Sat Jan 20 12:19:02 2018	(r328192)
@@ -1003,12 +1003,10 @@ vm_map_entry_link(vm_map_t map,
 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
 	    map->nentries, entry, after_where);
 	VM_MAP_ASSERT_LOCKED(map);
-	KASSERT(after_where == &map->header ||
-	    after_where->end <= entry->start,
+	KASSERT(after_where->end <= entry->start,
 	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
 	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
-	KASSERT(after_where->next == &map->header ||
-	    entry->end <= after_where->next->start,
+	KASSERT(entry->end <= after_where->next->start,
 	    ("vm_map_entry_link: new end %jx next start %jx overlap",
 	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));

@@ -1030,8 +1028,7 @@ vm_map_entry_link(vm_map_t map,
 		entry->right = map->root;
 		entry->left = NULL;
 	}
-	entry->adj_free = (entry->next == &map->header ? map->max_offset :
-	    entry->next->start) - entry->end;
+	entry->adj_free = entry->next->start - entry->end;
 	vm_map_entry_set_max_free(entry);
 	map->root = entry;
 }
@@ -1050,8 +1047,7 @@ vm_map_entry_unlink(vm_map_t map,
 	else {
 		root = vm_map_entry_splay(entry->start, entry->left);
 		root->right = entry->right;
-		root->adj_free = (entry->next == &map->header ? map->max_offset :
-		    entry->next->start) - root->end;
+		root->adj_free = entry->next->start - root->end;
 		vm_map_entry_set_max_free(root);
 	}
 	map->root = root;
@@ -1087,8 +1083,7 @@ vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t
 	if (entry != map->root)
 		map->root = vm_map_entry_splay(entry->start, map->root);

-	entry->adj_free = (entry->next == &map->header ? map->max_offset :
-	    entry->next->start) - entry->end;
+	entry->adj_free = entry->next->start - entry->end;
 	vm_map_entry_set_max_free(entry);
 }

@@ -1218,7 +1213,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_oof
 	/*
 	 * Assert that the next entry doesn't overlap the end point.
 	 */
-	if (prev_entry->next != &map->header && prev_entry->next->start < end)
+	if (prev_entry->next->start < end)
 		return (KERN_NO_SPACE);

 	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
@@ -2090,8 +2085,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_off
 	/*
 	 * Make a first pass to check for protection violations.
 	 */
-	for (current = entry; current != &map->header && current->start < end;
-	    current = current->next) {
+	for (current = entry; current->start < end; current = current->next) {
 		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
 			continue;
 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
@@ -2109,8 +2103,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_off
 	 * now will do cow due to allowed write (e.g. debugger sets
 	 * breakpoint on text segment)
	 */
-	for (current = entry; current != &map->header && current->start < end;
-	    current = current->next) {
+	for (current = entry; current->start < end; current = current->next) {

 		vm_map_clip_end(map, current, end);

@@ -2164,8 +2157,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_off
 	 * Go back and fix up protections. [Note that clipping is not
 	 * necessary the second time.]
 	 */
-	for (current = entry; current != &map->header && current->start < end;
-	    current = current->next) {
+	for (current = entry; current->start < end; current = current->next) {
 		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
 			continue;

@@ -2274,10 +2266,8 @@ vm_map_madvise(
 		 * We clip the vm_map_entry so that behavioral changes are
 		 * limited to the specified address range.
 		 */
-		for (current = entry;
-		    (current != &map->header) && (current->start < end);
-		    current = current->next
-		) {
+		for (current = entry; current->start < end;
+		    current = current->next) {
 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 				continue;

@@ -2321,10 +2311,8 @@ vm_map_madvise(
 		 * Since we don't clip the vm_map_entry, we have to clip
 		 * the vm_object pindex and count.
 		 */
-		for (current = entry;
-		    (current != &map->header) && (current->start < end);
-		    current = current->next
-		) {
+		for (current = entry; current->start < end;
+		    current = current->next) {
 			vm_offset_t useEnd, useStart;

 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
@@ -2420,7 +2408,7 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_off
 		vm_map_clip_start(map, entry, start);
 	} else
 		entry = temp_entry->next;
-	while ((entry != &map->header) && (entry->start < end)) {
+	while (entry->start < end) {
 		vm_map_clip_end(map, entry, end);
 		if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
 		    new_inheritance != VM_INHERIT_ZERO)
@@ -2462,7 +2450,7 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offs
 	}
 	last_timestamp = map->timestamp;
 	entry = first_entry;
-	while (entry != &map->header && entry->start < end) {
+	while (entry->start < end) {
 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 			/*
 			 * We have not yet clipped the entry.
@@ -2525,8 +2513,7 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offs
 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
 		 */
 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
-		    (entry->end < end && (entry->next == &map->header ||
-		    entry->next->start > entry->end))) {
+		    (entry->end < end && entry->next->start > entry->end)) {
 			end = entry->end;
 			rv = KERN_INVALID_ADDRESS;
 			goto done;
@@ -2552,8 +2539,7 @@ done:
 		else
 			KASSERT(result, ("vm_map_unwire: lookup failed"));
 	}
-	for (entry = first_entry; entry != &map->header && entry->start < end;
-	    entry = entry->next) {
+	for (entry = first_entry; entry->start < end; entry = entry->next) {
 		/*
 		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
 		 * space in the unwired region could have been mapped
@@ -2667,7 +2653,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset
 	}
 	last_timestamp = map->timestamp;
 	entry = first_entry;
-	while (entry != &map->header && entry->start < end) {
+	while (entry->start < end) {
 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 			/*
 			 * We have not yet clipped the entry.
@@ -2804,8 +2790,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset
 		 */
 	next_entry:
 		if ((flags & VM_MAP_WIRE_HOLESOK) == 0 &&
-		    entry->end < end && (entry->next == &map->header ||
-		    entry->next->start > entry->end)) {
+		    entry->end < end && entry->next->start > entry->end) {
 			end = entry->end;
 			rv = KERN_INVALID_ADDRESS;
 			goto done;
@@ -2822,8 +2807,7 @@ done:
 		else
 			KASSERT(result, ("vm_map_wire: lookup failed"));
 	}
-	for (entry = first_entry; entry != &map->header && entry->start < end;
-	    entry = entry->next) {
+	for (entry = first_entry; entry->start < end; entry = entry->next) {
 		/*
 		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
 		 * space in the unwired region could have been mapped
@@ -2927,15 +2911,13 @@ vm_map_sync(
 	/*
 	 * Make a first pass to check for user-wired memory and holes.
 	 */
-	for (current = entry; current != &map->header && current->start < end;
-	    current = current->next) {
+	for (current = entry; current->start < end; current = current->next) {
 		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
 			vm_map_unlock_read(map);
 			return (KERN_INVALID_ARGUMENT);
 		}
 		if (end > current->end &&
-		    (current->next == &map->header ||
-		    current->end != current->next->start)) {
+		    current->end != current->next->start) {
 			vm_map_unlock_read(map);
 			return (KERN_INVALID_ADDRESS);
 		}
@@ -2949,7 +2931,7 @@ vm_map_sync(
 	 * Make a second pass, cleaning/uncaching pages from the indicated
 	 * objects as we go.
 	 */
-	for (current = entry; current != &map->header && current->start < end;) {
+	for (current = entry; current->start < end;) {
 		offset = current->offset + (start - current->start);
 		size = (end <= current->end ? end : current->end) - start;
 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
@@ -3126,7 +3108,7 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offs
 	/*
 	 * Step through all entries in this region
 	 */
-	while ((entry != &map->header) && (entry->start < end)) {
+	while (entry->start < end) {
 		vm_map_entry_t next;

 		/*
@@ -3234,8 +3216,6 @@ vm_map_check_protection(vm_map_t map, vm_offset_t star
 	entry = tmp_entry;

 	while (start < end) {
-		if (entry == &map->header)
-			return (FALSE);
 		/*
 		 * No holes allowed!
 		 */
@@ -3699,8 +3679,7 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
 	/*
 	 * If we can't accommodate max_ssize in the current mapping, no go.
 	 */
-	if ((prev_entry->next != &map->header) &&
-	    (prev_entry->next->start < addrbos + max_ssize))
+	if (prev_entry->next->start < addrbos + max_ssize)
 		return (KERN_NO_SPACE);

 	/*

Modified: head/sys/vm/vm_map.h
==============================================================================
--- head/sys/vm/vm_map.h	Sat Jan 20 11:21:22 2018	(r328191)
+++ head/sys/vm/vm_map.h	Sat Jan 20 12:19:02 2018	(r328192)
@@ -173,15 +173,19 @@ vm_map_entry_system_wired_count(vm_map_entry_t entry)
  *	A map is a set of map entries.  These map entries are
  *	organized both as a binary search tree and as a doubly-linked
  *	list.  Both structures are ordered based upon the start and
- *	end addresses contained within each map entry.  Sleator and
- *	Tarjan's top-down splay algorithm is employed to control
- *	height imbalance in the binary search tree.
+ *	end addresses contained within each map entry.  The list
+ *	header has max start value and min end value to act as
+ *	sentinels for sequential search of the doubly-linked list.
+ *	Sleator and Tarjan's top-down splay algorithm is employed to
+ *	control height imbalance in the binary search tree.
  *
  *	List of locks
  *	(c)	const until freed
  */
 struct vm_map {
 	struct vm_map_entry header;	/* List of entries */
+#define	min_offset	header.end	/* (c) */
+#define	max_offset	header.start	/* (c) */
 	struct sx lock;			/* Lock for map data */
 	struct mtx system_mtx;
 	int nentries;			/* Number of entries */
@@ -192,8 +196,6 @@ struct vm_map {
 	vm_flags_t flags;		/* flags for this vm_map */
 	vm_map_entry_t root;		/* Root of a binary search tree */
 	pmap_t pmap;			/* (c) Physical map */
-#define	min_offset	header.start	/* (c) */
-#define	max_offset	header.end	/* (c) */
 	int busy;
 };

Modified: head/sys/vm/vm_mmap.c
==============================================================================
--- head/sys/vm/vm_mmap.c	Sat Jan 20 11:21:22 2018	(r328191)
+++ head/sys/vm/vm_mmap.c	Sat Jan 20 12:19:02 2018	(r328192)
@@ -543,8 +543,7 @@ kern_munmap(struct thread *td, uintptr_t addr0, size_t
 	 */
 	pkm.pm_address = (uintptr_t) NULL;
 	if (vm_map_lookup_entry(map, addr, &entry)) {
-		for (;
-		    entry != &map->header && entry->start < addr + size;
+		for (; entry->start < addr + size;
 		    entry = entry->next) {
 			if (vm_map_check_protection(map, entry->start,
 			    entry->end, VM_PROT_EXECUTE) == TRUE) {
@@ -770,16 +769,12 @@ RestartScan:
 	 * up the pages elsewhere.
 	 */
 	lastvecindex = -1;
-	for (current = entry;
-	    (current != &map->header) && (current->start < end);
-	    current = current->next) {
+	for (current = entry; current->start < end; current = current->next) {

 		/*
 		 * check for contiguity
 		 */
-		if (current->end < end &&
-		    (entry->next == &map->header ||
-		    current->next->start > current->end)) {
+		if (current->end < end && current->next->start > current->end) {
 			vm_map_unlock_read(map);
 			return (ENOMEM);
 		}
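
The change is an instance of a classic sentinel trick: in a sorted list
threaded through a header node, give the header a maximal start and a
minimal end, and every forward scan bounded by a condition like
"e->start < end" terminates at the header with no explicit header test.
Below is a minimal standalone sketch of that pattern, assuming nothing
beyond standard C; it is not FreeBSD code, and all of its names (struct
range, rlist_init, count_below, and so on) are invented for illustration.

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	struct range {
		size_t start, end;		/* covers [start, end) */
		struct range *next, *prev;	/* circular, through the header */
	};

	struct rlist {
		struct range header;		/* sentinel, not a real range */
	};

	/*
	 * Give the sentinel a maximal start and a minimal end.  A loop
	 * whose condition is "e->start < scan_end" then stops at the
	 * header without an "e != &l->header" test, since no scan_end
	 * can exceed max_addr.  As in the commit, the two fields double
	 * as the bounds of the managed space: header.end is the minimum
	 * address and header.start is the maximum.
	 */
	static void
	rlist_init(struct rlist *l, size_t min_addr, size_t max_addr)
	{
		l->header.start = max_addr;
		l->header.end = min_addr;
		l->header.next = l->header.prev = &l->header;
	}

	/* Insert r after prev, keeping the list sorted and non-overlapping. */
	static void
	rlist_insert_after(struct range *prev, struct range *r)
	{
		/* The sentinel values keep both checks valid at the ends. */
		assert(prev->end <= r->start);
		assert(r->end <= prev->next->start);
		r->next = prev->next;
		r->prev = prev;
		prev->next->prev = r;
		prev->next = r;
	}

	/* Count ranges starting below scan_end: no header test needed. */
	static int
	count_below(struct rlist *l, size_t scan_end)
	{
		struct range *e;
		int n = 0;

		for (e = l->header.next; e->start < scan_end; e = e->next)
			n++;
		return (n);
	}

	int
	main(void)
	{
		struct rlist l;
		struct range a = { .start = 0x1000, .end = 0x2000 };
		struct range b = { .start = 0x3000, .end = 0x4000 };

		rlist_init(&l, 0, (size_t)-1);
		rlist_insert_after(&l.header, &a);
		rlist_insert_after(&a, &b);
		printf("%d\n", count_below(&l, 0x3800));	/* prints 2 */
		return (0);
	}

The sketch also shows why the vm_map.h hunk swaps the two #defines: a
real entry's start lies below its end, but the sentinel's start lies
above its end, so after this change min_offset maps to header.end and
max_offset to header.start.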