Date: Wed, 28 Nov 2018 18:42:42 +0000 (UTC)
From: Konstantin Belousov <kib@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject: svn commit: r341161 - in stable/12/sys: kern vm
Message-ID: <201811281842.wASIggAY051213@repo.freebsd.org>
Author: kib
Date: Wed Nov 28 18:42:42 2018
New Revision: 341161
URL: https://svnweb.freebsd.org/changeset/base/341161

Log:
  MFC r339506, r339508, r340064 (by markj), r340546 (by alc):
  Tidy up code to merge vm_entry neighbors and simplify related checks.

  r339506:
  Reduce code duplication in merging vm_entry neighbors.

  r339508:
  Unindent.

  r340064:
  Initialize the eflags field of vm_map headers.

  r340546:
  Tidy up vm_map_simplify_entry() and its recently introduced helper
  functions.

  Discussed with:	markj

Modified:
  stable/12/sys/kern/sys_process.c
  stable/12/sys/vm/vm_map.c
  stable/12/sys/vm/vm_map.h

Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/kern/sys_process.c
==============================================================================
--- stable/12/sys/kern/sys_process.c	Wed Nov 28 18:09:42 2018	(r341160)
+++ stable/12/sys/kern/sys_process.c	Wed Nov 28 18:42:42 2018	(r341161)
@@ -387,8 +387,9 @@ ptrace_vm_entry(struct thread *td, struct proc *p, str
 			error = EINVAL;
 			break;
 		}
-		while (entry != &map->header &&
-		    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
+		KASSERT((map->header.eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
+		    ("Submap in map header"));
+		while ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
 			entry = entry->next;
 			index++;
 		}

Modified: stable/12/sys/vm/vm_map.c
==============================================================================
--- stable/12/sys/vm/vm_map.c	Wed Nov 28 18:09:42 2018	(r341160)
+++ stable/12/sys/vm/vm_map.c	Wed Nov 28 18:42:42 2018	(r341161)
@@ -796,6 +796,7 @@ _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t mi
 {
 
 	map->header.next = map->header.prev = &map->header;
+	map->header.eflags = MAP_ENTRY_HEADER;
 	map->needs_wakeup = FALSE;
 	map->system_map = 0;
 	map->pmap = pmap;
@@ -1277,8 +1278,8 @@ charged:
 		if (object->ref_count > 1 || object->shadow_count != 0)
 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
 		VM_OBJECT_WUNLOCK(object);
-	} else if (prev_entry != &map->header &&
-	    (prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) == protoeflags &&
+	} else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
+	    protoeflags &&
 	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
 	    prev_entry->end == start && (prev_entry->cred == cred ||
 	    (prev_entry->object.vm_object != NULL &&
@@ -1644,6 +1645,54 @@ vm_map_find_min(vm_map_t map, vm_object_t object, vm_o
 }
 
 /*
+ * A map entry with any of the following flags set must not be merged with
+ * another entry.
+ */
+#define	MAP_ENTRY_NOMERGE_MASK	(MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
+	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)
+
+static bool
+vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
+{
+
+	KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
+	    (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
+	    ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
+	    prev, entry));
+	return (prev->end == entry->start &&
+	    prev->object.vm_object == entry->object.vm_object &&
+	    (prev->object.vm_object == NULL ||
+	    prev->offset + (prev->end - prev->start) == entry->offset) &&
+	    prev->eflags == entry->eflags &&
+	    prev->protection == entry->protection &&
+	    prev->max_protection == entry->max_protection &&
+	    prev->inheritance == entry->inheritance &&
+	    prev->wired_count == entry->wired_count &&
+	    prev->cred == entry->cred);
+}
+
+static void
+vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
+{
+
+	/*
+	 * If the backing object is a vnode object, vm_object_deallocate()
+	 * calls vrele().  However, vrele() does not lock the vnode because
+	 * the vnode has additional references.  Thus, the map lock can be
+	 * kept without causing a lock-order reversal with the vnode lock.
+	 *
+	 * Since we count the number of virtual page mappings in
+	 * object->un_pager.vnp.writemappings, the writemappings value
+	 * should not be adjusted when the entry is disposed of.
+	 */
+	if (entry->object.vm_object != NULL)
+		vm_object_deallocate(entry->object.vm_object);
+	if (entry->cred != NULL)
+		crfree(entry->cred);
+	vm_map_entry_dispose(map, entry);
+}
+
+/*
  * vm_map_simplify_entry:
  *
  *	Simplify the given map entry by merging with either neighbor.  This
@@ -1659,81 +1708,27 @@ void
 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
 {
 	vm_map_entry_t next, prev;
-	vm_size_t prevsize, esize;
 
-	if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
-	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
+	if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) != 0)
 		return;
-
 	prev = entry->prev;
-	if (prev != &map->header) {
-		prevsize = prev->end - prev->start;
-		if ( (prev->end == entry->start) &&
-		     (prev->object.vm_object == entry->object.vm_object) &&
-		     (!prev->object.vm_object ||
-			(prev->offset + prevsize == entry->offset)) &&
-		     (prev->eflags == entry->eflags) &&
-		     (prev->protection == entry->protection) &&
-		     (prev->max_protection == entry->max_protection) &&
-		     (prev->inheritance == entry->inheritance) &&
-		     (prev->wired_count == entry->wired_count) &&
-		     (prev->cred == entry->cred)) {
-			vm_map_entry_unlink(map, prev);
-			entry->start = prev->start;
-			entry->offset = prev->offset;
-			if (entry->prev != &map->header)
-				vm_map_entry_resize_free(map, entry->prev);
-
-			/*
-			 * If the backing object is a vnode object,
-			 * vm_object_deallocate() calls vrele().
-			 * However, vrele() does not lock the vnode
-			 * because the vnode has additional
-			 * references.  Thus, the map lock can be kept
-			 * without causing a lock-order reversal with
-			 * the vnode lock.
-			 *
-			 * Since we count the number of virtual page
-			 * mappings in object->un_pager.vnp.writemappings,
-			 * the writemappings value should not be adjusted
-			 * when the entry is disposed of.
-			 */
-			if (prev->object.vm_object)
-				vm_object_deallocate(prev->object.vm_object);
-			if (prev->cred != NULL)
-				crfree(prev->cred);
-			vm_map_entry_dispose(map, prev);
-		}
+	if (vm_map_mergeable_neighbors(prev, entry)) {
+		vm_map_entry_unlink(map, prev);
+		entry->start = prev->start;
+		entry->offset = prev->offset;
+		if (entry->prev != &map->header)
+			vm_map_entry_resize_free(map, entry->prev);
+		vm_map_merged_neighbor_dispose(map, prev);
 	}
-
 	next = entry->next;
-	if (next != &map->header) {
-		esize = entry->end - entry->start;
-		if ((entry->end == next->start) &&
-		    (next->object.vm_object == entry->object.vm_object) &&
-		     (!entry->object.vm_object ||
-			(entry->offset + esize == next->offset)) &&
-		    (next->eflags == entry->eflags) &&
-		    (next->protection == entry->protection) &&
-		    (next->max_protection == entry->max_protection) &&
-		    (next->inheritance == entry->inheritance) &&
-		    (next->wired_count == entry->wired_count) &&
-		    (next->cred == entry->cred)) {
-			vm_map_entry_unlink(map, next);
-			entry->end = next->end;
-			vm_map_entry_resize_free(map, entry);
-
-			/*
-			 * See comment above.
-			 */
-			if (next->object.vm_object)
-				vm_object_deallocate(next->object.vm_object);
-			if (next->cred != NULL)
-				crfree(next->cred);
-			vm_map_entry_dispose(map, next);
-		}
+	if (vm_map_mergeable_neighbors(entry, next)) {
+		vm_map_entry_unlink(map, next);
+		entry->end = next->end;
+		vm_map_entry_resize_free(map, entry);
+		vm_map_merged_neighbor_dispose(map, next);
 	}
 }
+
 /*
  * vm_map_clip_start:	[ internal use only ]
  *

Modified: stable/12/sys/vm/vm_map.h
==============================================================================
--- stable/12/sys/vm/vm_map.h	Wed Nov 28 18:09:42 2018	(r341160)
+++ stable/12/sys/vm/vm_map.h	Wed Nov 28 18:42:42 2018	(r341161)
@@ -146,6 +146,7 @@ struct vm_map_entry {
 #define	MAP_ENTRY_GUARD		0x10000
 #define	MAP_ENTRY_STACK_GAP_DN	0x20000
 #define	MAP_ENTRY_STACK_GAP_UP	0x40000
+#define	MAP_ENTRY_HEADER	0x80000
 
 #ifdef	_KERNEL
 static __inline u_char
@@ -175,24 +176,22 @@ vm_map_entry_system_wired_count(vm_map_entry_t entry)
 *	list.  Both structures are ordered based upon the start and
 *	end addresses contained within each map entry.
 *
-*	Counterintuitively, the map's min offset value is stored in
-*	map->header.end, and its max offset value is stored in
-*	map->header.start.
-*
-*	The list header has max start value and min end value to act
-*	as sentinels for sequential search of the doubly-linked list.
 *	Sleator and Tarjan's top-down splay algorithm is employed to
 *	control height imbalance in the binary search tree.
 *
+*	The map's min offset value is stored in map->header.end, and
+*	its max offset value is stored in map->header.start.  These
+*	values act as sentinels for any forward or backward address
+*	scan of the list.  The map header has a special value for the
+*	eflags field, MAP_ENTRY_HEADER, that is set initially, is
+*	never changed, and prevents an eflags match of the header
+*	with any other map entry.
+*
 * List of locks
 *	(c)	const until freed
 */
 struct vm_map {
 	struct vm_map_entry header;	/* List of entries */
-/*
-	map min_offset	header.end	(c)
-	map max_offset	header.start	(c)
-*/
 	struct sx lock;			/* Lock for map data */
 	struct mtx system_mtx;
 	int nentries;			/* Number of entries */
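(The following illustration is not part of the commit. It is a minimal,
self-contained user-space sketch of the sentinel idiom that MAP_ENTRY_HEADER
enables: give the list header a flag value no real entry can carry, and any
scan that tests entry flags stops at the header without an explicit
"entry != &map->header" comparison. All names here, struct node, NODE_SKIP,
NODE_HEADER, and list_init, are invented for the example and are not kernel
interfaces.)

#include <assert.h>
#include <stdio.h>

#define	NODE_SKIP	0x1	/* stands in for MAP_ENTRY_IS_SUB_MAP */
#define	NODE_HEADER	0x2	/* stands in for MAP_ENTRY_HEADER */

struct node {
	struct node *next, *prev;
	int flags;
};

/*
 * Mirrors _vm_map_init(): the header points at itself and is given a
 * flags value that no real node ever carries.
 */
static void
list_init(struct node *head)
{
	head->next = head->prev = head;
	head->flags = NODE_HEADER;
}

int
main(void)
{
	struct node head, a, b;
	struct node *n;

	list_init(&head);
	a.flags = NODE_SKIP;	/* should be skipped by the scan */
	b.flags = 0;		/* first node the scan should stop at */

	/* Link the circular list: head <-> a <-> b <-> head. */
	a.prev = &head; a.next = &b;
	b.prev = &a; b.next = &head;
	head.next = &a; head.prev = &b;

	/*
	 * Same shape as the rewritten loop in ptrace_vm_entry(): no
	 * "n != &head" test is needed because the header's flags can
	 * never have NODE_SKIP set.  The assertion records that
	 * invariant, just as the KASSERT above does.
	 */
	assert((head.flags & NODE_SKIP) == 0);
	for (n = head.next; (n->flags & NODE_SKIP) != 0; n = n->next)
		;
	printf("scan stopped at %s\n",
	    n == &head ? "the header" : "a real node");
	return (0);
}

The same reasoning lets vm_map_mergeable_neighbors() omit a header check:
the header's unique eflags value can never equal a real entry's eflags, so
the merge predicate evaluates false at the header automatically.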