From: Konstantin Belousov <kib@FreeBSD.org>
Date: Sat, 2 Oct 2010 17:41:48 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject: svn commit: r213362 - in stable/8/sys: kern sys vm

Author: kib
Date: Sat Oct  2 17:41:47 2010
New Revision: 213362
URL: http://svn.freebsd.org/changeset/base/213362

Log:
  MFC r212824:
  Adopt the deferring of object deallocation for the deleted map entries
  on map unlock to the lock downgrade and later read unlock operation.

  MFC r212868 (by alc) [1]:
  Make refinements to r212824.  Redo the implementation of
  vm_map_unlock_and_wait().

  Approved by:	alc [1]

Modified:
  stable/8/sys/kern/kern_fork.c
  stable/8/sys/kern/kern_kthread.c
  stable/8/sys/kern/kern_thr.c
  stable/8/sys/sys/proc.h
  stable/8/sys/vm/vm_map.c
  stable/8/sys/vm/vm_map.h

Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)
  stable/8/sys/dev/xen/xenpci/   (props changed)

Modified: stable/8/sys/kern/kern_fork.c
==============================================================================
--- stable/8/sys/kern/kern_fork.c	Sat Oct  2 17:20:30 2010	(r213361)
+++ stable/8/sys/kern/kern_fork.c	Sat Oct  2 17:41:47 2010	(r213362)
@@ -532,6 +532,7 @@ again:
 	bzero(&td2->td_startzero,
 	    __rangeof(struct thread, td_startzero, td_endzero));
 	bzero(&td2->td_rux, sizeof(td2->td_rux));
+	td2->td_map_def_user = NULL;
 	bcopy(&td->td_startcopy, &td2->td_startcopy,
 	    __rangeof(struct thread, td_startcopy, td_endcopy));
 
Modified: stable/8/sys/kern/kern_kthread.c
==============================================================================
--- stable/8/sys/kern/kern_kthread.c	Sat Oct  2 17:20:30 2010	(r213361)
+++ stable/8/sys/kern/kern_kthread.c	Sat Oct  2 17:41:47 2010	(r213362)
@@ -263,6 +263,7 @@ kthread_add(void (*func)(void *), void *
 	bzero(&newtd->td_startzero,
 	    __rangeof(struct thread, td_startzero, td_endzero));
 	bzero(&newtd->td_rux, sizeof(newtd->td_rux));
+	newtd->td_map_def_user = NULL;
 /* XXX check if we should zero. */
 	bcopy(&oldtd->td_startcopy, &newtd->td_startcopy,
 	    __rangeof(struct thread, td_startcopy, td_endcopy));

Modified: stable/8/sys/kern/kern_thr.c
==============================================================================
--- stable/8/sys/kern/kern_thr.c	Sat Oct  2 17:20:30 2010	(r213361)
+++ stable/8/sys/kern/kern_thr.c	Sat Oct  2 17:41:47 2010	(r213362)
@@ -200,6 +200,7 @@ create_thread(struct thread *td, mcontex
 	bzero(&newtd->td_startzero,
 	    __rangeof(struct thread, td_startzero, td_endzero));
 	bzero(&newtd->td_rux, sizeof(newtd->td_rux));
+	newtd->td_map_def_user = NULL;
 	bcopy(&td->td_startcopy, &newtd->td_startcopy,
 	    __rangeof(struct thread, td_startcopy, td_endcopy));
 	newtd->td_proc = td->td_proc;

Modified: stable/8/sys/sys/proc.h
==============================================================================
--- stable/8/sys/sys/proc.h	Sat Oct  2 17:20:30 2010	(r213361)
+++ stable/8/sys/sys/proc.h	Sat Oct  2 17:41:47 2010	(r213362)
@@ -303,6 +303,7 @@ struct thread {
 	struct vnet	*td_vnet;	/* (k) Effective vnet. */
 	const char	*td_vnet_lpush;	/* (k) Debugging vnet push / pop. */
 	struct rusage_ext td_rux;	/* (t) Internal rusage information. */
+	struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */
 };
 
 struct mtx *thread_lock_block(struct thread *);

Modified: stable/8/sys/vm/vm_map.c
==============================================================================
--- stable/8/sys/vm/vm_map.c	Sat Oct  2 17:20:30 2010	(r213361)
+++ stable/8/sys/vm/vm_map.c	Sat Oct  2 17:41:47 2010	(r213362)
@@ -127,6 +127,7 @@ static void vmspace_zfini(void *mem, int
 static int vm_map_zinit(void *mem, int ize, int flags);
 static void vm_map_zfini(void *mem, int size);
 static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);
+static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
 #ifdef INVARIANTS
 static void vm_map_zdtor(void *mem, int size, void *arg);
@@ -457,30 +458,29 @@ _vm_map_lock(vm_map_t map, const char *f
 	map->timestamp++;
 }
 
+static void
+vm_map_process_deferred(void)
+{
+	struct thread *td;
+	vm_map_entry_t entry;
+
+	td = curthread;
+
+	while ((entry = td->td_map_def_user) != NULL) {
+		td->td_map_def_user = entry->next;
+		vm_map_entry_deallocate(entry, FALSE);
+	}
+}
+
 void
 _vm_map_unlock(vm_map_t map, const char *file, int line)
 {
-	vm_map_entry_t free_entry, entry;
-	vm_object_t object;
-
-	free_entry = map->deferred_freelist;
-	map->deferred_freelist = NULL;
 
 	if (map->system_map)
 		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
-	else
+	else {
 		_sx_xunlock(&map->lock, file, line);
-
-	while (free_entry != NULL) {
-		entry = free_entry;
-		free_entry = free_entry->next;
-
-		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
-			object = entry->object.vm_object;
-			vm_object_deallocate(object);
-		}
-
-		vm_map_entry_dispose(map, entry);
+		vm_map_process_deferred();
 	}
 }
 
@@ -500,8 +500,10 @@ _vm_map_unlock_read(vm_map_t map, const
 
 	if (map->system_map)
 		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
-	else
+	else {
 		_sx_sunlock(&map->lock, file, line);
+		vm_map_process_deferred();
+	}
 }
 
 int
@@ -551,6 +553,7 @@ _vm_map_lock_upgrade(vm_map_t map, const
 		if (!_sx_try_upgrade(&map->lock, file, line)) {
 			last_timestamp = map->timestamp;
 			_sx_sunlock(&map->lock, file, line);
+			vm_map_process_deferred();
 			/*
 			 * If the map's timestamp does not change while the
 			 * map is unlocked, then the upgrade succeeds.
@@ -627,19 +630,37 @@ _vm_map_assert_locked_read(vm_map_t map,
 #endif
 
 /*
- *	vm_map_unlock_and_wait:
+ *	_vm_map_unlock_and_wait:
+ *
+ *	Atomically releases the lock on the specified map and puts the calling
+ *	thread to sleep.  The calling thread will remain asleep until either
+ *	vm_map_wakeup() is performed on the map or the specified timeout is
+ *	exceeded.
+ *
+ *	WARNING!  This function does not perform deferred deallocations of
+ *	objects and map entries.  Therefore, the calling thread is expected to
+ *	reacquire the map lock after reawakening and later perform an ordinary
+ *	unlock operation, such as vm_map_unlock(), before completing its
+ *	operation on the map.
  */
 int
-vm_map_unlock_and_wait(vm_map_t map, int timo)
+_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
 {
 
 	mtx_lock(&map_sleep_mtx);
-	vm_map_unlock(map);
-	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", timo));
+	if (map->system_map)
+		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
+	else
+		_sx_xunlock(&map->lock, file, line);
+	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
+	    timo));
 }
 
 /*
  *	vm_map_wakeup:
+ *
+ *	Awaken any threads that have slept on the map using
+ *	vm_map_unlock_and_wait().
  */
 void
 vm_map_wakeup(vm_map_t map)
@@ -647,8 +668,8 @@ vm_map_wakeup(vm_map_t map)
 
 	/*
 	 * Acquire and release map_sleep_mtx to prevent a wakeup()
-	 * from being performed (and lost) between the vm_map_unlock()
-	 * and the msleep() in vm_map_unlock_and_wait().
+	 * from being performed (and lost) between the map unlock
+	 * and the msleep() in _vm_map_unlock_and_wait().
 	 */
 	mtx_lock(&map_sleep_mtx);
 	mtx_unlock(&map_sleep_mtx);
@@ -703,7 +724,6 @@ _vm_map_init(vm_map_t map, vm_offset_t m
 	map->flags = 0;
 	map->root = NULL;
 	map->timestamp = 0;
-	map->deferred_freelist = NULL;
 }
 
 void
@@ -2603,6 +2623,15 @@ vm_map_entry_unwire(vm_map_t map, vm_map
 	entry->wired_count = 0;
 }
 
+static void
+vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
+{
+
+	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
+		vm_object_deallocate(entry->object.vm_object);
+	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
+}
+
 /*
  *	vm_map_entry_delete:	[ internal use only ]
  *
@@ -2657,6 +2686,12 @@ vm_map_entry_delete(vm_map_t map, vm_map
 		VM_OBJECT_UNLOCK(object);
 	} else
 		entry->object.vm_object = NULL;
+	if (map->system_map)
+		vm_map_entry_deallocate(entry, TRUE);
+	else {
+		entry->next = curthread->td_map_def_user;
+		curthread->td_map_def_user = entry;
+	}
 }
 
 /*
@@ -2745,8 +2780,6 @@ vm_map_delete(vm_map_t map, vm_offset_t
 		 * will be set in the wrong object!)
 		 */
 		vm_map_entry_delete(map, entry);
-		entry->next = map->deferred_freelist;
-		map->deferred_freelist = entry;
 		entry = next;
 	}
 	return (KERN_SUCCESS);

Modified: stable/8/sys/vm/vm_map.h
==============================================================================
--- stable/8/sys/vm/vm_map.h	Sat Oct  2 17:20:30 2010	(r213361)
+++ stable/8/sys/vm/vm_map.h	Sat Oct  2 17:41:47 2010	(r213362)
@@ -185,7 +185,6 @@ struct vm_map {
 	vm_flags_t flags;		/* flags for this vm_map */
 	vm_map_entry_t root;		/* Root of a binary search tree */
 	pmap_t pmap;			/* (c) Physical map */
-	vm_map_entry_t deferred_freelist;
 #define	min_offset	header.start	/* (c) */
 #define	max_offset	header.end	/* (c) */
 };
@@ -267,6 +266,7 @@ vmspace_pmap(struct vmspace *vmspace)
 
 void _vm_map_lock(vm_map_t map, const char *file, int line);
 void _vm_map_unlock(vm_map_t map, const char *file, int line);
+int _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line);
 void _vm_map_lock_read(vm_map_t map, const char *file, int line);
 void _vm_map_unlock_read(vm_map_t map, const char *file, int line);
 int _vm_map_trylock(vm_map_t map, const char *file, int line);
@@ -274,11 +274,12 @@ int _vm_map_trylock_read(vm_map_t map, c
 int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
 void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
 int vm_map_locked(vm_map_t map);
-int vm_map_unlock_and_wait(vm_map_t map, int timo);
 void vm_map_wakeup(vm_map_t map);
 
 #define	vm_map_lock(map)	_vm_map_lock(map, LOCK_FILE, LOCK_LINE)
 #define	vm_map_unlock(map)	_vm_map_unlock(map, LOCK_FILE, LOCK_LINE)
+#define	vm_map_unlock_and_wait(map, timo)	\
+			_vm_map_unlock_and_wait(map, timo, LOCK_FILE, LOCK_LINE)
 #define	vm_map_lock_read(map)	_vm_map_lock_read(map, LOCK_FILE, LOCK_LINE)
 #define	vm_map_unlock_read(map)	_vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE)
 #define	vm_map_trylock(map)	_vm_map_trylock(map, LOCK_FILE, LOCK_LINE)
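
The WARNING added above _vm_map_unlock_and_wait() implies a particular calling
pattern: the sleep itself never drains the per-thread td_map_def_user list, so
entries queued by vm_map_entry_delete() are freed only once the caller retakes
the map lock and later drops it through an ordinary unlock path such as
vm_map_unlock() or vm_map_unlock_read().  The sketch below is illustrative
only and not part of this commit; it assumes a non-system map, and the
map_has_space() predicate is a made-up placeholder for whatever condition the
caller is actually waiting on.

	vm_map_lock(map);
	while (!map_has_space(map)) {	/* hypothetical wait condition */
		/*
		 * Drops the map lock and sleeps until vm_map_wakeup() or the
		 * timeout; the thread's td_map_def_user list is untouched.
		 */
		(void)vm_map_unlock_and_wait(map, 0);
		vm_map_lock(map);	/* reacquire before rechecking */
	}
	/* ... modify the map ... */
	vm_map_unlock(map);	/* drains td_map_def_user via vm_map_process_deferred() */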