Date:      Wed, 21 Jan 2009 18:00:33 -0500
From:      Brian Fundakowski Feldman <green@FreeBSD.org>
To:        Jason Evans <jasone@FreeBSD.org>
Cc:        hackers@FreeBSD.org, Julian Elischer <julian@elischer.org>
Subject:   Re: threaded, forked, rethreaded processes will deadlock
Message-ID:  <20090121230033.GC12007@green.homeunix.org>
In-Reply-To: <20090120004135.GB12007@green.homeunix.org>
References:  <20090109031942.GA2825@green.homeunix.org> <Pine.GSO.4.64.0901082237001.28531@sea.ntplx.net> <20090109053117.GB2825@green.homeunix.org> <4966F81C.3070406@elischer.org> <20090109163426.GC2825@green.homeunix.org> <49678BBC.8050306@elischer.org> <20090116211959.GA12007@green.homeunix.org> <49710BD6.7040705@FreeBSD.org> <20090120004135.GB12007@green.homeunix.org>

On Mon, Jan 19, 2009 at 07:41:35PM -0500, Brian Fundakowski Feldman wrote:
> On Fri, Jan 16, 2009 at 02:36:06PM -0800, Jason Evans wrote:
> > Brian Fundakowski Feldman wrote:
> >> Could you, and anyone else who would care to, check this out?  It's a
> >> regression fix but it also makes the code a little bit clearer.  Thanks!
> >> 
> >> Index: lib/libc/stdlib/malloc.c
> > 
> > Why does malloc need to change for this?  Unless there's a really good 
> > reason, I don't want the extra branches in the locking functions.
> 
> Because malloc is the thing causing the regression.  It is easy enough
> to optimize out the one extra fetch and branch in the single-threaded case
> if I can get some consensus that the fix to it is actually fine.
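
To spell out the mechanism: the regular unlock helpers only act when
__isthreaded is set, so once the forked child has run _thr_setthreaded(0),
_malloc_postfork() silently skips its unlocks and the locks taken in
_malloc_prefork() stay held; the process then deadlocks the next time it
becomes threaded.  Here is a toy model of that pattern (my own sketch of
flag-guarded locking, not the actual malloc.c code):

/*
 * Toy model: if the "threaded" flag is cleared between lock and unlock,
 * the guarded unlock is a no-op and the lock stays held forever.
 */
#include <stdbool.h>
#include <stdio.h>

static bool threaded = true;	/* stands in for __isthreaded */
static bool lock_held;		/* stands in for a malloc spinlock */

static void
guarded_lock(void)
{

	if (threaded)
		lock_held = true;
}

static void
guarded_unlock(void)
{

	if (threaded)
		lock_held = false;
}

static void
always_unlock(void)
{

	lock_held = false;	/* what the *_always_unlock() helpers do */
}

int
main(void)
{

	guarded_lock();		/* _malloc_prefork() in the threaded parent */
	threaded = false;	/* the child runs _thr_setthreaded(0) */
	guarded_unlock();	/* old _malloc_postfork(): silently a no-op */
	printf("held after guarded unlock: %d\n", lock_held);	/* 1 */
	always_unlock();	/* patched _malloc_postfork() */
	printf("held after always unlock: %d\n", lock_held);	/* 0 */
	return (0);
}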

Pessimization removed:
Index: lib/libc/stdlib/malloc.c
===================================================================
--- lib/libc/stdlib/malloc.c	(revision 187160)
+++ lib/libc/stdlib/malloc.c	(working copy)
@@ -1217,6 +1217,13 @@
 		_SPINUNLOCK(&mutex->lock);
 }
 
+static inline void
+malloc_mutex_always_unlock(malloc_mutex_t *mutex)
+{
+
+	_SPINUNLOCK(&mutex->lock);
+}
+
 /*
  * End mutex.
  */
@@ -1300,6 +1307,13 @@
 		_pthread_mutex_unlock(lock);
 }
 
+static inline void
+malloc_spin_always_unlock(pthread_mutex_t *lock)
+{
+
+	_pthread_mutex_unlock(lock);
+}
+
 /*
  * End spin lock.
  */
@@ -5515,9 +5529,8 @@
 void
 _malloc_prefork(void)
 {
+	arena_t *larenas[narenas];
 	bool again;
-	unsigned i, j;
-	arena_t *larenas[narenas], *tarenas[narenas];
 
 	/* Acquire all mutexes in a safe order. */
 
@@ -5530,19 +5543,23 @@
 	 */
 	memset(larenas, 0, sizeof(arena_t *) * narenas);
 	do {
+		unsigned int i;
+
 		again = false;
 
 		malloc_spin_lock(&arenas_lock);
 		for (i = 0; i < narenas; i++) {
 			if (arenas[i] != larenas[i]) {
+				arena_t *tarenas[narenas];
+				unsigned int j;
+
 				memcpy(tarenas, arenas, sizeof(arena_t *) *
 				    narenas);
 				malloc_spin_unlock(&arenas_lock);
 				for (j = 0; j < narenas; j++) {
 					if (larenas[j] != tarenas[j]) {
 						larenas[j] = tarenas[j];
-						malloc_spin_lock(
-						    &larenas[j]->lock);
+						malloc_spin_lock(&larenas[j]->lock);
 					}
 				}
 				again = true;
@@ -5569,19 +5586,24 @@
 	/* Release all mutexes, now that fork() has completed. */
 
 #ifdef MALLOC_DSS
-	malloc_mutex_unlock(&dss_mtx);
+	malloc_mutex_always_unlock(&dss_mtx);
 #endif
 
-	malloc_mutex_unlock(&huge_mtx);
+	malloc_mutex_always_unlock(&huge_mtx);
 
-	malloc_mutex_unlock(&base_mtx);
+	malloc_mutex_always_unlock(&base_mtx);
 
 	memcpy(larenas, arenas, sizeof(arena_t *) * narenas);
-	malloc_spin_unlock(&arenas_lock);
+	malloc_spin_always_unlock(&arenas_lock);
 	for (i = 0; i < narenas; i++) {
 		if (larenas[i] != NULL)
-			malloc_spin_unlock(&larenas[i]->lock);
+			malloc_spin_always_unlock(&larenas[i]->lock);
 	}
+	/*
+	 * This ends the special exemption from the __isthreaded checks for
+	 * the malloc locks: in effect, we really are single-threaded right
+	 * now, regardless of __isthreaded status.
+	 */
 }
 
 /*
Index: lib/libthr/thread/thr_fork.c
===================================================================
--- lib/libthr/thread/thr_fork.c	(revision 187160)
+++ lib/libthr/thread/thr_fork.c	(working copy)
@@ -105,7 +105,7 @@
 	struct pthread_atfork *af;
 	pid_t ret;
 	int errsave;
-	int unlock_malloc;
+	int was_threaded;
 	int rtld_locks[MAX_RTLD_LOCKS];
 
 	if (!_thr_is_inited())
@@ -122,16 +122,16 @@
 	}
 
 	/*
-	 * Try our best to protect memory from being corrupted in
-	 * child process because another thread in malloc code will
-	 * simply be kill by fork().
+	 * All bets are off as to what should happen soon if the parent
+	 * process was not so kind as to set up pthread fork hooks to
+	 * relinquish all running threads.
 	 */
 	if (_thr_isthreaded() != 0) {
-		unlock_malloc = 1;
+		was_threaded = 1;
 		_malloc_prefork();
 		_rtld_atfork_pre(rtld_locks);
 	} else {
-		unlock_malloc = 0;
+		was_threaded = 0;
 	}
 
 	/*
@@ -159,7 +159,7 @@
 		_thr_umutex_init(&curthread->lock);
 		_thr_umutex_init(&_thr_atfork_lock);
 
-		if (unlock_malloc)
+		if (was_threaded)
 			_rtld_atfork_post(rtld_locks);
 		_thr_setthreaded(0);
 
@@ -173,7 +173,7 @@
 		/* Ready to continue, unblock signals. */ 
 		_thr_signal_unblock(curthread);
 
-		if (unlock_malloc)
+		if (was_threaded)
 			_malloc_postfork();
 
 		/* Run down atfork child handlers. */
@@ -188,7 +188,7 @@
 		/* Ready to continue, unblock signals. */ 
 		_thr_signal_unblock(curthread);
 
-		if (unlock_malloc) {
+		if (was_threaded) {
 			_rtld_atfork_post(rtld_locks);
 			_malloc_postfork();
 		}
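
For anyone who wants to poke at this, here is a rough sketch of the
threaded -> forked -> rethreaded sequence from the Subject line (my own
throwaway test, not a polished regression test; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static void *
spin(void *arg)
{

	for (;;)
		pause();	/* keep the parent threaded across fork() */
	return (NULL);
}

static void *
use_malloc(void *arg)
{

	free(malloc(64));	/* any allocation takes the malloc locks */
	return (NULL);
}

int
main(void)
{
	pthread_t td;
	pid_t pid;

	pthread_create(&td, NULL, spin, NULL);		/* threaded */
	pid = fork();					/* forked */
	if (pid == 0) {
		pthread_create(&td, NULL, use_malloc, NULL); /* rethreaded */
		pthread_join(td, NULL);
		printf("child survived\n");
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return (0);
}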

-- 
Brian Fundakowski Feldman                           \'[ FreeBSD ]''''''''''\
  <> green@FreeBSD.org                               \  The Power to Serve! \
 Opinions expressed are my own.                       \,,,,,,,,,,,,,,,,,,,,,,\
