Skip site navigation (1)Skip section navigation (2)
Date:      Thu, 4 Feb 2021 18:55:44 GMT
From:      Ryan Stone <rstone@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: b58cf1cb35ab - main - Fix race condition in linuxkpi workqueue
Message-ID:  <202102041855.114Itiod020377@gitrepo.freebsd.org>

next in thread | raw e-mail | index | archive | help
The branch main has been updated by rstone:

URL: https://cgit.FreeBSD.org/src/commit/?id=b58cf1cb35abc095d7fb9773630794b38bbc6b1d

commit b58cf1cb35abc095d7fb9773630794b38bbc6b1d
Author:     Ryan Stone <rstone@FreeBSD.org>
AuthorDate: 2021-01-07 17:25:56 +0000
Commit:     Ryan Stone <rstone@FreeBSD.org>
CommitDate: 2021-02-04 18:54:53 +0000

    Fix race condition in linuxkpi workqueue
    
    Consider the following scenario:
    
    1. A delayed_work struct in the WORK_ST_TIMER state.
    2. Thread A calls mod_delayed_work()
    3. Thread B (a callout thread) simultaneously calls
    linux_delayed_work_timer_fn()
    
    The following sequence of events is possible:
    
    A: Call linux_cancel_delayed_work()
    A: Change state from TIMER to CANCEL
    B: Change state from CANCEL to TASK
    B: taskqueue_enqueue() the task
    A: taskqueue_cancel() the task
    A: Call linux_queue_delayed_work_on().  This is a no-op because the
    state is WORK_ST_TASK.
    
    As a result, the delayed_work struct will never be invoked.  This is
    causing address resolution in ib_addr.c to stop permanently, as it
    never tries to reschedule a task that it thinks is already scheduled.
    
    Fix this by introducing locking into the cancel path (which
    corresponds with the lock held while the callout runs).  This will
    prevent the callout from changing the state of the task until the
    cancel is complete, preventing the race.
    
    Differential Revision:  https://reviews.freebsd.org/D28420
    Reviewed by: hselasky
    MFC after: 2 months
---
 sys/compat/linuxkpi/common/src/linux_work.c | 49 ++++++++++++++++-------------
 1 file changed, 27 insertions(+), 22 deletions(-)

diff --git a/sys/compat/linuxkpi/common/src/linux_work.c b/sys/compat/linuxkpi/common/src/linux_work.c
index 45025378baa9..f9cf62928760 100644
--- a/sys/compat/linuxkpi/common/src/linux_work.c
+++ b/sys/compat/linuxkpi/common/src/linux_work.c
@@ -435,13 +435,17 @@ linux_cancel_delayed_work(struct delayed_work *dwork)
 		[WORK_ST_CANCEL] = WORK_ST_CANCEL,	/* NOP */
 	};
 	struct taskqueue *tq;
+	bool cancelled;
 
+	mtx_lock(&dwork->timer.mtx);
 	switch (linux_update_state(&dwork->work.state, states)) {
 	case WORK_ST_TIMER:
 	case WORK_ST_CANCEL:
-		if (linux_cancel_timer(dwork, 0)) {
+		cancelled = (callout_stop(&dwork->timer.callout) == 1);
+		if (cancelled) {
 			atomic_cmpxchg(&dwork->work.state,
 			    WORK_ST_CANCEL, WORK_ST_IDLE);
+			mtx_unlock(&dwork->timer.mtx);
 			return (true);
 		}
 		/* FALLTHROUGH */
@@ -450,10 +454,12 @@ linux_cancel_delayed_work(struct delayed_work *dwork)
 		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
 			atomic_cmpxchg(&dwork->work.state,
 			    WORK_ST_CANCEL, WORK_ST_IDLE);
+			mtx_unlock(&dwork->timer.mtx);
 			return (true);
 		}
 		/* FALLTHROUGH */
 	default:
+		mtx_unlock(&dwork->timer.mtx);
 		return (false);
 	}
 }
@@ -475,37 +481,36 @@ linux_cancel_delayed_work_sync(struct delayed_work *dwork)
 	};
 	struct taskqueue *tq;
 	bool retval = false;
+	int ret, state;
+	bool cancelled;
 
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 	    "linux_cancel_delayed_work_sync() might sleep");
-retry:
-	switch (linux_update_state(&dwork->work.state, states)) {
+	mtx_lock(&dwork->timer.mtx);
+
+	state = linux_update_state(&dwork->work.state, states);
+	switch (state) {
 	case WORK_ST_IDLE:
+		mtx_unlock(&dwork->timer.mtx);
 		return (retval);
-	case WORK_ST_EXEC:
-		tq = dwork->work.work_queue->taskqueue;
-		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) != 0)
-			taskqueue_drain(tq, &dwork->work.work_task);
-		goto retry;	/* work may have restarted itself */
 	case WORK_ST_TIMER:
 	case WORK_ST_CANCEL:
-		if (linux_cancel_timer(dwork, 1)) {
-			/*
-			 * Make sure taskqueue is also drained before
-			 * returning:
-			 */
-			tq = dwork->work.work_queue->taskqueue;
-			taskqueue_drain(tq, &dwork->work.work_task);
-			retval = true;
-			goto retry;
-		}
-		/* FALLTHROUGH */
+		cancelled = (callout_stop(&dwork->timer.callout) == 1);
+
+		tq = dwork->work.work_queue->taskqueue;
+		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
+		mtx_unlock(&dwork->timer.mtx);
+
+		callout_drain(&dwork->timer.callout);
+		taskqueue_drain(tq, &dwork->work.work_task);
+		return (cancelled || (ret != 0));
 	default:
 		tq = dwork->work.work_queue->taskqueue;
-		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) != 0)
+		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
+		mtx_unlock(&dwork->timer.mtx);
+		if (ret != 0)
 			taskqueue_drain(tq, &dwork->work.work_task);
-		retval = true;
-		goto retry;
+		return (ret != 0);
 	}
 }
 



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?202102041855.114Itiod020377>