Date:      Sat, 14 Nov 2020 01:55:54 +0000 (UTC)
From:      Kyle Evans <kevans@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject:   svn commit: r367662 - stable/12/sys/kern
Message-ID:  <202011140155.0AE1ts7P095117@repo.freebsd.org>

Author: kevans
Date: Sat Nov 14 01:55:54 2020
New Revision: 367662
URL: https://svnweb.freebsd.org/changeset/base/367662

Log:
  MFC r367440: epoch: support non-preemptible epochs checking in_epoch()
  
  Previously, in_epoch() could not be used with non-preemptible epochs: it
  would always fail for them, usually because non-preemptible epochs don't
  imply THREAD_NO_SLEEPING.
  
  For default epochs, it's easy enough to verify that we're in the given
  epoch: if we're in a critical section and our record for the given epoch
  is active, then we're in it.
  
  This patch also adds some additional INVARIANTS bookkeeping. Notably, we set
  and check the recorded thread in epoch_enter/epoch_exit to try to catch
  some edge cases for the caller. It also checks upon freeing that none of
  the records still had a thread in the epoch, which should make it a little
  easier to diagnose improper use if epoch_free() takes place while some
  other thread is inside.
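  
  As a usage illustration (hypothetical caller, not part of this commit),
  code using a default, non-preemptible epoch can now sanity-check its
  context with in_epoch(); "my_epoch" here is illustrative, allocated
  without EPOCH_PREEMPT:
  
  	epoch_enter(my_epoch);
  	MPASS(in_epoch(my_epoch));	/* previously this always returned 0 */
  	/* ... lockless reads protected by the epoch ... */
  	epoch_exit(my_epoch);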
  
  This version differs slightly from what was just previously reviewed by the
  reviewers listed in the original commit, in that in_epoch() will assert
  that no CPU has this thread recorded even if it *is* currently in a
  critical section. This is intended to catch cases where the caller has
  somehow messed up critical section nesting; we can catch both the case
  where the thread exited the critical section and the case where it exited,
  migrated, then re-entered (on the wrong CPU).
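  
  As a sketch of the kind of misuse the stricter check is meant to catch
  (a hypothetical bug, not code from this commit):
  
  	epoch_enter(my_epoch);		/* enters a critical section; er_td = curthread */
  	critical_exit();		/* BUG: unbalanced exit; thread may now migrate */
  	(void)in_epoch(my_epoch);	/* KASSERTs: some CPU still has us recorded */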

Modified:
  stable/12/sys/kern/subr_epoch.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/kern/subr_epoch.c
==============================================================================
--- stable/12/sys/kern/subr_epoch.c	Sat Nov 14 01:55:02 2020	(r367661)
+++ stable/12/sys/kern/subr_epoch.c	Sat Nov 14 01:55:54 2020	(r367662)
@@ -69,6 +69,10 @@ typedef struct epoch_record {
 	/* fields above are part of KBI and cannot be modified */
 	struct epoch_context er_drain_ctx;
 	struct epoch *er_parent;
+#ifdef INVARIANTS
+	/* Used to verify record ownership for non-preemptible epochs. */
+	struct thread *er_td;
+#endif
 } __aligned(EPOCH_ALIGN)     *epoch_record_t;
 
 struct epoch {
@@ -251,6 +255,9 @@ done:
 void
 epoch_free(epoch_t epoch)
 {
+#ifdef INVARIANTS
+	int cpu;
+#endif
 
 	EPOCH_LOCK();
 
@@ -264,6 +271,21 @@ epoch_free(epoch_t epoch)
 	 * to zero, by calling epoch_wait() on the global_epoch:
 	 */
 	epoch_wait(global_epoch);
+#ifdef INVARIANTS
+	CPU_FOREACH(cpu) {
+		epoch_record_t er;
+
+		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
+
+		/*
+		 * Sanity check: none of the records should be in use anymore.
+		 * We drained callbacks above and freeing the pcpu records is
+		 * imminent.
+		 */
+		MPASS(er->er_td == NULL);
+		MPASS(TAILQ_EMPTY(&er->er_tdlist));
+	}
+#endif
 	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
 	mtx_destroy(&epoch->e_drain_mtx);
 	sx_destroy(&epoch->e_drain_sx);
@@ -306,6 +328,8 @@ epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
 
 	td->td_pre_epoch_prio = td->td_priority;
 	er = epoch_currecord(epoch);
+	/* Record-level tracking is reserved for non-preemptible epochs. */
+	MPASS(er->er_td == NULL);
 	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
 	ck_epoch_begin(&er->er_record, &et->et_section);
 	critical_exit();
@@ -324,6 +348,15 @@ epoch_enter(epoch_t epoch)
 	td->td_epochnest++;
 	critical_enter();
 	er = epoch_currecord(epoch);
+#ifdef INVARIANTS
+	if (er->er_record.active == 0) {
+		MPASS(er->er_td == NULL);
+		er->er_td = curthread;
+	} else {
+		/* We've recursed, just make sure our accounting isn't wrong. */
+		MPASS(er->er_td == curthread);
+	}
+#endif
 	ck_epoch_begin(&er->er_record, NULL);
 }
 
@@ -351,6 +384,8 @@ epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
 #endif
 #ifdef INVARIANTS
 	et->et_td = (void*)0xDEADBEEF;
+	/* Record-level tracking is reserved for non-preemptible epochs. */
+	MPASS(er->er_td == NULL);
 #endif
 	ck_epoch_end(&er->er_record, &et->et_section);
 	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
@@ -372,6 +407,11 @@ epoch_exit(epoch_t epoch)
 	td->td_epochnest--;
 	er = epoch_currecord(epoch);
 	ck_epoch_end(&er->er_record, NULL);
+#ifdef INVARIANTS
+	MPASS(er->er_td == curthread);
+	if (er->er_record.active == 0)
+		er->er_td = NULL;
+#endif
 	critical_exit();
 }
 
@@ -662,18 +702,18 @@ epoch_call_task(void *arg __unused)
 	}
 }
 
-int
-in_epoch_verbose(epoch_t epoch, int dump_onfail)
+static int
+in_epoch_verbose_preempt(epoch_t epoch, int dump_onfail)
 {
+	epoch_record_t er;
 	struct epoch_tracker *tdwait;
 	struct thread *td;
-	epoch_record_t er;
 
+	MPASS(epoch != NULL);
+	MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
 	td = curthread;
 	if (td->td_epochnest == 0)
 		return (0);
-	if (__predict_false((epoch) == NULL))
-		return (0);
 	critical_enter();
 	er = epoch_currecord(epoch);
 	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
@@ -692,6 +732,66 @@ in_epoch_verbose(epoch_t epoch, int dump_onfail)
 #endif
 	critical_exit();
 	return (0);
+}
+
+#ifdef INVARIANTS
+static void
+epoch_assert_nocpu(epoch_t epoch, struct thread *td)
+{
+	epoch_record_t er;
+	int cpu;
+	bool crit;
+
+	crit = td->td_critnest > 0;
+
+	/* Check for a critical section mishap. */
+	CPU_FOREACH(cpu) {
+		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
+		KASSERT(er->er_td != td,
+		    ("%s critical section in epoch from cpu %d",
+		    (crit ? "exited" : "re-entered"), cpu));
+	}
+}
+#else
+#define	epoch_assert_nocpu(e, td)
+#endif
+
+int
+in_epoch_verbose(epoch_t epoch, int dump_onfail)
+{
+	epoch_record_t er;
+	struct thread *td;
+
+	if (__predict_false((epoch) == NULL))
+		return (0);
+	if ((epoch->e_flags & EPOCH_PREEMPT) != 0)
+		return (in_epoch_verbose_preempt(epoch, dump_onfail));
+
+	/*
+	 * The thread being in a critical section is a necessary
+	 * condition to be correctly inside a non-preemptible epoch,
+	 * so it's definitely not in this epoch.
+	 */
+	td = curthread;
+	if (td->td_critnest == 0) {
+		epoch_assert_nocpu(epoch, td);
+		return (0);
+	}
+
+	/*
+	 * The current cpu is in a critical section, so the epoch record will be
+	 * stable for the rest of this function.  Knowing that the record is not
+	 * active is sufficient for knowing whether we're in this epoch or not,
+	 * since it's a pcpu record.
+	 */
+	er = epoch_currecord(epoch);
+	if (er->er_record.active == 0) {
+		epoch_assert_nocpu(epoch, td);
+		return (0);
+	}
+
+	MPASS(er->er_td == td);
+	return (1);
 }
 
 int
