Date:      Wed, 6 Nov 2019 08:46:24 +0000 (UTC)
From:      Andriy Gapon <avg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject:   svn commit: r354378 - vendor-sys/illumos/dist/common/zfs vendor-sys/illumos/dist/uts/common vendor-sys/illumos/dist/uts/common/fs/zfs vendor-sys/illumos/dist/uts/common/fs/zfs/sys vendor-sys/illumo...
Message-ID:  <201911060846.xA68kOqc040866@repo.freebsd.org>

Author: avg
Date: Wed Nov  6 08:46:24 2019
New Revision: 354378
URL: https://svnweb.freebsd.org/changeset/base/354378

Log:
  10499 Multi-modifier protection (MMP)
  
  illumos/illumos-gate@e0f1c0afa46cc84d4b1e40124032a9a87310386e
  https://github.com/illumos/illumos-gate/commit/e0f1c0afa46cc84d4b1e40124032a9a87310386e
  
  https://www.illumos.org/issues/10499
    Port the following ZFS commits from ZoL to illumos.
    379ca9cf2 Multi-modifier protection (MMP)
    bbffb59ef Fix multihost stale cache file import
    0d398b256 Do not initiate MMP writes while pool is suspended
  
  Portions contributed by: Jerry Jelinek <jerry.jelinek@joyent.com>
  Portions contributed by: Tim Chase <tim@chase2k.com>
  Portions contributed by: sanjeevbagewadi <sanjeev.bagewadi@gmail.com>
  Portions contributed by: John L. Hammond <john.hammond@intel.com>
  Portions contributed by: Giuseppe Di Natale <dinatale2@llnl.gov>
  Portions contributed by: Prakash Surya <surya1@llnl.gov>
  Author: Olaf Faaland <faaland1@llnl.gov>

Added:
  vendor-sys/illumos/dist/uts/common/fs/zfs/mmp.c   (contents, props changed)
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/mmp.h   (contents, props changed)
Modified:
  vendor-sys/illumos/dist/common/zfs/zfs_comutil.h
  vendor-sys/illumos/dist/common/zfs/zpool_prop.c
  vendor-sys/illumos/dist/uts/common/Makefile.files
  vendor-sys/illumos/dist/uts/common/fs/zfs/dsl_pool.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/spa.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/spa_config.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/spa_misc.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/dsl_pool.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/spa.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/spa_impl.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/uberblock.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/uberblock_impl.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/vdev.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/vdev_impl.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/zio.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/uberblock.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_label.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/zfs_ioctl.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/zio.c
  vendor-sys/illumos/dist/uts/common/sys/fs/zfs.h

Changes in other areas also in this revision:
Modified:
  vendor/illumos/dist/cmd/zdb/zdb.c
  vendor/illumos/dist/cmd/zhack/zhack.c
  vendor/illumos/dist/cmd/zpool/zpool_main.c
  vendor/illumos/dist/cmd/ztest/ztest.c
  vendor/illumos/dist/lib/libzfs/common/libzfs.h
  vendor/illumos/dist/lib/libzfs/common/libzfs_dataset.c
  vendor/illumos/dist/lib/libzfs/common/libzfs_impl.h
  vendor/illumos/dist/lib/libzfs/common/libzfs_import.c
  vendor/illumos/dist/lib/libzfs/common/libzfs_pool.c
  vendor/illumos/dist/lib/libzfs/common/libzfs_status.c
  vendor/illumos/dist/lib/libzfs/common/libzfs_util.c
  vendor/illumos/dist/lib/libzpool/common/kernel.c
  vendor/illumos/dist/man/man1m/zpool.1m

Modified: vendor-sys/illumos/dist/common/zfs/zfs_comutil.h
==============================================================================
--- vendor-sys/illumos/dist/common/zfs/zfs_comutil.h	Wed Nov  6 08:44:35 2019	(r354377)
+++ vendor-sys/illumos/dist/common/zfs/zfs_comutil.h	Wed Nov  6 08:46:24 2019	(r354378)
@@ -21,6 +21,7 @@
 /*
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
  */
 
 #ifndef	_ZFS_COMUTIL_H
@@ -32,6 +33,9 @@
 #ifdef	__cplusplus
 extern "C" {
 #endif
+
+/* Needed for ZoL errno usage in MMP kernel and user code */
+#define	EREMOTEIO EREMOTE
 
 extern boolean_t zfs_allocatable_devs(nvlist_t *);
 extern void zpool_get_load_policy(nvlist_t *, zpool_load_policy_t *);
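
The EREMOTEIO alias above exists because the MMP code ported from ZoL reports
an actively-imported pool with EREMOTEIO, an errno that Linux defines but
illumos does not; mapping it to EREMOTE lets the shared kernel and user code
compile unchanged.  A minimal userland sketch of the pattern, with a
hypothetical try_import() standing in for the real import path:

/* Hypothetical illustration of the EREMOTEIO alias in zfs_comutil.h. */
#include <errno.h>
#include <stdio.h>

#ifndef EREMOTEIO		/* illumos: same alias as the header above */
#define	EREMOTEIO	EREMOTE
#endif

static int
try_import(int active_elsewhere)
{
	/* The MMP import path reports an active pool as EREMOTEIO. */
	return (active_elsewhere ? EREMOTEIO : 0);
}

int
main(void)
{
	if (try_import(1) == EREMOTEIO)
		(void) printf("pool is in use on another host\n");
	return (0);
}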

Modified: vendor-sys/illumos/dist/common/zfs/zpool_prop.c
==============================================================================
--- vendor-sys/illumos/dist/common/zfs/zpool_prop.c	Wed Nov  6 08:44:35 2019	(r354377)
+++ vendor-sys/illumos/dist/common/zfs/zpool_prop.c	Wed Nov  6 08:46:24 2019	(r354378)
@@ -125,6 +125,9 @@ zpool_prop_init(void)
 	    PROP_DEFAULT, ZFS_TYPE_POOL, "on | off", "EXPAND", boolean_table);
 	zprop_register_index(ZPOOL_PROP_READONLY, "readonly", 0,
 	    PROP_DEFAULT, ZFS_TYPE_POOL, "on | off", "RDONLY", boolean_table);
+	zprop_register_index(ZPOOL_PROP_MULTIHOST, "multihost", 0,
+	    PROP_DEFAULT, ZFS_TYPE_POOL, "on | off", "MULTIHOST",
+	    boolean_table);
 
 	/* default index properties */
 	zprop_register_index(ZPOOL_PROP_FAILUREMODE, "failmode",
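
With the registration above, 'multihost' behaves like any other boolean pool
property: it is toggled with "zpool set multihost=on <pool>" and its value
reaches the kernel as a uint64 index into boolean_table (0 = off, 1 = on).
A standalone sketch of the validation applied to that value, modeled on the
ZPOOL_PROP_MULTIHOST case added to spa_prop_validate() later in this commit
(spa_get_hostid() is mocked here for illustration):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
spa_get_hostid(void)			/* mock: a real hostid is nonzero */
{
	return (0xdeadbeef);
}

static int
validate_multihost(uint64_t intval)
{
	if (intval > 1)			/* boolean index: only 0 or 1 */
		return (EINVAL);
	if (spa_get_hostid() == 0)	/* multihost requires a hostid */
		return (ENOTSUP);
	return (0);
}

int
main(void)
{
	(void) printf("multihost=on -> %d\n", validate_multihost(1));
	(void) printf("multihost=2  -> %d\n", validate_multihost(2));
	return (0);
}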

Modified: vendor-sys/illumos/dist/uts/common/Makefile.files
==============================================================================
--- vendor-sys/illumos/dist/uts/common/Makefile.files	Wed Nov  6 08:44:35 2019	(r354377)
+++ vendor-sys/illumos/dist/uts/common/Makefile.files	Wed Nov  6 08:46:24 2019	(r354378)
@@ -1376,6 +1376,7 @@ ZFS_COMMON_OBJS +=		\
 	lz4.o			\
 	lzjb.o			\
 	metaslab.o		\
+	mmp.o			\
 	multilist.o		\
 	range_tree.o		\
 	refcount.o		\

Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/dsl_pool.c
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/dsl_pool.c	Wed Nov  6 08:44:35 2019	(r354377)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/dsl_pool.c	Wed Nov  6 08:46:24 2019	(r354378)
@@ -50,6 +50,7 @@
 #include <sys/zfeature.h>
 #include <sys/zil_impl.h>
 #include <sys/dsl_userhold.h>
+#include <sys/mmp.h>
 
 /*
  * ZFS Write Throttle
@@ -192,6 +193,7 @@ dsl_pool_open_impl(spa_t *spa, uint64_t txg)
 	dp->dp_meta_rootbp = *bp;
 	rrw_init(&dp->dp_config_rwlock, B_TRUE);
 	txg_init(dp, txg);
+	mmp_init(spa);
 
 	txg_list_create(&dp->dp_dirty_datasets, spa,
 	    offsetof(dsl_dataset_t, ds_dirty_link));
@@ -393,6 +395,7 @@ dsl_pool_close(dsl_pool_t *dp)
 	 */
 	arc_flush(dp->dp_spa, FALSE);
 
+	mmp_fini(dp->dp_spa);
 	txg_fini(dp);
 	dsl_scan_fini(dp);
 	dmu_buf_user_evict_wait();

Added: vendor-sys/illumos/dist/uts/common/fs/zfs/mmp.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/mmp.c	Wed Nov  6 08:46:24 2019	(r354378)
@@ -0,0 +1,582 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
+ * Copyright 2019 Joyent, Inc.
+ */
+
+#include <sys/abd.h>
+#include <sys/mmp.h>
+#include <sys/spa.h>
+#include <sys/spa_impl.h>
+#include <sys/time.h>
+#include <sys/vdev.h>
+#include <sys/vdev_impl.h>
+#include <sys/zfs_context.h>
+#include <sys/callb.h>
+
+/*
+ * Multi-Modifier Protection (MMP) attempts to prevent a user from importing
+ * or opening a pool on more than one host at a time.  In particular, it
+ * prevents "zpool import -f" on a host from succeeding while the pool is
+ * already imported on another host.  There are many other ways in which a
+ * device could be used by two hosts for different purposes at the same time,
+ * resulting in pool damage.  This implementation does not attempt to detect
+ * those cases.
+ *
+ * MMP operates by ensuring there are frequent visible changes on disk (a
+ * "heartbeat") at all times, and by altering the import process to check
+ * for these changes and fail the import when they are detected.  This
+ * functionality is enabled by setting the 'multihost' pool property to on.
+ *
+ * Uberblocks written by the txg_sync thread always go into the first
+ * (N-MMP_BLOCKS_PER_LABEL) slots; the remaining slots are reserved for MMP.
+ * They are used to hold uberblocks which are exactly the same as the last
+ * synced uberblock except that the ub_timestamp is frequently updated.
+ * Like all other uberblocks, the slot is written with an embedded checksum,
+ * and slots with invalid checksums are ignored.  This provides the
+ * "heartbeat", with no risk of overwriting good uberblocks that must be
+ * preserved, e.g. previous txgs and associated block pointers.
+ *
+ * Two optional fields are added to uberblock structure: ub_mmp_magic and
+ * ub_mmp_delay.  The magic field allows zfs to tell whether ub_mmp_delay is
+ * valid.  The delay field is a decaying average of the amount of time between
+ * completion of successive MMP writes, in nanoseconds.  It is used to predict
+ * how long the import must wait to detect activity in the pool, before
+ * concluding it is not in use.
+ *
+ * During import an activity test may now be performed to determine if
+ * the pool is in use.  The activity test is typically required if the
+ * ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is
+ * POOL_STATE_ACTIVE, and the pool is not a root pool.
+ *
+ * The activity test finds the "best" uberblock (highest txg & timestamp),
+ * waits some time, and then finds the "best" uberblock again.  If the txg
+ * and timestamp in both "best" uberblocks do not match, the pool is in use
+ * by another host and the import fails.  Since the granularity of the
+ * timestamp is in seconds, this activity test must take a bare minimum of one
+ * second.  In order to assure the accuracy of the activity test, the default
+ * values result in an activity test duration of 10x the mmp write interval.
+ *
+ * The "zpool import" activity test can be expected to take a minimum time of
+ * zfs_multihost_import_intervals * zfs_multihost_interval milliseconds.  If the
+ * "best" uberblock has a valid ub_mmp_delay field, then the test may take
+ * longer if MMP writes were occurring less frequently than expected.
+ * Additionally, the duration is extended by a random factor of up to 25% to
+ * attempt to detect simultaneous imports, for example when both partner hosts
+ * are rebooted at the same time and automatically attempt to import the pool.
+ */
+
+/*
+ * Used to control the frequency of mmp writes which are performed when the
+ * 'multihost' pool property is on.  This is one factor used to determine the
+ * length of the activity check during import.
+ *
+ * The mmp write period is zfs_multihost_interval / leaf-vdevs milliseconds.
+ * This means that on average an mmp write will be issued for each leaf vdev
+ * every zfs_multihost_interval milliseconds.  In practice, the observed period
+ * can vary with the I/O load, and this observed value is the delay that is
+ * stored in the uberblock.  The minimum allowed value is 100 ms.
+ */
+ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL;
+
+/*
+ * Used to control the duration of the activity test on import.  Smaller values
+ * of zfs_multihost_import_intervals will reduce the import time but increase
+ * the risk of failing to detect an active pool.  The total activity check time
+ * is never allowed to drop below one second.  A value of 0 is ignored and
+ * treated as if it was set to 1.
+ */
+uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;
+
+/*
+ * Controls the behavior of the pool when mmp write failures are detected.
+ *
+ * When zfs_multihost_fail_intervals = 0, mmp write failures are ignored.
+ * The failures will still be reported to the ZED, which, depending on its
+ * configuration, may take action such as suspending the pool or taking a
+ * device offline.
+ *
+ * When zfs_multihost_fail_intervals > 0, sequential mmp write failures will
+ * cause the pool to be suspended.  This occurs when
+ * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds have
+ * passed since the last successful mmp write.  This guarantees the activity
+ * test will see mmp writes if the pool is imported.
+ */
+uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;
+
+char *mmp_tag = "mmp_write_uberblock";
+static void mmp_thread(void *arg);
+
+void
+mmp_init(spa_t *spa)
+{
+	mmp_thread_t *mmp = &spa->spa_mmp;
+
+	mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
+	cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
+	mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
+	mmp->mmp_kstat_id = 1;
+}
+
+void
+mmp_fini(spa_t *spa)
+{
+	mmp_thread_t *mmp = &spa->spa_mmp;
+
+	mutex_destroy(&mmp->mmp_thread_lock);
+	cv_destroy(&mmp->mmp_thread_cv);
+	mutex_destroy(&mmp->mmp_io_lock);
+}
+
+static void
+mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
+{
+	CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
+	mutex_enter(&mmp->mmp_thread_lock);
+}
+
+static void
+mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
+{
+	ASSERT(*mpp != NULL);
+	*mpp = NULL;
+	cv_broadcast(&mmp->mmp_thread_cv);
+	CALLB_CPR_EXIT(cpr);		/* drops &mmp->mmp_thread_lock */
+	thread_exit();
+}
+
+void
+mmp_thread_start(spa_t *spa)
+{
+	mmp_thread_t *mmp = &spa->spa_mmp;
+
+	if (spa_writeable(spa)) {
+		mutex_enter(&mmp->mmp_thread_lock);
+		if (!mmp->mmp_thread) {
+			dprintf("mmp_thread_start pool %s\n",
+			    spa->spa_name);
+			mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
+			    spa, 0, &p0, TS_RUN, minclsyspri);
+		}
+		mutex_exit(&mmp->mmp_thread_lock);
+	}
+}
+
+void
+mmp_thread_stop(spa_t *spa)
+{
+	mmp_thread_t *mmp = &spa->spa_mmp;
+
+	mutex_enter(&mmp->mmp_thread_lock);
+	mmp->mmp_thread_exiting = 1;
+	cv_broadcast(&mmp->mmp_thread_cv);
+
+	while (mmp->mmp_thread) {
+		cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
+	}
+	mutex_exit(&mmp->mmp_thread_lock);
+
+	ASSERT(mmp->mmp_thread == NULL);
+	mmp->mmp_thread_exiting = 0;
+}
+
+typedef enum mmp_vdev_state_flag {
+	MMP_FAIL_NOT_WRITABLE	= (1 << 0),
+	MMP_FAIL_WRITE_PENDING	= (1 << 1),
+} mmp_vdev_state_flag_t;
+
+/*
+ * Find a leaf vdev to write an MMP block to.  It must not have an outstanding
+ * mmp write (if so a new write will also likely block).  If there is no usable
+ * leaf, a nonzero error value is returned.  The error value is a bit field
+ * composed of the following flags:
+ *
+ * MMP_FAIL_WRITE_PENDING   One or more leaf vdevs are writeable, but have an
+ *                          outstanding MMP write.
+ * MMP_FAIL_NOT_WRITABLE    One or more leaf vdevs are not writeable.
+ */
+
+static int
+mmp_next_leaf(spa_t *spa)
+{
+	vdev_t *leaf;
+	vdev_t *starting_leaf;
+	int fail_mask = 0;
+
+	ASSERT(MUTEX_HELD(&spa->spa_mmp.mmp_io_lock));
+	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER));
+	ASSERT(list_link_active(&spa->spa_leaf_list.list_head) == B_TRUE);
+	ASSERT(!list_is_empty(&spa->spa_leaf_list));
+
+	if (spa->spa_mmp.mmp_leaf_last_gen != spa->spa_leaf_list_gen) {
+		spa->spa_mmp.mmp_last_leaf = list_head(&spa->spa_leaf_list);
+		spa->spa_mmp.mmp_leaf_last_gen = spa->spa_leaf_list_gen;
+	}
+
+	leaf = spa->spa_mmp.mmp_last_leaf;
+	if (leaf == NULL)
+		leaf = list_head(&spa->spa_leaf_list);
+	starting_leaf = leaf;
+
+	do {
+		leaf = list_next(&spa->spa_leaf_list, leaf);
+		if (leaf == NULL)
+			leaf = list_head(&spa->spa_leaf_list);
+
+		if (!vdev_writeable(leaf)) {
+			fail_mask |= MMP_FAIL_NOT_WRITABLE;
+		} else if (leaf->vdev_mmp_pending != 0) {
+			fail_mask |= MMP_FAIL_WRITE_PENDING;
+		} else {
+			spa->spa_mmp.mmp_last_leaf = leaf;
+			return (0);
+		}
+	} while (leaf != starting_leaf);
+
+	ASSERT(fail_mask);
+
+	return (fail_mask);
+}
+
+/*
+ * MMP writes are issued on a fixed schedule, but may complete at variable,
+ * much longer, intervals.  The mmp_delay captures long periods between
+ * successful writes for any reason, including disk latency, scheduling delays,
+ * etc.
+ *
+ * The mmp_delay is usually calculated as a decaying average, but if the latest
+ * delay is higher we do not average it, so that we do not hide sudden spikes
+ * which the importing host must wait for.
+ *
+ * If writes are occurring frequently, such as due to a high rate of txg syncs,
+ * the mmp_delay could become very small.  Since those short delays depend on
+ * activity we cannot count on, we never allow mmp_delay to get lower than the
+ * rate expected if only mmp_thread writes occur.
+ *
+ * If an mmp write was skipped or fails, and we have already waited longer than
+ * mmp_delay, we need to update it so the next write reflects the longer delay.
+ *
+ * Do not set mmp_delay if the multihost property is not on, so as not to
+ * trigger an activity check on import.
+ */
+static void
+mmp_delay_update(spa_t *spa, boolean_t write_completed)
+{
+	mmp_thread_t *mts = &spa->spa_mmp;
+	hrtime_t delay = gethrtime() - mts->mmp_last_write;
+
+	ASSERT(MUTEX_HELD(&mts->mmp_io_lock));
+
+	if (spa_multihost(spa) == B_FALSE) {
+		mts->mmp_delay = 0;
+		return;
+	}
+
+	if (delay > mts->mmp_delay)
+		mts->mmp_delay = delay;
+
+	if (write_completed == B_FALSE)
+		return;
+
+	mts->mmp_last_write = gethrtime();
+
+	/*
+	 * strictly less than, in case delay was changed above.
+	 */
+	if (delay < mts->mmp_delay) {
+		hrtime_t min_delay = MSEC2NSEC(zfs_multihost_interval) /
+		    MAX(1, vdev_count_leaves(spa));
+		mts->mmp_delay = MAX(((delay + mts->mmp_delay * 127) / 128),
+		    min_delay);
+	}
+}
+
+static void
+mmp_write_done(zio_t *zio)
+{
+	spa_t *spa = zio->io_spa;
+	vdev_t *vd = zio->io_vd;
+	mmp_thread_t *mts = zio->io_private;
+
+	mutex_enter(&mts->mmp_io_lock);
+	uint64_t mmp_kstat_id = vd->vdev_mmp_kstat_id;
+	hrtime_t mmp_write_duration = gethrtime() - vd->vdev_mmp_pending;
+
+	mmp_delay_update(spa, (zio->io_error == 0));
+
+	vd->vdev_mmp_pending = 0;
+	vd->vdev_mmp_kstat_id = 0;
+
+	mutex_exit(&mts->mmp_io_lock);
+	spa_config_exit(spa, SCL_STATE, mmp_tag);
+
+	abd_free(zio->io_abd);
+}
+
+/*
+ * When the on-disk uberblock is updated by a spa_sync, creating a new
+ * "best" uberblock, update the copy stored in the mmp thread state,
+ * which is used for mmp writes.
+ */
+void
+mmp_update_uberblock(spa_t *spa, uberblock_t *ub)
+{
+	mmp_thread_t *mmp = &spa->spa_mmp;
+
+	mutex_enter(&mmp->mmp_io_lock);
+	mmp->mmp_ub = *ub;
+	mmp->mmp_ub.ub_timestamp = gethrestime_sec();
+	mmp_delay_update(spa, B_TRUE);
+	mutex_exit(&mmp->mmp_io_lock);
+}
+
+/*
+ * Choose a random vdev, label, and MMP block, and write over it
+ * with a copy of the last-synced uberblock, whose timestamp
+ * has been updated to reflect that the pool is in use.
+ */
+static void
+mmp_write_uberblock(spa_t *spa)
+{
+	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
+	mmp_thread_t *mmp = &spa->spa_mmp;
+	uberblock_t *ub;
+	vdev_t *vd = NULL;
+	int label, error;
+	uint64_t offset;
+
+	hrtime_t lock_acquire_time = gethrtime();
+	spa_config_enter(spa, SCL_STATE, mmp_tag, RW_READER);
+	lock_acquire_time = gethrtime() - lock_acquire_time;
+	if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
+		zfs_dbgmsg("SCL_STATE acquisition took %llu ns\n",
+		    (u_longlong_t)lock_acquire_time);
+
+	mutex_enter(&mmp->mmp_io_lock);
+
+	error = mmp_next_leaf(spa);
+
+	/*
+	 * spa_mmp_history has two types of entries:
+	 * Issued MMP write: records time issued, error status, etc.
+	 * Skipped MMP write: an MMP write could not be issued because no
+	 * suitable leaf vdev was available.  See comment above struct
+	 * spa_mmp_history for details.
+	 */
+
+	if (error) {
+		mmp_delay_update(spa, B_FALSE);
+		if (mmp->mmp_skip_error == error) {
+			/*
+			 * ZoL porting note: the following is TBD
+			 * spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1);
+			 */
+		} else {
+			mmp->mmp_skip_error = error;
+			/*
+			 * ZoL porting note: the following is TBD
+			 * spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg,
+			 * gethrestime_sec(), mmp->mmp_delay, NULL, 0,
+			 * mmp->mmp_kstat_id++, error);
+			 */
+		}
+		mutex_exit(&mmp->mmp_io_lock);
+		spa_config_exit(spa, SCL_STATE, mmp_tag);
+		return;
+	}
+
+	vd = spa->spa_mmp.mmp_last_leaf;
+	mmp->mmp_skip_error = 0;
+
+	if (mmp->mmp_zio_root == NULL)
+		mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
+		    flags | ZIO_FLAG_GODFATHER);
+
+	ub = &mmp->mmp_ub;
+	ub->ub_timestamp = gethrestime_sec();
+	ub->ub_mmp_magic = MMP_MAGIC;
+	ub->ub_mmp_delay = mmp->mmp_delay;
+	vd->vdev_mmp_pending = gethrtime();
+	vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id;
+
+	zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
+	abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
+	abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
+	abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
+
+	mmp->mmp_kstat_id++;
+	mutex_exit(&mmp->mmp_io_lock);
+
+	offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
+	    MMP_BLOCKS_PER_LABEL + spa_get_random(MMP_BLOCKS_PER_LABEL));
+
+	label = spa_get_random(VDEV_LABELS);
+	vdev_label_write(zio, vd, label, ub_abd, offset,
+	    VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
+	    flags | ZIO_FLAG_DONT_PROPAGATE);
+
+	/*
+	 * ZoL porting note: the following is TBD
+	 * (void) spa_mmp_history_add(spa, ub->ub_txg, ub->ub_timestamp,
+	 * ub->ub_mmp_delay, vd, label, vd->vdev_mmp_kstat_id, 0);
+	 */
+
+	zio_nowait(zio);
+}
+
+static void
+mmp_thread(void *arg)
+{
+	spa_t *spa = (spa_t *)arg;
+	mmp_thread_t *mmp = &spa->spa_mmp;
+	boolean_t last_spa_suspended = spa_suspended(spa);
+	boolean_t last_spa_multihost = spa_multihost(spa);
+	callb_cpr_t cpr;
+	hrtime_t max_fail_ns = zfs_multihost_fail_intervals *
+	    MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
+
+	mmp_thread_enter(mmp, &cpr);
+
+	/*
+	 * The mmp_write_done() function calculates mmp_delay based on the
+	 * prior value of mmp_delay and the elapsed time since the last write.
+	 * For the first mmp write, there is no "last write", so we start
+	 * with fake, but reasonable, default non-zero values.
+	 */
+	mmp->mmp_delay = MSEC2NSEC(MAX(zfs_multihost_interval,
+	    MMP_MIN_INTERVAL)) / MAX(vdev_count_leaves(spa), 1);
+	mmp->mmp_last_write = gethrtime() - mmp->mmp_delay;
+
+	while (!mmp->mmp_thread_exiting) {
+		uint64_t mmp_fail_intervals = zfs_multihost_fail_intervals;
+		uint64_t mmp_interval = MSEC2NSEC(
+		    MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
+		boolean_t suspended = spa_suspended(spa);
+		boolean_t multihost = spa_multihost(spa);
+		hrtime_t next_time;
+
+		if (multihost)
+			next_time = gethrtime() + mmp_interval /
+			    MAX(vdev_count_leaves(spa), 1);
+		else
+			next_time = gethrtime() +
+			    MSEC2NSEC(MMP_DEFAULT_INTERVAL);
+
+		/*
+		 * MMP off => on, or suspended => !suspended:
+		 * No writes occurred recently.  Update mmp_last_write to give
+		 * us some time to try.
+		 */
+		if ((!last_spa_multihost && multihost) ||
+		    (last_spa_suspended && !suspended)) {
+			mutex_enter(&mmp->mmp_io_lock);
+			mmp->mmp_last_write = gethrtime();
+			mutex_exit(&mmp->mmp_io_lock);
+		}
+
+		/*
+		 * MMP on => off:
+		 * mmp_delay == 0 tells importing node to skip activity check.
+		 */
+		if (last_spa_multihost && !multihost) {
+			mutex_enter(&mmp->mmp_io_lock);
+			mmp->mmp_delay = 0;
+			mutex_exit(&mmp->mmp_io_lock);
+		}
+		last_spa_multihost = multihost;
+		last_spa_suspended = suspended;
+
+		/*
+		 * Smooth max_fail_ns when its factors are decreased, because
+		 * making (max_fail_ns < mmp_interval) results in the pool being
+		 * immediately suspended before writes can occur at the new
+		 * higher frequency.
+		 */
+		if ((mmp_interval * mmp_fail_intervals) < max_fail_ns) {
+			max_fail_ns = ((31 * max_fail_ns) + (mmp_interval *
+			    mmp_fail_intervals)) / 32;
+		} else {
+			max_fail_ns = mmp_interval * mmp_fail_intervals;
+		}
+
+		/*
+		 * Suspend the pool if no MMP write has succeeded in over
+		 * mmp_interval * mmp_fail_intervals nanoseconds.
+		 */
+		if (!suspended && mmp_fail_intervals && multihost &&
+		    (gethrtime() - mmp->mmp_last_write) > max_fail_ns) {
+			cmn_err(CE_WARN, "MMP writes to pool '%s' have not "
+			    "succeeded in over %llus; suspending pool",
+			    spa_name(spa),
+			    NSEC2SEC(gethrtime() - mmp->mmp_last_write));
+			zio_suspend(spa, NULL, ZIO_SUSPEND_MMP);
+		}
+
+		if (multihost && !suspended)
+			mmp_write_uberblock(spa);
+
+		CALLB_CPR_SAFE_BEGIN(&cpr);
+		(void) cv_timedwait_sig_hrtime(&mmp->mmp_thread_cv,
+		    &mmp->mmp_thread_lock, next_time);
+		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
+	}
+
+	/* Outstanding writes are allowed to complete. */
+	if (mmp->mmp_zio_root)
+		zio_wait(mmp->mmp_zio_root);
+
+	mmp->mmp_zio_root = NULL;
+	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
+}
+
+/*
+ * Signal the MMP thread to wake it when it is sleeping on its cv.  Used
+ * when some module parameter has changed and we want the thread to know
+ * about it.  Only signal if the pool is active and the mmp thread is
+ * running; otherwise there is no thread to wake.
+ */
+static void
+mmp_signal_thread(spa_t *spa)
+{
+	mmp_thread_t *mmp = &spa->spa_mmp;
+
+	mutex_enter(&mmp->mmp_thread_lock);
+	if (mmp->mmp_thread)
+		cv_broadcast(&mmp->mmp_thread_cv);
+	mutex_exit(&mmp->mmp_thread_lock);
+}
+
+void
+mmp_signal_all_threads(void)
+{
+	spa_t *spa = NULL;
+
+	mutex_enter(&spa_namespace_lock);
+	while ((spa = spa_next(spa))) {
+		if (spa->spa_state == POOL_STATE_ACTIVE)
+			mmp_signal_thread(spa);
+	}
+	mutex_exit(&spa_namespace_lock);
+}
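
mmp_write_uberblock() above confines heartbeat writes to the final
MMP_BLOCKS_PER_LABEL slots of the uberblock ring in a randomly chosen label,
so the uberblocks written by txg_sync are never overwritten.  A standalone
sketch of that offset arithmetic; the ring geometry and the
MMP_BLOCKS_PER_LABEL value below are illustrative assumptions (the real
numbers come from the VDEV_UBERBLOCK_* macros and the newly added mmp.h):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define	MMP_BLOCKS_PER_LABEL	1	/* assumed value for illustration */

int
main(void)
{
	/* Illustrative stand-ins for VDEV_UBERBLOCK_COUNT/SIZE: */
	uint64_t ub_count = 128;	/* uberblock slots per label */
	uint64_t ub_size = 1024;	/* bytes per slot */

	/* Pick one of the last MMP_BLOCKS_PER_LABEL slots (seeding omitted): */
	uint64_t slot = ub_count - MMP_BLOCKS_PER_LABEL +
	    ((uint64_t)rand() % MMP_BLOCKS_PER_LABEL);
	uint64_t offset = slot * ub_size;

	(void) printf("MMP heartbeat -> slot %llu, byte offset %llu\n",
	    (unsigned long long)slot, (unsigned long long)offset);
	return (0);
}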

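The smoothing in mmp_delay_update() above weights the previous average 127:1
against the newest sample, so mmp_delay jumps immediately on a slow write but
decays only gradually when writes speed up, and it is floored at the expected
per-leaf write period.  A standalone sketch with made-up sample values:

#include <stdio.h>
#include <stdint.h>

#define	MSEC2NSEC(m)	((uint64_t)(m) * 1000000ULL)

int
main(void)
{
	uint64_t mmp_delay = MSEC2NSEC(1000);	  /* prior average: 1 s */
	uint64_t min_delay = MSEC2NSEC(1000) / 4; /* 1 s interval, 4 leaves */
	uint64_t delay = MSEC2NSEC(10);		  /* newest sample: 10 ms */

	if (delay > mmp_delay) {
		/* Spikes are taken as-is, never averaged away. */
		mmp_delay = delay;
	} else {
		/* Fast samples decay slowly, floored at min_delay. */
		uint64_t avg = (delay + mmp_delay * 127) / 128;
		mmp_delay = (avg > min_delay) ? avg : min_delay;
	}
	(void) printf("new mmp_delay = %llu ns\n",
	    (unsigned long long)mmp_delay);
	return (0);
}

Here a single 10 ms sample against a 1 s average yields (10 + 127 * 1000) /
128, roughly 992 ms: one fast write barely moves the average, which keeps the
importing host's wait estimate conservative.
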
Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/spa.c
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/spa.c	Wed Nov  6 08:44:35 2019	(r354377)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/spa.c	Wed Nov  6 08:46:24 2019	(r354378)
@@ -57,6 +57,7 @@
 #include <sys/vdev_initialize.h>
 #include <sys/metaslab.h>
 #include <sys/metaslab_impl.h>
+#include <sys/mmp.h>
 #include <sys/uberblock_impl.h>
 #include <sys/txg.h>
 #include <sys/avl.h>
@@ -546,6 +547,16 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
 				error = SET_ERROR(EINVAL);
 			break;
 
+		case ZPOOL_PROP_MULTIHOST:
+			error = nvpair_value_uint64(elem, &intval);
+			if (!error && intval > 1)
+				error = SET_ERROR(EINVAL);
+
+			if (!error && !spa_get_hostid())
+				error = SET_ERROR(ENOTSUP);
+
+			break;
+
 		case ZPOOL_PROP_BOOTFS:
 			/*
 			 * If the pool version is less than SPA_VERSION_BOOTFS,
@@ -1348,6 +1359,9 @@ spa_unload(spa_t *spa)
 		spa_config_exit(spa, SCL_ALL, spa);
 	}
 
+	if (spa->spa_mmp.mmp_thread)
+		mmp_thread_stop(spa);
+
 	/*
 	 * Wait for any outstanding async I/O to complete.
 	 */
@@ -2298,7 +2312,206 @@ vdev_count_verify_zaps(vdev_t *vd)
 	return (total);
 }
 
+/*
+ * Determine whether the activity check is required.
+ */
+static boolean_t
+spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
+    nvlist_t *config)
+{
+	uint64_t state = 0;
+	uint64_t hostid = 0;
+	uint64_t tryconfig_txg = 0;
+	uint64_t tryconfig_timestamp = 0;
+	nvlist_t *nvinfo;
+
+	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
+		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
+		(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
+		    &tryconfig_txg);
+		(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
+		    &tryconfig_timestamp);
+	}
+
+	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
+
+	/*
+	 * Disable the MMP activity check.  This is used by zdb, which
+	 * is intended to be used on potentially active pools.
+	 */
+	if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
+		return (B_FALSE);
+
+	/*
+	 * Skip the activity check when the MMP feature is disabled.
+	 */
+	if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
+		return (B_FALSE);
+	/*
+	 * If the tryconfig_* values are nonzero, they are the results of an
+	 * earlier tryimport.  If they match the uberblock we just found, then
+	 * the pool has not changed and we return false so we do not test a
+	 * second time.
+	 */
+	if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
+	    tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp)
+		return (B_FALSE);
+
+	/*
+	 * Allow the activity check to be skipped when importing the pool
+	 * on the same host which last imported it.  Since the hostid from
+	 * the configuration may be stale, use the one read from the label.
+	 */
+	if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
+		hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
+
+	if (hostid == spa_get_hostid())
+		return (B_FALSE);
+
+	/*
+	 * Skip the activity test when the pool was cleanly exported.
+	 */
+	if (state != POOL_STATE_ACTIVE)
+		return (B_FALSE);
+
+	return (B_TRUE);
+}
+
+/*
+ * Perform the import activity check.  If the user canceled the import or
+ * we detected activity, then fail.
+ */
 static int
+spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
+{
+	uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
+	uint64_t txg = ub->ub_txg;
+	uint64_t timestamp = ub->ub_timestamp;
+	uint64_t import_delay = NANOSEC;
+	hrtime_t import_expire;
+	nvlist_t *mmp_label = NULL;
+	vdev_t *rvd = spa->spa_root_vdev;
+	kcondvar_t cv;
+	kmutex_t mtx;
+	int error = 0;
+
+	cv_init(&cv, NULL, CV_DEFAULT, NULL);
+	mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
+	mutex_enter(&mtx);
+
+	/*
+	 * If ZPOOL_CONFIG_MMP_TXG is present, an activity check was performed
+	 * during the earlier tryimport.  If the txg recorded there is 0 then
+	 * the pool is known to be active on another host.
+	 *
+	 * Otherwise, the pool might be in use on another node.  Check for
+	 * changes in the uberblocks on disk if necessary.
+	 */
+	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
+		nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
+		    ZPOOL_CONFIG_LOAD_INFO);
+
+		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
+		    fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
+			vdev_uberblock_load(rvd, ub, &mmp_label);
+			error = SET_ERROR(EREMOTEIO);
+			goto out;
+		}
+	}
+
+	/*
+	 * Preferentially use the zfs_multihost_interval from the node which
+	 * last imported the pool.  This value is stored in an MMP uberblock as:
+	 *
+	 * ub_mmp_delay * vdev_count_leaves() == zfs_multihost_interval
+	 */
+	if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay)
+		import_delay = MAX(import_delay, import_intervals *
+		    ub->ub_mmp_delay * MAX(vdev_count_leaves(spa), 1));
+
+	/* Apply a floor using the local default values. */
+	import_delay = MAX(import_delay, import_intervals *
+	    MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL)));
+
+	zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu import_intervals=%u "
+	    "leaves=%u", import_delay, ub->ub_mmp_delay, import_intervals,
+	    vdev_count_leaves(spa));
+
+	/* Add a small random factor in case of simultaneous imports (0-25%) */
+	import_expire = gethrtime() + import_delay +
+	    (import_delay * spa_get_random(250) / 1000);
+
+	while (gethrtime() < import_expire) {
+		vdev_uberblock_load(rvd, ub, &mmp_label);
+
+		if (txg != ub->ub_txg || timestamp != ub->ub_timestamp) {
+			error = SET_ERROR(EREMOTEIO);
+			break;
+		}
+
+		if (mmp_label) {
+			nvlist_free(mmp_label);
+			mmp_label = NULL;
+		}
+
+		error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
+		if (error != -1) {
+			error = SET_ERROR(EINTR);
+			break;
+		}
+		error = 0;
+	}
+
+out:
+	mutex_exit(&mtx);
+	mutex_destroy(&mtx);
+	cv_destroy(&cv);
+
+	/*
+	 * If the pool is determined to be active store the status in the
+	 * spa->spa_load_info nvlist.  If the remote hostname or hostid are
+	 * available from the configuration read from disk, store them as well.
+	 * This allows 'zpool import' to generate a more useful message.
+	 *
+	 * ZPOOL_CONFIG_MMP_STATE    - observed pool status (mandatory)
+	 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
+	 * ZPOOL_CONFIG_MMP_HOSTID   - hostid from the active pool
+	 */
+	if (error == EREMOTEIO) {
+		char *hostname = "<unknown>";
+		uint64_t hostid = 0;
+
+		if (mmp_label) {
+			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
+				hostname = fnvlist_lookup_string(mmp_label,
+				    ZPOOL_CONFIG_HOSTNAME);
+				fnvlist_add_string(spa->spa_load_info,
+				    ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
+			}
+
+			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
+				hostid = fnvlist_lookup_uint64(mmp_label,
+				    ZPOOL_CONFIG_HOSTID);
+				fnvlist_add_uint64(spa->spa_load_info,
+				    ZPOOL_CONFIG_MMP_HOSTID, hostid);
+			}
+		}
+
+		fnvlist_add_uint64(spa->spa_load_info,
+		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
+		fnvlist_add_uint64(spa->spa_load_info,
+		    ZPOOL_CONFIG_MMP_TXG, 0);
+
+		error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
+	}
+
+	if (mmp_label)
+		nvlist_free(mmp_label);
+
+	return (error);
+}
+
+static int
 spa_verify_host(spa_t *spa, nvlist_t *mos_config)
 {
 	uint64_t hostid;
@@ -2548,6 +2761,7 @@ spa_ld_select_uberblock(spa_t *spa, spa_import_type_t 
 	vdev_t *rvd = spa->spa_root_vdev;
 	nvlist_t *label;
 	uberblock_t *ub = &spa->spa_uberblock;
+	boolean_t activity_check = B_FALSE;
 
 	/*
 	 * If we are opening the checkpointed state of the pool by
@@ -2590,6 +2804,34 @@ spa_ld_select_uberblock(spa_t *spa, spa_import_type_t 
 	    (u_longlong_t)ub->ub_txg);
 
 	/*
+	 * For pools which have the multihost property on, determine if the
+	 * pool is truly inactive and can be safely imported.  Prevent
+	 * hosts which don't have a hostid set from importing the pool.
+	 */
+	activity_check = spa_activity_check_required(spa, ub, label,
+	    spa->spa_config);
+	if (activity_check) {
+		if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
+		    spa_get_hostid() == 0) {
+			nvlist_free(label);
+			fnvlist_add_uint64(spa->spa_load_info,
+			    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
+			return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
+		}
+
+		int error = spa_activity_check(spa, ub, spa->spa_config);
+		if (error) {
+			nvlist_free(label);
+			return (error);
+		}
+
+		fnvlist_add_uint64(spa->spa_load_info,
+		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
+		fnvlist_add_uint64(spa->spa_load_info,
+		    ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
+	}
+
+	/*
 	 * If the pool has an unsupported version we can't open it.
 	 */
 	if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
@@ -3144,6 +3386,7 @@ spa_ld_get_props(spa_t *spa)
 		spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
 		spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
 		spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
+		spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
 		spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
 		    &spa->spa_dedup_ditto);
 
@@ -3232,6 +3475,18 @@ spa_ld_load_vdev_metadata(spa_t *spa)
 	vdev_t *rvd = spa->spa_root_vdev;
 
 	/*
+	 * If the 'multihost' property is set, then never allow a pool to
+	 * be imported when the system hostid is zero.  The exception to
+	 * this rule is zdb, which is always allowed to access pools.
+	 */
+	if (spa_multihost(spa) && spa_get_hostid() == 0 &&
+	    (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
+		fnvlist_add_uint64(spa->spa_load_info,
+		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
+		return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
+	}
+
+	/*
 	 * If the 'autoreplace' property is set, then post a resource notifying
 	 * the ZFS DE that it should not issue any faults for unopenable
 	 * devices.  We also iterate over the vdevs, and post a sysevent for any
@@ -3831,6 +4086,7 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, char
 		 */
 		spa->spa_sync_on = B_TRUE;
 		txg_sync_start(spa->spa_dsl_pool);
+		mmp_thread_start(spa);
 
 		/*
 		 * Wait for all claims to sync.  We sync up to the highest
@@ -4354,10 +4610,14 @@ spa_get_stats(const char *name, nvlist_t **config,
 			    ZPOOL_CONFIG_ERRCOUNT,
 			    spa_get_errlog_size(spa)) == 0);
 
-			if (spa_suspended(spa))
+			if (spa_suspended(spa)) {
 				VERIFY(nvlist_add_uint64(*config,
 				    ZPOOL_CONFIG_SUSPENDED,
 				    spa->spa_failmode) == 0);
+				VERIFY(nvlist_add_uint64(*config,
+				    ZPOOL_CONFIG_SUSPENDED_REASON,
+				    spa->spa_suspended) == 0);
+			}
 
 			spa_add_spares(spa, *config);
 			spa_add_l2cache(spa, *config);
@@ -4444,18 +4704,6 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, ui
 			goto out;
 		}
 
-		/*
-		 * The L2ARC currently only supports disk devices in
-		 * kernel context.  For user-level testing, we allow it.
-		 */
-#ifdef _KERNEL

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
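
To make the wait computed by spa_activity_check() (in the spa.c diff above)
concrete: the import must observe no uberblock changes for at least
zfs_multihost_import_intervals intervals, where the interval is the larger of
the local setting and the exporting host's observed ub_mmp_delay scaled by the
leaf-vdev count, and the total is then stretched by a random factor of up to
25%.  A standalone sketch using the defaults described in mmp.c above (1000 ms
interval, 10 import intervals, 100 ms minimum) and made-up pool values:

#include <stdio.h>
#include <stdint.h>

#define	MSEC2NSEC(m)	((uint64_t)(m) * 1000000ULL)
#define	NANOSEC		1000000000ULL
#define	MAX(a, b)	((a) > (b) ? (a) : (b))
#define	MMP_MIN_INTERVAL	100			/* ms */

static uint64_t zfs_multihost_interval = 1000;		/* ms, default */
static uint64_t zfs_multihost_import_intervals = 10;	/* default */

int
main(void)
{
	uint64_t ub_mmp_delay = MSEC2NSEC(250);	/* from the "best" uberblock */
	uint64_t leaves = 4;			/* made-up leaf vdev count */
	uint64_t import_delay = NANOSEC;	/* one-second floor */

	/* Prefer the exporting host's observed per-pool write period... */
	import_delay = MAX(import_delay,
	    zfs_multihost_import_intervals * ub_mmp_delay * leaves);

	/* ...but never less than the locally configured minimum. */
	import_delay = MAX(import_delay, zfs_multihost_import_intervals *
	    MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL)));

	(void) printf("activity test: >= %llu ms, plus a random 0-25%%\n",
	    (unsigned long long)(import_delay / 1000000ULL));
	return (0);
}

Both terms come to 10 seconds here, so a forced import of this pool waits
roughly 10-12.5 seconds, polling the uberblocks about once per second (the
cv_timedwait_sig() with ddi_get_lbolt() + hz in the loop above).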


