Date: Tue, 31 Jul 2018 18:49:11 +0000 (UTC)
From: Alexander Motin <mav@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject: svn commit: r336991 - vendor-sys/illumos/dist/uts/common vendor-sys/illumos/dist/uts/common/fs/zfs vendor-sys/illumos/dist/uts/common/fs/zfs/sys vendor-sys/illumos/dist/uts/common/sys/fs vendor/ill...
Message-ID: <201807311849.w6VInBQN065564@repo.freebsd.org>
Author: mav
Date: Tue Jul 31 18:49:07 2018
New Revision: 336991
URL: https://svnweb.freebsd.org/changeset/base/336991

Log:
  9102 zfs should be able to initialize storage devices

  The first access to a disk block can incur a performance penalty on some
  platforms (e.g. AWS's EBS, VMware VMDKs). Therefore it is recommended that
  volumes be "thick provisioned", where supported by the platform (VMware).
  Thick provisioning is time consuming and often is ignored. If the thick
  provision step is omitted, customers will see suboptimal performance until
  we have written to all parts of the LUN. ZFS should be able to initialize
  any unused storage to remove any first-write penalty that exists.

  illumos/illumos-gate@094e47e980b0796b94b1b8f51f462a64d246e516

  Reviewed by: John Wren Kennedy <john.kennedy@delphix.com>
  Reviewed by: Matthew Ahrens <mahrens@delphix.com>
  Reviewed by: Pavel Zakharov <pavel.zakharov@delphix.com>
  Reviewed by: Prakash Surya <prakash.surya@delphix.com>
  Approved by: Richard Lowe <richlowe@richlowe.net>

Modified:
  vendor/illumos/dist/cmd/zpool/zpool_main.c
  vendor/illumos/dist/cmd/ztest/ztest.c
  vendor/illumos/dist/lib/libzfs/common/libzfs.h
  vendor/illumos/dist/lib/libzfs/common/libzfs_pool.c
  vendor/illumos/dist/lib/libzfs/common/libzfs_util.c
  vendor/illumos/dist/lib/libzfs_core/common/libzfs_core.c
  vendor/illumos/dist/lib/libzfs_core/common/libzfs_core.h
  vendor/illumos/dist/man/man1m/zpool.1m

Changes in other areas also in this revision:
Modified:
  vendor-sys/illumos/dist/uts/common/Makefile.files
  vendor-sys/illumos/dist/uts/common/fs/zfs/metaslab.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/spa.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/spa_misc.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/metaslab_impl.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/spa.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/vdev_impl.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/zio_priority.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_disk.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_file.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_indirect.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_mirror.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_missing.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_queue.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_raidz.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_removal.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_root.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/zfs_ioctl.c
  vendor-sys/illumos/dist/uts/common/sys/fs/zfs.h

Modified: vendor/illumos/dist/cmd/zpool/zpool_main.c ============================================================================== --- vendor/illumos/dist/cmd/zpool/zpool_main.c Tue Jul 31 18:32:57 2018 (r336990) +++ vendor/illumos/dist/cmd/zpool/zpool_main.c Tue Jul 31 18:49:07 2018 (r336991) @@ -84,6 +84,7 @@ static int zpool_do_detach(int, char **); static int zpool_do_replace(int, char **); static int zpool_do_split(int, char **); +static int zpool_do_initialize(int, char **); static int zpool_do_scrub(int, char **); static int zpool_do_import(int, char **); @@ -133,6 +134,7 @@ typedef enum { HELP_ONLINE, HELP_REPLACE, HELP_REMOVE, + HELP_INITIALIZE, HELP_SCRUB, HELP_STATUS, HELP_UPGRADE, @@ -184,6 +186,7 @@ static zpool_command_t command_table[] = { { "replace", zpool_do_replace, HELP_REPLACE }, { "split", zpool_do_split, HELP_SPLIT }, { NULL }, + { "initialize", zpool_do_initialize, HELP_INITIALIZE }, { "scrub", zpool_do_scrub, HELP_SCRUB }, { NULL }, { "import", 
zpool_do_import, HELP_IMPORT }, @@ -257,6 +260,8 @@ get_usage(zpool_help_t idx) return (gettext("\tremove [-nps] <pool> <device> ...\n")); case HELP_REOPEN: return (gettext("\treopen <pool>\n")); + case HELP_INITIALIZE: + return (gettext("\tinitialize [-cs] <pool> [<device> ...]\n")); case HELP_SCRUB: return (gettext("\tscrub [-s | -p] <pool> ...\n")); case HELP_STATUS: @@ -1589,6 +1594,43 @@ print_status_config(zpool_handle_t *zhp, const char *n "resilvering" : "repairing"); } + if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE || + vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED || + vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) && + !vs->vs_scan_removing) { + char zbuf[1024]; + char tbuf[256]; + struct tm zaction_ts; + + time_t t = vs->vs_initialize_action_time; + int initialize_pct = 100; + if (vs->vs_initialize_state != VDEV_INITIALIZE_COMPLETE) { + initialize_pct = (vs->vs_initialize_bytes_done * 100 / + (vs->vs_initialize_bytes_est + 1)); + } + + (void) localtime_r(&t, &zaction_ts); + (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts); + + switch (vs->vs_initialize_state) { + case VDEV_INITIALIZE_SUSPENDED: + (void) snprintf(zbuf, sizeof (zbuf), + ", suspended, started at %s", tbuf); + break; + case VDEV_INITIALIZE_ACTIVE: + (void) snprintf(zbuf, sizeof (zbuf), + ", started at %s", tbuf); + break; + case VDEV_INITIALIZE_COMPLETE: + (void) snprintf(zbuf, sizeof (zbuf), + ", completed at %s", tbuf); + break; + } + + (void) printf(gettext(" (%d%% initialized%s)"), + initialize_pct, zbuf); + } + (void) printf("\n"); for (c = 0; c < children; c++) { @@ -4163,6 +4205,119 @@ zpool_do_scrub(int argc, char **argv) } return (for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb)); +} + +static void +zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res) +{ + uint_t children = 0; + nvlist_t **child; + uint_t i; + + (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, + &child, &children); + + if (children == 0) { + char *path = zpool_vdev_name(g_zfs, zhp, nvroot, B_FALSE); + fnvlist_add_boolean(res, path); + free(path); + return; + } + + for (i = 0; i < children; i++) { + zpool_collect_leaves(zhp, child[i], res); + } +} + +/* + * zpool initialize [-cs] <pool> [<vdev> ...] + * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool + * if none specified. + * + * -c Cancel. Ends active initializing. + * -s Suspend. Initializing can then be restarted with no flags. 
+ */ +int +zpool_do_initialize(int argc, char **argv) +{ + int c; + char *poolname; + zpool_handle_t *zhp; + nvlist_t *vdevs; + int err = 0; + + struct option long_options[] = { + {"cancel", no_argument, NULL, 'c'}, + {"suspend", no_argument, NULL, 's'}, + {0, 0, 0, 0} + }; + + pool_initialize_func_t cmd_type = POOL_INITIALIZE_DO; + while ((c = getopt_long(argc, argv, "cs", long_options, NULL)) != -1) { + switch (c) { + case 'c': + if (cmd_type != POOL_INITIALIZE_DO) { + (void) fprintf(stderr, gettext("-c cannot be " + "combined with other options\n")); + usage(B_FALSE); + } + cmd_type = POOL_INITIALIZE_CANCEL; + break; + case 's': + if (cmd_type != POOL_INITIALIZE_DO) { + (void) fprintf(stderr, gettext("-s cannot be " + "combined with other options\n")); + usage(B_FALSE); + } + cmd_type = POOL_INITIALIZE_SUSPEND; + break; + case '?': + if (optopt != 0) { + (void) fprintf(stderr, + gettext("invalid option '%c'\n"), optopt); + } else { + (void) fprintf(stderr, + gettext("invalid option '%s'\n"), + argv[optind - 1]); + } + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool name argument\n")); + usage(B_FALSE); + return (-1); + } + + poolname = argv[0]; + zhp = zpool_open(g_zfs, poolname); + if (zhp == NULL) + return (-1); + + vdevs = fnvlist_alloc(); + if (argc == 1) { + /* no individual leaf vdevs specified, so add them all */ + nvlist_t *config = zpool_get_config(zhp, NULL); + nvlist_t *nvroot = fnvlist_lookup_nvlist(config, + ZPOOL_CONFIG_VDEV_TREE); + zpool_collect_leaves(zhp, nvroot, vdevs); + } else { + int i; + for (i = 1; i < argc; i++) { + fnvlist_add_boolean(vdevs, argv[i]); + } + } + + err = zpool_initialize(zhp, cmd_type, vdevs); + + fnvlist_free(vdevs); + zpool_close(zhp); + + return (err); } typedef struct status_cbdata { Modified: vendor/illumos/dist/cmd/ztest/ztest.c ============================================================================== --- vendor/illumos/dist/cmd/ztest/ztest.c Tue Jul 31 18:32:57 2018 (r336990) +++ vendor/illumos/dist/cmd/ztest/ztest.c Tue Jul 31 18:49:07 2018 (r336991) @@ -103,6 +103,7 @@ #include <sys/zil_impl.h> #include <sys/vdev_impl.h> #include <sys/vdev_file.h> +#include <sys/vdev_initialize.h> #include <sys/spa_impl.h> #include <sys/metaslab_impl.h> #include <sys/dsl_prop.h> @@ -346,6 +347,7 @@ ztest_func_t ztest_spa_upgrade; ztest_func_t ztest_device_removal; ztest_func_t ztest_remap_blocks; ztest_func_t ztest_spa_checkpoint_create_discard; +ztest_func_t ztest_initialize; uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */ uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */ @@ -389,7 +391,8 @@ ztest_info_t ztest_info[] = { &ztest_opts.zo_vdevtime }, { ztest_device_removal, 1, &zopt_sometimes }, { ztest_remap_blocks, 1, &zopt_sometimes }, - { ztest_spa_checkpoint_create_discard, 1, &zopt_rarely } + { ztest_spa_checkpoint_create_discard, 1, &zopt_rarely }, + { ztest_initialize, 1, &zopt_sometimes } }; #define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t)) @@ -5468,6 +5471,97 @@ ztest_spa_rename(ztest_ds_t *zd, uint64_t id) umem_free(newname, strlen(newname) + 1); rw_exit(&ztest_name_lock); +} + +static vdev_t * +ztest_random_concrete_vdev_leaf(vdev_t *vd) +{ + if (vd == NULL) + return (NULL); + + if (vd->vdev_children == 0) + return (vd); + + vdev_t *eligible[vd->vdev_children]; + int eligible_idx = 0, i; + for (i = 0; i < vd->vdev_children; i++) { + vdev_t *cvd = vd->vdev_child[i]; + if (cvd->vdev_top->vdev_removing) + continue; + 
if (cvd->vdev_children > 0 || + (vdev_is_concrete(cvd) && !cvd->vdev_detached)) { + eligible[eligible_idx++] = cvd; + } + } + VERIFY(eligible_idx > 0); + + uint64_t child_no = ztest_random(eligible_idx); + return (ztest_random_concrete_vdev_leaf(eligible[child_no])); +} + +/* ARGSUSED */ +void +ztest_initialize(ztest_ds_t *zd, uint64_t id) +{ + spa_t *spa = ztest_spa; + int error = 0; + + mutex_enter(&ztest_vdev_lock); + + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + + /* Random leaf vdev */ + vdev_t *rand_vd = ztest_random_concrete_vdev_leaf(spa->spa_root_vdev); + if (rand_vd == NULL) { + spa_config_exit(spa, SCL_VDEV, FTAG); + mutex_exit(&ztest_vdev_lock); + return; + } + + /* + * The random vdev we've selected may change as soon as we + * drop the spa_config_lock. We create local copies of things + * we're interested in. + */ + uint64_t guid = rand_vd->vdev_guid; + char *path = strdup(rand_vd->vdev_path); + boolean_t active = rand_vd->vdev_initialize_thread != NULL; + + zfs_dbgmsg("vd %p, guid %llu", rand_vd, guid); + spa_config_exit(spa, SCL_VDEV, FTAG); + + uint64_t cmd = ztest_random(POOL_INITIALIZE_FUNCS); + error = spa_vdev_initialize(spa, guid, cmd); + switch (cmd) { + case POOL_INITIALIZE_CANCEL: + if (ztest_opts.zo_verbose >= 4) { + (void) printf("Cancel initialize %s", path); + if (!active) + (void) printf(" failed (no initialize active)"); + (void) printf("\n"); + } + break; + case POOL_INITIALIZE_DO: + if (ztest_opts.zo_verbose >= 4) { + (void) printf("Start initialize %s", path); + if (active && error == 0) + (void) printf(" failed (already active)"); + else if (error != 0) + (void) printf(" failed (error %d)", error); + (void) printf("\n"); + } + break; + case POOL_INITIALIZE_SUSPEND: + if (ztest_opts.zo_verbose >= 4) { + (void) printf("Suspend initialize %s", path); + if (!active) + (void) printf(" failed (no initialize active)"); + (void) printf("\n"); + } + break; + } + free(path); + mutex_exit(&ztest_vdev_lock); } /* Modified: vendor/illumos/dist/lib/libzfs/common/libzfs.h ============================================================================== --- vendor/illumos/dist/lib/libzfs/common/libzfs.h Tue Jul 31 18:32:57 2018 (r336990) +++ vendor/illumos/dist/lib/libzfs/common/libzfs.h Tue Jul 31 18:49:07 2018 (r336991) @@ -136,6 +136,9 @@ typedef enum zfs_error { EZFS_NO_CHECKPOINT, /* pool has no checkpoint */ EZFS_DEVRM_IN_PROGRESS, /* a device is currently being removed */ EZFS_VDEV_TOO_BIG, /* a device is too big to be used */ + EZFS_TOOMANY, /* argument list too long */ + EZFS_INITIALIZING, /* currently initializing */ + EZFS_NO_INITIALIZE, /* no active initialize */ EZFS_UNKNOWN } zfs_error_t; @@ -260,6 +263,8 @@ typedef struct splitflags { * Functions to manipulate pool and vdev state */ extern int zpool_scan(zpool_handle_t *, pool_scan_func_t, pool_scrub_cmd_t); +extern int zpool_initialize(zpool_handle_t *, pool_initialize_func_t, + nvlist_t *); extern int zpool_clear(zpool_handle_t *, const char *, nvlist_t *); extern int zpool_reguid(zpool_handle_t *); extern int zpool_reopen(zpool_handle_t *); Modified: vendor/illumos/dist/lib/libzfs/common/libzfs_pool.c ============================================================================== --- vendor/illumos/dist/lib/libzfs/common/libzfs_pool.c Tue Jul 31 18:32:57 2018 (r336990) +++ vendor/illumos/dist/lib/libzfs/common/libzfs_pool.c Tue Jul 31 18:49:07 2018 (r336991) @@ -1969,6 +1969,100 @@ zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, } } +static int +xlate_init_err(int err) +{ + switch (err) { + 
case ENODEV: + return (EZFS_NODEVICE); + case EINVAL: + case EROFS: + return (EZFS_BADDEV); + case EBUSY: + return (EZFS_INITIALIZING); + case ESRCH: + return (EZFS_NO_INITIALIZE); + } + return (err); +} + +/* + * Begin, suspend, or cancel the initialization (initializing of all free + * blocks) for the given vdevs in the given pool. + */ +int +zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, + nvlist_t *vds) +{ + char msg[1024]; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + nvlist_t *errlist; + + /* translate vdev names to guids */ + nvlist_t *vdev_guids = fnvlist_alloc(); + nvlist_t *guids_to_paths = fnvlist_alloc(); + boolean_t spare, cache; + nvlist_t *tgt; + nvpair_t *elem; + + for (elem = nvlist_next_nvpair(vds, NULL); elem != NULL; + elem = nvlist_next_nvpair(vds, elem)) { + char *vd_path = nvpair_name(elem); + tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache, NULL); + + if ((tgt == NULL) || cache || spare) { + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot initialize '%s'"), + vd_path); + int err = (tgt == NULL) ? EZFS_NODEVICE : + (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE); + fnvlist_free(vdev_guids); + fnvlist_free(guids_to_paths); + return (zfs_error(hdl, err, msg)); + } + + uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); + fnvlist_add_uint64(vdev_guids, vd_path, guid); + + (void) snprintf(msg, sizeof (msg), "%llu", guid); + fnvlist_add_string(guids_to_paths, msg, vd_path); + } + + int err = lzc_initialize(zhp->zpool_name, cmd_type, vdev_guids, + &errlist); + fnvlist_free(vdev_guids); + + if (err == 0) { + fnvlist_free(guids_to_paths); + return (0); + } + + nvlist_t *vd_errlist = NULL; + if (errlist != NULL) { + vd_errlist = fnvlist_lookup_nvlist(errlist, + ZPOOL_INITIALIZE_VDEVS); + } + + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "operation failed")); + + for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL; + elem = nvlist_next_nvpair(vd_errlist, elem)) { + int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem)); + char *path = fnvlist_lookup_string(guids_to_paths, + nvpair_name(elem)); + (void) zfs_error_fmt(hdl, vd_error, "cannot initialize '%s'", + path); + } + + fnvlist_free(guids_to_paths); + if (vd_errlist != NULL) + return (-1); + + return (zpool_standard_error(hdl, err, msg)); +} + /* * This provides a very minimal check whether a given string is likely a * c#t#d# style string. 
Users of this are expected to do their own Modified: vendor/illumos/dist/lib/libzfs/common/libzfs_util.c ============================================================================== --- vendor/illumos/dist/lib/libzfs/common/libzfs_util.c Tue Jul 31 18:32:57 2018 (r336990) +++ vendor/illumos/dist/lib/libzfs/common/libzfs_util.c Tue Jul 31 18:49:07 2018 (r336991) @@ -249,6 +249,13 @@ libzfs_error_description(libzfs_handle_t *hdl) return (dgettext(TEXT_DOMAIN, "device removal in progress")); case EZFS_VDEV_TOO_BIG: return (dgettext(TEXT_DOMAIN, "device exceeds supported size")); + case EZFS_TOOMANY: + return (dgettext(TEXT_DOMAIN, "argument list too long")); + case EZFS_INITIALIZING: + return (dgettext(TEXT_DOMAIN, "currently initializing")); + case EZFS_NO_INITIALIZE: + return (dgettext(TEXT_DOMAIN, "there is no active " + "initialization")); case EZFS_UNKNOWN: return (dgettext(TEXT_DOMAIN, "unknown error")); default: Modified: vendor/illumos/dist/lib/libzfs_core/common/libzfs_core.c ============================================================================== --- vendor/illumos/dist/lib/libzfs_core/common/libzfs_core.c Tue Jul 31 18:32:57 2018 (r336990) +++ vendor/illumos/dist/lib/libzfs_core/common/libzfs_core.c Tue Jul 31 18:49:07 2018 (r336991) @@ -1038,3 +1038,40 @@ lzc_channel_program_nosync(const char *pool, const cha return (lzc_channel_program_impl(pool, program, B_FALSE, timeout, memlimit, argnvl, outnvl)); } + +/* + * Changes initializing state. + * + * vdevs should be a list of (<key>, guid) where guid is a uint64 vdev GUID. + * The key is ignored. + * + * If there are errors related to vdev arguments, per-vdev errors are returned + * in an nvlist with the key "vdevs". Each error is a (guid, errno) pair where + * guid is stringified with PRIu64, and errno is one of the following as + * an int64_t: + * - ENODEV if the device was not found + * - EINVAL if the devices is not a leaf or is not concrete (e.g. 
missing) + * - EROFS if the device is not writeable + * - EBUSY start requested but the device is already being initialized + * - ESRCH cancel/suspend requested but device is not being initialized + * + * If the errlist is empty, then return value will be: + * - EINVAL if one or more arguments was invalid + * - Other spa_open failures + * - 0 if the operation succeeded + */ +int +lzc_initialize(const char *poolname, pool_initialize_func_t cmd_type, + nvlist_t *vdevs, nvlist_t **errlist) +{ + int error; + nvlist_t *args = fnvlist_alloc(); + fnvlist_add_uint64(args, ZPOOL_INITIALIZE_COMMAND, (uint64_t)cmd_type); + fnvlist_add_nvlist(args, ZPOOL_INITIALIZE_VDEVS, vdevs); + + error = lzc_ioctl(ZFS_IOC_POOL_INITIALIZE, poolname, args, errlist); + + fnvlist_free(args); + + return (error); +} Modified: vendor/illumos/dist/lib/libzfs_core/common/libzfs_core.h ============================================================================== --- vendor/illumos/dist/lib/libzfs_core/common/libzfs_core.h Tue Jul 31 18:32:57 2018 (r336990) +++ vendor/illumos/dist/lib/libzfs_core/common/libzfs_core.h Tue Jul 31 18:49:07 2018 (r336991) @@ -31,7 +31,9 @@ #include <libnvpair.h> #include <sys/param.h> #include <sys/types.h> +#include <sys/fs/zfs.h> + #ifdef __cplusplus extern "C" { #endif @@ -56,6 +58,8 @@ int lzc_destroy_snaps(nvlist_t *, boolean_t, nvlist_t int lzc_bookmark(nvlist_t *, nvlist_t **); int lzc_get_bookmarks(const char *, nvlist_t *, nvlist_t **); int lzc_destroy_bookmarks(nvlist_t *, nvlist_t **); +int lzc_initialize(const char *, pool_initialize_func_t, nvlist_t *, + nvlist_t **); int lzc_snaprange_space(const char *, const char *, uint64_t *); Modified: vendor/illumos/dist/man/man1m/zpool.1m ============================================================================== --- vendor/illumos/dist/man/man1m/zpool.1m Tue Jul 31 18:32:57 2018 (r336990) +++ vendor/illumos/dist/man/man1m/zpool.1m Tue Jul 31 18:49:07 2018 (r336991) @@ -105,6 +105,11 @@ .Ar pool Ns | Ns Ar id .Op Ar newpool .Nm +.Cm initialize +.Op Fl cs +.Ar pool +.Op Ar device Ns ... +.Nm .Cm iostat .Op Fl v .Op Fl T Sy u Ns | Ns Sy d @@ -1333,6 +1338,32 @@ mounting option is enabled. In this case, the checkpointed state of the pool is opened and an administrator can see how the pool would look like if they were to fully rewind. +.El +.It Xo +.Nm +.Cm initialize +.Op Fl cs +.Ar pool +.Op Ar device Ns ... +.Xc +Begins initializing by writing to all unallocated regions on the specified +devices, or all eligible devices in the pool if no individual devices are +specified. +Only leaf data or log devices may be initialized. +.Bl -tag -width Ds +.It Fl c, -cancel +Cancel initializing on the specified devices, or all eligible devices if none +are specified. +If one or more target devices are invalid or are not currently being +initialized, the command will fail and no cancellation will occur on any device. +.It Fl s -suspend +Suspend initializing on the specified devices, or all eligible devices if none +are specified. +If one or more target devices are invalid or are not currently being +initialized, the command will fail and no suspension will occur on any device. +Initializing can then be resumed by running +.Nm zpool Cm initialize +with no flags on the relevant target devices. .El .It Xo .Nm
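
As a usage note: the new subcommand is driven from the command line as documented in the zpool.1m change above, e.g. "zpool initialize tank" to begin initializing every eligible leaf vdev in the pool, "zpool initialize -s tank" to suspend, and "zpool initialize -c tank" to cancel. Programs can reach the same machinery through libzfs. The sketch below is a minimal, hypothetical consumer modeled on zpool_do_initialize() above; it is not part of this commit, and the pool name "tank", the device name "c1t0d0", and the header/link details are illustrative assumptions.

    /*
     * Minimal sketch (not from this commit): start initializing a single
     * leaf vdev through libzfs.  Assumes illumos-style headers and linking
     * with -lzfs -lnvpair; "tank" and "c1t0d0" are placeholder names.
     */
    #include <stdio.h>
    #include <libzfs.h>      /* zpool_open(), zpool_initialize(), POOL_INITIALIZE_* */
    #include <libnvpair.h>   /* fnvlist_alloc(), fnvlist_add_boolean() */

    int
    main(void)
    {
            libzfs_handle_t *hdl = libzfs_init();
            if (hdl == NULL)
                    return (1);

            zpool_handle_t *zhp = zpool_open(hdl, "tank");
            if (zhp == NULL) {
                    libzfs_fini(hdl);
                    return (1);
            }

            /* The nvlist names the leaf vdevs to act on, keyed by vdev path. */
            nvlist_t *vds = fnvlist_alloc();
            fnvlist_add_boolean(vds, "c1t0d0");

            /* POOL_INITIALIZE_SUSPEND / _CANCEL select the other two actions. */
            int err = zpool_initialize(zhp, POOL_INITIALIZE_DO, vds);
            if (err != 0)
                    (void) fprintf(stderr, "initialize failed: %d\n", err);

            fnvlist_free(vds);
            zpool_close(zhp);
            libzfs_fini(hdl);
            return (err == 0 ? 0 : 1);
    }

At the libzfs_core level the same operation is lzc_initialize(), which takes the pool name, a pool_initialize_func_t command, and an nvlist of vdev GUIDs, and reports per-vdev errors under the "vdevs" key as described in the libzfs_core.c comment above.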