Skip site navigation (1)Skip section navigation (2)
Date:      Sun, 18 Feb 2018 00:25:21 +0000 (UTC)
From:      Alexander Motin <mav@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject:   svn commit: r329492 - stable/11/cddl/contrib/opensolaris/lib/libzfs/common
Message-ID:  <201802180025.w1I0PLbn061544@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: mav
Date: Sun Feb 18 00:25:21 2018
New Revision: 329492
URL: https://svnweb.freebsd.org/changeset/base/329492

Log:
  MFC r328250: MFV r328249:
  8641 "zpool clear" and "zinject" don't work on "spare" or "replacing" vdevs
  
  illumos/illumos-gate@2ba5f978a4f9b02da9db1b8cdd9ea5498eb00ad9
  
  https://www.illumos.org/issues/8641:
  "zpool clear" and "zinject -d" can both operate on specific vdevs, either
  leaf or interior. However, due to an oversight, neither works on a "spare"
  or "replacing" vdev. For example:
  
  sudo zpool create foo raidz1 c1t5000CCA000081D61d0 c1t5000CCA000186235d0 spare c1t5000CCA000094115d0
  sudo zpool replace foo c1t5000CCA000186235d0 c1t5000CCA000094115d0
  $ zpool status foo
  pool: foo
  state: ONLINE
  scan: resilvered 81.5K in 0h0m with 0 errors on Fri Sep 8 10:53:03 2017
  config:
  
  NAME                         STATE     READ WRITE CKSUM
          foo                          ONLINE       0     0     0
            raidz1-0                   ONLINE       0     0     0
              c1t5000CCA000081D61d0    ONLINE       0     0     0
              spare-1                  ONLINE       0     0     0
                c1t5000CCA000186235d0  ONLINE       0     0     0
                c1t5000CCA000094115d0  ONLINE       0     0     0
          spares
            c1t5000CCA000094115d0      INUSE     currently in use
  $ sudo zinject -d spare-1 -A degrade foo
  cannot find device 'spare-1' in pool 'foo'
  $ sudo zpool clear foo spare-1
  cannot clear errors for spare-1: no such device in pool
  
  Even though there was nothing to clear, those commands shouldn't have
  reported an error. By contrast, trying to clear "raidz1-0" works just fine:
  $ sudo zpool clear foo raidz1-0
  
  Reviewed by: Matthew Ahrens <mahrens@delphix.com>
  Approved by: Gordon Ross <gwr@nexenta.com>
  Author: Alan Somers <asomers@gmail.com>

Modified:
  stable/11/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c
==============================================================================
--- stable/11/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c	Sun Feb 18 00:24:31 2018	(r329491)
+++ stable/11/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c	Sun Feb 18 00:25:21 2018	(r329492)
@@ -50,6 +50,7 @@
 #include "zfeature_common.h"
 
 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
+static boolean_t zpool_vdev_is_interior(const char *name);
 
 #define	BACKUP_SLICE	"s2"
 
@@ -2065,10 +2066,7 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, bo
 				break;
 			}
 
-			verify(strncmp(type, VDEV_TYPE_RAIDZ,
-			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
-			    strncmp(type, VDEV_TYPE_MIRROR,
-			    strlen(VDEV_TYPE_MIRROR)) == 0);
+			verify(zpool_vdev_is_interior(type));
 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
 			    &id) == 0);
 
@@ -2175,10 +2173,13 @@ zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const
 /*
  * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
  */
-boolean_t
+static boolean_t
 zpool_vdev_is_interior(const char *name)
 {
 	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
+	    strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
+	    strncmp(name,
+	    VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
 	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
 		return (B_TRUE);
 	return (B_FALSE);



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201802180025.w1I0PLbn061544>