Date:      Fri, 4 Feb 2011 12:41:49 +0000 (UTC)
From:      Alexander Motin <mav@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r218265 - projects/graid/head/sys/geom/raid
Message-ID:  <201102041241.p14Cfnju081915@svn.freebsd.org>

Author: mav
Date: Fri Feb  4 12:41:49 2011
New Revision: 218265
URL: http://svn.freebsd.org/changeset/base/218265

Log:
  Completely move control over the volume startup process to the metadata
  modules.  They must know the number of disks in an array or volume, and
  they can easily track the number of disks currently present.  There is
  no need to duplicate that functionality in places that have even less
  information on which to base the decision.
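  
  As a sketch of the pattern this describes (illustrative only; the type
  g_raid_md_example_object and its fields are hypothetical and not part
  of this commit), a metadata module can now decide for itself when its
  volumes are ready to start, instead of relying on the generic per-volume
  timer that this change removes:
  
  	struct g_raid_md_example_object {
  		int	mdio_total_disks;	/* Expected members, from metadata. */
  		int	mdio_disks_present;	/* Members seen so far. */
  		int	mdio_start_timed_out;	/* Module's own start timeout fired. */
  	};
  
  	static void
  	g_raid_md_example_check_start(struct g_raid_softc *sc,
  	    struct g_raid_md_example_object *mdi)
  	{
  		struct g_raid_volume *vol;
  
  		/* Keep waiting while members are missing, unless the
  		 * module's own start timeout has already expired. */
  		if (mdi->mdio_disks_present < mdi->mdio_total_disks &&
  		    !mdi->mdio_start_timed_out)
  			return;
  
  		/* The set is complete (or we gave up waiting), so the
  		 * module requests the START transition itself. */
  		TAILQ_FOREACH(vol, &sc->sc_volumes, v_next)
  			g_raid_event_send(vol, G_RAID_VOLUME_E_START,
  			    G_RAID_EVENT_VOLUME);
  	}
  
  This mirrors what md_intel.c now does in g_raid_md_intel_start() below.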

Modified:
  projects/graid/head/sys/geom/raid/g_raid.c
  projects/graid/head/sys/geom/raid/g_raid.h
  projects/graid/head/sys/geom/raid/md_intel.c
  projects/graid/head/sys/geom/raid/tr_raid0.c
  projects/graid/head/sys/geom/raid/tr_raid1.c

Modified: projects/graid/head/sys/geom/raid/g_raid.c
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.c	Fri Feb  4 12:03:48 2011	(r218264)
+++ projects/graid/head/sys/geom/raid/g_raid.c	Fri Feb  4 12:41:49 2011	(r218265)
@@ -59,10 +59,10 @@ u_int g_raid_debug = 2;
 TUNABLE_INT("kern.geom.raid.debug", &g_raid_debug);
 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RW, &g_raid_debug, 0,
     "Debug level");
-u_int g_raid_start_timeout = 4;
+u_int g_raid_start_timeout = 15;
 TUNABLE_INT("kern.geom.raid.start_timeout", &g_raid_start_timeout);
 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, timeout, CTLFLAG_RW, &g_raid_start_timeout,
-    0, "Time to wait on all mirror components");
+    0, "Time to wait for all array components");
 static u_int g_raid_cleantime = 5;
 TUNABLE_INT("kern.geom.raid.cleantime", &g_raid_cleantime);
 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, cleantime, CTLFLAG_RW,
@@ -553,7 +553,9 @@ g_raid_nsubdisks(struct g_raid_volume *v
 	n = 0;
 	for (i = 0; i < vol->v_disks_count; i++) {
 		subdisk = &vol->v_subdisks[i];
-		if (subdisk->sd_state == state || state == -1)
+		if ((state == -1 &&
+		     subdisk->sd_state != G_RAID_SUBDISK_S_NONE) ||
+		    subdisk->sd_state == state)
 			n++;
 	}
 	return (n);
@@ -1338,19 +1340,6 @@ g_raid_destroy_provider(struct g_raid_vo
 	vol->v_provider = NULL;
 }
 
-static void
-g_raid_go(void *arg)
-{
-	struct g_raid_volume *vol;
-
-	vol = arg;
-	if (vol->v_starting) {
-		G_RAID_DEBUG(0, "Force volume %s start due to timeout.", vol->v_name);
-		g_raid_event_send(vol, G_RAID_VOLUME_E_START,
-		    G_RAID_EVENT_VOLUME);
-	}
-}
-
 /*
  * Update device state.
  */
@@ -1383,7 +1372,6 @@ g_raid_update_volume(struct g_raid_volum
 	/* Manage root mount release. */
 	if (vol->v_starting) {
 		vol->v_starting = 0;
-		callout_drain(&vol->v_start_co);
 		G_RAID_DEBUG(1, "root_mount_rel %p", vol->v_rootmount);
 		root_mount_rel(vol->v_rootmount);
 		vol->v_rootmount = NULL;
@@ -1585,9 +1573,6 @@ g_raid_create_volume(struct g_raid_softc
 	/* Delay root mounting. */
 	vol->v_rootmount = root_mount_hold("GRAID");
 	G_RAID_DEBUG(1, "root_mount_hold %p", vol->v_rootmount);
-	callout_init(&vol->v_start_co, 1);
-	callout_reset(&vol->v_start_co, g_raid_start_timeout * hz,
-	    g_raid_go, vol);
 	vol->v_starting = 1;
 	TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next);
 	return (vol);
@@ -1720,7 +1705,6 @@ g_raid_destroy_volume(struct g_raid_volu
 		return (EBUSY);
 	if (vol->v_rootmount)
 		root_mount_rel(vol->v_rootmount);
-	callout_drain(&vol->v_start_co);
 	g_topology_lock();
 	LIST_REMOVE(vol, v_global_next);
 	g_topology_unlock();

Modified: projects/graid/head/sys/geom/raid/g_raid.h
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.h	Fri Feb  4 12:03:48 2011	(r218264)
+++ projects/graid/head/sys/geom/raid/g_raid.h	Fri Feb  4 12:41:49 2011	(r218265)
@@ -231,8 +231,7 @@ struct g_raid_volume {
 	time_t			 v_last_write;	/* Time of the last write. */
 	u_int			 v_writes;	/* Number of active writes. */
 	struct root_hold_token	*v_rootmount;	/* Root mount delay token. */
-	struct callout		 v_start_co;	/* STARTING state timer. */
-	int			 v_starting;	/* STARTING state timer armed */
+	int			 v_starting;	/* Volume is starting */
 	int			 v_stopping;	/* Volume is stopping */
 	int			 v_provider_open; /* Number of opens. */
 	int			 v_global_id;	/* Global volume ID (rX). */

Modified: projects/graid/head/sys/geom/raid/md_intel.c
==============================================================================
--- projects/graid/head/sys/geom/raid/md_intel.c	Fri Feb  4 12:03:48 2011	(r218264)
+++ projects/graid/head/sys/geom/raid/md_intel.c	Fri Feb  4 12:41:49 2011	(r218265)
@@ -867,6 +867,11 @@ g_raid_md_intel_start(struct g_raid_soft
 	/* Pickup any STALE/SPARE disks to refill array if needed. */
 	g_raid_md_intel_refill(sc);
 
+	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
+		g_raid_event_send(vol, G_RAID_VOLUME_E_START,
+		    G_RAID_EVENT_VOLUME);
+	}
+
 	callout_stop(&mdi->mdio_start_co);
 	G_RAID_DEBUG(1, "root_mount_rel %p", mdi->mdio_rootmount);
 	root_mount_rel(mdi->mdio_rootmount);
@@ -1446,6 +1451,9 @@ makedisk:
 
 		/* Pickup any STALE/SPARE disks to refill array if needed. */
 		g_raid_md_intel_refill(sc);
+
+		g_raid_event_send(vol, G_RAID_VOLUME_E_START,
+		    G_RAID_EVENT_VOLUME);
 		return (0);
 	}
 	if (strcmp(verb, "add") == 0) {
@@ -1611,6 +1619,9 @@ makedisk:
 
 		/* Write metadata based on created entities. */
 		g_raid_md_write_intel(md, NULL, NULL, NULL);
+
+		g_raid_event_send(vol, G_RAID_VOLUME_E_START,
+		    G_RAID_EVENT_VOLUME);
 		return (0);
 	}
 	if (strcmp(verb, "delete") == 0) {

Modified: projects/graid/head/sys/geom/raid/tr_raid0.c
==============================================================================
--- projects/graid/head/sys/geom/raid/tr_raid0.c	Fri Feb  4 12:03:48 2011	(r218264)
+++ projects/graid/head/sys/geom/raid/tr_raid0.c	Fri Feb  4 12:41:49 2011	(r218265)
@@ -101,6 +101,8 @@ g_raid_tr_update_state_raid0(struct g_ra
 	trs = (struct g_raid_tr_raid0_object *)vol->v_tr;
 	if (trs->trso_stopped)
 		s = G_RAID_VOLUME_S_STOPPED;
+	else if (trs->trso_starting)
+		s = G_RAID_VOLUME_S_STARTING;
 	else {
 		n = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
 		f = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_FAILED);
@@ -109,10 +111,7 @@ g_raid_tr_update_state_raid0(struct g_ra
 				s = G_RAID_VOLUME_S_OPTIMAL;
 			else
 				s = G_RAID_VOLUME_S_SUBOPTIMAL;
-			trs->trso_starting = 0;
-		} else if (trs->trso_starting)
-			s = G_RAID_VOLUME_S_STARTING;
-		else
+		} else
 			s = G_RAID_VOLUME_S_BROKEN;
 	}
 	if (s != vol->v_state) {

Modified: projects/graid/head/sys/geom/raid/tr_raid1.c
==============================================================================
--- projects/graid/head/sys/geom/raid/tr_raid1.c	Fri Feb  4 12:03:48 2011	(r218264)
+++ projects/graid/head/sys/geom/raid/tr_raid1.c	Fri Feb  4 12:41:49 2011	(r218265)
@@ -170,19 +170,16 @@ g_raid_tr_update_state_raid1(struct g_ra
 	if (trs->trso_stopping &&
 	    (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
 		s = G_RAID_VOLUME_S_STOPPED;
+	else if (trs->trso_starting)
+		s = G_RAID_VOLUME_S_STARTING;
 	else {
 		n = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
-		if (n == vol->v_disks_count) {
+		if (n == vol->v_disks_count)
 			s = G_RAID_VOLUME_S_OPTIMAL;
-			trs->trso_starting = 0;
-		} else {
-			if (trs->trso_starting)
-				s = G_RAID_VOLUME_S_STARTING;
-			else if (n > 0)
-				s = G_RAID_VOLUME_S_DEGRADED;
-			else
-				s = G_RAID_VOLUME_S_BROKEN;
-		}
+		else if (n > 0)
+			s = G_RAID_VOLUME_S_DEGRADED;
+		else
+			s = G_RAID_VOLUME_S_BROKEN;
 	}
 	g_raid_tr_raid1_maybe_rebuild(vol->v_tr, vol);
 	if (s != vol->v_state) {


