Date:      Fri, 14 Jan 2011 16:42:13 +0000 (UTC)
From:      Alexander Motin <mav@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r217409 - projects/graid/head/sys/geom/raid
Message-ID:  <201101141642.p0EGgDsM094159@svn.freebsd.org>

Author: mav
Date: Fri Jan 14 16:42:13 2011
New Revision: 217409
URL: http://svn.freebsd.org/changeset/base/217409

Log:
  Change the disk, subdisk and volume lists from LIST_ to TAILQ_. For now,
  tail insertion fixes the reordering of volumes and subdisks on every
  metadata parse.
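
  The behavioral difference can be shown with a minimal userland sketch
  (illustrative only, not part of this commit; it assumes nothing beyond
  <sys/queue.h>): LIST_INSERT_HEAD() prepends, so rebuilding the lists on
  each metadata parse reverses them, while TAILQ_INSERT_TAIL() preserves
  insertion order.

	/*
	 * Hypothetical example, not from the tree: LIST_INSERT_HEAD()
	 * reverses element order relative to insertion;
	 * TAILQ_INSERT_TAIL() preserves it.
	 */
	#include <sys/queue.h>
	#include <stdio.h>

	struct item {
		int			 id;
		LIST_ENTRY(item)	 l_next;
		TAILQ_ENTRY(item)	 t_next;
	};

	int
	main(void)
	{
		LIST_HEAD(, item) lhead = LIST_HEAD_INITIALIZER(lhead);
		TAILQ_HEAD(, item) thead = TAILQ_HEAD_INITIALIZER(thead);
		struct item items[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
		struct item *it;
		int i;

		for (i = 0; i < 3; i++) {
			LIST_INSERT_HEAD(&lhead, &items[i], l_next);
			TAILQ_INSERT_TAIL(&thead, &items[i], t_next);
		}

		LIST_FOREACH(it, &lhead, l_next)
			printf("%d ", it->id);		/* prints: 2 1 0 */
		printf("\n");
		TAILQ_FOREACH(it, &thead, t_next)
			printf("%d ", it->id);		/* prints: 0 1 2 */
		printf("\n");
		return (0);
	}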

Modified:
  projects/graid/head/sys/geom/raid/g_raid.c
  projects/graid/head/sys/geom/raid/g_raid.h
  projects/graid/head/sys/geom/raid/md_intel.c

Modified: projects/graid/head/sys/geom/raid/g_raid.c
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.c	Fri Jan 14 16:30:22 2011	(r217408)
+++ projects/graid/head/sys/geom/raid/g_raid.c	Fri Jan 14 16:42:13 2011	(r217409)
@@ -455,7 +455,7 @@ g_raid_ndisks(struct g_raid_softc *sc, i
 	sx_assert(&sc->sc_lock, SX_LOCKED);
 
 	n = 0;
-	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 		if (disk->d_state == state || state == -1)
 			n++;
 	}
@@ -601,7 +601,7 @@ g_raid_bump_syncid(struct g_raid_softc *
 	sc->sc_syncid++;
 	G_RAID_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
 	    sc->sc_syncid);
-	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 		if (disk->d_state == G_RAID_DISK_S_ACTIVE ||
 		    disk->d_state == G_RAID_DISK_S_SYNCHRONIZING) {
 //			g_raid_update_metadata(disk);
@@ -625,7 +625,7 @@ g_raid_bump_genid(struct g_raid_softc *s
 	sc->sc_genid++;
 	G_RAID_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
 	    sc->sc_genid);
-	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 		if (disk->d_state == G_RAID_DISK_S_ACTIVE ||
 		    disk->d_state == G_RAID_DISK_S_SYNCHRONIZING) {
 			disk->d_genid = sc->sc_genid;
@@ -662,7 +662,7 @@ g_raid_idle(struct g_raid_volume *vol, i
 	}
 	vol->v_idle = 1;
 // ZZZ
-	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 		if (disk->d_state != G_RAID_DISK_S_ACTIVE)
 			continue;
 		G_RAID_DEBUG(1, "Disk %s (device %s) marked as clean.",
@@ -688,7 +688,7 @@ g_raid_unidle(struct g_raid_volume *vol)
 	vol->v_idle = 0;
 	vol->v_last_write = time_uptime;
 //ZZZ
-	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 		if (disk->d_state != G_RAID_DISK_S_ACTIVE)
 			continue;
 		G_RAID_DEBUG(1, "Disk %s (device %s) marked as dirty.",
@@ -1120,7 +1120,7 @@ g_raid_launch_provider(struct g_raid_vol
 	pp->stripesize = 0;
 	pp->stripeoffset = 0;
 #if 0
-	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 		if (disk->d_consumer && disk->d_consumer->provider &&
 		    disk->d_consumer->provider->stripesize > pp->stripesize) {
 			pp->stripesize = disk->d_consumer->provider->stripesize;
@@ -1312,8 +1312,8 @@ g_raid_create_node(struct g_class *mp,
 	sc->sc_md = md;
 	sc->sc_geom = gp;
 	sc->sc_flags = 0;
-	LIST_INIT(&sc->sc_volumes);
-	LIST_INIT(&sc->sc_disks);
+	TAILQ_INIT(&sc->sc_volumes);
+	TAILQ_INIT(&sc->sc_disks);
 	sx_init(&sc->sc_lock, "gmirror:lock");
 	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
 	TAILQ_INIT(&sc->sc_events);
@@ -1361,7 +1361,7 @@ g_raid_create_volume(struct g_raid_softc
 	callout_reset(&vol->v_start_co, g_raid_start_timeout * hz,
 	    g_raid_go, vol);
 	vol->v_starting = 1;
-	LIST_INSERT_HEAD(&sc->sc_volumes, vol, v_next);
+	TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next);
 	return (vol);
 }
 
@@ -1374,8 +1374,8 @@ g_raid_create_disk(struct g_raid_softc *
 	disk = malloc(sizeof(*disk), M_RAID, M_WAITOK | M_ZERO);
 	disk->d_softc = sc;
 	disk->d_state = G_RAID_DISK_S_NONE;
-	LIST_INIT(&disk->d_subdisks);
-	LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
+	TAILQ_INIT(&disk->d_subdisks);
+	TAILQ_INSERT_TAIL(&sc->sc_disks, disk, d_next);
 	return (disk);
 }
 
@@ -1418,13 +1418,13 @@ g_raid_destroy_node(struct g_raid_softc 
 	int error = 0;
 
 	sc->sc_stopping = 1;
-	LIST_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tmpv) {
+	TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tmpv) {
 		if (g_raid_destroy_volume(vol))
 			error = EBUSY;
 	}
 	if (error)
 		return (error);
-	LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpd) {
+	TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpd) {
 		if (g_raid_destroy_disk(disk))
 			error = EBUSY;
 	}
@@ -1464,6 +1464,7 @@ int
 g_raid_destroy_volume(struct g_raid_volume *vol)
 {
 	struct g_raid_softc *sc;
+	struct g_raid_disk *disk;
 	int i;
 
 	sc = vol->v_softc;
@@ -1490,11 +1491,12 @@ g_raid_destroy_volume(struct g_raid_volu
 	if (vol->v_rootmount)
 		root_mount_rel(vol->v_rootmount);
 	callout_drain(&vol->v_start_co);
-	LIST_REMOVE(vol, v_next);
+	TAILQ_REMOVE(&sc->sc_volumes, vol, v_next);
 	for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
-		if (vol->v_subdisks[i].sd_disk == NULL)
+		disk = vol->v_subdisks[i].sd_disk;
+		if (disk == NULL)
 			continue;
-		LIST_REMOVE(&vol->v_subdisks[i], sd_next);
+		TAILQ_REMOVE(&disk->d_subdisks, &vol->v_subdisks[i], sd_next);
 	}
 	G_RAID_DEBUG(2, "Volume %s destroyed.", vol->v_name);
 	free(vol, M_RAID);
@@ -1517,13 +1519,13 @@ g_raid_destroy_disk(struct g_raid_disk *
 		g_topology_unlock();
 		disk->d_consumer = NULL;
 	}
-	LIST_FOREACH_SAFE(sd, &disk->d_subdisks, sd_next, tmp) {
+	TAILQ_FOREACH_SAFE(sd, &disk->d_subdisks, sd_next, tmp) {
 		g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
 		    G_RAID_EVENT_SUBDISK);
-		LIST_REMOVE(sd, sd_next);
+		TAILQ_REMOVE(&disk->d_subdisks, sd, sd_next);
 		sd->sd_disk = NULL;
 	}
-	LIST_REMOVE(disk, d_next);
+	TAILQ_REMOVE(&sc->sc_disks, disk, d_next);
 	if (sc->sc_md)
 		G_RAID_MD_FREE_DISK(sc->sc_md, disk);
 	free(disk, M_RAID);
@@ -1543,7 +1545,7 @@ g_raid_destroy(struct g_raid_softc *sc, 
 
 	/* Count open volumes. */
 	opens = 0;
-	LIST_FOREACH(vol, &sc->sc_volumes, v_next) {
+	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
 		if (vol->v_provider_open != 0)
 			opens++;
 	}
@@ -1714,12 +1716,12 @@ g_raid_dumpconf(struct sbuf *sb, const c
 		sx_xlock(&sc->sc_lock);
 		sbuf_printf(sb, "%s<State>%s", indent,
 		    g_raid_disk_state2str(disk->d_state));
-		if (!LIST_EMPTY(&disk->d_subdisks)) {
+		if (!TAILQ_EMPTY(&disk->d_subdisks)) {
 			sbuf_printf(sb, " (");
-			LIST_FOREACH(sd, &disk->d_subdisks, sd_next) {
+			TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
 				sbuf_printf(sb, "%s",
 				    g_raid_subdisk_state2str(sd->sd_state));
-				if (LIST_NEXT(sd, sd_next))
+				if (TAILQ_NEXT(sd, sd_next))
 					sbuf_printf(sb, ", ");
 			}
 			sbuf_printf(sb, ")");
@@ -1734,9 +1736,9 @@ g_raid_dumpconf(struct sbuf *sb, const c
 			sbuf_printf(sb, "%s<Metadata>%s</Metadata>\n", indent,
 			    sc->sc_md->mdo_class->name);
 		}
-		if (!LIST_EMPTY(&sc->sc_volumes)) {
+		if (!TAILQ_EMPTY(&sc->sc_volumes)) {
 			s = 0xff;
-			LIST_FOREACH(vol, &sc->sc_volumes, v_next) {
+			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
 				if (vol->v_state < s)
 					s = vol->v_state;
 			}
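
  A related API difference explains the new "disk" local added to
  g_raid_destroy_volume() above: LIST_REMOVE() takes only the element and
  its link field, while TAILQ_REMOVE() must also be handed the owning list
  head. A hedged sketch of that contract, again assuming only
  <sys/queue.h>:

	/*
	 * Hypothetical example: removal from a LIST_ needs no head,
	 * removal from a TAILQ_ requires one.
	 */
	#include <sys/queue.h>
	#include <assert.h>

	struct node {
		LIST_ENTRY(node)	 l_next;
		TAILQ_ENTRY(node)	 t_next;
	};

	int
	main(void)
	{
		LIST_HEAD(, node) lhead = LIST_HEAD_INITIALIZER(lhead);
		TAILQ_HEAD(, node) thead = TAILQ_HEAD_INITIALIZER(thead);
		struct node n;

		LIST_INSERT_HEAD(&lhead, &n, l_next);
		LIST_REMOVE(&n, l_next);		/* no head needed */

		TAILQ_INSERT_TAIL(&thead, &n, t_next);
		TAILQ_REMOVE(&thead, &n, t_next);	/* head is mandatory */

		assert(LIST_EMPTY(&lhead) && TAILQ_EMPTY(&thead));
		return (0);
	}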

Modified: projects/graid/head/sys/geom/raid/g_raid.h
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.h	Fri Jan 14 16:30:22 2011	(r217408)
+++ projects/graid/head/sys/geom/raid/g_raid.h	Fri Jan 14 16:42:13 2011	(r217409)
@@ -115,8 +115,8 @@ struct g_raid_disk {
 	uint64_t		 d_flags;	/* Additional flags. */
 	u_int			 d_load;	/* Disk average load. */
 	off_t			 d_last_offset;	/* Last head offset. */
-	LIST_HEAD(, g_raid_subdisk)	 d_subdisks; /* List of subdisks. */
-	LIST_ENTRY(g_raid_disk)	 d_next;	/* Next disk in the node. */
+	TAILQ_HEAD(, g_raid_subdisk)	 d_subdisks; /* List of subdisks. */
+	TAILQ_ENTRY(g_raid_disk)	 d_next;	/* Next disk in the node. */
 };
 
 #define G_RAID_SUBDISK_S_NONE		0x00
@@ -137,7 +137,7 @@ struct g_raid_subdisk {
 	u_int			 sd_pos;	/* Position in volume. */
 	u_int			 sd_state;	/* Subdisk state. */
 	int			 sd_read_errs;  /* Count of the read errors */
-	LIST_ENTRY(g_raid_subdisk)	 sd_next; /* Next subdisk on disk. */
+	TAILQ_ENTRY(g_raid_subdisk)	 sd_next; /* Next subdisk on disk. */
 };
 
 #define G_RAID_MAX_SUBDISKS	16
@@ -205,15 +205,15 @@ struct g_raid_volume {
 	int			 v_starting;	/* STARTING state timer armed */
 	int			 v_stopping;	/* Volume is stopping */
 	int			 v_provider_open; /* Number of opens. */
-	LIST_ENTRY(g_raid_volume)	 v_next; /* List of volumes entry. */
+	TAILQ_ENTRY(g_raid_volume)	 v_next; /* List of volumes entry. */
 };
 
 struct g_raid_softc {
 	struct g_raid_md_object	*sc_md;		/* Metadata object. */
 	struct g_geom		*sc_geom;	/* GEOM class instance. */
 	uint64_t		 sc_flags;	/* Additional flags. */
-	LIST_HEAD(, g_raid_volume)	 sc_volumes;	/* List of volumes. */
-	LIST_HEAD(, g_raid_disk)	 sc_disks;	/* List of disks. */
+	TAILQ_HEAD(, g_raid_volume)	 sc_volumes;	/* List of volumes. */
+	TAILQ_HEAD(, g_raid_disk)	 sc_disks;	/* List of disks. */
 	struct sx		 sc_lock;	/* Main node lock. */
 	struct proc		*sc_worker;	/* Worker process. */
 	struct mtx		 sc_queue_mtx;	/* Worker queues lock. */

Modified: projects/graid/head/sys/geom/raid/md_intel.c
==============================================================================
--- projects/graid/head/sys/geom/raid/md_intel.c	Fri Jan 14 16:30:22 2011	(r217408)
+++ projects/graid/head/sys/geom/raid/md_intel.c	Fri Jan 14 16:42:13 2011	(r217409)
@@ -452,7 +452,7 @@ g_raid_md_intel_get_disk(struct g_raid_s
 	struct g_raid_disk	*disk;
 	struct g_raid_md_intel_perdisk *pd;
 
-	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 		pd = (struct g_raid_md_intel_perdisk *)disk->d_md_data;
 		if (pd->pd_disk_pos == id)
 			break;
@@ -465,7 +465,7 @@ g_raid_md_intel_get_volume(struct g_raid
 {
 	struct g_raid_volume	*mvol;
 
-	LIST_FOREACH(mvol, &sc->sc_volumes, v_next) {
+	TAILQ_FOREACH(mvol, &sc->sc_volumes, v_next) {
 		if ((intptr_t)(mvol->v_md_data) == id)
 			break;
 	}
@@ -519,7 +519,7 @@ g_raid_md_intel_start_disk(struct g_raid
 
 	/* Welcome the "new" disk. */
 	g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
-	LIST_FOREACH(sd, &disk->d_subdisks, sd_next) {
+	TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
 		g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
 		    G_RAID_EVENT_SUBDISK);
 	}
@@ -593,12 +593,12 @@ g_raid_md_intel_start(struct g_raid_soft
 			vol = g_raid_md_intel_get_volume(sc, i);
 			sd = &vol->v_subdisks[j];
 			sd->sd_disk = disk;
-			LIST_INSERT_HEAD(&disk->d_subdisks, sd, sd_next);
+			TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);
 		}
 	}
 
 	/* Make existing disks take their places. */
-	LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpdisk) {
+	TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpdisk) {
 		if (disk->d_state == G_RAID_DISK_S_NONE)
 			g_raid_md_intel_start_disk(disk);
 	}
@@ -860,7 +860,7 @@ g_raid_md_event_intel(struct g_raid_md_o
 				g_topology_unlock();
 				disk->d_consumer = NULL;
 			}
-			LIST_FOREACH(sd, &disk->d_subdisks, sd_next) {
+			TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
 				g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
 				    G_RAID_EVENT_SUBDISK);
 			}
@@ -1054,13 +1054,13 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 		g_raid_start_volume(vol);
 
 		/* , and subdisks. */
-		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+		TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 			pd = (struct g_raid_md_intel_perdisk *)disk->d_md_data;
 			sd = &vol->v_subdisks[pd->pd_disk_pos];
 			sd->sd_disk = disk;
 			sd->sd_offset = 0;
 			sd->sd_size = size;
-			LIST_INSERT_HEAD(&disk->d_subdisks, sd, sd_next);
+			TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);
 			g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
 			g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
 			    G_RAID_EVENT_SUBDISK);
@@ -1086,7 +1086,7 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 			if (strncmp(diskname, "/dev/", 5) == 0)
 				diskname += 5;
 
-			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 				if (disk->d_consumer != NULL && 
 				    disk->d_consumer->provider != NULL &&
 				    strcmp(disk->d_consumer->provider->name,
@@ -1113,7 +1113,7 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 					g_topology_unlock();
 					disk->d_consumer = NULL;
 				}
-				LIST_FOREACH(sd, &disk->d_subdisks, sd_next) {
+				TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
 					g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
 					    G_RAID_EVENT_SUBDISK);
 				}
@@ -1140,7 +1140,7 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 		}
 		for (i = 1; i < *nargs; i++) {
 			/* Look for empty disk slot. */
-			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 				pd = (struct g_raid_md_intel_perdisk *)
 				    disk->d_md_data;
 				if (pd->pd_disk_pos < 0)
@@ -1196,7 +1196,7 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 			g_topology_unlock();
 
 			/* Make sure disk is big enough. */
-			LIST_FOREACH(sd, &disk->d_subdisks, sd_next) {
+			TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
 				if (sd->sd_offset + sd->sd_size + 4096 >
 				    pp->mediasize) {
 					gctl_error(req,
@@ -1236,7 +1236,7 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 
 			/* Welcome the "new" disk. */
 			g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
-			LIST_FOREACH(sd, &disk->d_subdisks, sd_next) {
+			TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
 				g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
 				    G_RAID_EVENT_SUBDISK);
 			}
@@ -1273,7 +1273,7 @@ g_raid_md_write_intel(struct g_raid_md_o
 
 	/* Count number of disks. */
 	numdisks = 0;
-	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 		pd = (struct g_raid_md_intel_perdisk *)disk->d_md_data;
 		if (pd->pd_disk_pos < 0)
 			continue;
@@ -1304,7 +1304,7 @@ g_raid_md_write_intel(struct g_raid_md_o
 	meta->generation = mdi->mdio_generation;
 	meta->attributes = INTEL_ATTR_CHECKSUM;
 	meta->total_disks = numdisks;
-	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 		pd = (struct g_raid_md_intel_perdisk *)disk->d_md_data;
 		if (pd->pd_disk_pos < 0)
 			continue;
@@ -1314,7 +1314,7 @@ g_raid_md_write_intel(struct g_raid_md_o
 	/* Fill volumes and maps. */
 	vi = 0;
 	version = INTEL_VERSION_1000;
-	LIST_FOREACH(vol, &sc->sc_volumes, v_next) {
+	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
 		mvol = intel_get_volume(meta, vi);
 		mmap = intel_get_map(mvol, 0);
 
@@ -1405,7 +1405,7 @@ g_raid_md_write_intel(struct g_raid_md_o
 	if (mdi->mdio_meta != NULL)
 		free(mdi->mdio_meta, M_MD_INTEL);
 	mdi->mdio_meta = meta;
-	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
+	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 		pd = (struct g_raid_md_intel_perdisk *)disk->d_md_data;
 		if (disk->d_state != G_RAID_DISK_S_ACTIVE)
 			continue;


