From owner-svn-src-projects@FreeBSD.ORG Wed Mar 23 15:43:31 2011
From: Alexander Motin <mav@FreeBSD.org>
Date: Wed, 23 Mar 2011 15:43:31 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject: svn commit: r219910 - projects/graid/head/sys/geom/raid

Author: mav
Date: Wed Mar 23 15:43:31 2011
New Revision: 219910
URL: http://svn.freebsd.org/changeset/base/219910

Log:
  Since NVidia metadata carries no generation numbers, bump the volume ID
  each time the array is started incomplete after a wait timeout, or when
  one of the disks is hot-disconnected.  If the lost disk reappears later,
  it won't corrupt data, but it will be reported as a separate array.  The
  Windows driver does the same.

  Since the volume ID is no longer persistent (in addition to being too
  long) and therefore not very suitable for management, use sequential
  numbering to generate GEOM node names.
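  For illustration, the naming scheme introduced by this change can be
  sketched as a stand-alone kernel helper.  The helper name
  nvidia_make_node_name() is hypothetical; only the NVidiaNodeID counter,
  atomic_fetchadd_int() and the "NVidia-%d" format come from the diff
  below:

	#include <sys/param.h>
	#include <sys/systm.h>		/* snprintf() in the kernel */
	#include <machine/atomic.h>	/* atomic_fetchadd_int() */

	static int NVidiaNodeID = 1;

	/* Hypothetical helper: produce a unique "NVidia-<n>" node name. */
	static void
	nvidia_make_node_name(char *name, size_t len)
	{
		/*
		 * atomic_fetchadd_int() returns the previous counter value
		 * and increments it atomically, so two tasters running in
		 * parallel can never generate the same node name.
		 */
		snprintf(name, len, "NVidia-%d",
		    atomic_fetchadd_int(&NVidiaNodeID, 1));
	}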
Modified:
  projects/graid/head/sys/geom/raid/md_nvidia.c

Modified: projects/graid/head/sys/geom/raid/md_nvidia.c
==============================================================================
--- projects/graid/head/sys/geom/raid/md_nvidia.c	Wed Mar 23 15:22:59 2011	(r219909)
+++ projects/graid/head/sys/geom/raid/md_nvidia.c	Wed Mar 23 15:43:31 2011	(r219910)
@@ -144,6 +144,8 @@ static struct g_raid_md_class g_raid_md_
 	.mdc_priority = 100
 };
 
+static int NVidiaNodeID = 1;
+
 static void
 g_raid_md_nvidia_print(struct nvidia_raid_conf *meta)
 {
@@ -420,7 +422,7 @@ g_raid_md_nvidia_start_disk(struct g_rai
 	/* Find disk position in metadata by it's serial. */
 	if (pd->pd_meta != NULL) {
 		disk_pos = pd->pd_meta->disk_number;
-		if (disk_pos >= meta->total_disks)
+		if (disk_pos >= meta->total_disks || mdi->mdio_started)
 			disk_pos = -3;
 	} else
 		disk_pos = -3;
@@ -518,13 +520,6 @@ nofit:
 			/* New or ex-spare disk. */
 			g_raid_change_subdisk_state(sd,
 			    G_RAID_SUBDISK_S_NEW);
-		} else if (mdi->mdio_started) {
-			/*
-			 * As soon as we have no generations --
-			 * treat every hot-plugged disk as new.
-			 */
-			g_raid_change_subdisk_state(sd,
-			    G_RAID_SUBDISK_S_NEW);
 		} else if (meta->state == NVIDIA_S_REBUILD &&
 		    (pd->pd_meta->disk_status & 0x100)) {
 			/* Rebuilding disk. */
@@ -793,8 +788,8 @@ g_raid_md_create_nvidia(struct g_raid_md
 	mdi = (struct g_raid_md_nvidia_object *)md;
 	arc4rand(&mdi->mdio_volume_id, 16, 0);
-	snprintf(name, sizeof(name), "NVidia-%08x",
-	    (uint32_t)mdi->mdio_volume_id[0]);
+	snprintf(name, sizeof(name), "NVidia-%d",
+	    atomic_fetchadd_int(&NVidiaNodeID, 1));
 	sc = g_raid_create_node(mp, name, md);
 	if (sc == NULL)
 		return (G_RAID_MD_TASTE_FAIL);
@@ -900,8 +895,8 @@ search:
 	} else { /* Not found matching node -- create one. */
 		result = G_RAID_MD_TASTE_NEW;
 		memcpy(&mdi->mdio_volume_id, &meta->volume_id, 16);
-		snprintf(name, sizeof(name), "NVidia-%08x",
-		    (uint32_t)mdi->mdio_volume_id[0]);
+		snprintf(name, sizeof(name), "NVidia-%d",
+		    atomic_fetchadd_int(&NVidiaNodeID, 1));
 		sc = g_raid_create_node(mp, name, md);
 		md->mdo_softc = sc;
 		geom = sc->sc_geom;
@@ -967,8 +962,11 @@ g_raid_md_event_nvidia(struct g_raid_md_
 	if (disk == NULL) {
 		switch (event) {
 		case G_RAID_NODE_E_START:
-			if (!mdi->mdio_started)
+			if (!mdi->mdio_started) {
+				/* Bump volume ID to drop missing disks. */
+				arc4rand(&mdi->mdio_volume_id, 16, 0);
 				g_raid_md_nvidia_start(sc);
+			}
 			return (0);
 		}
 		return (-1);
@@ -995,8 +993,14 @@ g_raid_md_event_nvidia(struct g_raid_md_
 		g_raid_destroy_disk(disk);
 	}
 
-	/* Write updated metadata to all disks. */
-	g_raid_md_write_nvidia(md, NULL, NULL, NULL);
+	if (mdi->mdio_started) {
+		/* Bump volume ID to prevent disk resurrection. */
+		if (pd->pd_disk_pos >= 0)
+			arc4rand(&mdi->mdio_volume_id, 16, 0);
+
+		/* Write updated metadata to all disks. */
+		g_raid_md_write_nvidia(md, NULL, NULL, NULL);
+	}
 
 	/* Check if anything left except placeholders. */
 	if (g_raid_ndisks(sc, -1) ==
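  The effect of bumping the volume ID can also be shown in isolation: a
  tasted disk only joins an existing node when the 16-byte IDs match byte
  for byte, so re-randomizing the running node's ID guarantees that a
  previously removed disk is assembled as a separate array instead of
  silently rejoining with stale data.  The structure and function names
  below are illustrative; only the 16-byte volume_id / mdio_volume_id
  comparison follows the driver:

	#include <string.h>

	/* Illustrative stand-ins for the driver's metadata and node state. */
	struct meta_sketch { unsigned char volume_id[16]; };
	struct node_sketch { unsigned char mdio_volume_id[16]; };

	/*
	 * A disk is attached to an existing node only when the IDs are
	 * byte-for-byte equal; one differing byte forces a new node.
	 */
	static int
	node_matches_disk(const struct node_sketch *node,
	    const struct meta_sketch *meta)
	{
		return (memcmp(node->mdio_volume_id, meta->volume_id, 16) == 0);
	}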