Date: Tue, 4 Oct 2011 17:09:10 +0000 (UTC)
From: "Lev A. Serebryakov" <lev@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject: svn commit: r226012 - projects/geom-events/sys/geom/raid
Message-ID: <201110041709.p94H9A18070147@svn.freebsd.org>
Author: lev (ports committer)
Date: Tue Oct 4 17:09:10 2011
New Revision: 226012
URL: http://svn.freebsd.org/changeset/base/226012

Log:
  Add geom_notify_*() calls to geom_raid. This is not a trivial change.

  To determine the state of volume(s) after a disk disconnection, a new method
  was added to the RAID transformation object interface (g_raid_tr):

    METHOD int getvolstatus {
        struct g_raid_tr_object *tr;
        struct g_raid_volume *volume;
    };

  This method returns the state (G_RAID_VOLUME_S_XXX) of the given volume.
  Some transformation modules have their status detection code factored out of
  g_raid_tr_update_state_NAME(); others do not, because they perform active
  actions in that method, while getvolstatus() must remain passive.

Modified:
  projects/geom-events/sys/geom/raid/g_raid.c
  projects/geom-events/sys/geom/raid/g_raid.h
  projects/geom-events/sys/geom/raid/g_raid_tr_if.m
  projects/geom-events/sys/geom/raid/md_intel.c
  projects/geom-events/sys/geom/raid/md_jmicron.c
  projects/geom-events/sys/geom/raid/md_nvidia.c
  projects/geom-events/sys/geom/raid/md_promise.c
  projects/geom-events/sys/geom/raid/md_sii.c
  projects/geom-events/sys/geom/raid/tr_concat.c
  projects/geom-events/sys/geom/raid/tr_raid0.c
  projects/geom-events/sys/geom/raid/tr_raid1.c
  projects/geom-events/sys/geom/raid/tr_raid1e.c

Modified: projects/geom-events/sys/geom/raid/g_raid.c ============================================================================== --- projects/geom-events/sys/geom/raid/g_raid.c Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/g_raid.c Tue Oct 4 17:09:10 2011 (r226012) @@ -585,6 +585,36 @@ g_raid_get_subdisk(struct g_raid_volume return (NULL); } +/* + * Send event about disk disconnection for volume with proper status + */ +void +g_raid_notify_volume(struct g_raid_volume *vol, struct g_raid_disk *disk) +{ + if (!vol->v_tr) { + g_notify_disconnect(vol->v_provider, disk->d_consumer, G_NOTIFY_DISCONNECT_UNKNOWN); + return; + } + switch (G_RAID_TR_GETVOLSTATUS(vol->v_tr, vol)) { + case G_RAID_VOLUME_S_BROKEN: + g_notify_disconnect(vol->v_provider, disk->d_consumer, G_NOTIFY_DISCONNECT_DEAD); + break; + case G_RAID_VOLUME_S_DEGRADED: + case G_RAID_VOLUME_S_SUBOPTIMAL: + g_notify_disconnect(vol->v_provider, disk->d_consumer, G_NOTIFY_DISCONNECT_FIXABLE); + break; + case G_RAID_VOLUME_S_OPTIMAL: + g_notify_disconnect(vol->v_provider, disk->d_consumer, G_NOTIFY_DISCONNECT_ALIVE); + break; + case G_RAID_VOLUME_S_STARTING: + case G_RAID_VOLUME_S_UNSUPPORTED: + case G_RAID_VOLUME_S_STOPPED: + default: + g_notify_disconnect(vol->v_provider, disk->d_consumer, G_NOTIFY_DISCONNECT_UNKNOWN); + break; + } +} + struct g_consumer * g_raid_open_consumer(struct g_raid_softc *sc, const char *name) { Modified: projects/geom-events/sys/geom/raid/g_raid.h ============================================================================== --- projects/geom-events/sys/geom/raid/g_raid.h Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/g_raid.h Tue Oct 4 17:09:10 2011 (r226012) @@ -397,6 +397,8 @@ int g_raid_lock_range(struct g_raid_volu struct bio *ignore, void *argp); int g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len); +void g_raid_notify_volume(struct g_raid_volume *vol, struct g_raid_disk *disk); + g_ctl_req_t g_raid_ctl; #endif /* _KERNEL */ Modified: projects/geom-events/sys/geom/raid/g_raid_tr_if.m ============================================================================== --- projects/geom-events/sys/geom/raid/g_raid_tr_if.m Tue Oct 4 17:08:24 2011 (r226011) +++
projects/geom-events/sys/geom/raid/g_raid_tr_if.m Tue Oct 4 17:09:10 2011 (r226012) @@ -100,6 +100,12 @@ METHOD int kerneldump { size_t length; } DEFAULT g_raid_tr_kerneldump_common; +# getvolstatus() - check status of volume, based on status of subdisks +METHOD int getvolstatus { + struct g_raid_tr_object *tr; + struct g_raid_volume *volume; +}; + # locked() - callback method for lock(). METHOD int locked { struct g_raid_tr_object *tr; Modified: projects/geom-events/sys/geom/raid/md_intel.c ============================================================================== --- projects/geom-events/sys/geom/raid/md_intel.c Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/md_intel.c Tue Oct 4 17:09:10 2011 (r226012) @@ -1323,20 +1323,23 @@ g_raid_md_event_intel(struct g_raid_md_o /* If disk was assigned, just update statuses. */ if (pd->pd_disk_pos >= 0) { g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE); - if (disk->d_consumer) { - g_raid_kill_consumer(sc, disk->d_consumer); - disk->d_consumer = NULL; - } TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) { g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE); g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED, G_RAID_EVENT_SUBDISK); + /* Notify about changes in volume */ + g_raid_notify_volume(sd->sd_volume, disk); + } + if (disk->d_consumer) { + g_raid_kill_consumer(sc, disk->d_consumer); + disk->d_consumer = NULL; } } else { /* Otherwise -- delete. */ g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE); g_raid_destroy_disk(disk); + /* Disk was not assigned, so no volumes, nothing to report */ } /* Write updated metadata to all disks. */ Modified: projects/geom-events/sys/geom/raid/md_jmicron.c ============================================================================== --- projects/geom-events/sys/geom/raid/md_jmicron.c Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/md_jmicron.c Tue Oct 4 17:09:10 2011 (r226012) @@ -986,20 +986,24 @@ g_raid_md_event_jmicron(struct g_raid_md /* If disk was assigned, just update statuses. */ if (pd->pd_disk_pos >= 0) { g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE); - if (disk->d_consumer) { - g_raid_kill_consumer(sc, disk->d_consumer); - disk->d_consumer = NULL; - } TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) { g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE); g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED, G_RAID_EVENT_SUBDISK); + + /* Notify about changes in volume */ + g_raid_notify_volume(sd->sd_volume, disk); + } + if (disk->d_consumer) { + g_raid_kill_consumer(sc, disk->d_consumer); + disk->d_consumer = NULL; } } else { /* Otherwise -- delete. */ g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE); g_raid_destroy_disk(disk); + /* Disk was not assigned, so no volumes, nothing to report */ } /* Write updated metadata to all disks. */ Modified: projects/geom-events/sys/geom/raid/md_nvidia.c ============================================================================== --- projects/geom-events/sys/geom/raid/md_nvidia.c Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/md_nvidia.c Tue Oct 4 17:09:10 2011 (r226012) @@ -982,20 +982,24 @@ g_raid_md_event_nvidia(struct g_raid_md_ /* If disk was assigned, just update statuses. 
*/ if (pd->pd_disk_pos >= 0) { g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE); - if (disk->d_consumer) { - g_raid_kill_consumer(sc, disk->d_consumer); - disk->d_consumer = NULL; - } TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) { g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE); g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED, G_RAID_EVENT_SUBDISK); + + /* Notify about changes in volume */ + g_raid_notify_volume(sd->sd_volume, disk); + } + if (disk->d_consumer) { + g_raid_kill_consumer(sc, disk->d_consumer); + disk->d_consumer = NULL; } } else { /* Otherwise -- delete. */ g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE); g_raid_destroy_disk(disk); + /* Disk was not assigned, so no volumes, nothing to report */ } if (mdi->mdio_started) { Modified: projects/geom-events/sys/geom/raid/md_promise.c ============================================================================== --- projects/geom-events/sys/geom/raid/md_promise.c Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/md_promise.c Tue Oct 4 17:09:10 2011 (r226012) @@ -1154,12 +1154,17 @@ g_raid_md_event_promise(struct g_raid_md struct g_raid_disk *disk, u_int event) { struct g_raid_softc *sc; + struct g_raid_subdisk *sd; sc = md->mdo_softc; if (disk == NULL) return (-1); switch (event) { case G_RAID_DISK_E_DISCONNECTED: + TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) { + /* Notify about changes in volume */ + g_raid_notify_volume(sd->sd_volume, disk); + } /* Delete disk. */ g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE); g_raid_destroy_disk(disk); Modified: projects/geom-events/sys/geom/raid/md_sii.c ============================================================================== --- projects/geom-events/sys/geom/raid/md_sii.c Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/md_sii.c Tue Oct 4 17:09:10 2011 (r226012) @@ -1073,20 +1073,23 @@ g_raid_md_event_sii(struct g_raid_md_obj /* If disk was assigned, just update statuses. */ if (pd->pd_disk_pos >= 0) { g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE); - if (disk->d_consumer) { - g_raid_kill_consumer(sc, disk->d_consumer); - disk->d_consumer = NULL; - } TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) { g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE); g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED, G_RAID_EVENT_SUBDISK); + /* Notify about changes in volume */ + g_raid_notify_volume(sd->sd_volume, disk); + } + if (disk->d_consumer) { + g_raid_kill_consumer(sc, disk->d_consumer); + disk->d_consumer = NULL; } } else { /* Otherwise -- delete. */ g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE); g_raid_destroy_disk(disk); + /* Disk was not assigned, so no volumes, nothing to report */ } /* Write updated metadata to all disks. 
*/ Modified: projects/geom-events/sys/geom/raid/tr_concat.c ============================================================================== --- projects/geom-events/sys/geom/raid/tr_concat.c Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/tr_concat.c Tue Oct 4 17:09:10 2011 (r226012) @@ -55,6 +55,7 @@ static g_raid_tr_stop_t g_raid_tr_stop_c static g_raid_tr_iostart_t g_raid_tr_iostart_concat; static g_raid_tr_iodone_t g_raid_tr_iodone_concat; static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_concat; +static g_raid_tr_getvolstatus_t g_raid_tr_getvolstatus_concat; static g_raid_tr_free_t g_raid_tr_free_concat; static kobj_method_t g_raid_tr_concat_methods[] = { @@ -65,6 +66,7 @@ static kobj_method_t g_raid_tr_concat_me KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_concat), KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_concat), KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_concat), + KOBJMETHOD(g_raid_tr_getvolstatus, g_raid_tr_getvolstatus_concat), KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_concat), { 0, 0 } }; @@ -98,25 +100,11 @@ g_raid_tr_update_state_concat(struct g_r struct g_raid_softc *sc; off_t size; u_int s; - int i, n, f; + int i; sc = vol->v_softc; trs = (struct g_raid_tr_concat_object *)vol->v_tr; - if (trs->trso_stopped) - s = G_RAID_VOLUME_S_STOPPED; - else if (trs->trso_starting) - s = G_RAID_VOLUME_S_STARTING; - else { - n = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE); - f = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_FAILED); - if (n + f == vol->v_disks_count) { - if (f == 0) - s = G_RAID_VOLUME_S_OPTIMAL; - else - s = G_RAID_VOLUME_S_SUBOPTIMAL; - } else - s = G_RAID_VOLUME_S_BROKEN; - } + s = g_raid_tr_getvolstatus_concat(vol->v_tr, vol); if (s != vol->v_state) { /* @@ -316,6 +304,31 @@ g_raid_tr_kerneldump_concat(struct g_rai return (0); } +static int +g_raid_tr_getvolstatus_concat(struct g_raid_tr_object *tr, struct g_raid_volume *volume) +{ + struct g_raid_tr_concat_object *trs; + int n, f; + + trs = (struct g_raid_tr_concat_object *)tr; + KASSERT(tr == volume->v_tr, ("Invalid transformation object")); + if (trs->trso_stopped) + return G_RAID_VOLUME_S_STOPPED; + else if (trs->trso_starting) + return G_RAID_VOLUME_S_STARTING; + else { + n = g_raid_nsubdisks(volume, G_RAID_SUBDISK_S_ACTIVE); + f = g_raid_nsubdisks(volume, G_RAID_SUBDISK_S_FAILED); + if (n + f == volume->v_disks_count) { + if (f == 0) + return G_RAID_VOLUME_S_OPTIMAL; + else + return G_RAID_VOLUME_S_SUBOPTIMAL; + } + } + return G_RAID_VOLUME_S_BROKEN; +} + static void g_raid_tr_iodone_concat(struct g_raid_tr_object *tr, struct g_raid_subdisk *sd,struct bio *bp) Modified: projects/geom-events/sys/geom/raid/tr_raid0.c ============================================================================== --- projects/geom-events/sys/geom/raid/tr_raid0.c Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/tr_raid0.c Tue Oct 4 17:09:10 2011 (r226012) @@ -55,6 +55,7 @@ static g_raid_tr_stop_t g_raid_tr_stop_r static g_raid_tr_iostart_t g_raid_tr_iostart_raid0; static g_raid_tr_iodone_t g_raid_tr_iodone_raid0; static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid0; +static g_raid_tr_getvolstatus_t g_raid_tr_getvolstatus_raid0; static g_raid_tr_free_t g_raid_tr_free_raid0; static kobj_method_t g_raid_tr_raid0_methods[] = { @@ -65,6 +66,7 @@ static kobj_method_t g_raid_tr_raid0_met KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_raid0), KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_raid0), KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid0), + 
KOBJMETHOD(g_raid_tr_getvolstatus, g_raid_tr_getvolstatus_raid0), KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_raid0), { 0, 0 } }; @@ -95,25 +97,10 @@ g_raid_tr_update_state_raid0(struct g_ra struct g_raid_tr_raid0_object *trs; struct g_raid_softc *sc; u_int s; - int n, f; sc = vol->v_softc; trs = (struct g_raid_tr_raid0_object *)vol->v_tr; - if (trs->trso_stopped) - s = G_RAID_VOLUME_S_STOPPED; - else if (trs->trso_starting) - s = G_RAID_VOLUME_S_STARTING; - else { - n = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE); - f = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_FAILED); - if (n + f == vol->v_disks_count) { - if (f == 0) - s = G_RAID_VOLUME_S_OPTIMAL; - else - s = G_RAID_VOLUME_S_SUBOPTIMAL; - } else - s = G_RAID_VOLUME_S_BROKEN; - } + s = g_raid_tr_getvolstatus_raid0(vol->v_tr, vol); if (s != vol->v_state) { g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ? G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN, @@ -299,6 +286,31 @@ g_raid_tr_kerneldump_raid0(struct g_raid return (0); } +static int +g_raid_tr_getvolstatus_raid0(struct g_raid_tr_object *tr, struct g_raid_volume *volume) +{ + struct g_raid_tr_raid0_object *trs; + int n, f; + + trs = (struct g_raid_tr_raid0_object *)tr; + KASSERT(tr == volume->v_tr, ("Invalid transformation object")); + if (trs->trso_stopped) + return G_RAID_VOLUME_S_STOPPED; + else if (trs->trso_starting) + return G_RAID_VOLUME_S_STARTING; + else { + n = g_raid_nsubdisks(volume, G_RAID_SUBDISK_S_ACTIVE); + f = g_raid_nsubdisks(volume, G_RAID_SUBDISK_S_FAILED); + if (n + f == volume->v_disks_count) { + if (f == 0) + return G_RAID_VOLUME_S_OPTIMAL; + else + return G_RAID_VOLUME_S_SUBOPTIMAL; + } + } + return G_RAID_VOLUME_S_BROKEN; +} + static void g_raid_tr_iodone_raid0(struct g_raid_tr_object *tr, struct g_raid_subdisk *sd,struct bio *bp) Modified: projects/geom-events/sys/geom/raid/tr_raid1.c ============================================================================== --- projects/geom-events/sys/geom/raid/tr_raid1.c Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/tr_raid1.c Tue Oct 4 17:09:10 2011 (r226012) @@ -109,6 +109,7 @@ static g_raid_tr_stop_t g_raid_tr_stop_r static g_raid_tr_iostart_t g_raid_tr_iostart_raid1; static g_raid_tr_iodone_t g_raid_tr_iodone_raid1; static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1; +static g_raid_tr_getvolstatus_t g_raid_tr_getvolstatus_raid1; static g_raid_tr_locked_t g_raid_tr_locked_raid1; static g_raid_tr_idle_t g_raid_tr_idle_raid1; static g_raid_tr_free_t g_raid_tr_free_raid1; @@ -120,7 +121,8 @@ static kobj_method_t g_raid_tr_raid1_met KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_raid1), KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_raid1), KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_raid1), - KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1), + KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1), + KOBJMETHOD(g_raid_tr_getvolstatus, g_raid_tr_getvolstatus_raid1), KOBJMETHOD(g_raid_tr_locked, g_raid_tr_locked_raid1), KOBJMETHOD(g_raid_tr_idle, g_raid_tr_idle_raid1), KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_raid1), @@ -306,6 +308,7 @@ g_raid_tr_raid1_rebuild_finish(struct g_ g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE); sd->sd_rebuild_pos = 0; g_raid_tr_raid1_rebuild_done(trs); + g_notify_sync_stop(sd->sd_volume->v_provider, 1); } static void @@ -340,6 +343,7 @@ g_raid_tr_raid1_rebuild_abort(struct g_r } g_raid_tr_raid1_rebuild_done(trs); } + g_notify_sync_stop(vol->v_provider, 0); } static void @@ -402,6 +406,9 @@ g_raid_tr_raid1_rebuild_start(struct g_r 
trs->trso_type = TR_RAID1_REBUILD; trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK); trs->trso_meta_update = g_raid1_rebuild_meta_update; + + g_notify_sync_start(vol->v_provider); + g_raid_tr_raid1_rebuild_some(tr); } @@ -947,6 +954,33 @@ g_raid_tr_kerneldump_raid1(struct g_raid } static int +g_raid_tr_getvolstatus_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *volume) +{ + struct g_raid_tr_raid1_object *trs; + int na, ns; + + trs = (struct g_raid_tr_raid1_object *)tr; + KASSERT(tr == volume->v_tr, ("Invalid transformation object")); + if (trs->trso_stopping && + (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0) + return G_RAID_VOLUME_S_STOPPED; + else if (trs->trso_starting) + return G_RAID_VOLUME_S_STARTING; + else { + na = g_raid_nsubdisks(volume, G_RAID_SUBDISK_S_ACTIVE); + ns = g_raid_nsubdisks(volume, G_RAID_SUBDISK_S_STALE) + + g_raid_nsubdisks(volume, G_RAID_SUBDISK_S_RESYNC); + if (na == volume->v_disks_count) + return G_RAID_VOLUME_S_OPTIMAL; + else if (na + ns == volume->v_disks_count) + return G_RAID_VOLUME_S_SUBOPTIMAL; + else if (na > 0) + return G_RAID_VOLUME_S_DEGRADED; + } + return G_RAID_VOLUME_S_BROKEN; +} + +static int g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp) { struct bio *bp; Modified: projects/geom-events/sys/geom/raid/tr_raid1e.c ============================================================================== --- projects/geom-events/sys/geom/raid/tr_raid1e.c Tue Oct 4 17:08:24 2011 (r226011) +++ projects/geom-events/sys/geom/raid/tr_raid1e.c Tue Oct 4 17:09:10 2011 (r226012) @@ -113,6 +113,7 @@ static g_raid_tr_stop_t g_raid_tr_stop_r static g_raid_tr_iostart_t g_raid_tr_iostart_raid1e; static g_raid_tr_iodone_t g_raid_tr_iodone_raid1e; static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1e; +static g_raid_tr_getvolstatus_t g_raid_tr_getvolstatus_raid1e; static g_raid_tr_locked_t g_raid_tr_locked_raid1e; static g_raid_tr_idle_t g_raid_tr_idle_raid1e; static g_raid_tr_free_t g_raid_tr_free_raid1e; @@ -124,7 +125,8 @@ static kobj_method_t g_raid_tr_raid1e_me KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_raid1e), KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_raid1e), KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_raid1e), - KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1e), + KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1e), + KOBJMETHOD(g_raid_tr_getvolstatus, g_raid_tr_getvolstatus_raid1e), KOBJMETHOD(g_raid_tr_locked, g_raid_tr_locked_raid1e), KOBJMETHOD(g_raid_tr_idle, g_raid_tr_idle_raid1e), KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_raid1e), @@ -194,6 +196,88 @@ g_raid_tr_taste_raid1e(struct g_raid_tr_ } static int +g_raid_tr_getvolstatus_raid1e_even(struct g_raid_volume *vol) +{ + struct g_raid_softc *sc; + struct g_raid_subdisk *sd, *bestsd, *worstsd; + int i, j, state, sstate; + + sc = vol->v_softc; + state = G_RAID_VOLUME_S_OPTIMAL; + for (i = 0; i < vol->v_disks_count / N; i++) { + bestsd = &vol->v_subdisks[i * N]; + for (j = 1; j < N; j++) { + sd = &vol->v_subdisks[i * N + j]; + if (sd->sd_state > bestsd->sd_state) + bestsd = sd; + else if (sd->sd_state == bestsd->sd_state && + (sd->sd_state == G_RAID_SUBDISK_S_REBUILD || + sd->sd_state == G_RAID_SUBDISK_S_RESYNC) && + sd->sd_rebuild_pos > bestsd->sd_rebuild_pos) + bestsd = sd; + } + worstsd = &vol->v_subdisks[i * N]; + for (j = 1; j < N; j++) { + sd = &vol->v_subdisks[i * N + j]; + if (sd->sd_state < worstsd->sd_state) + worstsd = sd; + } + if (worstsd->sd_state == G_RAID_SUBDISK_S_ACTIVE) + sstate = G_RAID_VOLUME_S_OPTIMAL; + else 
if (worstsd->sd_state >= G_RAID_SUBDISK_S_STALE) + sstate = G_RAID_VOLUME_S_SUBOPTIMAL; + else if (bestsd->sd_state == G_RAID_SUBDISK_S_ACTIVE) + sstate = G_RAID_VOLUME_S_DEGRADED; + else + sstate = G_RAID_VOLUME_S_BROKEN; + if (sstate < state) + state = sstate; + } + return (state); +} + +static int +g_raid_tr_getvolstatus_raid1e_odd(struct g_raid_volume *vol) +{ + struct g_raid_softc *sc; + struct g_raid_subdisk *sd, *bestsd, *worstsd; + int i, j, state, sstate; + + sc = vol->v_softc; + if (g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE) == + vol->v_disks_count) + return (G_RAID_VOLUME_S_OPTIMAL); + state = G_RAID_VOLUME_S_OPTIMAL; + for (i = 0; i < vol->v_disks_count; i++) { + bestsd = &vol->v_subdisks[i]; + worstsd = &vol->v_subdisks[i]; + for (j = 1; j < N; j++) { + sd = &vol->v_subdisks[(i + j) % vol->v_disks_count]; + if (sd->sd_state > bestsd->sd_state) + bestsd = sd; + else if (sd->sd_state == bestsd->sd_state && + (sd->sd_state == G_RAID_SUBDISK_S_REBUILD || + sd->sd_state == G_RAID_SUBDISK_S_RESYNC) && + sd->sd_rebuild_pos > bestsd->sd_rebuild_pos) + bestsd = sd; + if (sd->sd_state < worstsd->sd_state) + worstsd = sd; + } + if (worstsd->sd_state == G_RAID_SUBDISK_S_ACTIVE) + sstate = G_RAID_VOLUME_S_OPTIMAL; + else if (worstsd->sd_state >= G_RAID_SUBDISK_S_STALE) + sstate = G_RAID_VOLUME_S_SUBOPTIMAL; + else if (bestsd->sd_state >= G_RAID_SUBDISK_S_STALE) + sstate = G_RAID_VOLUME_S_DEGRADED; + else + sstate = G_RAID_VOLUME_S_BROKEN; + if (sstate < state) + state = sstate; + } + return (state); +} + +static int g_raid_tr_update_state_raid1e_even(struct g_raid_volume *vol) { struct g_raid_softc *sc; @@ -386,6 +470,7 @@ g_raid_tr_raid1e_rebuild_finish(struct g g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE); sd->sd_rebuild_pos = 0; g_raid_tr_raid1e_rebuild_done(trs); + g_notify_sync_stop(sd->sd_volume->v_provider, 1); } static void @@ -417,6 +502,7 @@ g_raid_tr_raid1e_rebuild_abort(struct g_ } g_raid_tr_raid1e_rebuild_done(trs); } + g_notify_sync_stop(vol->v_provider, 0); } static void @@ -548,6 +634,9 @@ g_raid_tr_raid1e_rebuild_start(struct g_ trs->trso_type = TR_RAID1E_REBUILD; trs->trso_buffer = malloc(g_raid1e_rebuild_slab, M_TR_RAID1E, M_WAITOK); trs->trso_meta_update = g_raid1e_rebuild_meta_update; + + g_notify_sync_start(vol->v_provider); + g_raid_tr_raid1e_rebuild_some(tr); } @@ -1173,6 +1262,26 @@ nextdisk: } static int +g_raid_tr_getvolstatus_raid1e(struct g_raid_tr_object *tr, struct g_raid_volume *volume) +{ + struct g_raid_tr_raid1e_object *trs; + + trs = (struct g_raid_tr_raid1e_object *)tr; + KASSERT(tr == volume->v_tr, ("Invalid transformation object")); + if (trs->trso_stopping && + (trs->trso_flags & TR_RAID1E_F_DOING_SOME) == 0) + return G_RAID_VOLUME_S_STOPPED; + else if (trs->trso_starting) + return G_RAID_VOLUME_S_STARTING; + else { + if ((volume->v_disks_count % N) == 0) + return g_raid_tr_getvolstatus_raid1e_even(volume); + else + return g_raid_tr_getvolstatus_raid1e_odd(volume); + } +} + +static int g_raid_tr_locked_raid1e(struct g_raid_tr_object *tr, void *argp) { struct bio *bp;
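
The getvolstatus() implementations added above for CONCAT, RAID0 and RAID1 all follow the same pattern: count subdisks per state and map those counts onto a G_RAID_VOLUME_S_* value. The standalone sketch below restates those rules outside the kernel so they are easy to compare side by side; the enum values, function names and the plain integer-count interface are illustrative stand-ins, not part of the GEOM RAID API, which operates on struct g_raid_volume.

/*
 * Standalone sketch of the subdisk-count classification performed by the
 * new getvolstatus() methods.  All names are illustrative stand-ins; the
 * real methods return the kernel's G_RAID_VOLUME_S_* constants.
 */
#include <stdio.h>

enum vol_state {
	VOL_BROKEN,		/* not enough subdisks left to serve data */
	VOL_DEGRADED,		/* readable, but redundancy has been lost */
	VOL_SUBOPTIMAL,		/* all data present, some subdisks not clean */
	VOL_OPTIMAL		/* every subdisk active */
};

/* CONCAT/RAID0 rule: every subdisk must still be accounted for. */
static enum vol_state
stripe_state(int active, int failed, int total)
{

	if (active + failed == total)
		return (failed == 0 ? VOL_OPTIMAL : VOL_SUBOPTIMAL);
	return (VOL_BROKEN);
}

/* RAID1 rule: a single active mirror side keeps the volume readable. */
static enum vol_state
mirror_state(int active, int stale_or_resync, int total)
{

	if (active == total)
		return (VOL_OPTIMAL);
	if (active + stale_or_resync == total)
		return (VOL_SUBOPTIMAL);
	if (active > 0)
		return (VOL_DEGRADED);
	return (VOL_BROKEN);
}

int
main(void)
{

	/* A two-disk stripe missing one subdisk is broken ... */
	printf("stripe 1 active / 0 failed of 2 -> %d\n",
	    stripe_state(1, 0, 2));
	/* ... while a two-disk mirror missing one subdisk is only degraded. */
	printf("mirror 1 active / 0 stale of 2 -> %d\n",
	    mirror_state(1, 0, 2));
	return (0);
}

The RAID1E case is more involved because each group of mirrored copies has to be judged separately (the _even/_odd helpers in the diff), but the per-group decision reduces to the same best/worst-subdisk comparison.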
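Once the volume state is known, g_raid_notify_volume() maps it onto one of the G_NOTIFY_DISCONNECT_* severities passed to g_notify_disconnect(). The mapping is small enough to restate as a compact sketch; the enum and helper names here are again stand-ins for illustration, and the real function also passes the affected provider and consumer.

/*
 * Sketch of the volume-state -> disconnect-severity mapping done by
 * g_raid_notify_volume().  Enum names stand in for the kernel's
 * G_RAID_VOLUME_S_* and G_NOTIFY_DISCONNECT_* constants.
 */
#include <stdio.h>

enum vol_state {
	VOL_BROKEN, VOL_DEGRADED, VOL_SUBOPTIMAL, VOL_OPTIMAL,
	VOL_STARTING, VOL_STOPPED, VOL_UNSUPPORTED
};
enum severity {
	SEV_UNKNOWN,	/* state cannot be judged (no tr, starting, stopped) */
	SEV_DEAD,	/* volume has lost data */
	SEV_FIXABLE,	/* degraded but repairable by replacing the disk */
	SEV_ALIVE	/* volume unaffected by the disconnection */
};

static enum severity
disconnect_severity(enum vol_state s)
{

	switch (s) {
	case VOL_BROKEN:
		return (SEV_DEAD);
	case VOL_DEGRADED:
	case VOL_SUBOPTIMAL:
		return (SEV_FIXABLE);
	case VOL_OPTIMAL:
		return (SEV_ALIVE);
	default:
		return (SEV_UNKNOWN);
	}
}

int
main(void)
{

	printf("broken  -> severity %d\n", disconnect_severity(VOL_BROKEN));
	printf("optimal -> severity %d\n", disconnect_severity(VOL_OPTIMAL));
	return (0);
}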