Date: Sun, 25 Aug 2013 11:21:10 +0000 (UTC)
From: Alexander Motin <mav@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject: svn commit: r254846 - projects/camlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs
Message-ID: <201308251121.r7PBLA3v033536@svn.freebsd.org>
Author: mav
Date: Sun Aug 25 11:21:10 2013
New Revision: 254846
URL: http://svnweb.freebsd.org/changeset/base/254846

Log:
  Allow GEOM direct dispatch for zvol providers.  Skip handing the request
  over to the worker thread when the current context allows sleeping (thanks
  to direct dispatch being in effect, we are not running in the GEOM thread).
  Together this doubles zvol performance, reaching up to 300K IOPS in my
  tests.  If zvols had unmapped I/O support, the value could be even higher.

Modified:
  projects/camlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c

Modified: projects/camlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c
==============================================================================
--- projects/camlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c	Sun Aug 25 11:21:03 2013	(r254845)
+++ projects/camlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c	Sun Aug 25 11:21:10 2013	(r254846)
@@ -2068,6 +2068,7 @@ zvol_geom_create(const char *name)
 	gp->start = zvol_geom_start;
 	gp->access = zvol_geom_access;
 	pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
+	pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
 	pp->sectorsize = DEV_BSIZE;
 
 	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
@@ -2171,18 +2172,20 @@ zvol_geom_start(struct bio *bp)
 	zvol_state_t *zv;
 	boolean_t first;
 
+	zv = bp->bio_to->private;
+	ASSERT(zv != NULL);
 	switch (bp->bio_cmd) {
+	case BIO_FLUSH:
+		if (curthread->td_no_sleeping != 0)
+			goto enqueue;
+		zil_commit(zv->zv_zilog, ZVOL_OBJ);
+		g_io_deliver(bp, 0);
+		break;
 	case BIO_READ:
 	case BIO_WRITE:
-	case BIO_FLUSH:
-		zv = bp->bio_to->private;
-		ASSERT(zv != NULL);
-		mtx_lock(&zv->zv_queue_mtx);
-		first = (bioq_first(&zv->zv_queue) == NULL);
-		bioq_insert_tail(&zv->zv_queue, bp);
-		mtx_unlock(&zv->zv_queue_mtx);
-		if (first)
-			wakeup_one(&zv->zv_queue);
+		if (curthread->td_no_sleeping != 0)
+			goto enqueue;
+		zvol_strategy(bp);
 		break;
 	case BIO_GETATTR:
 	case BIO_DELETE:
@@ -2190,6 +2193,15 @@ zvol_geom_start(struct bio *bp)
 		g_io_deliver(bp, EOPNOTSUPP);
 		break;
 	}
+	return;
+
+enqueue:
+	mtx_lock(&zv->zv_queue_mtx);
+	first = (bioq_first(&zv->zv_queue) == NULL);
+	bioq_insert_tail(&zv->zv_queue, bp);
+	mtx_unlock(&zv->zv_queue_mtx);
+	if (first)
+		wakeup_one(&zv->zv_queue);
 }
 
 static void
@@ -2364,6 +2376,7 @@ zvol_rename_minor(struct g_geom *gp, con
 	g_wither_provider(pp, ENXIO);
 
 	pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
+	pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
 	pp->sectorsize = DEV_BSIZE;
 	pp->mediasize = zv->zv_volsize;
 	pp->private = zv;
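For reference, this is how zvol_geom_start() reads with the hunks above
applied.  It is assembled from the diff itself, with a few explanatory
comments added; the single line falling between the second and third hunks
(taken here to be the "default:" label) is not visible in the diff and is
an assumption.

static void
zvol_geom_start(struct bio *bp)
{
	zvol_state_t *zv;
	boolean_t first;

	zv = bp->bio_to->private;
	ASSERT(zv != NULL);
	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		/* zil_commit() may sleep; fall back to the queue if that is forbidden. */
		if (curthread->td_no_sleeping != 0)
			goto enqueue;
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		g_io_deliver(bp, 0);
		break;
	case BIO_READ:
	case BIO_WRITE:
		/* Same check: dispatch inline only when sleeping is permitted. */
		if (curthread->td_no_sleeping != 0)
			goto enqueue;
		zvol_strategy(bp);
		break;
	case BIO_GETATTR:
	case BIO_DELETE:
	default:	/* assumed; this line sits between the hunks */
		g_io_deliver(bp, EOPNOTSUPP);
		break;
	}
	return;

enqueue:
	/* Fallback (the pre-patch behavior): hand the bio to the zvol worker thread. */
	mtx_lock(&zv->zv_queue_mtx);
	first = (bioq_first(&zv->zv_queue) == NULL);
	bioq_insert_tail(&zv->zv_queue, bp);
	mtx_unlock(&zv->zv_queue_mtx);
	if (first)
		wakeup_one(&zv->zv_queue);
}

The net effect is that, with G_PF_DIRECT_RECEIVE/G_PF_DIRECT_SEND set on the
provider, read, write, and flush requests arriving from a sleepable context
are serviced inline, and the zv_queue worker is only woken for callers that
must not sleep.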