Date: Mon, 24 Nov 2014 11:37:28 +0000 (UTC)
From: Alexander Motin <mav@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r274962 - head/sys/cam/ctl
Message-ID: <201411241137.sAOBbSeF097180@svn.freebsd.org>
Author: mav
Date: Mon Nov 24 11:37:27 2014
New Revision: 274962
URL: https://svnweb.freebsd.org/changeset/base/274962

Log:
  Replace the home-grown CTL I/O allocator with UMA.

  The old allocator created significant lock contention while protecting its
  lists of preallocated I/Os, whereas UMA provides much better SMP
  scalability.  The downside of UMA is the lack of reliable preallocation
  that could guarantee successful allocation in non-sleepable contexts.
  Careful code review showed that only the CAM target frontend really has
  that requirement, so fix that by having the frontend preallocate and
  statically bind a CTL I/O to every ATIO/INOT it preallocates anyway, which
  avoids allocations in the hot I/O path.  Other frontends either may sleep
  in their allocation context or can properly handle allocation errors.

  On a 40-core server with 6 ZVOL-backed LUNs and 7 iSCSI client connections
  this change increases peak performance from ~700K to >1M IOPS!  Yay! :)

  MFC after:    1 month
  Sponsored by: iXsystems, Inc.

Modified:
  head/sys/cam/ctl/ctl.c
  head/sys/cam/ctl/ctl_frontend.c
  head/sys/cam/ctl/ctl_frontend_cam_sim.c
  head/sys/cam/ctl/ctl_frontend_internal.c
  head/sys/cam/ctl/ctl_frontend_iscsi.c
  head/sys/cam/ctl/ctl_io.h
  head/sys/cam/ctl/ctl_private.h
  head/sys/cam/ctl/ctl_tpc.c
  head/sys/cam/ctl/scsi_ctl.c

Modified: head/sys/cam/ctl/ctl.c
==============================================================================
--- head/sys/cam/ctl/ctl.c      Mon Nov 24 11:16:52 2014        (r274961)
+++ head/sys/cam/ctl/ctl.c      Mon Nov 24 11:37:27 2014        (r274962)
@@ -64,6 +64,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/smp.h>
 #include <sys/endian.h>
 #include <sys/sysctl.h>
+#include <vm/uma.h>
 
 #include <cam/cam.h>
 #include <cam/scsi/scsi_all.h>
@@ -644,7 +645,7 @@ ctl_isc_event_handler(ctl_ha_channel cha
 #if 0
             printf("Serialize\n");
 #endif
-            io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
+            io = ctl_alloc_io_nowait(ctl_softc->othersc_pool);
             if (io == NULL) {
                 printf("ctl_isc_event_handler: can't allocate "
                        "ctl_io!\n");
@@ -889,8 +890,8 @@ ctl_isc_event_handler(ctl_ha_channel cha
         /* Handle resets sent from the other side */
         case CTL_MSG_MANAGE_TASKS: {
             struct ctl_taskio *taskio;
-            taskio = (struct ctl_taskio *)ctl_alloc_io(
-                (void *)ctl_softc->othersc_pool);
+            taskio = (struct ctl_taskio *)ctl_alloc_io_nowait(
+                ctl_softc->othersc_pool);
             if (taskio == NULL) {
                 printf("ctl_isc_event_handler: can't allocate "
                        "ctl_io!\n");
@@ -918,8 +919,8 @@ ctl_isc_event_handler(ctl_ha_channel cha
         }
         /* Persistent Reserve action which needs attention */
         case CTL_MSG_PERS_ACTION:
-            presio = (struct ctl_prio *)ctl_alloc_io(
-                (void *)ctl_softc->othersc_pool);
+            presio = (struct ctl_prio *)ctl_alloc_io_nowait(
+                ctl_softc->othersc_pool);
             if (presio == NULL) {
                 printf("ctl_isc_event_handler: can't allocate "
                        "ctl_io!\n");
@@ -1003,7 +1004,7 @@ static int
 ctl_init(void)
 {
     struct ctl_softc *softc;
-    struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
+    void *other_pool;
     struct ctl_port *port;
     int i, error, retval;
     //int isc_retval;
@@ -1049,7 +1050,8 @@ ctl_init(void)
               "Report no lun possible for invalid LUNs");
 
     mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
-    mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
+    softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
+        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
     softc->open_count = 0;
 
     /*
@@ -1086,36 +1088,15 @@ ctl_init(void)
     STAILQ_INIT(&softc->fe_list);
     STAILQ_INIT(&softc->port_list);
     STAILQ_INIT(&softc->be_list);
-    STAILQ_INIT(&softc->io_pools);
 
     ctl_tpc_init(softc);
 
-    if (ctl_pool_create(softc,
CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL, - &internal_pool)!= 0){ - printf("ctl: can't allocate %d entry internal pool, " - "exiting\n", CTL_POOL_ENTRIES_INTERNAL); - return (ENOMEM); - } - - if (ctl_pool_create(softc, CTL_POOL_EMERGENCY, - CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) { - printf("ctl: can't allocate %d entry emergency pool, " - "exiting\n", CTL_POOL_ENTRIES_EMERGENCY); - ctl_pool_free(internal_pool); - return (ENOMEM); - } - - if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC, + if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, &other_pool) != 0) { printf("ctl: can't allocate %d entry other SC pool, " "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); - ctl_pool_free(internal_pool); - ctl_pool_free(emergency_pool); return (ENOMEM); } - - softc->internal_pool = internal_pool; - softc->emergency_pool = emergency_pool; softc->othersc_pool = other_pool; if (worker_threads <= 0) @@ -1137,8 +1118,6 @@ ctl_init(void) &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); if (error != 0) { printf("error creating CTL work thread!\n"); - ctl_pool_free(internal_pool); - ctl_pool_free(emergency_pool); ctl_pool_free(other_pool); return (error); } @@ -1147,8 +1126,6 @@ ctl_init(void) &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); if (error != 0) { printf("error creating CTL lun thread!\n"); - ctl_pool_free(internal_pool); - ctl_pool_free(emergency_pool); ctl_pool_free(other_pool); return (error); } @@ -1156,8 +1133,6 @@ ctl_init(void) &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); if (error != 0) { printf("error creating CTL threshold thread!\n"); - ctl_pool_free(internal_pool); - ctl_pool_free(emergency_pool); ctl_pool_free(other_pool); return (error); } @@ -1210,7 +1185,6 @@ ctl_shutdown(void) { struct ctl_softc *softc; struct ctl_lun *lun, *next_lun; - struct ctl_io_pool *pool; softc = (struct ctl_softc *)control_softc; @@ -1231,24 +1205,13 @@ ctl_shutdown(void) ctl_frontend_deregister(&ioctl_frontend); - /* - * This will rip the rug out from under any FETDs or anyone else - * that has a pool allocated. Since we increment our module - * refcount any time someone outside the main CTL module allocates - * a pool, we shouldn't have any problems here. The user won't be - * able to unload the CTL module until client modules have - * successfully unloaded. - */ - while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL) - ctl_pool_free(pool); - #if 0 ctl_shutdown_thread(softc->work_thread); mtx_destroy(&softc->queue_lock); #endif ctl_tpc_shutdown(softc); - mtx_destroy(&softc->pool_lock); + uma_zdestroy(softc->io_zone); mtx_destroy(&softc->ctl_lock); destroy_dev(softc->dev); @@ -2371,21 +2334,15 @@ ctl_ioctl(struct cdev *dev, u_long cmd, } io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref); - if (io == NULL) { - printf("ctl_ioctl: can't allocate ctl_io!\n"); - retval = ENOSPC; - break; - } /* * Need to save the pool reference so it doesn't get * spammed by the user's ctl_io. */ pool_tmp = io->io_hdr.pool; - memcpy(io, (void *)addr, sizeof(*io)); - io->io_hdr.pool = pool_tmp; + /* * No status yet, so make sure the status is set properly. */ @@ -3729,285 +3686,95 @@ ctl_kfree_io(union ctl_io *io) #endif /* unused */ /* - * ctl_softc, pool_type, total_ctl_io are passed in. + * ctl_softc, pool_name, total_ctl_io are passed in. * npool is passed out. 
*/ int -ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, - uint32_t total_ctl_io, struct ctl_io_pool **npool) +ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, + uint32_t total_ctl_io, void **npool) { - uint32_t i; - union ctl_io *cur_io, *next_io; +#ifdef IO_POOLS struct ctl_io_pool *pool; - int retval; - - retval = 0; pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, M_NOWAIT | M_ZERO); - if (pool == NULL) { - retval = ENOMEM; - goto bailout; - } + if (pool == NULL) + return (ENOMEM); - pool->type = pool_type; + snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); pool->ctl_softc = ctl_softc; - - mtx_lock(&ctl_softc->pool_lock); - pool->id = ctl_softc->cur_pool_id++; - mtx_unlock(&ctl_softc->pool_lock); - - pool->flags = CTL_POOL_FLAG_NONE; - pool->refcount = 1; /* Reference for validity. */ - STAILQ_INIT(&pool->free_queue); - - /* - * XXX KDM other options here: - * - allocate a page at a time - * - allocate one big chunk of memory. - * Page allocation might work well, but would take a little more - * tracking. - */ - for (i = 0; i < total_ctl_io; i++) { - cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTLIO, - M_NOWAIT); - if (cur_io == NULL) { - retval = ENOMEM; - break; - } - cur_io->io_hdr.pool = pool; - STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links); - pool->total_ctl_io++; - pool->free_ctl_io++; - } - - if (retval != 0) { - for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); - cur_io != NULL; cur_io = next_io) { - next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr, - links); - STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, - ctl_io_hdr, links); - free(cur_io, M_CTLIO); - } - - free(pool, M_CTL); - goto bailout; - } - mtx_lock(&ctl_softc->pool_lock); - ctl_softc->num_pools++; - STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links); - /* - * Increment our usage count if this is an external consumer, so we - * can't get unloaded until the external consumer (most likely a - * FETD) unloads and frees his pool. - * - * XXX KDM will this increment the caller's module use count, or - * mine? - */ -#if 0 - if ((pool_type != CTL_POOL_EMERGENCY) - && (pool_type != CTL_POOL_INTERNAL) - && (pool_type != CTL_POOL_4OTHERSC)) - MOD_INC_USE_COUNT; -#endif - - mtx_unlock(&ctl_softc->pool_lock); + pool->zone = uma_zsecond_create(pool->name, NULL, + NULL, NULL, NULL, ctl_softc->io_zone); + /* uma_prealloc(pool->zone, total_ctl_io); */ *npool = pool; - -bailout: - - return (retval); -} - -static int -ctl_pool_acquire(struct ctl_io_pool *pool) -{ - - mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED); - - if (pool->flags & CTL_POOL_FLAG_INVALID) - return (EINVAL); - - pool->refcount++; - - return (0); -} - -static void -ctl_pool_release(struct ctl_io_pool *pool) -{ - struct ctl_softc *ctl_softc = pool->ctl_softc; - union ctl_io *io; - - mtx_assert(&ctl_softc->pool_lock, MA_OWNED); - - if (--pool->refcount != 0) - return; - - while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) { - STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr, - links); - free(io, M_CTLIO); - } - - STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links); - ctl_softc->num_pools--; - - /* - * XXX KDM will this decrement the caller's usage count or mine? 
- */ -#if 0 - if ((pool->type != CTL_POOL_EMERGENCY) - && (pool->type != CTL_POOL_INTERNAL) - && (pool->type != CTL_POOL_4OTHERSC)) - MOD_DEC_USE_COUNT; +#else + *npool = ctl_softc->io_zone; #endif - - free(pool, M_CTL); + return (0); } void ctl_pool_free(struct ctl_io_pool *pool) { - struct ctl_softc *ctl_softc; if (pool == NULL) return; - ctl_softc = pool->ctl_softc; - mtx_lock(&ctl_softc->pool_lock); - pool->flags |= CTL_POOL_FLAG_INVALID; - ctl_pool_release(pool); - mtx_unlock(&ctl_softc->pool_lock); +#ifdef IO_POOLS + uma_zdestroy(pool->zone); + free(pool, M_CTL); +#endif } -/* - * This routine does not block (except for spinlocks of course). - * It tries to allocate a ctl_io union from the caller's pool as quickly as - * possible. - */ union ctl_io * ctl_alloc_io(void *pool_ref) { union ctl_io *io; - struct ctl_softc *ctl_softc; - struct ctl_io_pool *pool, *npool; - struct ctl_io_pool *emergency_pool; +#ifdef IO_POOLS + struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; - pool = (struct ctl_io_pool *)pool_ref; - - if (pool == NULL) { - printf("%s: pool is NULL\n", __func__); - return (NULL); - } - - emergency_pool = NULL; - - ctl_softc = pool->ctl_softc; - - mtx_lock(&ctl_softc->pool_lock); - /* - * First, try to get the io structure from the user's pool. - */ - if (ctl_pool_acquire(pool) == 0) { - io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); - if (io != NULL) { - STAILQ_REMOVE_HEAD(&pool->free_queue, links); - pool->total_allocated++; - pool->free_ctl_io--; - mtx_unlock(&ctl_softc->pool_lock); - return (io); - } else - ctl_pool_release(pool); - } - /* - * If he doesn't have any io structures left, search for an - * emergency pool and grab one from there. - */ - STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) { - if (npool->type != CTL_POOL_EMERGENCY) - continue; - - if (ctl_pool_acquire(npool) != 0) - continue; - - emergency_pool = npool; - - io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue); - if (io != NULL) { - STAILQ_REMOVE_HEAD(&npool->free_queue, links); - npool->total_allocated++; - npool->free_ctl_io--; - mtx_unlock(&ctl_softc->pool_lock); - return (io); - } else - ctl_pool_release(npool); - } - - /* Drop the spinlock before we malloc */ - mtx_unlock(&ctl_softc->pool_lock); + io = uma_zalloc(pool->zone, M_WAITOK); +#else + io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); +#endif + if (io != NULL) + io->io_hdr.pool = pool_ref; + return (io); +} - /* - * The emergency pool (if it exists) didn't have one, so try an - * atomic (i.e. nonblocking) malloc and see if we get lucky. - */ - io = (union ctl_io *)malloc(sizeof(*io), M_CTLIO, M_NOWAIT); - if (io != NULL) { - /* - * If the emergency pool exists but is empty, add this - * ctl_io to its list when it gets freed. - */ - if (emergency_pool != NULL) { - mtx_lock(&ctl_softc->pool_lock); - if (ctl_pool_acquire(emergency_pool) == 0) { - io->io_hdr.pool = emergency_pool; - emergency_pool->total_ctl_io++; - /* - * Need to bump this, otherwise - * total_allocated and total_freed won't - * match when we no longer have anything - * outstanding. 
- */ - emergency_pool->total_allocated++; - } - mtx_unlock(&ctl_softc->pool_lock); - } else - io->io_hdr.pool = NULL; - } +union ctl_io * +ctl_alloc_io_nowait(void *pool_ref) +{ + union ctl_io *io; +#ifdef IO_POOLS + struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; + io = uma_zalloc(pool->zone, M_NOWAIT); +#else + io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); +#endif + if (io != NULL) + io->io_hdr.pool = pool_ref; return (io); } void ctl_free_io(union ctl_io *io) { +#ifdef IO_POOLS + struct ctl_io_pool *pool; +#endif + if (io == NULL) return; - /* - * If this ctl_io has a pool, return it to that pool. - */ - if (io->io_hdr.pool != NULL) { - struct ctl_io_pool *pool; - - pool = (struct ctl_io_pool *)io->io_hdr.pool; - mtx_lock(&pool->ctl_softc->pool_lock); - io->io_hdr.io_type = 0xff; - STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links); - pool->total_freed++; - pool->free_ctl_io++; - ctl_pool_release(pool); - mtx_unlock(&pool->ctl_softc->pool_lock); - } else { - /* - * Otherwise, just free it. We probably malloced it and - * the emergency pool wasn't available. - */ - free(io, M_CTLIO); - } - +#ifdef IO_POOLS + pool = (struct ctl_io_pool *)io->io_hdr.pool; + uma_zfree(pool->zone, io); +#else + uma_zfree((uma_zone_t)io->io_hdr.pool, io); +#endif } void @@ -4022,9 +3789,7 @@ ctl_zero_io(union ctl_io *io) * May need to preserve linked list pointers at some point too. */ pool_ref = io->io_hdr.pool; - memset(io, 0, sizeof(*io)); - io->io_hdr.pool = pool_ref; } @@ -5657,16 +5422,10 @@ ctl_start_stop(struct ctl_scsiio *ctsio) union ctl_io *new_io; new_io = ctl_alloc_io(ctsio->io_hdr.pool); - if (new_io == NULL) { - ctl_set_busy(ctsio); - ctl_done((union ctl_io *)ctsio); - } else { - ctl_copy_io((union ctl_io *)ctsio, - new_io); - retval = lun->backend->config_write(new_io); - ctl_set_success(ctsio); - ctl_done((union ctl_io *)ctsio); - } + ctl_copy_io((union ctl_io *)ctsio, new_io); + retval = lun->backend->config_write(new_io); + ctl_set_success(ctsio); + ctl_done((union ctl_io *)ctsio); } else { retval = lun->backend->config_write( (union ctl_io *)ctsio); Modified: head/sys/cam/ctl/ctl_frontend.c ============================================================================== --- head/sys/cam/ctl/ctl_frontend.c Mon Nov 24 11:16:52 2014 (r274961) +++ head/sys/cam/ctl/ctl_frontend.c Mon Nov 24 11:37:27 2014 (r274962) @@ -138,7 +138,7 @@ ctl_frontend_find(char *frontend_name) int ctl_port_register(struct ctl_port *port) { - struct ctl_io_pool *pool; + void *pool; int port_num; int retval; @@ -176,7 +176,7 @@ ctl_port_register(struct ctl_port *port) * pending sense queue on the next command, whether or not it is * a REQUEST SENSE. 
*/ - retval = ctl_pool_create(control_softc, CTL_POOL_FETD, + retval = ctl_pool_create(control_softc, port->port_name, port->num_requested_ctl_io + 20, &pool); if (retval != 0) { free(port->wwpn_iid, M_CTL); Modified: head/sys/cam/ctl/ctl_frontend_cam_sim.c ============================================================================== --- head/sys/cam/ctl/ctl_frontend_cam_sim.c Mon Nov 24 11:16:52 2014 (r274961) +++ head/sys/cam/ctl/ctl_frontend_cam_sim.c Mon Nov 24 11:37:27 2014 (r274962) @@ -545,7 +545,7 @@ cfcs_action(struct cam_sim *sim, union c return; } - io = ctl_alloc_io(softc->port.ctl_pool_ref); + io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { printf("%s: can't allocate ctl_io\n", __func__); ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; @@ -642,7 +642,7 @@ cfcs_action(struct cam_sim *sim, union c return; } - io = ctl_alloc_io(softc->port.ctl_pool_ref); + io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); @@ -737,7 +737,7 @@ cfcs_action(struct cam_sim *sim, union c return; } - io = ctl_alloc_io(softc->port.ctl_pool_ref); + io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); Modified: head/sys/cam/ctl/ctl_frontend_internal.c ============================================================================== --- head/sys/cam/ctl/ctl_frontend_internal.c Mon Nov 24 11:16:52 2014 (r274961) +++ head/sys/cam/ctl/ctl_frontend_internal.c Mon Nov 24 11:37:27 2014 (r274962) @@ -761,11 +761,6 @@ cfi_done(union ctl_io *io) struct cfi_lun_io *new_lun_io; new_io = ctl_alloc_io(softc->port.ctl_pool_ref); - if (new_io == NULL) { - printf("%s: unable to allocate ctl_io for " - "error recovery\n", __func__); - goto done; - } ctl_zero_io(new_io); new_io->io_hdr.io_type = CTL_IO_TASK; @@ -967,12 +962,6 @@ cfi_lun_probe(struct cfi_lun *lun, int h union ctl_io *io; io = ctl_alloc_io(lun->softc->port.ctl_pool_ref); - if (io == NULL) { - printf("%s: unable to alloc ctl_io for target %ju " - "lun %d probe\n", __func__, - (uintmax_t)lun->target_id.id, lun->lun_id); - return; - } ctl_scsi_inquiry(io, /*data_ptr*/(uint8_t *)&lun->inq_data, /*data_len*/ sizeof(lun->inq_data), @@ -1014,12 +1003,6 @@ cfi_lun_probe(struct cfi_lun *lun, int h union ctl_io *io; io = ctl_alloc_io(lun->softc->port.ctl_pool_ref); - if (io == NULL) { - printf("%s: unable to alloc ctl_io for target %ju " - "lun %d probe\n", __func__, - (uintmax_t)lun->target_id.id, lun->lun_id); - return; - } dataptr = malloc(sizeof(struct scsi_read_capacity_data_long), M_CTL_CFI, M_NOWAIT); @@ -1394,7 +1377,7 @@ cfi_action(struct cfi_metatask *metatask if (SID_TYPE(&lun->inq_data) != T_DIRECT) continue; da_luns++; - io = ctl_alloc_io(softc->port.ctl_pool_ref); + io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io != NULL) { ios_allocated++; STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr, @@ -1548,7 +1531,7 @@ cfi_action(struct cfi_metatask *metatask } - io = ctl_alloc_io(softc->port.ctl_pool_ref); + io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { metatask->status = CFI_MT_ERROR; metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM; Modified: head/sys/cam/ctl/ctl_frontend_iscsi.c ============================================================================== --- head/sys/cam/ctl/ctl_frontend_iscsi.c Mon Nov 24 11:16:52 2014 (r274961) +++ head/sys/cam/ctl/ctl_frontend_iscsi.c Mon Nov 24 11:37:27 2014 (r274962) @@ 
-542,13 +542,6 @@ cfiscsi_pdu_handle_scsi_command(struct i return; } io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); - if (io == NULL) { - CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io; " - "dropping connection"); - icl_pdu_free(request); - cfiscsi_session_terminate(cs); - return; - } ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request; io->io_hdr.io_type = CTL_IO_SCSI; @@ -606,13 +599,6 @@ cfiscsi_pdu_handle_task_request(struct i cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); - if (io == NULL) { - CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io;" - "dropping connection"); - icl_pdu_free(request); - cfiscsi_session_terminate(cs); - return; - } ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request; io->io_hdr.io_type = CTL_IO_TASK; @@ -1063,10 +1049,6 @@ cfiscsi_session_terminate_tasks(struct c if (cs->cs_target == NULL) return; /* No target yet, so nothing to do. */ io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); - if (io == NULL) { - CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io"); - return; - } ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = cs; io->io_hdr.io_type = CTL_IO_TASK; Modified: head/sys/cam/ctl/ctl_io.h ============================================================================== --- head/sys/cam/ctl/ctl_io.h Mon Nov 24 11:16:52 2014 (r274961) +++ head/sys/cam/ctl/ctl_io.h Mon Nov 24 11:37:27 2014 (r274962) @@ -511,6 +511,7 @@ union ctl_io { #ifdef _KERNEL union ctl_io *ctl_alloc_io(void *pool_ref); +union ctl_io *ctl_alloc_io_nowait(void *pool_ref); void ctl_free_io(union ctl_io *io); void ctl_zero_io(union ctl_io *io); void ctl_copy_io(union ctl_io *src, union ctl_io *dest); Modified: head/sys/cam/ctl/ctl_private.h ============================================================================== --- head/sys/cam/ctl/ctl_private.h Mon Nov 24 11:16:52 2014 (r274961) +++ head/sys/cam/ctl/ctl_private.h Mon Nov 24 11:37:27 2014 (r274962) @@ -71,34 +71,13 @@ struct ctl_fe_ioctl_params { ctl_fe_ioctl_state state; }; -#define CTL_POOL_ENTRIES_INTERNAL 200 -#define CTL_POOL_ENTRIES_EMERGENCY 300 #define CTL_POOL_ENTRIES_OTHER_SC 200 -typedef enum { - CTL_POOL_INTERNAL, - CTL_POOL_FETD, - CTL_POOL_EMERGENCY, - CTL_POOL_4OTHERSC -} ctl_pool_type; - -typedef enum { - CTL_POOL_FLAG_NONE = 0x00, - CTL_POOL_FLAG_INVALID = 0x01 -} ctl_pool_flags; - struct ctl_io_pool { - ctl_pool_type type; - ctl_pool_flags flags; + char name[64]; uint32_t id; struct ctl_softc *ctl_softc; - uint32_t refcount; - uint64_t total_allocated; - uint64_t total_freed; - int32_t total_ctl_io; - int32_t free_ctl_io; - STAILQ_HEAD(, ctl_io_hdr) free_queue; - STAILQ_ENTRY(ctl_io_pool) links; + struct uma_zone *zone; }; typedef enum { @@ -475,9 +454,7 @@ struct ctl_softc { struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct ctl_ioctl_info ioctl_info; - struct ctl_io_pool *internal_pool; - struct ctl_io_pool *emergency_pool; - struct ctl_io_pool *othersc_pool; + void *othersc_pool; struct proc *ctl_proc; int targ_online; uint32_t ctl_lun_mask[(CTL_MAX_LUNS + 31) / 32]; @@ -492,10 +469,8 @@ struct ctl_softc { struct ctl_port *ctl_ports[CTL_MAX_PORTS]; uint32_t num_backends; STAILQ_HEAD(, ctl_backend_driver) be_list; - struct mtx pool_lock; - uint32_t num_pools; + struct uma_zone *io_zone; uint32_t cur_pool_id; - STAILQ_HEAD(, ctl_io_pool) io_pools; struct ctl_thread threads[CTL_MAX_THREADS]; TAILQ_HEAD(tpc_tokens, 
tpc_token) tpc_tokens; struct callout tpc_timeout; @@ -508,8 +483,8 @@ extern const struct ctl_cmd_entry ctl_cm uint32_t ctl_get_initindex(struct ctl_nexus *nexus); uint32_t ctl_get_resindex(struct ctl_nexus *nexus); uint32_t ctl_port_idx(int port_num); -int ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, - uint32_t total_ctl_io, struct ctl_io_pool **npool); +int ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, + uint32_t total_ctl_io, void **npool); void ctl_pool_free(struct ctl_io_pool *pool); int ctl_scsi_release(struct ctl_scsiio *ctsio); int ctl_scsi_reserve(struct ctl_scsiio *ctsio); Modified: head/sys/cam/ctl/ctl_tpc.c ============================================================================== --- head/sys/cam/ctl/ctl_tpc.c Mon Nov 24 11:16:52 2014 (r274961) +++ head/sys/cam/ctl/ctl_tpc.c Mon Nov 24 11:37:27 2014 (r274962) @@ -812,7 +812,6 @@ tpc_process_b2b(struct tpc_list *list) uint32_t srcblock, dstblock; if (list->stage == 1) { -complete: while ((tior = TAILQ_FIRST(&list->allio)) != NULL) { TAILQ_REMOVE(&list->allio, tior, links); ctl_free_io(tior->io); @@ -886,10 +885,6 @@ complete: tior->list = list; TAILQ_INSERT_TAIL(&list->allio, tior, links); tior->io = tpcl_alloc_io(); - if (tior->io == NULL) { - list->error = 1; - goto complete; - } ctl_scsi_read_write(tior->io, /*data_ptr*/ &list->buf[donebytes], /*data_len*/ roundbytes, @@ -909,10 +904,6 @@ complete: tiow->list = list; TAILQ_INSERT_TAIL(&list->allio, tiow, links); tiow->io = tpcl_alloc_io(); - if (tiow->io == NULL) { - list->error = 1; - goto complete; - } ctl_scsi_read_write(tiow->io, /*data_ptr*/ &list->buf[donebytes], /*data_len*/ roundbytes, @@ -951,7 +942,6 @@ tpc_process_verify(struct tpc_list *list uint64_t sl; if (list->stage == 1) { -complete: while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { TAILQ_REMOVE(&list->allio, tio, links); ctl_free_io(tio->io); @@ -990,10 +980,6 @@ complete: tio->list = list; TAILQ_INSERT_TAIL(&list->allio, tio, links); tio->io = tpcl_alloc_io(); - if (tio->io == NULL) { - list->error = 1; - goto complete; - } ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); tio->io->io_hdr.retries = 3; tio->lun = sl; @@ -1013,7 +999,6 @@ tpc_process_register_key(struct tpc_list int datalen; if (list->stage == 1) { -complete: while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { TAILQ_REMOVE(&list->allio, tio, links); ctl_free_io(tio->io); @@ -1050,10 +1035,6 @@ complete: tio->list = list; TAILQ_INSERT_TAIL(&list->allio, tio, links); tio->io = tpcl_alloc_io(); - if (tio->io == NULL) { - list->error = 1; - goto complete; - } datalen = sizeof(struct scsi_per_res_out_parms); list->buf = malloc(datalen, M_CTL, M_WAITOK); ctl_scsi_persistent_res_out(tio->io, @@ -1112,7 +1093,6 @@ tpc_process_wut(struct tpc_list *list) uint32_t srcblock, dstblock; if (list->stage > 0) { -complete: /* Cleanup after previous rounds. 
*/ while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { TAILQ_REMOVE(&list->allio, tio, links); @@ -1184,10 +1164,6 @@ complete: tior->list = list; TAILQ_INSERT_TAIL(&list->allio, tior, links); tior->io = tpcl_alloc_io(); - if (tior->io == NULL) { - list->error = 1; - goto complete; - } ctl_scsi_read_write(tior->io, /*data_ptr*/ &list->buf[donebytes], /*data_len*/ roundbytes, @@ -1207,10 +1183,6 @@ complete: tiow->list = list; TAILQ_INSERT_TAIL(&list->allio, tiow, links); tiow->io = tpcl_alloc_io(); - if (tiow->io == NULL) { - list->error = 1; - goto complete; - } ctl_scsi_read_write(tiow->io, /*data_ptr*/ &list->buf[donebytes], /*data_len*/ roundbytes, @@ -1289,10 +1261,6 @@ complete: tiow->list = list; TAILQ_INSERT_TAIL(&list->allio, tiow, links); tiow->io = tpcl_alloc_io(); - if (tiow->io == NULL) { - list->error = 1; - goto complete; - } ctl_scsi_write_same(tiow->io, /*data_ptr*/ list->buf, /*data_len*/ dstblock, Modified: head/sys/cam/ctl/scsi_ctl.c ============================================================================== --- head/sys/cam/ctl/scsi_ctl.c Mon Nov 24 11:16:52 2014 (r274961) +++ head/sys/cam/ctl/scsi_ctl.c Mon Nov 24 11:37:27 2014 (r274962) @@ -72,10 +72,6 @@ __FBSDID("$FreeBSD$"); #include <cam/ctl/ctl_util.h> #include <cam/ctl/ctl_error.h> -typedef enum { - CTLFE_CCB_DEFAULT = 0x00 -} ctlfe_ccb_types; - struct ctlfe_softc { struct ctl_port port; path_id_t path_id; @@ -189,9 +185,7 @@ SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_ &ctlfe_dma_enabled, 0, "DMA enabled"); MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); -#define ccb_type ppriv_field0 -/* This is only used in the ATIO */ -#define io_ptr ppriv_ptr1 +#define io_ptr ppriv_ptr0 /* This is only used in the CTIO */ #define ccb_atio ppriv_ptr1 @@ -546,6 +540,7 @@ ctlferegister(struct cam_periph *periph, for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { union ccb *new_ccb; + union ctl_io *new_io; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); @@ -553,6 +548,14 @@ ctlferegister(struct cam_periph *periph, status = CAM_RESRC_UNAVAIL; break; } + new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); + if (new_io == NULL) { + free(new_ccb, M_CTLFE); + status = CAM_RESRC_UNAVAIL; + break; + } + new_ccb->ccb_h.io_ptr = new_io; + xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; new_ccb->ccb_h.cbfcnp = ctlfedone; @@ -561,6 +564,7 @@ ctlferegister(struct cam_periph *periph, softc->atios_sent++; status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { + ctl_free_io(new_io); free(new_ccb, M_CTLFE); break; } @@ -581,6 +585,7 @@ ctlferegister(struct cam_periph *periph, for (i = 0; i < CTLFE_IN_PER_LUN; i++) { union ccb *new_ccb; + union ctl_io *new_io; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); @@ -588,6 +593,13 @@ ctlferegister(struct cam_periph *periph, status = CAM_RESRC_UNAVAIL; break; } + new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); + if (new_io == NULL) { + free(new_ccb, M_CTLFE); + status = CAM_RESRC_UNAVAIL; + break; + } + new_ccb->ccb_h.io_ptr = new_io; xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; @@ -766,8 +778,6 @@ ctlfestart(struct cam_periph *periph, un softc->ccbs_alloced++; - start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT; - ccb_h = TAILQ_FIRST(&softc->work_queue); if (ccb_h == NULL) { softc->ccbs_freed++; @@ -812,7 +822,6 @@ ctlfestart(struct cam_periph *periph, un 
xpt_print(periph->path, "%s: aborted " "command 0x%04x discarded\n", __func__, io->scsiio.tag_num); - ctl_free_io(io); /* * For a wildcard attachment, commands can * come in with a specific target/lun. Reset @@ -1038,6 +1047,7 @@ ctlfe_free_ccb(struct cam_periph *periph break; } + ctl_free_io(ccb->ccb_h.io_ptr); free(ccb, M_CTLFE); KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: " @@ -1139,8 +1149,8 @@ ctlfedone(struct cam_periph *periph, uni KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0, ("CCB in ctlfedone() without CAM_UNLOCKED flag")); #ifdef CTLFE_DEBUG - printf("%s: entered, func_code = %#x, type = %#lx\n", __func__, - done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type); + printf("%s: entered, func_code = %#x\n", __func__, + done_ccb->ccb_h.func_code); *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
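
For readers less familiar with the UMA side of this change, below is a minimal
sketch of the allocation scheme CTL moves to -- not the committed code.  One
master UMA zone backs every union ctl_io; callers that may sleep allocate with
M_WAITOK (and so cannot fail), while callers in non-sleepable context use
M_NOWAIT and must handle NULL.  The example_* names are illustrative only and
the include list is abbreviated.

/*
 * Sketch of a UMA-backed ctl_io allocator (assumed helper names, not CTL's).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>

static uma_zone_t example_io_zone;      /* one master zone per ctl_softc */

static void
example_zone_setup(int expected_ios)
{
        example_io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        /* Warm the zone; UMA does not promise these items stay reserved. */
        uma_prealloc(example_io_zone, expected_ios);
}

/* Sleepable contexts (e.g. the iSCSI frontend): allocation cannot fail. */
static union ctl_io *
example_alloc_io(void *pool_ref)
{
        union ctl_io *io;

        io = uma_zalloc(example_io_zone, M_WAITOK);
        io->io_hdr.pool = pool_ref;     /* remember the owner, as CTL does */
        return (io);
}

/* Non-sleepable contexts (e.g. the HA event handler): may return NULL. */
static union ctl_io *
example_alloc_io_nowait(void *pool_ref)
{
        union ctl_io *io;

        io = uma_zalloc(example_io_zone, M_NOWAIT);
        if (io != NULL)
                io->io_hdr.pool = pool_ref;
        return (io);
}

static void
example_free_io(union ctl_io *io)
{

        uma_zfree(example_io_zone, io);
}

In the committed change itself, the CAM target frontend (scsi_ctl.c) goes one
step further: at registration time it calls ctl_alloc_io_nowait() once for
each ATIO/INOT CCB it posts and stores the result in ccb_h.io_ptr, so no
allocation is needed at all when a command arrives, and the bound I/O is only
released via ctl_free_io() when the CCB itself is freed.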