Date: Tue, 4 Dec 2007 20:53:12 GMT
From: Steve Wise <swise@FreeBSD.org>
To: Perforce Change Reviews <perforce@FreeBSD.org>
Subject: PERFORCE change 130191 for review
Message-ID: <200712042053.lB4KrC1p012183@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=130191

Change 130191 by swise@swise:vic10:iwarp on 2007/12/04 20:53:00

	fixed spin lock usage bugs in rdma core.

Affected files ...

.. //depot/projects/iwarp/sys/contrib/rdma/rdma_addr.c#4 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_cma.c#8 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_device.c#5 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_iwcm.c#5 edit

Differences ...

==== //depot/projects/iwarp/sys/contrib/rdma/rdma_addr.c#4 (text+ko) ====

@@ -94,7 +94,7 @@
 
 void rdma_addr_register_client(struct rdma_addr_client *client)
 {
-	mtx_init(&client->lock, "rdma_addr client lock", NULL, MTX_DEF);
+	mtx_init(&client->lock, "rdma_addr client lock", NULL, MTX_DUPOK|MTX_DEF);
 	cv_init(&client->comp, "rdma_addr cv");
 	client->refcount = 1;
 }

==== //depot/projects/iwarp/sys/contrib/rdma/rdma_cma.c#8 (text+ko) ====

@@ -216,9 +216,9 @@
 {
 	int ret;
 
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	ret = (id_priv->state == comp);
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 	return ret;
 }
 
@@ -227,10 +227,10 @@
 {
 	int ret;
 
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	if ((ret = (id_priv->state == comp)))
 		id_priv->state = exch;
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 	return ret;
 }
 
@@ -239,10 +239,10 @@
 {
 	enum cma_state old;
 
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	old = id_priv->state;
 	id_priv->state = exch;
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 	return old;
 }
 
@@ -279,9 +279,9 @@
 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 	struct cma_device *cma_dev)
 {
-	mtx_lock(&cma_dev->lock);
+	mtx_lock_spin(&cma_dev->lock);
 	cma_dev->refcount++;
-	mtx_unlock(&cma_dev->lock);
+	mtx_unlock_spin(&cma_dev->lock);
 	id_priv->cma_dev = cma_dev;
 	id_priv->id.device = cma_dev->device;
 	LIST_INSERT_HEAD(&cma_dev->id_list, id_priv, list);
@@ -289,10 +289,10 @@
 
 static inline void cma_deref_dev(struct cma_device *cma_dev)
 {
-	mtx_lock(&cma_dev->lock);
+	mtx_lock_spin(&cma_dev->lock);
 	if (--cma_dev->refcount == 0)
 		cv_broadcast(&cma_dev->comp);
-	mtx_unlock(&cma_dev->lock);
+	mtx_unlock_spin(&cma_dev->lock);
 }
 
 static void cma_detach_from_dev(struct rdma_id_private *id_priv)
@@ -366,11 +366,11 @@
 
 static void cma_deref_id(struct rdma_id_private *id_priv)
 {
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	if (--id_priv->refcount == 0) {
 		cv_broadcast(&id_priv->comp);
 	}
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 }
 
 static int cma_disable_remove(struct rdma_id_private *id_priv,
@@ -378,22 +378,22 @@
 {
 	int ret;
 
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	if (id_priv->state == state) {
 		id_priv->dev_remove++;
 		ret = 0;
 	} else
 		ret = EINVAL;
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 	return ret;
 }
 
 static void cma_enable_remove(struct rdma_id_private *id_priv)
 {
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	if (--id_priv->dev_remove == 0)
 		cv_broadcast(&id_priv->wait_remove);
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 }
 
 static int cma_has_cm_dev(struct rdma_id_private *id_priv)
@@ -415,7 +415,7 @@
 	id_priv->id.context = context;
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
-	mtx_init(&id_priv->lock, "rdma_cm_id_priv", NULL, MTX_DUPOK|MTX_DEF);
+	mtx_init(&id_priv->lock, "rdma_cm_id_priv", NULL, MTX_DUPOK|MTX_SPIN);
 	cv_init(&id_priv->comp, "rdma_cm_id_priv");
 	id_priv->refcount = 1;
 	cv_init(&id_priv->wait_remove, "id priv wait remove");
@@ -785,10 +785,10 @@
 
 	LIST_REMOVE(id_priv, listen_entry);
 	cma_deref_id(id_priv);
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	if (id_priv->refcount)
 		cv_wait(&id_priv->comp, &id_priv->lock);
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 
 	free(id_priv, M_DEVBUF);
 }
@@ -892,11 +892,11 @@
 	mtx_unlock(&lock);
 	cma_release_port(id_priv);
 	cma_deref_id(id_priv);
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	BUG_ON(id_priv->refcount < 0);
 	if (id_priv->refcount)
 		cv_wait(&id_priv->comp, &id_priv->lock);
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 	free(id_priv->id.route.path_rec, M_DEVBUF);
 	free(id_priv, M_DEVBUF);
 }
@@ -1150,9 +1150,9 @@
 		goto out;
 	}
 
-	mtx_lock(&conn_id->lock);
+	mtx_lock_spin(&conn_id->lock);
 	conn_id->dev_remove++;
-	mtx_unlock(&conn_id->lock);
+	mtx_unlock_spin(&conn_id->lock);
 	mtx_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mtx_unlock(&lock);
@@ -1321,9 +1321,9 @@
 		goto out;
 	}
 	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
-	mtx_lock(&conn_id->lock);
+	mtx_lock_spin(&conn_id->lock);
 	++conn_id->dev_remove;
-	mtx_unlock(&conn_id->lock);
+	mtx_unlock_spin(&conn_id->lock);
 	conn_id->state = CMA_CONNECT;
 
 	ifa = ifa_ifwithaddr((struct sockaddr *)&iw_event->local_addr);
@@ -1591,9 +1591,9 @@
 	struct rdma_id_private *id_priv = work->id;
 	int destroy = 0;
 
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	++id_priv->dev_remove;
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 
 	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
 		goto out;
@@ -1698,9 +1698,9 @@
 	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
 		return (EINVAL);
 
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	id_priv->refcount++;
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 #ifdef IB_SUPPORTED
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
@@ -1775,9 +1775,9 @@
 	struct rdma_cm_event event;
 
 	memset(&event, 0, sizeof event);
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	++id_priv->dev_remove;
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 
 	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
@@ -1881,9 +1881,9 @@
 	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
 		return (EINVAL);
 
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	id_priv->refcount++;
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
 	if (cma_any_addr(dst_addr))
 		ret = cma_resolve_loopback(id_priv);
@@ -2740,9 +2740,9 @@
 	mc->context = context;
 	mc->id_priv = id_priv;
 
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	LIST_INSERT_HEAD(&id_priv->mc_list, mc, list);
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
@@ -2754,9 +2754,9 @@
 	}
 
 	if (ret) {
-		mtx_lock(&id_priv->lock);
+		mtx_lock_spin(&id_priv->lock);
 		list_del(&mc->list);
-		mtx_unlock(&id_priv->lock);
+		mtx_unlock_spin(&id_priv->lock);
 		free(mc, M_DEVBUF);
 	}
 	return ret;
@@ -2769,11 +2769,11 @@
 	struct cma_multicast *mc;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	LIST_FOREACH(mc, &id_priv->mc_list, list) {
 		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
 			list_del(&mc->list);
-			mtx_unlock(&id_priv->lock);
+			mtx_unlock_spin(&id_priv->lock);
 
 			if (id->qp)
 				ib_detach_mcast(id->qp,
@@ -2784,7 +2784,7 @@
 			return;
 		}
 	}
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 }
 EXPORT_SYMBOL(rdma_leave_multicast);
 #endif
@@ -2794,13 +2794,14 @@
 	struct cma_device *cma_dev;
 	struct rdma_id_private *id_priv;
 
-	cma_dev = malloc(sizeof *cma_dev, M_DEVBUF, M_WAITOK);
+	cma_dev = malloc(sizeof *cma_dev, M_DEVBUF, M_WAITOK|M_ZERO);
 	if (!cma_dev)
 		return;
 
 	cma_dev->device = device;
 	cv_init(&cma_dev->comp, "cma_device");
+	mtx_init(&cma_dev->lock, "cma_device", NULL, MTX_DUPOK|MTX_SPIN);
 	cma_dev->refcount = 1;
 	LIST_INIT(&cma_dev->id_list);
 
 	ib_set_client_data(device, &cma_client, cma_dev);
@@ -2823,11 +2824,11 @@
 		return 0;
 
 	cma_cancel_operation(id_priv, state);
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	BUG_ON(id_priv->dev_remove < 0);
 	if (id_priv->dev_remove)
 		cv_wait(&id_priv->wait_remove, &id_priv->lock);
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 
 	/* Check for destruction from another callback. */
 	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
@@ -2853,9 +2854,9 @@
 	}
 
 	LIST_REMOVE(id_priv, list);
-	mtx_lock(&id_priv->lock);
+	mtx_lock_spin(&id_priv->lock);
 	id_priv->refcount++;
-	mtx_unlock(&id_priv->lock);
+	mtx_unlock_spin(&id_priv->lock);
 	mtx_unlock(&lock);
 
 	ret = cma_remove_id_dev(id_priv);
@@ -2868,11 +2869,11 @@
 	mtx_unlock(&lock);
 
 	cma_deref_dev(cma_dev);
-	mtx_lock(&cma_dev->lock);
+	mtx_lock_spin(&cma_dev->lock);
 	BUG_ON(cma_dev->refcount < 0);
 	if (cma_dev->refcount)
 		cv_wait(&cma_dev->comp, &cma_dev->lock);
-	mtx_unlock(&cma_dev->lock);
+	mtx_unlock_spin(&cma_dev->lock);
 }
 
 static void cma_remove_one(struct ib_device *device)
@@ -2897,7 +2898,7 @@
 	LIST_INIT(&listen_any_list);
 	TAILQ_INIT(&dev_list);
 
-	mtx_init(&lock, "cma_device", NULL, MTX_DEF);
+	mtx_init(&lock, "cma_device list", NULL, MTX_DEF);
 
 	arc4rand(&next_port, sizeof next_port, 0);
 	next_port = ((unsigned int) next_port %

==== //depot/projects/iwarp/sys/contrib/rdma/rdma_device.c#5 (text+ko) ====

@@ -731,7 +731,7 @@
 		log(LOG_WARNING, "Couldn't create InfiniBand device class\n");
 #endif
 
-	mtx_init(&device_mutex, "rdma_device mutex", NULL, MTX_DUPOK|MTX_DEF);
+	mtx_init(&device_mutex, "rdma_device mutex", NULL, MTX_DEF);
 	TAILQ_INIT(&client_list);
 	TAILQ_INIT(&device_list);
 	ret = ib_cache_setup();

==== //depot/projects/iwarp/sys/contrib/rdma/rdma_iwcm.c#5 (text+ko) ====

@@ -194,15 +194,15 @@
 */
 static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
 	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
 		BUG_ON(!TAILQ_EMPTY(&cm_id_priv->work_list));
 		cv_broadcast(&cm_id_priv->destroy_comp);
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 		return 1;
 	}
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 	return 0;
 }
 
@@ -211,9 +211,9 @@
 {
 	struct iwcm_id_private *cm_id_priv;
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	atomic_inc(&cm_id_priv->refcount);
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 }
 
 static void rem_ref(struct iw_cm_id *cm_id)
@@ -303,7 +303,7 @@
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
 	/* Wait if we're currently in a connect or accept downcall */
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	if (test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags))
 		cv_wait(&cm_id_priv->connect_wait, &cm_id_priv->lock);
 
@@ -336,7 +336,7 @@
 	default:
 		BUG();
 	}
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	if (qp) {
 		if (abrupt)
@@ -371,24 +371,24 @@
 	 * Wait if we're currently in a connect or accept downcall. A
 	 * listening endpoint should never block here.
 	 */
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	if (test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags))
 		cv_wait(&cm_id_priv->connect_wait, &cm_id_priv->lock);
 
 	switch (cm_id_priv->state) {
 	case IW_CM_STATE_LISTEN:
 		cm_id_priv->state = IW_CM_STATE_DESTROYING;
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 		/* destroy the listening endpoint */
 		ret = cm_id->device->iwcm->destroy_listen(cm_id);
-		mtx_lock(&cm_id_priv->lock);
+		mtx_lock_spin(&cm_id_priv->lock);
 		break;
 	case IW_CM_STATE_ESTABLISHED:
 		cm_id_priv->state = IW_CM_STATE_DESTROYING;
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 		/* Abrupt close of the connection */
 		(void)iwcm_modify_qp_err(cm_id_priv->qp);
-		mtx_lock(&cm_id_priv->lock);
+		mtx_lock_spin(&cm_id_priv->lock);
 		break;
 	case IW_CM_STATE_IDLE:
 	case IW_CM_STATE_CLOSING:
@@ -413,7 +413,7 @@
 		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
 		cm_id_priv->qp = NULL;
 	}
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	(void)iwcm_deref_id(cm_id_priv);
 }
@@ -433,10 +433,10 @@
 
 	destroy_cm_id(cm_id);
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	if (atomic_read(&cm_id_priv->refcount))
 		cv_wait(&cm_id_priv->destroy_comp, &cm_id_priv->lock);
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	free_cm_id(cm_id_priv);
 }
@@ -459,20 +459,20 @@
 	if (ret)
 		return ret;
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	switch (cm_id_priv->state) {
 	case IW_CM_STATE_IDLE:
 		cm_id_priv->state = IW_CM_STATE_LISTEN;
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 		ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
 		if (ret)
 			cm_id_priv->state = IW_CM_STATE_IDLE;
-		mtx_lock(&cm_id_priv->lock);
+		mtx_lock_spin(&cm_id_priv->lock);
 		break;
 	default:
 		ret = EINVAL;
 	}
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	return ret;
 }
@@ -493,23 +493,23 @@
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
 		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 		cv_broadcast(&cm_id_priv->connect_wait);
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 		return (EINVAL);
 	}
 	cm_id_priv->state = IW_CM_STATE_IDLE;
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 	cv_broadcast(&cm_id_priv->connect_wait);
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	return ret;
 }
@@ -532,37 +532,37 @@
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
 		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 		cv_broadcast(&cm_id_priv->connect_wait);
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 		return (EINVAL);
 	}
 
 	/* Get the ib_qp given the QPN */
 	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
 	if (!qp) {
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 		return (EINVAL);
 	}
 	cm_id->device->iwcm->add_ref(qp);
 	cm_id_priv->qp = qp;
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
 	if (ret) {
 		/* An error on accept precludes provider events */
 		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
 		cm_id_priv->state = IW_CM_STATE_IDLE;
-		mtx_lock(&cm_id_priv->lock);
+		mtx_lock_spin(&cm_id_priv->lock);
 		if (cm_id_priv->qp) {
 			cm_id->device->iwcm->rem_ref(qp);
 			cm_id_priv->qp = NULL;
 		}
 		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 		cv_broadcast(&cm_id_priv->connect_wait);
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 	}

 	return ret;
@@ -589,12 +589,12 @@
 		return ret;
 
 	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 
 	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
 		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 		cv_broadcast(&cm_id_priv->connect_wait);
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 		return (EINVAL);
 	}
 
@@ -602,17 +602,17 @@
 	/* Get the ib_qp given the QPN */
 	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
 	if (!qp) {
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 		return (EINVAL);
 	}
 	cm_id->device->iwcm->add_ref(qp);
 	cm_id_priv->qp = qp;
 	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	ret = cm_id->device->iwcm->connect(cm_id, iw_param);
 	if (ret) {
-		mtx_lock(&cm_id_priv->lock);
+		mtx_lock_spin(&cm_id_priv->lock);
 		if (cm_id_priv->qp) {
 			cm_id->device->iwcm->rem_ref(qp);
 			cm_id_priv->qp = NULL;
@@ -621,7 +621,7 @@
 		cm_id_priv->state = IW_CM_STATE_IDLE;
 		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 		cv_broadcast(&cm_id_priv->connect_wait);
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 	}
 
@@ -661,12 +661,12 @@
 	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
 	 */
-	mtx_lock(&listen_id_priv->lock);
+	mtx_lock_spin(&listen_id_priv->lock);
 	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
-		mtx_unlock(&listen_id_priv->lock);
+		mtx_unlock_spin(&listen_id_priv->lock);
 		goto out;
 	}
-	mtx_unlock(&listen_id_priv->lock);
+	mtx_unlock_spin(&listen_id_priv->lock);
 
 	cm_id = iw_create_cm_id(listen_id_priv->id.device,
 				listen_id_priv->id.cm_handler,
@@ -721,7 +721,7 @@
 {
 	int ret;
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 
 	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
@@ -733,7 +733,7 @@
 	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
 	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 	cv_broadcast(&cm_id_priv->connect_wait);
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	return ret;
 }
@@ -750,7 +750,7 @@
 {
 	int ret;
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
@@ -767,16 +767,16 @@
 		cm_id_priv->qp = NULL;
 		cm_id_priv->state = IW_CM_STATE_IDLE;
 	}
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 
 	if (iw_event->private_data_len)
 		free(iw_event->private_data, M_DEVBUF);
 
 	/* Wake up waiters on connect complete */
 	cv_broadcast(&cm_id_priv->connect_wait);
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	return ret;
 }
@@ -790,10 +790,10 @@
 				struct iw_cm_event *iw_event)
 {
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
 		cm_id_priv->state = IW_CM_STATE_CLOSING;
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 }
 
 /*
@@ -811,7 +811,7 @@
 				struct iw_cm_event *iw_event)
 {
 	int ret = 0;

-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	if (cm_id_priv->qp) {
 		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
@@ -821,16 +821,16 @@
 	case IW_CM_STATE_ESTABLISHED:
 	case IW_CM_STATE_CLOSING:
 		cm_id_priv->state = IW_CM_STATE_IDLE;
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
-		mtx_lock(&cm_id_priv->lock);
+		mtx_lock_spin(&cm_id_priv->lock);
 		break;
 	case IW_CM_STATE_DESTROYING:
 		break;
 	default:
 		BUG();
 	}
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	return ret;
 }
@@ -880,7 +880,7 @@
 	int empty;
 	int ret = 0;
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	empty = TAILQ_EMPTY(&cm_id_priv->work_list);
 	while (!empty) {
 		work = TAILQ_FIRST(&cm_id_priv->work_list);
@@ -888,7 +888,7 @@
 		empty = TAILQ_EMPTY(&cm_id_priv->work_list);
 		levent = work->event;
 		put_work(work);
-		mtx_unlock(&cm_id_priv->lock);
+		mtx_unlock_spin(&cm_id_priv->lock);
 
 		ret = process_event(cm_id_priv, &levent);
 		if (ret) {
@@ -904,9 +904,9 @@
 			}
 			return;
 		}
-		mtx_lock(&cm_id_priv->lock);
+		mtx_lock_spin(&cm_id_priv->lock);
 	}
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 }
 
 /*
@@ -933,7 +933,7 @@
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	work = get_work(cm_id_priv);
 	if (!work) {
 		ret = ENOMEM;
@@ -954,16 +954,16 @@
 		}
 	}
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	atomic_inc(&cm_id_priv->refcount);
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	if (TAILQ_EMPTY(&cm_id_priv->work_list)) {
 		TAILQ_INSERT_TAIL(&cm_id_priv->work_list, work, list);
 		taskqueue_enqueue(iwcm_wq, &work->task);
 	} else
 		TAILQ_INSERT_TAIL(&cm_id_priv->work_list, work, list);
 out:
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 	return ret;
 }
@@ -973,7 +973,7 @@
 {
 	int ret;
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	switch (cm_id_priv->state) {
 	case IW_CM_STATE_IDLE:
 	case IW_CM_STATE_CONN_SENT:
@@ -989,7 +989,7 @@
 		ret = EINVAL;
 		break;
 	}
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	return ret;
 }
@@ -999,7 +999,7 @@
 {
 	int ret;
 
-	mtx_lock(&cm_id_priv->lock);
+	mtx_lock_spin(&cm_id_priv->lock);
 	switch (cm_id_priv->state) {
 	case IW_CM_STATE_IDLE:
 	case IW_CM_STATE_CONN_SENT:
@@ -1012,7 +1012,7 @@
 		ret = EINVAL;
 		break;
 	}
-	mtx_unlock(&cm_id_priv->lock);
+	mtx_unlock_spin(&cm_id_priv->lock);
 
 	return ret;
 }
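For anyone reading the diff without mutex(9) at hand: the change converts these per-id locks from default (sleep) mutexes to spin mutexes, and FreeBSD requires that a mutex initialized with MTX_SPIN be taken and dropped with the _spin lock/unlock variants, which is the bulk of the churn above. The fragment below is only an illustrative sketch of that rule, not code from this change; the structure and function names (example_softc, example_init, example_ref) are made up for the example.

/*
 * Illustrative only -- not part of change 130191.  Shows the mutex(9)
 * pattern the diff follows: MTX_SPIN mutexes are paired with
 * mtx_lock_spin()/mtx_unlock_spin(), while MTX_DEF mutexes keep using
 * mtx_lock()/mtx_unlock().
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct example_softc {
	struct mtx	lock;		/* protects refcount */
	int		refcount;
};

static void
example_init(struct example_softc *sc)
{
	/* MTX_DUPOK allows holding two locks of this type without a witness(4) complaint. */
	mtx_init(&sc->lock, "example lock", NULL, MTX_DUPOK | MTX_SPIN);
	sc->refcount = 1;
}

static void
example_ref(struct example_softc *sc)
{
	mtx_lock_spin(&sc->lock);	/* _spin variant matches MTX_SPIN init */
	sc->refcount++;
	mtx_unlock_spin(&sc->lock);
}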
