Date:      Thu, 4 Jun 2009 14:49:27 +0000 (UTC)
From:      Rick Macklem <rmacklem@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r193437 - head/sys/rpc
Message-ID: <200906041449.n54EnRNj094949@svn.freebsd.org>
Author: rmacklem
Date: Thu Jun  4 14:49:27 2009
New Revision: 193437

URL: http://svn.freebsd.org/changeset/base/193437

Log:
  Fix upcall races in the client side krpc. For the client side upcall,
  holding SOCKBUF_LOCK() isn't sufficient to guarantee that there is no
  upcall in progress, since SOCKBUF_LOCK() is released/re-acquired in
  the upcall. An upcall reference counter was added to the upcall
  structure that is incremented at the beginning of the upcall and
  decremented at the end of the upcall. As such, a reference count == 0
  when holding the SOCKBUF_LOCK() guarantees there is no upcall in
  progress. Add a function that is called just after soupcall_clear(),
  which waits until the reference count == 0. Also, move the
  mtx_destroy() down to after soupcall_clear(), so that the mutex is
  not destroyed before upcalls are done.

  Reviewed by:	dfr, jhb
  Tested by:	pho
  Approved by:	kib (mentor)

Modified:
  head/sys/rpc/clnt_dg.c
  head/sys/rpc/clnt_vc.c

Modified: head/sys/rpc/clnt_dg.c
==============================================================================
--- head/sys/rpc/clnt_dg.c	Thu Jun  4 14:13:06 2009	(r193436)
+++ head/sys/rpc/clnt_dg.c	Thu Jun  4 14:49:27 2009	(r193437)
@@ -123,8 +123,11 @@ struct cu_socket {
 	struct mtx		cs_lock;
 	int			cs_refs;	/* Count of clients */
 	struct cu_request_list	cs_pending;	/* Requests awaiting replies */
+	int			cs_upcallrefs;	/* Refcnt of upcalls in prog.*/
 };
 
+static void clnt_dg_upcallsdone(struct socket *, struct cu_socket *);
+
 /*
  * Private data kept per client handle
  */
@@ -291,6 +294,7 @@ recheck_socket:
 	}
 	mtx_init(&cs->cs_lock, "cs->cs_lock", NULL, MTX_DEF);
 	cs->cs_refs = 1;
+	cs->cs_upcallrefs = 0;
 	TAILQ_INIT(&cs->cs_pending);
 	soupcall_set(so, SO_RCV, clnt_dg_soupcall, cs);
 }
@@ -988,10 +992,12 @@ clnt_dg_destroy(CLIENT *cl)
 	cs->cs_refs--;
 	if (cs->cs_refs == 0) {
-		mtx_destroy(&cs->cs_lock);
+		mtx_unlock(&cs->cs_lock);
 		SOCKBUF_LOCK(&cu->cu_socket->so_rcv);
 		soupcall_clear(cu->cu_socket, SO_RCV);
+		clnt_dg_upcallsdone(cu->cu_socket, cs);
 		SOCKBUF_UNLOCK(&cu->cu_socket->so_rcv);
+		mtx_destroy(&cs->cs_lock);
 		mem_free(cs, sizeof(*cs));
 		lastsocketref = TRUE;
 	} else {
@@ -1036,6 +1042,7 @@ clnt_dg_soupcall(struct socket *so, void
 	int error, rcvflag, foundreq;
 	uint32_t xid;
 
+	cs->cs_upcallrefs++;
 	uio.uio_resid = 1000000000;
 	uio.uio_td = curthread;
 	do {
@@ -1111,6 +1118,24 @@ clnt_dg_soupcall(struct socket *so, void
 		if (!foundreq)
 			m_freem(m);
 	} while (m);
+	cs->cs_upcallrefs--;
+	if (cs->cs_upcallrefs < 0)
+		panic("rpcdg upcall refcnt");
+	if (cs->cs_upcallrefs == 0)
+		wakeup(&cs->cs_upcallrefs);
 	return (SU_OK);
 }
+
+/*
+ * Wait for all upcalls in progress to complete.
+ */
+static void
+clnt_dg_upcallsdone(struct socket *so, struct cu_socket *cs)
+{
+
+	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+
+	while (cs->cs_upcallrefs > 0)
+		(void) msleep(&cs->cs_upcallrefs, SOCKBUF_MTX(&so->so_rcv), 0,
+		    "rpcdgup", 0);
+}

Modified: head/sys/rpc/clnt_vc.c
==============================================================================
--- head/sys/rpc/clnt_vc.c	Thu Jun  4 14:13:06 2009	(r193436)
+++ head/sys/rpc/clnt_vc.c	Thu Jun  4 14:49:27 2009	(r193437)
@@ -137,8 +137,11 @@ struct ct_data {
 	size_t		ct_record_resid; /* how much left of reply to read */
 	bool_t		ct_record_eor;	 /* true if reading last fragment */
 	struct ct_request_list ct_pending;
+	int		ct_upcallrefs;	 /* Ref cnt of upcalls in prog. */
 };
 
+static void clnt_vc_upcallsdone(struct ct_data *);
+
 static const char clnt_vc_errstr[] = "%s : %s";
 static const char clnt_vc_str[] = "clnt_vc_create";
 static const char clnt_read_vc_str[] = "read_vc";
@@ -184,6 +187,7 @@ clnt_vc_create(
 	ct->ct_threads = 0;
 	ct->ct_closing = FALSE;
 	ct->ct_closed = FALSE;
+	ct->ct_upcallrefs = 0;
 
 	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
 		error = soconnect(so, raddr, curthread);
@@ -753,6 +757,7 @@ clnt_vc_close(CLIENT *cl)
 
 	SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
 	soupcall_clear(ct->ct_socket, SO_RCV);
+	clnt_vc_upcallsdone(ct);
 	SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);
 
 	/*
@@ -825,6 +830,7 @@ clnt_vc_soupcall(struct socket *so, void
 	uint32_t xid, header;
 	bool_t do_read;
 
+	ct->ct_upcallrefs++;
 	uio.uio_td = curthread;
 	do {
 		/*
@@ -845,7 +851,7 @@ clnt_vc_soupcall(struct socket *so, void
 				do_read = TRUE;
 
 			if (!do_read)
-				return (SU_OK);
+				break;
 
 			SOCKBUF_UNLOCK(&so->so_rcv);
 			uio.uio_resid = sizeof(uint32_t);
@@ -898,7 +904,7 @@ clnt_vc_soupcall(struct socket *so, void
 				do_read = TRUE;
 
 			if (!do_read)
-				return (SU_OK);
+				break;
 
 			/*
 			 * We have the record mark. Read as much as
@@ -979,5 +985,24 @@ clnt_vc_soupcall(struct socket *so, void
 			}
 		}
 	} while (m);
+	ct->ct_upcallrefs--;
+	if (ct->ct_upcallrefs < 0)
+		panic("rpcvc upcall refcnt");
+	if (ct->ct_upcallrefs == 0)
+		wakeup(&ct->ct_upcallrefs);
 	return (SU_OK);
 }
+
+/*
+ * Wait for all upcalls in progress to complete.
+ */
+static void
+clnt_vc_upcallsdone(struct ct_data *ct)
+{
+
+	SOCKBUF_LOCK_ASSERT(&ct->ct_socket->so_rcv);
+
+	while (ct->ct_upcallrefs > 0)
+		(void) msleep(&ct->ct_upcallrefs,
+		    SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0);
+}
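The drain idiom in this commit is not specific to the krpc. Below is a
minimal userland sketch of the same pattern, using POSIX threads in
place of SOCKBUF_LOCK()/msleep()/wakeup(); every name in it
(upcall_state, upcalls_done, and so on) is hypothetical and does not
appear in the kernel sources. It mirrors the two key points of the log
message: the upcall must drop and re-take the lock in the middle, so
holding the lock alone proves nothing about upcalls in progress, and
teardown must first disarm new upcalls and then wait, with the lock
held, for the reference count to reach zero.

/*
 * Hypothetical userland analogue of the upcall drain pattern in this
 * commit.  pthread_mutex stands in for SOCKBUF_LOCK(), pthread_cond_wait
 * for msleep(), pthread_cond_broadcast for wakeup().  None of these
 * names exist in sys/rpc.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct upcall_state {
	pthread_mutex_t lock;       /* SOCKBUF_LOCK() analogue */
	pthread_cond_t  drained;    /* msleep()/wakeup() channel */
	int             upcallrefs; /* upcalls currently in progress */
	int             armed;      /* soupcall_set()/_clear() analogue */
};

/*
 * The upcall.  Like clnt_dg_soupcall(), it must drop the lock to do
 * its real work, so a caller holding the lock cannot assume no upcall
 * is running; only upcallrefs == 0 proves that.
 */
static void
upcall(struct upcall_state *st)
{
	pthread_mutex_lock(&st->lock);
	if (!st->armed) {           /* teardown already disarmed us */
		pthread_mutex_unlock(&st->lock);
		return;
	}
	st->upcallrefs++;           /* mark an upcall in progress */

	pthread_mutex_unlock(&st->lock);
	usleep(1000);               /* stand-in for soreceive() work */
	pthread_mutex_lock(&st->lock);

	if (--st->upcallrefs == 0)  /* last one out wakes the drainer */
		pthread_cond_broadcast(&st->drained);
	pthread_mutex_unlock(&st->lock);
}

/* clnt_dg_upcallsdone() analogue: called with the lock held. */
static void
upcalls_done(struct upcall_state *st)
{
	while (st->upcallrefs > 0)
		pthread_cond_wait(&st->drained, &st->lock);
}

static void *
upcall_thread(void *arg)
{
	upcall(arg);
	return (NULL);
}

int
main(void)
{
	struct upcall_state st = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.drained = PTHREAD_COND_INITIALIZER,
		.armed = 1,
	};
	pthread_t t;

	pthread_create(&t, NULL, upcall_thread, &st);

	/* Teardown, as in clnt_dg_destroy(): disarm, then drain. */
	pthread_mutex_lock(&st.lock);
	st.armed = 0;               /* no new upcalls from here on */
	upcalls_done(&st);          /* wait out any in-flight upcall */
	pthread_mutex_unlock(&st.lock);

	pthread_join(t, NULL);
	printf("upcalls drained; teardown is safe\n");
	return (0);
}

The same reasoning is why the commit moves mtx_destroy() to after
soupcall_clear() and the drain: an in-flight upcall may still touch
cs_lock, so the mutex can only be destroyed once the drain completes.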