Skip site navigation (1)Skip section navigation (2)
Date:      Sat, 02 May 2026 19:37:43 +0000
From:      Rick Macklem <rmacklem@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: a6e527f893df - main - nfscl: Fix handling of gssd upcalls for the NFS client
Message-ID:  <69f65287.3dfac.6c1e7ff2@gitrepo.freebsd.org>

index | next in thread | raw e-mail

The branch main has been updated by rmacklem:

URL: https://cgit.FreeBSD.org/src/commit/?id=a6e527f893df2cbbd941839a93e50ae39ac0db55

commit a6e527f893df2cbbd941839a93e50ae39ac0db55
Author:     Rick Macklem <rmacklem@FreeBSD.org>
AuthorDate: 2026-05-02 19:36:00 +0000
Commit:     Rick Macklem <rmacklem@FreeBSD.org>
CommitDate: 2026-05-02 19:36:00 +0000

    nfscl: Fix handling of gssd upcalls for the NFS client
    
    Without this patch, all upcalls to the gssd daemon are
    done in vnet0 (outside of any vnet jail).  This does
    not work well, because a user principal's credential
    cache can be within the jail (/tmp/krb5cc_NNN in the
    jail's namespace).
    
    This patch modifies the client so that RPCs done
    from within vnet jails do an upcall to a gssd
    daemon running within the vnet jail.  It required
    that the cache of uid->credential shorthands in
    the rpcsec_gss be vnet'd.
    
    The situation is still less than ideal and sec=krb5[ip]
    mounts that are visible within vnet jails are still
    not something I would recommend, but it can work ok
    with this patch.
    
    Vnet'ing the NFS client so that mounts can be done
    within vnet jails is probably more useful, but that
    will require additional work.
    
    Discussed with: glebius
    MFC after:      1 month
---
 sys/fs/nfs/nfs_commonkrpc.c      |  23 +++++++-
 sys/fs/nfs/nfs_commonsubs.c      |   3 +-
 sys/fs/nfsserver/nfs_nfsdserv.c  |   8 ++-
 sys/fs/nfsserver/nfs_nfsdstate.c |   1 +
 sys/rpc/clnt_bck.c               |   7 ++-
 sys/rpc/clnt_dg.c                |   7 ++-
 sys/rpc/clnt_vc.c                |   7 ++-
 sys/rpc/rpcsec_gss/rpcsec_gss.c  | 111 +++++++++++++++++++++------------------
 8 files changed, 107 insertions(+), 60 deletions(-)

diff --git a/sys/fs/nfs/nfs_commonkrpc.c b/sys/fs/nfs/nfs_commonkrpc.c
index 1e4e8506790f..9ea4e5f4c9df 100644
--- a/sys/fs/nfs/nfs_commonkrpc.c
+++ b/sys/fs/nfs/nfs_commonkrpc.c
@@ -561,7 +561,9 @@ newnfs_disconnect(struct nfsmount *nmp, struct nfssockreq *nrp)
 			}
 		}
 		mtx_unlock(&nrp->nr_mtx);
+		CURVNET_SET_QUIET(CRED_TO_VNET(nrp->nr_cred));
 		rpc_gss_secpurge_call(client);
+		CURVNET_RESTORE();
 		CLNT_CLOSE(client);
 		CLNT_RELEASE(client);
 		if (nmp != NULL && nmp->nm_aconnect > 0) {
@@ -685,7 +687,7 @@ newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
 	struct nfsreq *rep = NULL;
 	char *srv_principal = NULL, *clnt_principal = NULL;
 	sigset_t oldset;
-	struct ucred *authcred;
+	struct ucred *authcred, *savcred;
 	struct nfsclsession *sep;
 	uint8_t sessionid[NFSX_V4SESSIONID];
 	bool nextconn_set;
@@ -832,6 +834,11 @@ newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
 		    ((nmp->nm_tprintf_delay)-(nmp->nm_tprintf_initial_delay));
 	}
 
+	/*
+	 * For Kerberos, the upcall needs to be done to the gssd daemon
+	 * running in the correct vnet.
+	 */
+	CURVNET_SET_QUIET(CRED_TO_VNET(authcred));
 	if (nd->nd_procnum == NFSPROC_NULL)
 		auth = authnone_create();
 	else if (usegssname) {
@@ -849,8 +856,9 @@ newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
 	} else
 		auth = nfs_getauth(nrp, secflavour, NULL,
 		    srv_principal, NULL, authcred);
-	crfree(authcred);
+	CURVNET_RESTORE();
 	if (auth == NULL) {
+		crfree(authcred);
 		m_freem(nd->nd_mreq);
 		if (set_sigset)
 			newnfs_restore_sigmask(td, &oldset);
@@ -967,6 +975,13 @@ tryagain:
 		}
 	}
 
+	/*
+	 * In case CLNT_CALL_MBUF()/clnt_bck_call() does an AUTH_REFRESH(),
+	 * the thread's credentials need to be set to authcred, so that the
+	 * correct vnet will be set.
+	 */
+	savcred = curthread->td_ucred;
+	curthread->td_ucred = authcred;
 	nd->nd_mrep = NULL;
 	if (clp != NULL && sep != NULL)
 		stat = clnt_bck_call(nrp->nr_client, &ext, procnum,
@@ -988,6 +1003,7 @@ tryagain:
 		stat = CLNT_CALL_MBUF(nrp->nr_client, &ext, procnum,
 		    nd->nd_mreq, &nd->nd_mrep, timo);
 	NFSCL_DEBUG(2, "clnt call=%d\n", stat);
+	curthread->td_ucred = savcred;
 
 	if (rep != NULL) {
 		/*
@@ -1069,6 +1085,7 @@ tryagain:
 		error = EACCES;
 	}
 	if (error) {
+		crfree(authcred);
 		m_freem(nd->nd_mreq);
 		if (usegssname == 0)
 			AUTH_DESTROY(auth);
@@ -1429,6 +1446,7 @@ tryagain:
 		}
 	}
 out:
+	crfree(authcred);
 
 #ifdef KDTRACE_HOOKS
 	if (nmp != NULL && dtrace_nfscl_nfs234_done_probe != NULL) {
@@ -1460,6 +1478,7 @@ out:
 		newnfs_restore_sigmask(td, &oldset);
 	return (0);
 nfsmout:
+	crfree(authcred);
 	m_freem(nd->nd_mrep);
 	m_freem(nd->nd_mreq);
 	if (usegssname == 0)
diff --git a/sys/fs/nfs/nfs_commonsubs.c b/sys/fs/nfs/nfs_commonsubs.c
index 3bff2737b687..0b7c15a0b54e 100644
--- a/sys/fs/nfs/nfs_commonsubs.c
+++ b/sys/fs/nfs/nfs_commonsubs.c
@@ -4167,7 +4167,7 @@ nfsrv_nfsuserdport(struct nfsuserd_args *nargs, NFSPROC_T *p)
 	rp->nr_sotype = SOCK_DGRAM;
 	rp->nr_soproto = IPPROTO_UDP;
 	rp->nr_lock = (NFSR_RESERVEDPORT | NFSR_LOCALHOST);
-	rp->nr_cred = NULL;
+	rp->nr_cred = crhold(curthread->td_ucred);
 	rp->nr_prog = RPCPROG_NFSUSERD;
 	error = 0;
 	switch (nargs->nuserd_family) {
@@ -4235,6 +4235,7 @@ nfsrv_nfsuserddelport(void)
 	NFSUNLOCKNAMEID();
 	newnfs_disconnect(NULL, &NFSD_VNET(nfsrv_nfsuserdsock));
 	free(NFSD_VNET(nfsrv_nfsuserdsock).nr_nam, M_SONAME);
+	crfree(NFSD_VNET(nfsrv_nfsuserdsock).nr_cred);
 	NFSLOCKNAMEID();
 	NFSD_VNET(nfsrv_nfsuserd) = NOTRUNNING;
 	NFSUNLOCKNAMEID();
diff --git a/sys/fs/nfsserver/nfs_nfsdserv.c b/sys/fs/nfsserver/nfs_nfsdserv.c
index 7f43654ae06d..855d018e72d2 100644
--- a/sys/fs/nfsserver/nfs_nfsdserv.c
+++ b/sys/fs/nfsserver/nfs_nfsdserv.c
@@ -4269,7 +4269,7 @@ nfsrvd_setclientid(struct nfsrv_descript *nd, __unused int isdgram,
 	/* Allocated large enough for an AF_INET or AF_INET6 socket. */
 	clp->lc_req.nr_nam = malloc(sizeof(struct sockaddr_in6), M_SONAME,
 	    M_WAITOK | M_ZERO);
-	clp->lc_req.nr_cred = NULL;
+	clp->lc_req.nr_cred = crhold(nd->nd_cred);
 	NFSBCOPY(verf, clp->lc_verf, NFSX_VERF);
 	clp->lc_idlen = idlen;
 	error = nfsrv_mtostr(nd, clp->lc_id, idlen);
@@ -4359,6 +4359,7 @@ nfsrvd_setclientid(struct nfsrv_descript *nd, __unused int isdgram,
 	if (clp) {
 		free(clp->lc_req.nr_nam, M_SONAME);
 		NFSFREEMUTEX(&clp->lc_req.nr_mtx);
+		crfree(clp->lc_req.nr_cred);
 		free(clp->lc_stateid, M_NFSDCLIENT);
 		free(clp, M_NFSDCLIENT);
 	}
@@ -4377,6 +4378,7 @@ nfsmout:
 	if (clp) {
 		free(clp->lc_req.nr_nam, M_SONAME);
 		NFSFREEMUTEX(&clp->lc_req.nr_mtx);
+		crfree(clp->lc_req.nr_cred);
 		free(clp->lc_stateid, M_NFSDCLIENT);
 		free(clp, M_NFSDCLIENT);
 	}
@@ -4634,7 +4636,7 @@ nfsrvd_exchangeid(struct nfsrv_descript *nd, __unused int isdgram,
 		break;
 #endif
 	}
-	clp->lc_req.nr_cred = NULL;
+	clp->lc_req.nr_cred = crhold(nd->nd_cred);
 	NFSBCOPY(verf, clp->lc_verf, NFSX_VERF);
 	clp->lc_idlen = idlen;
 	error = nfsrv_mtostr(nd, clp->lc_id, idlen);
@@ -4707,6 +4709,7 @@ nfsrvd_exchangeid(struct nfsrv_descript *nd, __unused int isdgram,
 	if (clp != NULL) {
 		free(clp->lc_req.nr_nam, M_SONAME);
 		NFSFREEMUTEX(&clp->lc_req.nr_mtx);
+		crfree(clp->lc_req.nr_cred);
 		free(clp->lc_stateid, M_NFSDCLIENT);
 		free(clp, M_NFSDCLIENT);
 	}
@@ -4750,6 +4753,7 @@ nfsmout:
 	if (clp != NULL) {
 		free(clp->lc_req.nr_nam, M_SONAME);
 		NFSFREEMUTEX(&clp->lc_req.nr_mtx);
+		crfree(clp->lc_req.nr_cred);
 		free(clp->lc_stateid, M_NFSDCLIENT);
 		free(clp, M_NFSDCLIENT);
 	}
diff --git a/sys/fs/nfsserver/nfs_nfsdstate.c b/sys/fs/nfsserver/nfs_nfsdstate.c
index ccee9187bfec..18967a00583a 100644
--- a/sys/fs/nfsserver/nfs_nfsdstate.c
+++ b/sys/fs/nfsserver/nfs_nfsdstate.c
@@ -1504,6 +1504,7 @@ nfsrv_zapclient(struct nfsclient *clp, NFSPROC_T *p)
 	newnfs_disconnect(NULL, &clp->lc_req);
 	free(clp->lc_req.nr_nam, M_SONAME);
 	NFSFREEMUTEX(&clp->lc_req.nr_mtx);
+	crfree(clp->lc_req.nr_cred);
 	free(clp->lc_stateid, M_NFSDCLIENT);
 	free(clp, M_NFSDCLIENT);
 	NFSLOCKSTATE();
diff --git a/sys/rpc/clnt_bck.c b/sys/rpc/clnt_bck.c
index c5cbbf045bdc..9ff85b1fa2c0 100644
--- a/sys/rpc/clnt_bck.c
+++ b/sys/rpc/clnt_bck.c
@@ -58,6 +58,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/jail.h>
 #include <sys/ktls.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
@@ -440,15 +441,19 @@ got_reply:
 		 * If unsuccessful AND error is an authentication error
 		 * then refresh credentials and try again, else break
 		 */
-		else if (stat == RPC_AUTHERROR)
+		else if (stat == RPC_AUTHERROR) {
 			/* maybe our credentials need to be refreshed ... */
+			CURVNET_SET_QUIET(TD_TO_VNET(curthread));
 			if (nrefreshes > 0 && AUTH_REFRESH(auth, &reply_msg)) {
+				CURVNET_RESTORE();
 				nrefreshes--;
 				XDR_DESTROY(&xdrs);
 				mtx_lock(&ct->ct_lock);
 				goto call_again;
 			}
+			CURVNET_RESTORE();
 			/* end of unsuccessful completion */
+		}
 		/* end of valid reply message */
 	} else
 		errp->re_status = stat = RPC_CANTDECODERES;
diff --git a/sys/rpc/clnt_dg.c b/sys/rpc/clnt_dg.c
index b6a8cdce0d76..d7870aef5674 100644
--- a/sys/rpc/clnt_dg.c
+++ b/sys/rpc/clnt_dg.c
@@ -39,6 +39,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/jail.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
@@ -738,15 +739,19 @@ got_reply:
 		 * If unsuccessful AND error is an authentication error
 		 * then refresh credentials and try again, else break
 		 */
-		else if (stat == RPC_AUTHERROR)
+		else if (stat == RPC_AUTHERROR) {
 			/* maybe our credentials need to be refreshed ... */
+			CURVNET_SET_QUIET(TD_TO_VNET(curthread));
 			if (nrefreshes > 0 &&
 			    AUTH_REFRESH(auth, &reply_msg)) {
+				CURVNET_RESTORE();
 				nrefreshes--;
 				XDR_DESTROY(&xdrs);
 				mtx_lock(&cs->cs_lock);
 				goto call_again;
 			}
+			CURVNET_RESTORE();
+		}
 		/* end of unsuccessful completion */
 	}	/* end of valid reply message */
 	else {
diff --git a/sys/rpc/clnt_vc.c b/sys/rpc/clnt_vc.c
index ecd5fdd04f34..e395cd27ccaa 100644
--- a/sys/rpc/clnt_vc.c
+++ b/sys/rpc/clnt_vc.c
@@ -54,6 +54,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/jail.h>
 #include <sys/kernel.h>
 #include <sys/kthread.h>
 #include <sys/ktls.h>
@@ -559,15 +560,19 @@ got_reply:
 		 * If unsuccessful AND error is an authentication error
 		 * then refresh credentials and try again, else break
 		 */
-		else if (stat == RPC_AUTHERROR)
+		else if (stat == RPC_AUTHERROR) {
 			/* maybe our credentials need to be refreshed ... */
+			CURVNET_SET_QUIET(TD_TO_VNET(curthread));
 			if (nrefreshes > 0 &&
 			    AUTH_REFRESH(auth, &reply_msg)) {
+				CURVNET_RESTORE();
 				nrefreshes--;
 				XDR_DESTROY(&xdrs);
 				mtx_lock(&ct->ct_lock);
 				goto call_again;
 			}
+			CURVNET_RESTORE();
+		}
 		/* end of unsuccessful completion */
 	}	/* end of valid reply message */
 	else {
diff --git a/sys/rpc/rpcsec_gss/rpcsec_gss.c b/sys/rpc/rpcsec_gss/rpcsec_gss.c
index d99e3a3090df..de690bbbd1d1 100644
--- a/sys/rpc/rpcsec_gss/rpcsec_gss.c
+++ b/sys/rpc/rpcsec_gss/rpcsec_gss.c
@@ -150,26 +150,42 @@ static struct timeval AUTH_TIMEOUT = { 25, 0 };
 
 #define RPC_GSS_HASH_SIZE	11
 #define RPC_GSS_MAX		256
-static struct rpc_gss_data_list rpc_gss_cache[RPC_GSS_HASH_SIZE];
-static struct rpc_gss_data_list rpc_gss_all;
-static struct sx rpc_gss_lock;
-static int rpc_gss_count;
+
+VNET_DEFINE_STATIC(struct rpc_gss_data_list *, rpc_gss_cache);
+VNET_DEFINE_STATIC(struct rpc_gss_data_list, rpc_gss_all);
+VNET_DEFINE_STATIC(struct sx, rpc_gss_lock);
+VNET_DEFINE_STATIC(int, rpc_gss_count);
 
 static AUTH *rpc_gss_seccreate_int(CLIENT *, struct ucred *, const char *,
     const char *, gss_OID, rpc_gss_service_t, u_int, rpc_gss_options_req_t *,
     rpc_gss_options_ret_t *);
 
 static void
-rpc_gss_hashinit(void *dummy)
+rpc_gss_hashinit(void *dummy __unused)
 {
 	int i;
 
+	VNET(rpc_gss_cache) = mem_alloc(sizeof(struct rpc_gss_data_list) *
+	    RPC_GSS_HASH_SIZE);
 	for (i = 0; i < RPC_GSS_HASH_SIZE; i++)
-		TAILQ_INIT(&rpc_gss_cache[i]);
-	TAILQ_INIT(&rpc_gss_all);
-	sx_init(&rpc_gss_lock, "rpc_gss_lock");
+		TAILQ_INIT(&VNET(rpc_gss_cache)[i]);
+	TAILQ_INIT(&VNET(rpc_gss_all));
+	sx_init(&VNET(rpc_gss_lock), "rpc_gss_lock");
 }
-SYSINIT(rpc_gss_hashinit, SI_SUB_KMEM, SI_ORDER_ANY, rpc_gss_hashinit, NULL);
+VNET_SYSINIT(rpc_gss_hashinit, SI_SUB_VNET_DONE, SI_ORDER_ANY,
+    rpc_gss_hashinit, NULL);
+
+static void
+rpc_gss_hashinit_cleanup(void *dummy __unused)
+{
+
+	rpc_gss_secpurge(NULL);
+	mem_free(VNET(rpc_gss_cache), sizeof(struct rpc_gss_data_list) *
+	    RPC_GSS_HASH_SIZE);
+	sx_destroy(&VNET(rpc_gss_lock));
+}
+VNET_SYSUNINIT(rpc_gss_hashinit_cleanup, SI_SUB_VNET_DONE, SI_ORDER_ANY,
+    rpc_gss_hashinit_cleanup, NULL);
 
 static uint32_t
 rpc_gss_hash(const char *principal, gss_OID mech,
@@ -198,15 +214,16 @@ rpc_gss_secfind(CLIENT *clnt, struct ucred *cred, const char *principal,
 	struct rpc_gss_data	*gd, *tgd;
 	rpc_gss_options_ret_t	options;
 
-	if (rpc_gss_count > RPC_GSS_MAX) {
-		while (rpc_gss_count > RPC_GSS_MAX) {
-			sx_xlock(&rpc_gss_lock);
-			tgd = TAILQ_FIRST(&rpc_gss_all);
+	CURVNET_ASSERT_SET();
+	if (VNET(rpc_gss_count) > RPC_GSS_MAX) {
+		while (VNET(rpc_gss_count) > RPC_GSS_MAX) {
+			sx_xlock(&VNET(rpc_gss_lock));
+			tgd = TAILQ_FIRST(&VNET(rpc_gss_all));
 			th = tgd->gd_hash;
-			TAILQ_REMOVE(&rpc_gss_cache[th], tgd, gd_link);
-			TAILQ_REMOVE(&rpc_gss_all, tgd, gd_alllink);
-			rpc_gss_count--;
-			sx_xunlock(&rpc_gss_lock);
+			TAILQ_REMOVE(&VNET(rpc_gss_cache)[th], tgd, gd_link);
+			TAILQ_REMOVE(&VNET(rpc_gss_all), tgd, gd_alllink);
+			VNET(rpc_gss_count)--;
+			sx_xunlock(&VNET(rpc_gss_lock));
 			AUTH_DESTROY(tgd->gd_auth);
 		}
 	}
@@ -217,23 +234,24 @@ rpc_gss_secfind(CLIENT *clnt, struct ucred *cred, const char *principal,
 	h = rpc_gss_hash(principal, mech_oid, cred, service);
 
 again:
-	sx_slock(&rpc_gss_lock);
-	TAILQ_FOREACH(gd, &rpc_gss_cache[h], gd_link) {
+	sx_slock(&VNET(rpc_gss_lock));
+	TAILQ_FOREACH(gd, &VNET(rpc_gss_cache)[h], gd_link) {
 		if (gd->gd_ucred->cr_uid == cred->cr_uid
 		    && !strcmp(gd->gd_principal, principal)
 		    && gd->gd_mech == mech_oid
 		    && gd->gd_cred.gc_svc == service) {
 			refcount_acquire(&gd->gd_refs);
-			if (sx_try_upgrade(&rpc_gss_lock)) {
+			if (sx_try_upgrade(&VNET(rpc_gss_lock))) {
 				/*
 				 * Keep rpc_gss_all LRU sorted.
 				 */
-				TAILQ_REMOVE(&rpc_gss_all, gd, gd_alllink);
-				TAILQ_INSERT_TAIL(&rpc_gss_all, gd,
+				TAILQ_REMOVE(&VNET(rpc_gss_all), gd,
+				    gd_alllink);
+				TAILQ_INSERT_TAIL(&VNET(rpc_gss_all), gd,
 				    gd_alllink);
-				sx_xunlock(&rpc_gss_lock);
+				sx_xunlock(&VNET(rpc_gss_lock));
 			} else {
-				sx_sunlock(&rpc_gss_lock);
+				sx_sunlock(&VNET(rpc_gss_lock));
 			}
 
 			/*
@@ -249,7 +267,7 @@ again:
 			return (gd->gd_auth);
 		}
 	}
-	sx_sunlock(&rpc_gss_lock);
+	sx_sunlock(&VNET(rpc_gss_lock));
 
 	/*
 	 * We missed in the cache - create a new association.
@@ -262,8 +280,8 @@ again:
 	gd = AUTH_PRIVATE(auth);
 	gd->gd_hash = h;
 	
-	sx_xlock(&rpc_gss_lock);
-	TAILQ_FOREACH(tgd, &rpc_gss_cache[h], gd_link) {
+	sx_xlock(&VNET(rpc_gss_lock));
+	TAILQ_FOREACH(tgd, &VNET(rpc_gss_cache)[h], gd_link) {
 		if (tgd->gd_ucred->cr_uid == cred->cr_uid
 		    && !strcmp(tgd->gd_principal, principal)
 		    && tgd->gd_mech == mech_oid
@@ -272,17 +290,17 @@ again:
 			 * We lost a race to create the AUTH that
 			 * matches this cred.
 			 */
-			sx_xunlock(&rpc_gss_lock);
+			sx_xunlock(&VNET(rpc_gss_lock));
 			AUTH_DESTROY(auth);
 			goto again;
 		}
 	}
 
-	rpc_gss_count++;
-	TAILQ_INSERT_TAIL(&rpc_gss_cache[h], gd, gd_link);
-	TAILQ_INSERT_TAIL(&rpc_gss_all, gd, gd_alllink);
+	VNET(rpc_gss_count)++;
+	TAILQ_INSERT_TAIL(&VNET(rpc_gss_cache)[h], gd, gd_link);
+	TAILQ_INSERT_TAIL(&VNET(rpc_gss_all), gd, gd_alllink);
 	refcount_acquire(&gd->gd_refs);	/* one for the cache, one for user */
-	sx_xunlock(&rpc_gss_lock);
+	sx_xunlock(&VNET(rpc_gss_lock));
 
 	return (auth);
 }
@@ -293,14 +311,15 @@ rpc_gss_secpurge(CLIENT *clnt)
 	uint32_t		h;
 	struct rpc_gss_data	*gd, *tgd;
 
-	TAILQ_FOREACH_SAFE(gd, &rpc_gss_all, gd_alllink, tgd) {
-		if (gd->gd_clnt == clnt) {
-			sx_xlock(&rpc_gss_lock);
+	CURVNET_ASSERT_SET();
+	TAILQ_FOREACH_SAFE(gd, &VNET(rpc_gss_all), gd_alllink, tgd) {
+		if (clnt == NULL || gd->gd_clnt == clnt) {
+			sx_xlock(&VNET(rpc_gss_lock));
 			h = gd->gd_hash;
-			TAILQ_REMOVE(&rpc_gss_cache[h], gd, gd_link);
-			TAILQ_REMOVE(&rpc_gss_all, gd, gd_alllink);
-			rpc_gss_count--;
-			sx_xunlock(&rpc_gss_lock);
+			TAILQ_REMOVE(&VNET(rpc_gss_cache)[h], gd, gd_link);
+			TAILQ_REMOVE(&VNET(rpc_gss_all), gd, gd_alllink);
+			VNET(rpc_gss_count)--;
+			sx_xunlock(&VNET(rpc_gss_lock));
 			AUTH_DESTROY(gd->gd_auth);
 		}
 	}
@@ -748,6 +767,7 @@ rpc_gss_init(AUTH *auth, rpc_gss_options_ret_t *options_ret)
 	gss_OID_set		mechlist;
 	static enum krb_imp	my_krb_imp = KRBIMP_UNKNOWN;
 
+	CURVNET_ASSERT_SET();
 	rpc_gss_log_debug("in rpc_gss_refresh()");
 	
 	gd = AUTH_PRIVATE(auth);
@@ -773,17 +793,6 @@ rpc_gss_init(AUTH *auth, rpc_gss_options_ret_t *options_ret)
 	gd->gd_cred.gc_proc = RPCSEC_GSS_INIT;
 	gd->gd_cred.gc_seq = 0;
 
-	/*
-	 * XXX Threads from inside jails can get here via calls
-	 * to clnt_vc_call()->AUTH_REFRESH()->rpc_gss_refresh()
-	 * but the NFS mount is always done outside of the
-	 * jails in vnet0.  Since the thread credentials won't
-	 * necessarily have cr_prison == vnet0 and this function
-	 * has no access to the socket, using vnet0 seems the
-	 * only option.  This is broken if NFS mounts are enabled
-	 * within vnet prisons.
-	 */
-	CURVNET_SET_QUIET(vnet0);
 	/*
 	 * For KerberosV, if there is a client principal name, that implies
 	 * that this is a host based initiator credential in the default
@@ -1030,14 +1039,12 @@ out:
 			gss_delete_sec_context(&min_stat, &gd->gd_ctx,
 				GSS_C_NO_BUFFER);
 		}
-		CURVNET_RESTORE();
 		mtx_lock(&gd->gd_lock);
 		gd->gd_state = RPCSEC_GSS_START;
 		wakeup(gd);
 		mtx_unlock(&gd->gd_lock);
 		return (FALSE);
 	}
-	CURVNET_RESTORE();
 	
 	mtx_lock(&gd->gd_lock);
 	gd->gd_state = RPCSEC_GSS_ESTABLISHED;


home | help

Want to link to this message? Use this
URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?69f65287.3dfac.6c1e7ff2>