Date:      Fri, 6 Mar 2020 10:24:12 +0000 (UTC)
From:      Mathieu Arnold <mat@FreeBSD.org>
To:        ports-committers@freebsd.org, svn-ports-all@freebsd.org, svn-ports-head@freebsd.org
Subject:   svn commit: r527873 - in head/dns/bind916: . files
Message-ID:  <202003061024.026AOCFb065972@repo.freebsd.org>

Author: mat
Date: Fri Mar  6 10:24:11 2020
New Revision: 527873
URL: https://svnweb.freebsd.org/changeset/ports/527873

Log:
  Import patch from upstream to fix a TCP client quota exhaustion issue.
  
  Obtained from:	https://lists.isc.org/pipermail/bind-announce/2020-March/001150.html
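
  The fix replaces the listening socket's boolean "overquota" flag with two
  counters: conns (active connections) and overquota (accepts deferred
  because the shared TCP quota was exhausted).  An accept is allowed to
  exceed the quota when the listener has no active connections, and every
  close retries deferred accepts while the listener is idle, so an exhausted
  shared quota can no longer starve an idle listener.  The standalone C
  sketch below mirrors that bookkeeping; quota_t, listener_t, try_accept()
  and on_close() are hypothetical stand-ins for illustration, not ISC
  interfaces.

/*
 * Hypothetical sketch of the quota bookkeeping introduced by the patch
 * below.  The names are made up for illustration; they are not ISC APIs.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	int used;	/* slots currently attached */
	int max;	/* hard limit shared by all TCP listeners */
} quota_t;

typedef struct {
	quota_t *pquota;	/* shared parent quota */
	int	 conns;		/* active connections on this listener */
	int	 overquota;	/* accepts deferred because quota was full */
} listener_t;

static bool
quota_attach(quota_t *q) {
	if (q->used >= q->max) {
		return (false);
	}
	q->used++;
	return (true);
}

static void
quota_detach(quota_t *q) {
	q->used--;
}

/*
 * Like accept_connection(): when the shared quota is exhausted, record
 * the deferral, but still accept if this listener has no active
 * connections, so some level of service is always maintained.
 */
static bool
try_accept(listener_t *l) {
	if (!quota_attach(l->pquota)) {
		l->overquota++;
		if (l->conns > 0) {
			return (false);	/* deferred: retried on close */
		}
	}
	l->conns++;
	return (true);
}

/*
 * Like tcp_close_direct(): each close frees a quota slot and, while the
 * listener is idle, retries accepts that were deferred earlier.
 */
static void
on_close(listener_t *l, bool had_quota) {
	if (had_quota) {
		quota_detach(l->pquota);
	}
	l->conns--;
	while (l->conns == 0 && l->overquota > 0) {
		l->overquota--;
		if (try_accept(l)) {
			printf("deferred accept resumed\n");
		}
	}
}

int
main(void) {
	quota_t    q = { .used = 0, .max = 1 };
	listener_t l = { .pquota = &q, .conns = 0, .overquota = 0 };

	(void)try_accept(&l);	/* takes the only quota slot */
	(void)try_accept(&l);	/* quota full and conns > 0: deferred */
	on_close(&l, true);	/* frees the slot; deferred accept resumes */
	return (0);
}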

Added:
  head/dns/bind916/files/bind-v9.16.0-tcp_quota_fix.patch   (contents, props changed)
Modified:
  head/dns/bind916/Makefile   (contents, props changed)

Modified: head/dns/bind916/Makefile
==============================================================================
--- head/dns/bind916/Makefile	Fri Mar  6 10:14:45 2020	(r527872)
+++ head/dns/bind916/Makefile	Fri Mar  6 10:24:11 2020	(r527873)
@@ -8,7 +8,7 @@ PORTVERSION=	${ISCVERSION:S/-P/P/:S/b/.b/:S/a/.a/:S/rc
 PORTREVISION=	0
 .else
 # dns/bind916 here
-PORTREVISION=	1
+PORTREVISION=	2
 .endif
 CATEGORIES=	dns net
 MASTER_SITES=	ISC/bind9/${ISCVERSION}
@@ -65,7 +65,8 @@ EXTRA_PATCHES=		${PATCHDIR}/extrapatch-bind-tools
 .else
 USE_RC_SUBR=		named
 SUB_FILES=		named.conf pkg-message
-EXTRA_PATCHES=		${PATCHDIR}/extrapatch-no-bind-tools
+EXTRA_PATCHES=		${PATCHDIR}/extrapatch-no-bind-tools \
+			${PATCHDIR}/bind-v9.16.0-tcp_quota_fix.patch:-p1
 
 PORTDOCS=	*
 

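(The ":-p1" suffix on the new EXTRA_PATCHES entry sets the strip count the
ports framework passes to patch(1), needed here because the upstream patch
uses git-style a/ and b/ path prefixes.)
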
Added: head/dns/bind916/files/bind-v9.16.0-tcp_quota_fix.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/dns/bind916/files/bind-v9.16.0-tcp_quota_fix.patch	Fri Mar  6 10:24:11 2020	(r527873)
@@ -0,0 +1,341 @@
+diff --git a/lib/isc/netmgr/netmgr-int.h b/lib/isc/netmgr/netmgr-int.h
+index ae83f943d3..c85065f39d 100644
+--- a/lib/isc/netmgr/netmgr-int.h
++++ b/lib/isc/netmgr/netmgr-int.h
+@@ -356,7 +356,16 @@ struct isc_nmsocket {
+ 	 */
+ 	isc_quota_t *quota;
+ 	isc_quota_t *pquota;
+-	bool	     overquota;
++
++	/*%
++	 * How many connections we have not accepted due to quota?
++	 * When we close a connection we need to accept a new one.
++	 */
++	int overquota;
++	/*%
++	 * How many active connections we have?
++	 */
++	int conns;
+ 
+ 	/*%
+ 	 * Socket statistics
+diff --git a/lib/isc/netmgr/netmgr.c b/lib/isc/netmgr/netmgr.c
+index f4361575cc..26728c1ba6 100644
+--- a/lib/isc/netmgr/netmgr.c
++++ b/lib/isc/netmgr/netmgr.c
+@@ -727,6 +727,11 @@ nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree)
+ 		for (int i = 0; i < sock->nchildren; i++) {
+ 			if (!atomic_load(&sock->children[i].destroying)) {
+ 				nmsocket_cleanup(&sock->children[i], false);
++				if (sock->statsindex != NULL) {
++					isc__nm_decstats(
++						sock->mgr,
++						sock->statsindex[STATID_ACTIVE]);
++				}
+ 			}
+ 		}
+ 
+@@ -738,6 +743,9 @@ nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree)
+ 		sock->children = NULL;
+ 		sock->nchildren = 0;
+ 	}
++	if (sock->statsindex != NULL) {
++		isc__nm_decstats(sock->mgr, sock->statsindex[STATID_ACTIVE]);
++	}
+ 
+ 	if (sock->tcphandle != NULL) {
+ 		isc_nmhandle_unref(sock->tcphandle);
+@@ -854,8 +862,6 @@ isc__nmsocket_prep_destroy(isc_nmsocket_t *sock)
+ 	if (sock->children != NULL) {
+ 		for (int i = 0; i < sock->nchildren; i++) {
+ 			atomic_store(&sock->children[i].active, false);
+-			isc__nm_decstats(sock->mgr,
+-					 sock->statsindex[STATID_ACTIVE]);
+ 		}
+ 	}
+ 
+diff --git a/lib/isc/netmgr/tcp.c b/lib/isc/netmgr/tcp.c
+index a83fede0d2..58ffd3c404 100644
+--- a/lib/isc/netmgr/tcp.c
++++ b/lib/isc/netmgr/tcp.c
+@@ -26,12 +26,28 @@
+ #include <isc/region.h>
+ #include <isc/result.h>
+ #include <isc/sockaddr.h>
++#include <isc/stdtime.h>
+ #include <isc/thread.h>
+ #include <isc/util.h>
+ 
+ #include "netmgr-int.h"
+ #include "uv-compat.h"
+ 
++static atomic_uint_fast32_t last_tcpquota_log = ATOMIC_VAR_INIT(0);
++
++static bool
++can_log_tcp_quota() {
++	isc_stdtime_t now, last;
++
++	isc_stdtime_get(&now);
++	last = atomic_exchange_relaxed(&last_tcpquota_log, now);
++	if (now != last) {
++		return (true);
++	}
++
++	return (false);
++}
++
+ static int
+ tcp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req);
+ 
+@@ -668,9 +684,6 @@ read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf)
+ 	}
+ 
+ 	isc__nm_free_uvbuf(sock, buf);
+-	if (sock->quota) {
+-		isc_quota_detach(&sock->quota);
+-	}
+ 
+ 	/*
+ 	 * This might happen if the inner socket is closing.  It means that
+@@ -699,6 +712,7 @@ accept_connection(isc_nmsocket_t *ssock)
+ 	struct sockaddr_storage ss;
+ 	isc_sockaddr_t		local;
+ 	int			r;
++	bool overquota = false;
+ 
+ 	REQUIRE(VALID_NMSOCK(ssock));
+ 	REQUIRE(ssock->tid == isc_nm_tid());
+@@ -711,10 +725,25 @@ accept_connection(isc_nmsocket_t *ssock)
+ 
+ 	if (ssock->pquota != NULL) {
+ 		result = isc_quota_attach(ssock->pquota, &quota);
++
++		/*
++		 * We share the quota between all TCP sockets. Others
++		 * may have used up all the quota slots, in which case
++		 * this socket could starve. So we only fail here if we
++		 * already had at least one active connection on this
++		 * socket. This guarantees that we'll maintain some level
++		 * of service while over quota, and will resume normal
++		 * service when the quota comes back down.
++		 */
+ 		if (result != ISC_R_SUCCESS) {
+-			isc__nm_incstats(ssock->mgr,
+-					 ssock->statsindex[STATID_ACCEPTFAIL]);
+-			return (result);
++			ssock->overquota++;
++			overquota = true;
++			if (ssock->conns > 0) {
++				isc__nm_incstats(
++					ssock->mgr,
++					ssock->statsindex[STATID_ACCEPTFAIL]);
++				return (result);
++			}
+ 		}
+ 	}
+ 
+@@ -761,6 +790,7 @@ accept_connection(isc_nmsocket_t *ssock)
+ 	}
+ 
+ 	isc_nmsocket_attach(ssock, &csock->server);
++	ssock->conns++;
+ 
+ 	handle = isc__nmhandle_get(csock, NULL, &local);
+ 
+@@ -779,6 +809,9 @@ error:
+ 	if (csock->quota != NULL) {
+ 		isc_quota_detach(&csock->quota);
+ 	}
++	if (overquota) {
++		ssock->overquota--;
++	}
+ 	/* We need to detach it properly to make sure uv_close is called. */
+ 	isc_nmsocket_detach(&csock);
+ 	return (result);
+@@ -793,14 +826,14 @@ tcp_connection_cb(uv_stream_t *server, int status)
+ 	UNUSED(status);
+ 
+ 	result = accept_connection(ssock);
+-	if (result != ISC_R_SUCCESS) {
+-		if (result == ISC_R_QUOTA || result == ISC_R_SOFTQUOTA) {
+-			ssock->overquota = true;
++	if (result != ISC_R_SUCCESS && result != ISC_R_NOCONN) {
++		if ((result != ISC_R_QUOTA && result != ISC_R_SOFTQUOTA) ||
++		    can_log_tcp_quota()) {
++			isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
++				      ISC_LOGMODULE_NETMGR, ISC_LOG_ERROR,
++				      "TCP connection failed: %s",
++				      isc_result_totext(result));
+ 		}
+-		isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
+-			      ISC_LOGMODULE_NETMGR, ISC_LOG_ERROR,
+-			      "TCP connection failed: %s",
+-			      isc_result_totext(result));
+ 	}
+ }
+ 
+@@ -936,17 +969,27 @@ tcp_close_direct(isc_nmsocket_t *sock)
+ 	REQUIRE(VALID_NMSOCK(sock));
+ 	REQUIRE(sock->tid == isc_nm_tid());
+ 	REQUIRE(sock->type == isc_nm_tcpsocket);
++	isc_nmsocket_t *ssock = sock->server;
+ 
+ 	if (sock->quota != NULL) {
+-		isc_nmsocket_t *ssock = sock->server;
+-
+ 		isc_quota_detach(&sock->quota);
+-
+-		if (ssock->overquota) {
++	}
++	if (ssock != NULL) {
++		ssock->conns--;
++		while (ssock->conns == 0 && ssock->overquota > 0) {
++			ssock->overquota--;
+ 			isc_result_t result = accept_connection(ssock);
+-			if (result != ISC_R_QUOTA &&
+-			    result != ISC_R_SOFTQUOTA) {
+-				ssock->overquota = false;
++			if (result == ISC_R_SUCCESS || result == ISC_R_NOCONN) {
++				continue;
++			}
++			if ((result != ISC_R_QUOTA &&
++			     result != ISC_R_SOFTQUOTA) ||
++			    can_log_tcp_quota()) {
++				isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
++					      ISC_LOGMODULE_NETMGR,
++					      ISC_LOG_ERROR,
++					      "TCP connection failed: %s",
++					      isc_result_totext(result));
+ 			}
+ 		}
+ 	}
+diff --git a/lib/isc/netmgr/tcpdns.c b/lib/isc/netmgr/tcpdns.c
+index e384b73be9..f89eb359af 100644
+--- a/lib/isc/netmgr/tcpdns.c
++++ b/lib/isc/netmgr/tcpdns.c
+@@ -43,6 +43,9 @@ dnslisten_readcb(isc_nmhandle_t *handle, isc_region_t *region, void *arg);
+ static void
+ resume_processing(void *arg);
+ 
++static void
++tcpdns_close_direct(isc_nmsocket_t *sock);
++
+ static inline size_t
+ dnslen(unsigned char *base)
+ {
+@@ -82,7 +85,6 @@ timer_close_cb(uv_handle_t *handle)
+ {
+ 	isc_nmsocket_t *sock = (isc_nmsocket_t *)uv_handle_get_data(handle);
+ 	INSIST(VALID_NMSOCK(sock));
+-	atomic_store(&sock->closed, true);
+ 	isc_nmsocket_detach(&sock);
+ }
+ 
+@@ -94,9 +96,7 @@ dnstcp_readtimeout(uv_timer_t *timer)
+ 
+ 	REQUIRE(VALID_NMSOCK(sock));
+ 	REQUIRE(sock->tid == isc_nm_tid());
+-
+-	isc_nmsocket_detach(&sock->outer);
+-	uv_close((uv_handle_t *)&sock->timer, timer_close_cb);
++	tcpdns_close_direct(sock);
+ }
+ 
+ /*
+@@ -252,7 +252,9 @@ dnslisten_readcb(isc_nmhandle_t *handle, isc_region_t *region, void *arg)
+ 		 * We have a packet: stop timeout timers
+ 		 */
+ 		atomic_store(&dnssock->outer->processing, true);
+-		uv_timer_stop(&dnssock->timer);
++		if (dnssock->timer_initialized) {
++			uv_timer_stop(&dnssock->timer);
++		}
+ 
+ 		if (atomic_load(&dnssock->sequential)) {
+ 			/*
+@@ -399,8 +401,10 @@ resume_processing(void *arg)
+ 	if (atomic_load(&sock->ah) == 0) {
+ 		/* Nothing is active; sockets can timeout now */
+ 		atomic_store(&sock->outer->processing, false);
+-		uv_timer_start(&sock->timer, dnstcp_readtimeout,
+-			       sock->read_timeout, 0);
++		if (sock->timer_initialized) {
++			uv_timer_start(&sock->timer, dnstcp_readtimeout,
++				       sock->read_timeout, 0);
++		}
+ 	}
+ 
+ 	/*
+@@ -413,7 +417,9 @@ resume_processing(void *arg)
+ 		result = processbuffer(sock, &handle);
+ 		if (result == ISC_R_SUCCESS) {
+ 			atomic_store(&sock->outer->processing, true);
+-			uv_timer_stop(&sock->timer);
++			if (sock->timer_initialized) {
++				uv_timer_stop(&sock->timer);
++			}
+ 			isc_nmhandle_unref(handle);
+ 		} else if (sock->outer != NULL) {
+ 			isc_nm_resumeread(sock->outer);
+@@ -441,7 +447,9 @@ resume_processing(void *arg)
+ 			break;
+ 		}
+ 
+-		uv_timer_stop(&sock->timer);
++		if (sock->timer_initialized) {
++			uv_timer_stop(&sock->timer);
++		}
+ 		atomic_store(&sock->outer->processing, true);
+ 		isc_nmhandle_unref(dnshandle);
+ 	} while (atomic_load(&sock->ah) < TCPDNS_CLIENTS_PER_CONN);
+@@ -507,18 +515,29 @@ static void
+ tcpdns_close_direct(isc_nmsocket_t *sock)
+ {
+ 	REQUIRE(sock->tid == isc_nm_tid());
+-	if (sock->outer != NULL) {
+-		sock->outer->rcb.recv = NULL;
+-		isc_nmsocket_detach(&sock->outer);
+-	}
+-	if (sock->listener != NULL) {
+-		isc_nmsocket_detach(&sock->listener);
+-	}
+ 	/* We don't need atomics here, it's all in single network thread */
+ 	if (sock->timer_initialized) {
++		/*
++		 * We need to fire the timer callback to clean it up,
++		 * it will then call us again (via detach) so that we
++		 * can finally close the socket.
++		 */
+ 		sock->timer_initialized = false;
+ 		uv_timer_stop(&sock->timer);
+ 		uv_close((uv_handle_t *)&sock->timer, timer_close_cb);
++	} else {
++		/*
++		 * At this point we're certain that there are no external
++		 * references, we can close everything.
++		 */
++		if (sock->outer != NULL) {
++			sock->outer->rcb.recv = NULL;
++			isc_nmsocket_detach(&sock->outer);
++		}
++		if (sock->listener != NULL) {
++			isc_nmsocket_detach(&sock->listener);
++		}
++		atomic_store(&sock->closed, true);
+ 	}
+ }
+ 
+diff --git a/lib/isc/netmgr/uverr2result.c b/lib/isc/netmgr/uverr2result.c
+index b6a8065e3e..9781454ca6 100644
+--- a/lib/isc/netmgr/uverr2result.c
++++ b/lib/isc/netmgr/uverr2result.c
+@@ -38,6 +38,8 @@ isc___nm_uverr2result(int uverr, bool dolog, const char *file,
+ 		return (ISC_R_INVALIDFILE);
+ 	case UV_ENOENT:
+ 		return (ISC_R_FILENOTFOUND);
++	case UV_EAGAIN:
++		return (ISC_R_NOCONN);
+ 	case UV_EACCES:
+ 	case UV_EPERM:
+ 		return (ISC_R_NOPERM);
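
The can_log_tcp_quota() helper added above rate-limits the "TCP connection
failed" log message to roughly one per second: every caller atomically swaps
the current timestamp into last_tcpquota_log, and only a caller that actually
changed the stored value logs.  A standalone sketch of the same pattern,
using plain C11 atomics and time() instead of isc_stdtime_get() and the
relaxed exchange (for illustration only, not ISC code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static atomic_uint_fast32_t last_log = ATOMIC_VAR_INIT(0);

/*
 * Return true at most about once per wall-clock second.  The exchange
 * is lock-free, so concurrent callers in the same second all store the
 * same value and only the first to change it gets to log.
 */
static bool
can_log(void) {
	uint_fast32_t now = (uint_fast32_t)time(NULL);
	uint_fast32_t last = atomic_exchange(&last_log, now);

	return (now != last);
}

int
main(void) {
	for (int i = 0; i < 3; i++) {
		printf("%s\n", can_log() ? "logged" : "suppressed");
	}
	return (0);
}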


