From: Kristof Provost <kp@FreeBSD.org>
Date: Fri, 2 Nov 2018 15:32:05 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r340062 - in head/sys: net netpfil/pf
Message-Id: <201811021532.wA2FW58o088177@repo.freebsd.org>

Author: kp
Date: Fri Nov  2 15:32:04 2018
New Revision: 340062
URL: https://svnweb.freebsd.org/changeset/base/340062

Log:
  pf: Limit the fragment entry queue length to 64 per bucket.

  This keeps the global limit of 1024 fragments, but makes it fine
  grained per region of the packet, so smaller packets may use fewer
  fragments.  It costs another 16 bytes of memory per reassembly and
  divides the worst case for searching by 8.

  Obtained from:	OpenBSD
  Differential Revision:	https://reviews.freebsd.org/D17734

Modified:
  head/sys/net/pfvar.h
  head/sys/netpfil/pf/pf_norm.c

Modified: head/sys/net/pfvar.h
==============================================================================
--- head/sys/net/pfvar.h	Fri Nov  2 15:26:51 2018	(r340061)
+++ head/sys/net/pfvar.h	Fri Nov  2 15:32:04 2018	(r340062)
@@ -1211,6 +1211,13 @@ struct pf_divert {
 #define PF_FRAG_ENTRY_POINTS		16
 
 /*
+ * The number of entries in the fragment queue must be limited
+ * to avoid DoS by linear searching.  Instead of a global limit,
+ * use a limit per entry point.  For large packets these sum up.
+ */
+#define PF_FRAG_ENTRY_LIMIT		64
+
+/*
  * ioctl parameter structures
  */

Modified: head/sys/netpfil/pf/pf_norm.c
==============================================================================
--- head/sys/netpfil/pf/pf_norm.c	Fri Nov  2 15:26:51 2018	(r340061)
+++ head/sys/netpfil/pf/pf_norm.c	Fri Nov  2 15:32:04 2018	(r340062)
@@ -87,7 +87,10 @@ struct pf_fragment {
 #define fr_af		fr_key.frc_af
 #define fr_proto	fr_key.frc_proto
 
+	/* pointers to queue element */
 	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
+	/* count entries between pointers */
+	uint8_t	fr_entries[PF_FRAG_ENTRY_POINTS];
 	RB_ENTRY(pf_fragment) fr_entry;
 	TAILQ_ENTRY(pf_fragment) frag_next;
 	uint32_t	fr_timeout;
@@ -138,7 +141,7 @@ static int	pf_frent_holes(struct pf_frent *frent);
 static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
 		    struct pf_frag_tree *tree);
 static inline int	pf_frent_index(struct pf_frent *);
-static void	pf_frent_insert(struct pf_fragment *,
+static int	pf_frent_insert(struct pf_fragment *,
 		    struct pf_frent *, struct pf_frent *);
 void			pf_frent_remove(struct pf_fragment *, struct pf_frent *);
@@ -392,12 +395,24 @@ pf_frent_index(struct pf_frent *frent)
 	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
 }
 
-static void
+static int
 pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
 	struct pf_frent *prev)
 {
 	int index;
 
+	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);
+
+	/*
+	 * A packet has at most 65536 octets.  With 16 entry points, each one
+	 * spans 4096 octets.  We limit these to 64 fragments each, which
+	 * means on average every fragment must have at least 64 octets.
+	 */
+	index = pf_frent_index(frent);
+	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
+		return ENOBUFS;
+	frag->fr_entries[index]++;
+
 	if (prev == NULL) {
 		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
 	} else {
@@ -406,7 +421,6 @@ pf_frent_insert(struct pf_fragment *frag, struct pf_fr
 		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
 	}
 
-	index = pf_frent_index(frent);
 	if (frag->fr_firstoff[index] == NULL) {
 		KASSERT(prev == NULL || pf_frent_index(prev) < index,
 		    ("prev == NULL || pf_frent_index(pref) < index"));
@@ -424,6 +438,8 @@ pf_frent_insert(struct pf_fragment *frag, struct pf_fr
 	}
 
 	frag->fr_holes += pf_frent_holes(frent);
+
+	return 0;
 }
 
 void
@@ -460,6 +476,9 @@ pf_frent_remove(struct pf_fragment *frag, struct pf_fr
 	}
 
 	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
+
+	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
+	frag->fr_entries[index]--;
 }
 
 struct pf_frent *
@@ -567,6 +586,7 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct
 
 	*(struct pf_fragment_cmp *)frag = *key;
 	memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
+	memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
 	frag->fr_timeout = time_uptime;
 	frag->fr_maxlen = frent->fe_len;
 	frag->fr_holes = 1;
@@ -575,7 +595,7 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct
 	RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
 	TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
 
-	/* We do not have a previous fragment. */
+	/* We do not have a previous fragment, cannot fail. */
 	pf_frent_insert(frag, frent, NULL);
 
 	return (frag);
@@ -646,7 +666,11 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct
 		uma_zfree(V_pf_frent_z, after);
 	}
 
-	pf_frent_insert(frag, frent, prev);
+	/* If part of the queue gets too long, there is no way to recover. */
+	if (pf_frent_insert(frag, frent, prev)) {
+		DPFPRINTF(("fragment queue limit exceeded"));
+		goto bad_fragment;
+	}
 
 	return (frag);
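
For readers following the arithmetic in the new comment: a packet offset
space is at most 65536 octets, so the 16 entry points (PF_FRAG_ENTRY_POINTS)
cover 4096 octets each, and 64 entries per point (PF_FRAG_ENTRY_LIMIT) give
the global ceiling of 16 * 64 = 1024 fragments per reassembly.  The code
below is a minimal userland sketch of that per-bucket accounting, not the
pf code itself; all *_demo names are hypothetical.

/*
 * Minimal userland sketch of the per-bucket limit, for illustration
 * only; the *_demo names are hypothetical and not part of pf.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_POINTS_DEMO	16	/* mirrors PF_FRAG_ENTRY_POINTS */
#define ENTRY_LIMIT_DEMO	64	/* mirrors PF_FRAG_ENTRY_LIMIT */

/* One counter per 4096-octet region of the 65536-octet offset space. */
static uint8_t	entries_demo[ENTRY_POINTS_DEMO];

/* Map a fragment offset to its bucket, as pf_frent_index() does. */
static int
index_demo(unsigned int off)
{
	return (off / (0x10000 / ENTRY_POINTS_DEMO));
}

/* Refuse an insert once its bucket is full, otherwise count it. */
static int
insert_demo(unsigned int off)
{
	int idx = index_demo(off);

	if (entries_demo[idx] >= ENTRY_LIMIT_DEMO)
		return (ENOBUFS);
	entries_demo[idx]++;
	return (0);
}

int
main(void)
{
	unsigned int off;
	int accepted = 0, rejected = 0;

	/* Flood the first bucket with 8-octet fragments. */
	for (off = 0; off < 4096; off += 8) {
		if (insert_demo(off) == 0)
			accepted++;
		else
			rejected++;
	}
	/* Prints "accepted 64, rejected 448". */
	printf("accepted %d, rejected %d\n", accepted, rejected);
	return (0);
}

Without the cap, such a flood would queue all 512 tiny fragments and every
later insert would walk them linearly; with the per-bucket limit the walk
within one region is bounded at 64 entries.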