From owner-svn-src-stable-11@freebsd.org Fri Mar 29 14:34:56 2019 Return-Path: Delivered-To: svn-src-stable-11@mailman.ysv.freebsd.org Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2610:1c1:1:606c::19:1]) by mailman.ysv.freebsd.org (Postfix) with ESMTP id E51B7156AD48; Fri, 29 Mar 2019 14:34:55 +0000 (UTC) (envelope-from kp@FreeBSD.org) Received: from mxrelay.nyi.freebsd.org (mxrelay.nyi.freebsd.org [IPv6:2610:1c1:1:606c::19:3]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) server-signature RSA-PSS (4096 bits) client-signature RSA-PSS (4096 bits) client-digest SHA256) (Client CN "mxrelay.nyi.freebsd.org", Issuer "Let's Encrypt Authority X3" (verified OK)) by mx1.freebsd.org (Postfix) with ESMTPS id 8F4DC962E9; Fri, 29 Mar 2019 14:34:55 +0000 (UTC) (envelope-from kp@FreeBSD.org) Received: from repo.freebsd.org (repo.freebsd.org [IPv6:2610:1c1:1:6068::e6a:0]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (Client did not present a certificate) by mxrelay.nyi.freebsd.org (Postfix) with ESMTPS id 5B5DC22F; Fri, 29 Mar 2019 14:34:51 +0000 (UTC) (envelope-from kp@FreeBSD.org) Received: from repo.freebsd.org ([127.0.1.37]) by repo.freebsd.org (8.15.2/8.15.2) with ESMTP id x2TEYpbx053221; Fri, 29 Mar 2019 14:34:51 GMT (envelope-from kp@FreeBSD.org) Received: (from kp@localhost) by repo.freebsd.org (8.15.2/8.15.2/Submit) id x2TEYpQn053219; Fri, 29 Mar 2019 14:34:51 GMT (envelope-from kp@FreeBSD.org) Message-Id: <201903291434.x2TEYpQn053219@repo.freebsd.org> X-Authentication-Warning: repo.freebsd.org: kp set sender to kp@FreeBSD.org using -f From: Kristof Provost Date: Fri, 29 Mar 2019 14:34:51 +0000 (UTC) To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org Subject: svn commit: r345691 - in stable/11/sys: net netpfil/pf X-SVN-Group: stable-11 X-SVN-Commit-Author: kp X-SVN-Commit-Paths: in stable/11/sys: net netpfil/pf X-SVN-Commit-Revision: 345691 
X-SVN-Commit-Repository: base MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-Rspamd-Queue-Id: 8F4DC962E9 X-Spamd-Bar: -- Authentication-Results: mx1.freebsd.org X-Spamd-Result: default: False [-2.96 / 15.00]; local_wl_from(0.00)[FreeBSD.org]; NEURAL_HAM_MEDIUM(-1.00)[-0.997,0]; NEURAL_HAM_SHORT(-0.96)[-0.963,0]; ASN(0.00)[asn:11403, ipnet:2610:1c1:1::/48, country:US]; NEURAL_HAM_LONG(-1.00)[-1.000,0] X-BeenThere: svn-src-stable-11@freebsd.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: SVN commit messages for only the 11-stable src tree List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Fri, 29 Mar 2019 14:34:56 -0000 Author: kp Date: Fri Mar 29 14:34:50 2019 New Revision: 345691 URL: https://svnweb.freebsd.org/changeset/base/345691 Log: MFC r345177: pf: Use counter(9) in pf tables. The counters of pf tables are updated outside the rule lock. That means state updates might overwrite each other. Furthermore allocation and freeing of counters happens outside the lock as well. Use counter(9) for the counters, and always allocate the counter table element, so that the race condition cannot happen any more. 
PR: 230619 Submitted by: Kajetan Staszkiewicz Modified: stable/11/sys/net/pfvar.h stable/11/sys/netpfil/pf/pf_table.c Directory Properties: stable/11/ (props changed) Modified: stable/11/sys/net/pfvar.h ============================================================================== --- stable/11/sys/net/pfvar.h Fri Mar 29 14:19:31 2019 (r345690) +++ stable/11/sys/net/pfvar.h Fri Mar 29 14:34:50 2019 (r345691) @@ -1013,6 +1013,17 @@ struct pfr_tstats { int pfrts_cnt; int pfrts_refcnt[PFR_REFCNT_MAX]; }; + +struct pfr_ktstats { + struct pfr_table pfrts_t; + counter_u64_t pfrkts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; + counter_u64_t pfrkts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; + counter_u64_t pfrkts_match; + counter_u64_t pfrkts_nomatch; + long pfrkts_tzero; + int pfrkts_cnt; + int pfrkts_refcnt[PFR_REFCNT_MAX]; +}; #define pfrts_name pfrts_t.pfrt_name #define pfrts_flags pfrts_t.pfrt_flags @@ -1026,8 +1037,9 @@ union sockaddr_union { #endif /* _SOCKADDR_UNION_DEFINED */ struct pfr_kcounters { - u_int64_t pfrkc_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; - u_int64_t pfrkc_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; + counter_u64_t pfrkc_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; + counter_u64_t pfrkc_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; + long pfrkc_tzero; }; SLIST_HEAD(pfr_kentryworkq, pfr_kentry); @@ -1035,8 +1047,7 @@ struct pfr_kentry { struct radix_node pfrke_node[2]; union sockaddr_union pfrke_sa; SLIST_ENTRY(pfr_kentry) pfrke_workq; - struct pfr_kcounters *pfrke_counters; - long pfrke_tzero; + struct pfr_kcounters pfrke_counters; u_int8_t pfrke_af; u_int8_t pfrke_net; u_int8_t pfrke_not; @@ -1046,7 +1057,7 @@ struct pfr_kentry { SLIST_HEAD(pfr_ktableworkq, pfr_ktable); RB_HEAD(pfr_ktablehead, pfr_ktable); struct pfr_ktable { - struct pfr_tstats pfrkt_ts; + struct pfr_ktstats pfrkt_kts; RB_ENTRY(pfr_ktable) pfrkt_tree; SLIST_ENTRY(pfr_ktable) pfrkt_workq; struct radix_node_head *pfrkt_ip4; @@ -1057,18 +1068,18 @@ struct pfr_ktable { long pfrkt_larg; int pfrkt_nflags; }; 
-#define pfrkt_t pfrkt_ts.pfrts_t +#define pfrkt_t pfrkt_kts.pfrts_t #define pfrkt_name pfrkt_t.pfrt_name #define pfrkt_anchor pfrkt_t.pfrt_anchor #define pfrkt_ruleset pfrkt_t.pfrt_ruleset #define pfrkt_flags pfrkt_t.pfrt_flags -#define pfrkt_cnt pfrkt_ts.pfrts_cnt -#define pfrkt_refcnt pfrkt_ts.pfrts_refcnt -#define pfrkt_packets pfrkt_ts.pfrts_packets -#define pfrkt_bytes pfrkt_ts.pfrts_bytes -#define pfrkt_match pfrkt_ts.pfrts_match -#define pfrkt_nomatch pfrkt_ts.pfrts_nomatch -#define pfrkt_tzero pfrkt_ts.pfrts_tzero +#define pfrkt_cnt pfrkt_kts.pfrkts_cnt +#define pfrkt_refcnt pfrkt_kts.pfrkts_refcnt +#define pfrkt_packets pfrkt_kts.pfrkts_packets +#define pfrkt_bytes pfrkt_kts.pfrkts_bytes +#define pfrkt_match pfrkt_kts.pfrkts_match +#define pfrkt_nomatch pfrkt_kts.pfrkts_nomatch +#define pfrkt_tzero pfrkt_kts.pfrkts_tzero /* keep synced with pfi_kif, used in RB_FIND */ struct pfi_kif_cmp { Modified: stable/11/sys/netpfil/pf/pf_table.c ============================================================================== --- stable/11/sys/netpfil/pf/pf_table.c Fri Mar 29 14:19:31 2019 (r345690) +++ stable/11/sys/netpfil/pf/pf_table.c Fri Mar 29 14:34:50 2019 (r345691) @@ -111,6 +111,7 @@ struct pfr_walktree { struct pfi_dynaddr *pfrw1_dyn; } pfrw_1; int pfrw_free; + int pfrw_flags; }; #define pfrw_addr pfrw_1.pfrw1_addr #define pfrw_astats pfrw_1.pfrw1_astats @@ -124,15 +125,16 @@ struct pfr_walktree { static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures"); static VNET_DEFINE(uma_zone_t, pfr_kentry_z); #define V_pfr_kentry_z VNET(pfr_kentry_z) -static VNET_DEFINE(uma_zone_t, pfr_kcounters_z); -#define V_pfr_kcounters_z VNET(pfr_kcounters_z) static struct pf_addr pfr_ffaddr = { .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }; +static void pfr_copyout_astats(struct pfr_astats *, + const struct pfr_kentry *, + const struct pfr_walktree *); static void pfr_copyout_addr(struct pfr_addr *, - struct pfr_kentry *ke); + const struct pfr_kentry 
*ke); static int pfr_validate_addr(struct pfr_addr *); static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *, int *, int); @@ -140,8 +142,12 @@ static void pfr_mark_addrs(struct pfr_ktable *); static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *, struct pfr_addr *, int); +static bool pfr_create_kentry_counter(struct pfr_kcounters *, + int, int); static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *); static void pfr_destroy_kentries(struct pfr_kentryworkq *); +static void pfr_destroy_kentry_counter(struct pfr_kcounters *, + int, int); static void pfr_destroy_kentry(struct pfr_kentry *); static void pfr_insert_kentries(struct pfr_ktable *, struct pfr_kentryworkq *, long); @@ -195,9 +201,6 @@ pfr_initialize(void) V_pfr_kentry_z = uma_zcreate("pf table entries", sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pfr_kcounters_z = uma_zcreate("pf table counters", - sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL, - UMA_ALIGN_PTR, 0); V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z; V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT; } @@ -207,7 +210,6 @@ pfr_cleanup(void) { uma_zdestroy(V_pfr_kentry_z); - uma_zdestroy(V_pfr_kcounters_z); } int @@ -601,6 +603,13 @@ pfr_get_astats(struct pfr_table *tbl, struct pfr_astat w.pfrw_op = PFRW_GET_ASTATS; w.pfrw_astats = addr; w.pfrw_free = kt->pfrkt_cnt; + /* + * Flags below are for backward compatibility. It was possible to have + * a table without per-entry counters. Now they are always allocated, + * we just discard data when reading it if table is not configured to + * have counters. 
+ */ + w.pfrw_flags = kt->pfrkt_flags; rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); if (!rv) rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, @@ -767,10 +776,30 @@ pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr return (ke); } +static bool +pfr_create_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op) +{ + kc->pfrkc_packets[pfr_dir][pfr_op] = counter_u64_alloc(M_NOWAIT); + if (! kc->pfrkc_packets[pfr_dir][pfr_op]) + return (false); + + kc->pfrkc_bytes[pfr_dir][pfr_op] = counter_u64_alloc(M_NOWAIT); + if (! kc->pfrkc_bytes[pfr_dir][pfr_op]) { + /* Previous allocation will be freed through + * pfr_destroy_kentry() */ + return (false); + } + + kc->pfrkc_tzero = 0; + + return (true); +} + static struct pfr_kentry * pfr_create_kentry(struct pfr_addr *ad) { struct pfr_kentry *ke; + int pfr_dir, pfr_op; ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO); if (ke == NULL) @@ -783,6 +812,14 @@ pfr_create_kentry(struct pfr_addr *ad) ke->pfrke_af = ad->pfra_af; ke->pfrke_net = ad->pfra_net; ke->pfrke_not = ad->pfra_not; + for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) + for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) { + if (! 
pfr_create_kentry_counter(&ke->pfrke_counters, + pfr_dir, pfr_op)) { + pfr_destroy_kentry(ke); + return (NULL); + } + } return (ke); } @@ -798,10 +835,22 @@ pfr_destroy_kentries(struct pfr_kentryworkq *workq) } static void +pfr_destroy_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op) +{ + counter_u64_free(kc->pfrkc_packets[pfr_dir][pfr_op]); + counter_u64_free(kc->pfrkc_bytes[pfr_dir][pfr_op]); +} + +static void pfr_destroy_kentry(struct pfr_kentry *ke) { - if (ke->pfrke_counters) - uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters); + int pfr_dir, pfr_op; + + for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) + for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) + pfr_destroy_kentry_counter(&ke->pfrke_counters, + pfr_dir, pfr_op); + uma_zfree(V_pfr_kentry_z, ke); } @@ -819,7 +868,7 @@ pfr_insert_kentries(struct pfr_ktable *kt, "(code=%d).\n", rv); break; } - p->pfrke_tzero = tzero; + p->pfrke_counters.pfrkc_tzero = tzero; n++; } kt->pfrkt_cnt += n; @@ -842,7 +891,7 @@ pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_ad if (rv) return (rv); - p->pfrke_tzero = tzero; + p->pfrke_counters.pfrkc_tzero = tzero; kt->pfrkt_cnt++; return (0); @@ -877,15 +926,20 @@ static void pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange) { struct pfr_kentry *p; + int pfr_dir, pfr_op; SLIST_FOREACH(p, workq, pfrke_workq) { if (negchange) p->pfrke_not = !p->pfrke_not; - if (p->pfrke_counters) { - uma_zfree(V_pfr_kcounters_z, p->pfrke_counters); - p->pfrke_counters = NULL; + for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { + for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) { + counter_u64_zero(p->pfrke_counters. + pfrkc_packets[pfr_dir][pfr_op]); + counter_u64_zero(p->pfrke_counters. 
+ pfrkc_bytes[pfr_dir][pfr_op]); + } } - p->pfrke_tzero = tzero; + p->pfrke_counters.pfrkc_tzero = tzero; } } @@ -974,7 +1028,7 @@ pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_k } static void -pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke) +pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke) { bzero(ad, sizeof(*ad)); if (ke == NULL) @@ -988,6 +1042,33 @@ pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentr ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; } +static void +pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke, + const struct pfr_walktree *w) +{ + int dir, op; + const struct pfr_kcounters *kc = &ke->pfrke_counters; + + pfr_copyout_addr(&as->pfras_a, ke); + as->pfras_tzero = kc->pfrkc_tzero; + + if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS)) { + bzero(as->pfras_packets, sizeof(as->pfras_packets)); + bzero(as->pfras_bytes, sizeof(as->pfras_bytes)); + as->pfras_a.pfra_fback = PFR_FB_NOCOUNT; + return; + } + + for (dir = 0; dir < PFR_DIR_MAX; dir ++) { + for (op = 0; op < PFR_OP_ADDR_MAX; op ++) { + as->pfras_packets[dir][op] = + counter_u64_fetch(kc->pfrkc_packets[dir][op]); + as->pfras_bytes[dir][op] = + counter_u64_fetch(kc->pfrkc_bytes[dir][op]); + } + } +} + static int pfr_walktree(struct radix_node *rn, void *arg) { @@ -1016,20 +1097,8 @@ pfr_walktree(struct radix_node *rn, void *arg) if (w->pfrw_free-- > 0) { struct pfr_astats as; - pfr_copyout_addr(&as.pfras_a, ke); + pfr_copyout_astats(&as, ke, w); - if (ke->pfrke_counters) { - bcopy(ke->pfrke_counters->pfrkc_packets, - as.pfras_packets, sizeof(as.pfras_packets)); - bcopy(ke->pfrke_counters->pfrkc_bytes, - as.pfras_bytes, sizeof(as.pfras_bytes)); - } else { - bzero(as.pfras_packets, sizeof(as.pfras_packets)); - bzero(as.pfras_bytes, sizeof(as.pfras_bytes)); - as.pfras_a.pfra_fback = PFR_FB_NOCOUNT; - } - as.pfras_tzero = ke->pfrke_tzero; - bcopy(&as, w->pfrw_astats, sizeof(as)); w->pfrw_astats++; } @@ -1253,6 +1322,7 @@ pfr_get_tstats(struct 
pfr_table *filter, struct pfr_ts struct pfr_ktableworkq workq; int n, nn; long tzero = time_second; + int pfr_dir, pfr_op; /* XXX PFR_FLAG_CLSTATS disabled */ ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); @@ -1271,7 +1341,25 @@ pfr_get_tstats(struct pfr_table *filter, struct pfr_ts continue; if (n-- <= 0) continue; - bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl)); + bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t, + sizeof(struct pfr_table)); + for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { + for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { + tbl->pfrts_packets[pfr_dir][pfr_op] = + counter_u64_fetch( + p->pfrkt_packets[pfr_dir][pfr_op]); + tbl->pfrts_bytes[pfr_dir][pfr_op] = + counter_u64_fetch( + p->pfrkt_bytes[pfr_dir][pfr_op]); + } + } + tbl->pfrts_match = counter_u64_fetch(p->pfrkt_match); + tbl->pfrts_nomatch = counter_u64_fetch(p->pfrkt_nomatch); + tbl->pfrts_tzero = p->pfrkt_tzero; + tbl->pfrts_cnt = p->pfrkt_cnt; + for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++) + tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op]; + tbl++; SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); } if (flags & PFR_FLAG_CLSTATS) @@ -1605,7 +1693,7 @@ pfr_commit_ktable(struct pfr_ktable *kt, long tzero) q->pfrke_mark = 1; SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq); } else { - p->pfrke_tzero = tzero; + p->pfrke_counters.pfrkc_tzero = tzero; SLIST_INSERT_HEAD(&addq, p, pfrke_workq); } } @@ -1789,14 +1877,20 @@ static void pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse) { struct pfr_kentryworkq addrq; + int pfr_dir, pfr_op; if (recurse) { pfr_enqueue_addrs(kt, &addrq, NULL, 0); pfr_clstats_kentries(&addrq, tzero, 0); } - bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets)); - bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes)); - kt->pfrkt_match = kt->pfrkt_nomatch = 0; + for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { + for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { + counter_u64_zero(kt->pfrkt_packets[pfr_dir][pfr_op]); + 
counter_u64_zero(kt->pfrkt_bytes[pfr_dir][pfr_op]); + } + } + counter_u64_zero(kt->pfrkt_match); + counter_u64_zero(kt->pfrkt_nomatch); kt->pfrkt_tzero = tzero; } @@ -1805,6 +1899,7 @@ pfr_create_ktable(struct pfr_table *tbl, long tzero, i { struct pfr_ktable *kt; struct pf_ruleset *rs; + int pfr_dir, pfr_op; PF_RULES_WASSERT(); @@ -1823,6 +1918,34 @@ pfr_create_ktable(struct pfr_table *tbl, long tzero, i rs->tables++; } + for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { + for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { + kt->pfrkt_packets[pfr_dir][pfr_op] = + counter_u64_alloc(M_NOWAIT); + if (! kt->pfrkt_packets[pfr_dir][pfr_op]) { + pfr_destroy_ktable(kt, 0); + return (NULL); + } + kt->pfrkt_bytes[pfr_dir][pfr_op] = + counter_u64_alloc(M_NOWAIT); + if (! kt->pfrkt_bytes[pfr_dir][pfr_op]) { + pfr_destroy_ktable(kt, 0); + return (NULL); + } + } + } + kt->pfrkt_match = counter_u64_alloc(M_NOWAIT); + if (! kt->pfrkt_match) { + pfr_destroy_ktable(kt, 0); + return (NULL); + } + + kt->pfrkt_nomatch = counter_u64_alloc(M_NOWAIT); + if (! 
kt->pfrkt_nomatch) { + pfr_destroy_ktable(kt, 0); + return (NULL); + } + if (!rn_inithead((void **)&kt->pfrkt_ip4, offsetof(struct sockaddr_in, sin_addr) * 8) || !rn_inithead((void **)&kt->pfrkt_ip6, @@ -1850,6 +1973,7 @@ static void pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) { struct pfr_kentryworkq addrq; + int pfr_dir, pfr_op; if (flushaddr) { pfr_enqueue_addrs(kt, &addrq, NULL, 0); @@ -1866,6 +1990,15 @@ pfr_destroy_ktable(struct pfr_ktable *kt, int flushadd kt->pfrkt_rs->tables--; pf_remove_if_empty_ruleset(kt->pfrkt_rs); } + for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { + for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { + counter_u64_free(kt->pfrkt_packets[pfr_dir][pfr_op]); + counter_u64_free(kt->pfrkt_bytes[pfr_dir][pfr_op]); + } + } + counter_u64_free(kt->pfrkt_match); + counter_u64_free(kt->pfrkt_nomatch); + free(kt, M_PFTABLE); } @@ -1934,9 +2067,9 @@ pfr_match_addr(struct pfr_ktable *kt, struct pf_addr * } match = (ke && !ke->pfrke_not); if (match) - kt->pfrkt_match++; + counter_u64_add(kt->pfrkt_match, 1); else - kt->pfrkt_nomatch++; + counter_u64_add(kt->pfrkt_nomatch, 1); return (match); } @@ -1991,17 +2124,14 @@ pfr_update_stats(struct pfr_ktable *kt, struct pf_addr ("pfr_update_stats: assertion failed.\n")); op_pass = PFR_OP_XPASS; } - kt->pfrkt_packets[dir_out][op_pass]++; - kt->pfrkt_bytes[dir_out][op_pass] += len; + counter_u64_add(kt->pfrkt_packets[dir_out][op_pass], 1); + counter_u64_add(kt->pfrkt_bytes[dir_out][op_pass], len); if (ke != NULL && op_pass != PFR_OP_XPASS && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { - if (ke->pfrke_counters == NULL) - ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z, - M_NOWAIT | M_ZERO); - if (ke->pfrke_counters != NULL) { - ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++; - ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len; - } + counter_u64_add(ke->pfrke_counters. + pfrkc_packets[dir_out][op_pass], 1); + counter_u64_add(ke->pfrke_counters. 
+ pfrkc_bytes[dir_out][op_pass], len); } } @@ -2091,7 +2221,7 @@ pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct _next_block: ke = pfr_kentry_byidx(kt, idx, af); if (ke == NULL) { - kt->pfrkt_nomatch++; + counter_u64_add(kt->pfrkt_nomatch, 1); return (1); } pfr_prepare_network(&umask, af, ke->pfrke_net); @@ -2116,7 +2246,7 @@ _next_block: /* this is a single IP address - no possible nested block */ PF_ACPY(counter, addr, af); *pidx = idx; - kt->pfrkt_match++; + counter_u64_add(kt->pfrkt_match, 1); return (0); } for (;;) { @@ -2136,7 +2266,7 @@ _next_block: /* lookup return the same block - perfect */ PF_ACPY(counter, addr, af); *pidx = idx; - kt->pfrkt_match++; + counter_u64_add(kt->pfrkt_match, 1); return (0); }