Date: Fri, 14 Feb 2025 15:10:59 GMT
From: Mark Johnston <markj@FreeBSD.org>
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject: git: 031800c78682 - main - gve: Do minor cleanup and bump version
Message-ID: <202502141510.51EFAxE0063356@gitrepo.freebsd.org>
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=031800c786823a9ad4c4d2f79f217d42dad3f5d1

commit 031800c786823a9ad4c4d2f79f217d42dad3f5d1
Author:     Jasper Tran O'Leary <jtranoleary@google.com>
AuthorDate: 2025-02-14 15:04:16 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2025-02-14 15:08:23 +0000

    gve: Do minor cleanup and bump version

    This commit fixes several minor issues:

    - Removes an unnecessary function pointer parameter on gve_start_tx_ring
    - Adds a presubmit check against style(9)
    - Replaces mb() and rmb() macros with native atomic_thread_fence_seq_cst()
      and atomic_thread_fence_acq() respectively
    - Fixes various typos throughout
    - Increments the version number to 1.3.2

    Co-authored-by: Vee Agarwal <veethebee@google.com>
    Signed-off-by: Vee Agarwal <veethebee@google.com>
    Signed-off-by: Jasper Tran O'Leary <jtranoleary@google.com>

    Reviewed by:            delphij, markj
    MFC after:              2 weeks
    Differential Revision:  https://reviews.freebsd.org/D48969
---
 sys/dev/gve/gve.h        |  6 +++---
 sys/dev/gve/gve_desc.h   |  4 ++--
 sys/dev/gve/gve_main.c   |  6 +++---
 sys/dev/gve/gve_rx.c     |  2 +-
 sys/dev/gve/gve_rx_dqo.c |  2 +-
 sys/dev/gve/gve_sysctl.c |  4 ++--
 sys/dev/gve/gve_tx.c     | 16 +++++++---------
 sys/dev/gve/gve_tx_dqo.c |  2 +-
 8 files changed, 20 insertions(+), 22 deletions(-)

diff --git a/sys/dev/gve/gve.h b/sys/dev/gve/gve.h
index 92ab6838d5bb..39965c8669cf 100644
--- a/sys/dev/gve/gve.h
+++ b/sys/dev/gve/gve.h
@@ -303,7 +303,7 @@ struct gve_rx_ring {
         SLIST_HEAD(, gve_rx_buf_dqo) free_bufs;
 
         /*
-         * Only used in QPL mode. Pages refered to by if_input-ed mbufs
+         * Only used in QPL mode. Pages referred to by if_input-ed mbufs
         * stay parked here till their wire count comes back to 1.
         * Pages are moved here after there aren't any pending completions.
         */
@@ -450,7 +450,7 @@ struct gve_tx_ring {
        /*
         * The completion taskqueue moves pending-packet objects to this
         * list after freeing the mbuf. The "_prd" denotes that this is
-        * a producer list. The trasnmit taskqueue steals this list once
+        * a producer list. The transmit taskqueue steals this list once
         * its consumer list, with the "_csm" suffix, is depleted.
         */
        int32_t free_pending_pkts_prd;
@@ -458,7 +458,7 @@ struct gve_tx_ring {
        /*
         * The completion taskqueue moves the QPL pages corresponding to a
         * completed packet into this list. It is only used in QPL mode.
-        * The "_prd" denotes that this is a producer list. The trasnmit
+        * The "_prd" denotes that this is a producer list. The transmit
         * taskqueue steals this list once its consumer list, with the "_csm"
         * suffix, is depleted.
         *
diff --git a/sys/dev/gve/gve_desc.h b/sys/dev/gve/gve_desc.h
index 5f09cc8b77b8..48c4ac27596b 100644
--- a/sys/dev/gve/gve_desc.h
+++ b/sys/dev/gve/gve_desc.h
@@ -130,10 +130,10 @@ union gve_rx_data_slot {
        __be64 addr;
 };
 
-/* GVE Recive Packet Descriptor Seq No */
+/* GVE Receive Packet Descriptor Seq No */
 #define GVE_SEQNO(x) (be16toh(x) & 0x7)
 
-/* GVE Recive Packet Descriptor Flags */
+/* GVE Receive Packet Descriptor Flags */
 #define GVE_RXFLG(x) htobe16(1 << (3 + (x)))
 #define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
 #define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
diff --git a/sys/dev/gve/gve_main.c b/sys/dev/gve/gve_main.c
index aa0866c5984b..8e764f9660d7 100644
--- a/sys/dev/gve/gve_main.c
+++ b/sys/dev/gve/gve_main.c
@@ -32,10 +32,10 @@
 #include "gve_adminq.h"
 #include "gve_dqo.h"
 
-#define GVE_DRIVER_VERSION "GVE-FBSD-1.3.1\n"
+#define GVE_DRIVER_VERSION "GVE-FBSD-1.3.2\n"
 #define GVE_VERSION_MAJOR 1
 #define GVE_VERSION_MINOR 3
-#define GVE_VERSION_SUB 1
+#define GVE_VERSION_SUB 2
 
 #define GVE_DEFAULT_RX_COPYBREAK 256
 
@@ -391,7 +391,7 @@ gve_setup_ifnet(device_t dev, struct gve_priv *priv)
        /*
         * Set TSO limits, must match the arguments to bus_dma_tag_create
         * when creating tx->dqo.buf_dmatag. Only applies to the RDA mode
-        * because in QPL we copy the entire pakcet into the bounce buffer
+        * because in QPL we copy the entire packet into the bounce buffer
         * and thus it does not matter how fragmented the mbuf is.
         */
        if (!gve_is_gqi(priv) && !gve_is_qpl(priv)) {
diff --git a/sys/dev/gve/gve_rx.c b/sys/dev/gve/gve_rx.c
index 35f22f2308f0..e540ad6f4c11 100644
--- a/sys/dev/gve/gve_rx.c
+++ b/sys/dev/gve/gve_rx.c
@@ -706,7 +706,7 @@ gve_rx_cleanup_tq(void *arg, int pending)
         * interrupt but they will still be handled by the enqueue below.
         * Fragments received after the barrier WILL trigger an interrupt.
         */
-       mb();
+       atomic_thread_fence_seq_cst();
 
        if (gve_rx_work_pending(rx)) {
                gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
diff --git a/sys/dev/gve/gve_rx_dqo.c b/sys/dev/gve/gve_rx_dqo.c
index 6c5d656aaa04..6ce9ddd887d0 100644
--- a/sys/dev/gve/gve_rx_dqo.c
+++ b/sys/dev/gve/gve_rx_dqo.c
@@ -972,7 +972,7 @@ gve_rx_cleanup_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, int budget)
                 * Prevent generation bit from being read after the rest of the
                 * descriptor.
                 */
-               rmb();
+               atomic_thread_fence_acq();
 
                rx->cnt++;
                rx->dqo.tail = (rx->dqo.tail + 1) & rx->dqo.mask;
diff --git a/sys/dev/gve/gve_sysctl.c b/sys/dev/gve/gve_sysctl.c
index 7a091d9caa43..c96d082837a4 100644
--- a/sys/dev/gve/gve_sysctl.c
+++ b/sys/dev/gve/gve_sysctl.c
@@ -94,7 +94,7 @@ gve_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
            "rx_mbuf_dmamap_err", CTLFLAG_RD,
            &stats->rx_mbuf_dmamap_err,
-           "Number of rx mbufs which couldnt be dma mapped");
+           "Number of rx mbufs which could not be dma mapped");
        SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
            "rx_mbuf_mclget_null", CTLFLAG_RD,
            &stats->rx_mbuf_mclget_null,
@@ -170,7 +170,7 @@ gve_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_mbuf_collpase", CTLFLAG_RD,
            &stats->tx_mbuf_collapse,
-           "tx mbufs that had to be collpased");
+           "tx mbufs that had to be collapsed");
        SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
            "tx_mbuf_defrag", CTLFLAG_RD,
            &stats->tx_mbuf_defrag,
diff --git a/sys/dev/gve/gve_tx.c b/sys/dev/gve/gve_tx.c
index e7e10e526cb9..04dde4f1a79b 100644
--- a/sys/dev/gve/gve_tx.c
+++ b/sys/dev/gve/gve_tx.c
@@ -240,15 +240,16 @@ gve_clear_tx_ring(struct gve_priv *priv, int i)
 }
 
 static void
-gve_start_tx_ring(struct gve_priv *priv, int i,
-    void (cleanup) (void *arg, int pending))
+gve_start_tx_ring(struct gve_priv *priv, int i)
 {
        struct gve_tx_ring *tx = &priv->tx[i];
        struct gve_ring_com *com = &tx->com;
 
        atomic_store_bool(&tx->stopped, false);
-
-       NET_TASK_INIT(&com->cleanup_task, 0, cleanup, tx);
+       if (gve_is_gqi(priv))
+               NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq, tx);
+       else
+               NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq_dqo, tx);
        com->cleanup_tq = taskqueue_create_fast("gve tx", M_WAITOK,
            taskqueue_thread_enqueue, &com->cleanup_tq);
        taskqueue_start_threads(&com->cleanup_tq, 1, PI_NET, "%s txq %d",
@@ -297,10 +298,7 @@ gve_create_tx_rings(struct gve_priv *priv)
                com->db_offset = 4 * be32toh(com->q_resources->db_index);
                com->counter_idx = be32toh(com->q_resources->counter_index);
 
-               if (gve_is_gqi(priv))
-                       gve_start_tx_ring(priv, i, gve_tx_cleanup_tq);
-               else
-                       gve_start_tx_ring(priv, i, gve_tx_cleanup_tq_dqo);
+               gve_start_tx_ring(priv, i);
        }
 
        gve_set_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK);
@@ -421,7 +419,7 @@ gve_tx_cleanup_tq(void *arg, int pending)
         * interrupt but they will still be handled by the enqueue below.
         * Completions born after the barrier WILL trigger an interrupt.
         */
-       mb();
+       atomic_thread_fence_seq_cst();
 
        nic_done = gve_tx_load_event_counter(priv, tx);
        todo = nic_done - tx->done;
diff --git a/sys/dev/gve/gve_tx_dqo.c b/sys/dev/gve/gve_tx_dqo.c
index fab2d6d0f613..b4bcb1fd01c5 100644
--- a/sys/dev/gve/gve_tx_dqo.c
+++ b/sys/dev/gve/gve_tx_dqo.c
@@ -1031,7 +1031,7 @@ gve_tx_cleanup_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, int budget)
                 * Prevent generation bit from being read after the rest of the
                 * descriptor.
                 */
-               rmb();
+               atomic_thread_fence_acq();
 
                type = compl_desc->type;
                if (type == GVE_COMPL_TYPE_DQO_DESC) {
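
For readers skimming the fence conversion above: what follows is a minimal, self-contained
C11 userland sketch of the same ordering pattern, not the driver code itself. The ring
layout and the names compl_desc, consume_one, ring, tail, and cur_gen are illustrative
assumptions. In the kernel the driver now uses FreeBSD's native atomic_thread_fence_acq()
and atomic_thread_fence_seq_cst() from machine/atomic.h; they play the roles of the
acquire and seq_cst C11 fences shown here.

/*
 * Sketch of the descriptor-ring ordering the commit preserves while
 * replacing mb()/rmb() with the native fence functions.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct compl_desc {
	_Atomic uint8_t gen;	/* generation bit flipped by the producer */
	uint8_t type;
	uint16_t payload;
};

#define RING_SIZE 8

static struct compl_desc ring[RING_SIZE];
static unsigned tail;
static uint8_t cur_gen = 1;

/* Consume one descriptor if its generation bit says it is ready. */
static bool
consume_one(void)
{
	struct compl_desc *desc = &ring[tail & (RING_SIZE - 1)];

	if (atomic_load_explicit(&desc->gen, memory_order_relaxed) != cur_gen)
		return (false);

	/*
	 * Acquire fence: keep the reads of desc->type/payload below from
	 * being reordered before the generation-bit check above, the role
	 * rmb()/atomic_thread_fence_acq() plays in the driver.
	 */
	atomic_thread_fence(memory_order_acquire);

	printf("desc %u: type %u payload %u\n", tail,
	    (unsigned)desc->type, (unsigned)desc->payload);
	tail++;
	return (true);
}

int
main(void)
{
	/* Producer side (normally another thread or the NIC): publish one entry. */
	ring[0].type = 1;
	ring[0].payload = 42;
	/* Release the payload before flipping the generation bit. */
	atomic_store_explicit(&ring[0].gen, 1, memory_order_release);

	while (consume_one())
		;

	/*
	 * Full fence before re-checking for work, analogous to the
	 * mb()/atomic_thread_fence_seq_cst() placed before
	 * gve_rx_work_pending()/gve_tx_load_event_counter() once the
	 * interrupt has been unmasked. In this single-threaded demo no
	 * late descriptor appears, so the re-check simply returns false.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	if (consume_one())
		printf("late descriptor handled without another interrupt\n");
	return (0);
}

The acquire fence keeps the descriptor body from being read ahead of its generation bit;
the full fence ensures the post-unmask re-check cannot miss work published just before
the interrupt was re-enabled, which is exactly what the driver's surrounding comments
describe.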