Date:      Fri, 2 Dec 2016 21:29:52 +0000 (UTC)
From:      John Baldwin <jhb@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r309442 - in stable/10/sys/dev/cxgbe: . iw_cxgbe tom
Message-ID:  <201612022129.uB2LTq35028199@repo.freebsd.org>

Author: jhb
Date: Fri Dec  2 21:29:52 2016
New Revision: 309442
URL: https://svnweb.freebsd.org/changeset/base/309442

Log:
  MFC 302339:
  cxgbe(4): Changes to the CPL-handler registration mechanism and code
  related to "shared" CPLs.
  
  a) Combine t4_set_tcb_field and t4_set_tcb_field_rpl into a single
  function.  Allow callers to direct the response to any iq.  Tidy up
  set_ulp_mode_iscsi while there to use names from t4_tcb.h instead of
  magic constants.
  
  b) Remove all CPL handler tables from struct adapter.  This reduces its
  size by around 2KB.  All handlers are now registered at MOD_LOAD instead
  of at attach time or during some other initialization/activation step.
  The registration functions no longer take an adapter parameter (a short
  usage sketch follows this log).
  
  c) Add per-iq handlers to deal with CPLs whose destination cannot be
  determined solely from the opcode.  There are 2 such CPLs in use right
  now: SET_TCB_RPL and L2T_WRITE_RPL.  The base driver continues to send
  filter and L2T_WRITEs over the mgmtq and solicits the reply on fwq.
  t4_tom (including the DDP code) now uses the port's ctrlq to send
  L2T_WRITEs and SET_TCB_FIELDs and solicits the reply on an ofld_rxq.
  fwq and ofld_rxq have different handlers that know what kind of tid to
  expect in the reply.  Update t4_write_l2e and callers to support any
  wrq/iq combination.
  
  Sponsored by:	Chelsio Communications
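
For readers skimming the diff, the following is a minimal sketch (not part of
the commit) of how the reworked mechanism in (b) and (c) is meant to be used.
The opcode CPL_EXAMPLE, the function names, and the per-iq example_rpl field
are hypothetical placeholders; only t4_register_cpl_handler() and the per-iq
trampoline pattern come from this change.

/*
 * Hypothetical consumer of the reworked interface.  CPL_EXAMPLE, the
 * handler names and the iq->example_rpl field are made-up placeholders;
 * t4_register_cpl_handler() and the trampoline pattern are what this
 * commit adds.
 */
#include <sys/param.h>
#include <sys/module.h>
#include <sys/mbuf.h>

#include "adapter.h"		/* cpl_handler_t, t4_register_cpl_handler() */

static int
do_example_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

	/* Opcode-specific work goes here; free any payload handed in. */
	m_freem(m);
	return (0);
}

/*
 * (b) Handlers are installed once, at MOD_LOAD, into the module-wide
 * dispatch table; no struct adapter argument, no per-adapter table.
 * Registering NULL restores the default "not handled" handler, which is
 * how MOD_UNLOAD cleans up.  (Other module events are ignored here.)
 */
static int
example_mod_event(module_t mod, int cmd, void *arg)
{

	switch (cmd) {
	case MOD_LOAD:
		t4_register_cpl_handler(CPL_EXAMPLE, do_example_rpl);
		break;
	case MOD_UNLOAD:
		t4_register_cpl_handler(CPL_EXAMPLE, NULL);
		break;
	}
	return (0);
}

/*
 * (c) For a "shared" CPL whose consumer depends on which queue the reply
 * arrives on, the globally registered handler only forwards to a pointer
 * stored in the ingress queue itself (compare set_tcb_rpl() and
 * l2t_write_rpl() in t4_main.c below); the fwq and the offload rx queues
 * install different targets when they are set up.
 */
static int
example_shared_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{

	MPASS(iq->example_rpl != NULL);	/* hypothetical per-iq field */
	return (iq->example_rpl(iq, rss, m));
}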

Modified:
  stable/10/sys/dev/cxgbe/adapter.h
  stable/10/sys/dev/cxgbe/iw_cxgbe/cm.c
  stable/10/sys/dev/cxgbe/iw_cxgbe/device.c
  stable/10/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
  stable/10/sys/dev/cxgbe/t4_l2t.c
  stable/10/sys/dev/cxgbe/t4_l2t.h
  stable/10/sys/dev/cxgbe/t4_main.c
  stable/10/sys/dev/cxgbe/t4_sge.c
  stable/10/sys/dev/cxgbe/tom/t4_connect.c
  stable/10/sys/dev/cxgbe/tom/t4_cpl_io.c
  stable/10/sys/dev/cxgbe/tom/t4_ddp.c
  stable/10/sys/dev/cxgbe/tom/t4_listen.c
  stable/10/sys/dev/cxgbe/tom/t4_tom.c
  stable/10/sys/dev/cxgbe/tom/t4_tom.h
  stable/10/sys/dev/cxgbe/tom/t4_tom_l2t.c
  stable/10/sys/dev/cxgbe/tom/t4_tom_l2t.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/dev/cxgbe/adapter.h
==============================================================================
--- stable/10/sys/dev/cxgbe/adapter.h	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/adapter.h	Fri Dec  2 21:29:52 2016	(r309442)
@@ -365,6 +365,13 @@ enum {
 	NM_BUSY	= 2,
 };
 
+struct sge_iq;
+struct rss_header;
+typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
+    struct mbuf *);
+typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
+typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);
+
 /*
  * Ingress Queue: T4 is producer, driver is consumer.
  */
@@ -372,6 +379,8 @@ struct sge_iq {
 	uint32_t flags;
 	volatile int state;
 	struct adapter *adapter;
+	cpl_handler_t set_tcb_rpl;
+	cpl_handler_t l2t_write_rpl;
 	struct iq_desc  *desc;	/* KVA of descriptor ring */
 	int8_t   intr_pktc_idx;	/* packet count threshold index */
 	uint8_t  gen;		/* generation bit */
@@ -732,12 +741,6 @@ struct sge {
 	struct hw_buf_info hw_buf_info[SGE_FLBUF_SIZES];
 };
 
-struct rss_header;
-typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
-    struct mbuf *);
-typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
-typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);
-
 struct adapter {
 	SLIST_ENTRY(adapter) link;
 	device_t dev;
@@ -776,6 +779,7 @@ struct adapter {
 
 	struct sge sge;
 	int lro_timeout;
+	int sc_do_rxcopy;
 
 	struct taskqueue *tq[MAX_NCHAN];	/* General purpose taskqueues */
 	struct port_info *port[MAX_NPORTS];
@@ -835,15 +839,9 @@ struct adapter {
 
 	struct memwin memwin[NUM_MEMWIN];	/* memory windows */
 
-	an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
-	fw_msg_handler_t fw_msg_handler[7];	/* NUM_FW6_TYPES */
-	cpl_handler_t cpl_handler[0xef];	/* NUM_CPL_CMDS */
-
 	const char *last_op;
 	const void *last_op_thr;
 	int last_op_flags;
-
-	int sc_do_rxcopy;
 };
 
 #define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
@@ -1073,9 +1071,6 @@ int t4_os_pci_restore_state(struct adapt
 void t4_os_portmod_changed(const struct adapter *, int);
 void t4_os_link_changed(struct adapter *, int, int, int);
 void t4_iterate(void (*)(struct adapter *, void *), void *);
-int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
-int t4_register_an_handler(struct adapter *, an_handler_t);
-int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
 int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
 int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *);
 void doom_vi(struct adapter *, struct vi_info *);
@@ -1100,7 +1095,6 @@ void t4_nm_intr(void *);
 void t4_sge_modload(void);
 void t4_sge_modunload(void);
 uint64_t t4_sge_extfree_refs(void);
-void t4_init_sge_cpl_handlers(struct adapter *);
 void t4_tweak_chip_settings(struct adapter *);
 int t4_read_chip_settings(struct adapter *);
 int t4_create_dma_tag(struct adapter *);
@@ -1122,6 +1116,9 @@ int parse_pkt(struct mbuf **);
 void *start_wrq_wr(struct sge_wrq *, int, struct wrq_cookie *);
 void commit_wrq_wr(struct sge_wrq *, void *, struct wrq_cookie *);
 int tnl_cong(struct port_info *, int);
+int t4_register_an_handler(an_handler_t);
+int t4_register_fw_msg_handler(int, fw_msg_handler_t);
+int t4_register_cpl_handler(int, cpl_handler_t);
 
 /* t4_tracer.c */
 struct t4_tracer;

Modified: stable/10/sys/dev/cxgbe/iw_cxgbe/cm.c
==============================================================================
--- stable/10/sys/dev/cxgbe/iw_cxgbe/cm.c	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/iw_cxgbe/cm.c	Fri Dec  2 21:29:52 2016	(r309442)
@@ -2479,28 +2479,14 @@ static int terminate(struct sge_iq *iq, 
 	return 0;
 }
 
-	void
-c4iw_cm_init_cpl(struct adapter *sc)
-{
-
-	t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, terminate);
-	t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, fw6_wr_rpl);
-	t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, fw6_cqe_handler);
-	t4_register_an_handler(sc, c4iw_ev_handler);
-}
-
-	void
-c4iw_cm_term_cpl(struct adapter *sc)
-{
-
-	t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, NULL);
-	t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, NULL);
-	t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, NULL);
-}
-
 int __init c4iw_cm_init(void)
 {
 
+	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
+	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
+	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
+	t4_register_an_handler(c4iw_ev_handler);
+
 	TAILQ_INIT(&req_list);
 	spin_lock_init(&req_lock);
 	INIT_LIST_HEAD(&timeout_list);
@@ -2512,7 +2498,6 @@ int __init c4iw_cm_init(void)
 	if (!c4iw_taskq)
 		return -ENOMEM;
 
-
 	return 0;
 }
 
@@ -2522,5 +2507,10 @@ void __exit c4iw_cm_term(void)
 	WARN_ON(!list_empty(&timeout_list));
 	flush_workqueue(c4iw_taskq);
 	destroy_workqueue(c4iw_taskq);
+
+	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
+	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
+	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
+	t4_register_an_handler(NULL);
 }
 #endif

Modified: stable/10/sys/dev/cxgbe/iw_cxgbe/device.c
==============================================================================
--- stable/10/sys/dev/cxgbe/iw_cxgbe/device.c	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/iw_cxgbe/device.c	Fri Dec  2 21:29:52 2016	(r309442)
@@ -227,7 +227,6 @@ c4iw_activate(struct adapter *sc)
 	}
 
 	sc->iwarp_softc = iwsc;
-	c4iw_cm_init_cpl(sc);
 
 	rc = -c4iw_register_device(iwsc);
 	if (rc) {

Modified: stable/10/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
==============================================================================
--- stable/10/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h	Fri Dec  2 21:29:52 2016	(r309442)
@@ -974,9 +974,6 @@ int idr_for_each(struct idr *idp,
         return error;
 }
 
-void c4iw_cm_init_cpl(struct adapter *);
-void c4iw_cm_term_cpl(struct adapter *);
-
 void your_reg_device(struct c4iw_dev *dev);
 
 #define SGE_CTRLQ_NUM	0

Modified: stable/10/sys/dev/cxgbe/t4_l2t.c
==============================================================================
--- stable/10/sys/dev/cxgbe/t4_l2t.c	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/t4_l2t.c	Fri Dec  2 21:29:52 2016	(r309442)
@@ -110,27 +110,34 @@ found:
  * The write may be synchronous or asynchronous.
  */
 int
-t4_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
+t4_write_l2e(struct l2t_entry *e, int sync)
 {
+	struct sge_wrq *wrq;
+	struct adapter *sc;
 	struct wrq_cookie cookie;
 	struct cpl_l2t_write_req *req;
-	int idx = e->idx + sc->vres.l2t.start;
+	int idx;
 
 	mtx_assert(&e->lock, MA_OWNED);
+	MPASS(e->wrq != NULL);
 
-	req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie);
+	wrq = e->wrq;
+	sc = wrq->adapter;
+
+	req = start_wrq_wr(wrq, howmany(sizeof(*req), 16), &cookie);
 	if (req == NULL)
 		return (ENOMEM);
 
+	idx = e->idx + sc->vres.l2t.start;
 	INIT_TP_WR(req, 0);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
-	    V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id)));
+	    V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
 	req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
 	req->l2t_idx = htons(idx);
 	req->vlan = htons(e->vlan);
 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
 
-	commit_wrq_wr(&sc->sge.mgmtq, req, &cookie);
+	commit_wrq_wr(wrq, req, &cookie);
 
 	if (sync && e->state != L2T_STATE_SWITCHING)
 		e->state = L2T_STATE_SYNC_WRITE;
@@ -172,9 +179,11 @@ t4_l2t_set_switching(struct adapter *sc,
 
 	e->vlan = vlan;
 	e->lport = port;
+	e->wrq = &sc->sge.mgmtq;
+	e->iqid = sc->sge.fwq.abs_id;
 	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
 	mtx_lock(&e->lock);
-	rc = t4_write_l2e(sc, e, 0);
+	rc = t4_write_l2e(e, 0);
 	mtx_unlock(&e->lock);
 	return (rc);
 }
@@ -210,7 +219,6 @@ t4_init_l2t(struct adapter *sc, int flag
 	}
 
 	sc->l2t = d;
-	t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
 
 	return (0);
 }

Modified: stable/10/sys/dev/cxgbe/t4_l2t.h
==============================================================================
--- stable/10/sys/dev/cxgbe/t4_l2t.h	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/t4_l2t.h	Fri Dec  2 21:29:52 2016	(r309442)
@@ -61,6 +61,8 @@ struct l2t_entry {
 	uint16_t state;			/* entry state */
 	uint16_t idx;			/* entry index */
 	uint32_t addr[4];		/* next hop IP or IPv6 address */
+	uint32_t iqid;			/* iqid for reply to write_l2e */
+	struct sge_wrq *wrq;		/* queue to use for write_l2e */
 	struct ifnet *ifp;		/* outgoing interface */
 	uint16_t smt_idx;		/* SMT index */
 	uint16_t vlan;			/* VLAN TCI (id: 0-11, prio: 13-15) */
@@ -90,7 +92,7 @@ struct l2t_entry *t4_alloc_l2e(struct l2
 struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *);
 int t4_l2t_set_switching(struct adapter *, struct l2t_entry *, uint16_t,
     uint8_t, uint8_t *);
-int t4_write_l2e(struct adapter *, struct l2t_entry *, int);
+int t4_write_l2e(struct l2t_entry *, int);
 int do_l2t_write_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
 
 static inline void

Modified: stable/10/sys/dev/cxgbe/t4_main.c
==============================================================================
--- stable/10/sys/dev/cxgbe/t4_main.c	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/t4_main.c	Fri Dec  2 21:29:52 2016	(r309442)
@@ -459,10 +459,6 @@ static void vi_refresh_stats(struct adap
 static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
 static void cxgbe_tick(void *);
 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
-static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
-    struct mbuf *);
-static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
-static int fw_msg_not_handled(struct adapter *, const __be64 *);
 static void t4_sysctls(struct adapter *);
 static void cxgbe_sysctls(struct port_info *);
 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
@@ -526,6 +522,8 @@ static int del_filter(struct adapter *, 
 static void clear_filter(struct filter_entry *);
 static int set_filter_wr(struct adapter *, int);
 static int del_filter_wr(struct adapter *, int);
+static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
+    struct mbuf *);
 static int get_sge_context(struct adapter *, struct t4_sge_context *);
 static int load_fw(struct adapter *, struct t4_data *);
 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
@@ -590,11 +588,6 @@ struct {
 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
 #endif
-
-/* No easy way to include t4_msg.h before adapter.h so we check this way */
-CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
-CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
-
 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
 
 static int
@@ -740,15 +733,6 @@ t4_attach(device_t dev)
 	sc->mbox = sc->pf;
 
 	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
-	sc->an_handler = an_not_handled;
-	for (i = 0; i < nitems(sc->cpl_handler); i++)
-		sc->cpl_handler[i] = cpl_not_handled;
-	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
-		sc->fw_msg_handler[i] = fw_msg_not_handled;
-	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
-	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
-	t4_register_cpl_handler(sc, CPL_T5_TRACE_PKT, t5_trace_pkt);
-	t4_init_sge_cpl_handlers(sc);
 
 	/* Prepare the adapter for operation. */
 	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
@@ -4424,98 +4408,6 @@ cxgbe_vlan_config(void *arg, struct ifne
 	VLAN_SETCOOKIE(vlan, ifp);
 }
 
-static int
-cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
-{
-
-#ifdef INVARIANTS
-	panic("%s: opcode 0x%02x on iq %p with payload %p",
-	    __func__, rss->opcode, iq, m);
-#else
-	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
-	    __func__, rss->opcode, iq, m);
-	m_freem(m);
-#endif
-	return (EDOOFUS);
-}
-
-int
-t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
-{
-	uintptr_t *loc, new;
-
-	if (opcode >= nitems(sc->cpl_handler))
-		return (EINVAL);
-
-	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
-	loc = (uintptr_t *) &sc->cpl_handler[opcode];
-	atomic_store_rel_ptr(loc, new);
-
-	return (0);
-}
-
-static int
-an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
-{
-
-#ifdef INVARIANTS
-	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
-#else
-	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
-	    __func__, iq, ctrl);
-#endif
-	return (EDOOFUS);
-}
-
-int
-t4_register_an_handler(struct adapter *sc, an_handler_t h)
-{
-	uintptr_t *loc, new;
-
-	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
-	loc = (uintptr_t *) &sc->an_handler;
-	atomic_store_rel_ptr(loc, new);
-
-	return (0);
-}
-
-static int
-fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
-{
-	const struct cpl_fw6_msg *cpl =
-	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
-
-#ifdef INVARIANTS
-	panic("%s: fw_msg type %d", __func__, cpl->type);
-#else
-	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
-#endif
-	return (EDOOFUS);
-}
-
-int
-t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
-{
-	uintptr_t *loc, new;
-
-	if (type >= nitems(sc->fw_msg_handler))
-		return (EINVAL);
-
-	/*
-	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
-	 * handler dispatch table.  Reject any attempt to install a handler for
-	 * this subtype.
-	 */
-	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
-		return (EINVAL);
-
-	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
-	loc = (uintptr_t *) &sc->fw_msg_handler[type];
-	atomic_store_rel_ptr(loc, new);
-
-	return (0);
-}
-
 /*
  * Should match fw_caps_config_<foo> enums in t4fw_interface.h
  */
@@ -8196,39 +8088,54 @@ t4_filter_rpl(struct sge_iq *iq, const s
 
 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
 	    rss->opcode));
+	MPASS(iq == &sc->sge.fwq);
+	MPASS(is_ftid(sc, idx));
 
-	if (is_ftid(sc, idx)) {
-
-		idx -= sc->tids.ftid_base;
-		f = &sc->tids.ftid_tab[idx];
-		rc = G_COOKIE(rpl->cookie);
-
-		mtx_lock(&sc->tids.ftid_lock);
-		if (rc == FW_FILTER_WR_FLT_ADDED) {
-			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
-			    __func__, idx));
-			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
-			f->pending = 0;  /* asynchronous setup completed */
-			f->valid = 1;
-		} else {
-			if (rc != FW_FILTER_WR_FLT_DELETED) {
-				/* Add or delete failed, display an error */
-				log(LOG_ERR,
-				    "filter %u setup failed with error %u\n",
-				    idx, rc);
-			}
-
-			clear_filter(f);
-			sc->tids.ftids_in_use--;
+	idx -= sc->tids.ftid_base;
+	f = &sc->tids.ftid_tab[idx];
+	rc = G_COOKIE(rpl->cookie);
+
+	mtx_lock(&sc->tids.ftid_lock);
+	if (rc == FW_FILTER_WR_FLT_ADDED) {
+		KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
+		    __func__, idx));
+		f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
+		f->pending = 0;  /* asynchronous setup completed */
+		f->valid = 1;
+	} else {
+		if (rc != FW_FILTER_WR_FLT_DELETED) {
+			/* Add or delete failed, display an error */
+			log(LOG_ERR,
+			    "filter %u setup failed with error %u\n",
+			    idx, rc);
 		}
-		wakeup(&sc->tids.ftid_tab);
-		mtx_unlock(&sc->tids.ftid_lock);
+
+		clear_filter(f);
+		sc->tids.ftids_in_use--;
 	}
+	wakeup(&sc->tids.ftid_tab);
+	mtx_unlock(&sc->tids.ftid_lock);
 
 	return (0);
 }
 
 static int
+set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+
+	MPASS(iq->set_tcb_rpl != NULL);
+	return (iq->set_tcb_rpl(iq, rss, m));
+}
+
+static int
+l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+
+	MPASS(iq->l2t_write_rpl != NULL);
+	return (iq->l2t_write_rpl(iq, rss, m));
+}
+
+static int
 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
 {
 	int rc;
@@ -9403,6 +9310,10 @@ mod_event(module_t mod, int cmd, void *a
 		sx_xlock(&mlu);
 		if (loaded++ == 0) {
 			t4_sge_modload();
+			t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl);
+			t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl);
+			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
+			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
 			sx_init(&t4_list_lock, "T4/T5 adapters");
 			SLIST_INIT(&t4_list);
 #ifdef TCP_OFFLOAD

Modified: stable/10/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- stable/10/sys/dev/cxgbe/t4_sge.c	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/t4_sge.c	Fri Dec  2 21:29:52 2016	(r309442)
@@ -67,6 +67,7 @@ __FBSDID("$FreeBSD$");
 #include "common/t4_regs.h"
 #include "common/t4_regs_values.h"
 #include "common/t4_msg.h"
+#include "t4_l2t.h"
 #include "t4_mp_ring.h"
 
 #ifdef T4_PKT_TIMESTAMP
@@ -252,12 +253,110 @@ static int sysctl_tc(SYSCTL_HANDLER_ARGS
 static counter_u64_t extfree_refs;
 static counter_u64_t extfree_rels;
 
+an_handler_t t4_an_handler;
+fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
+cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
+
+
+static int
+an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
+{
+
+#ifdef INVARIANTS
+	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
+#else
+	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
+	    __func__, iq, ctrl);
+#endif
+	return (EDOOFUS);
+}
+
+int
+t4_register_an_handler(an_handler_t h)
+{
+	uintptr_t *loc, new;
+
+	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
+	loc = (uintptr_t *) &t4_an_handler;
+	atomic_store_rel_ptr(loc, new);
+
+	return (0);
+}
+
+static int
+fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
+{
+	const struct cpl_fw6_msg *cpl =
+	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
+
+#ifdef INVARIANTS
+	panic("%s: fw_msg type %d", __func__, cpl->type);
+#else
+	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
+#endif
+	return (EDOOFUS);
+}
+
+int
+t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
+{
+	uintptr_t *loc, new;
+
+	if (type >= nitems(t4_fw_msg_handler))
+		return (EINVAL);
+
+	/*
+	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
+	 * handler dispatch table.  Reject any attempt to install a handler for
+	 * this subtype.
+	 */
+	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
+		return (EINVAL);
+
+	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
+	loc = (uintptr_t *) &t4_fw_msg_handler[type];
+	atomic_store_rel_ptr(loc, new);
+
+	return (0);
+}
+
+static int
+cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+
+#ifdef INVARIANTS
+	panic("%s: opcode 0x%02x on iq %p with payload %p",
+	    __func__, rss->opcode, iq, m);
+#else
+	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
+	    __func__, rss->opcode, iq, m);
+	m_freem(m);
+#endif
+	return (EDOOFUS);
+}
+
+int
+t4_register_cpl_handler(int opcode, cpl_handler_t h)
+{
+	uintptr_t *loc, new;
+
+	if (opcode >= nitems(t4_cpl_handler))
+		return (EINVAL);
+
+	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
+	loc = (uintptr_t *) &t4_cpl_handler[opcode];
+	atomic_store_rel_ptr(loc, new);
+
+	return (0);
+}
+
 /*
  * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
  */
 void
 t4_sge_modload(void)
 {
+	int i;
 
 	if (fl_pktshift < 0 || fl_pktshift > 7) {
 		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
@@ -290,6 +389,18 @@ t4_sge_modload(void)
 	extfree_rels = counter_u64_alloc(M_WAITOK);
 	counter_u64_zero(extfree_refs);
 	counter_u64_zero(extfree_rels);
+
+	t4_an_handler = an_not_handled;
+	for (i = 0; i < nitems(t4_fw_msg_handler); i++)
+		t4_fw_msg_handler[i] = fw_msg_not_handled;
+	for (i = 0; i < nitems(t4_cpl_handler); i++)
+		t4_cpl_handler[i] = cpl_not_handled;
+
+	t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
+	t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
+	t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
+	t4_register_cpl_handler(CPL_RX_PKT, t4_eth_rx);
+	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
 }
 
 void
@@ -311,17 +422,6 @@ t4_sge_extfree_refs(void)
 	return (refs - rels);
 }
 
-void
-t4_init_sge_cpl_handlers(struct adapter *sc)
-{
-
-	t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_msg);
-	t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_msg);
-	t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
-	t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx);
-	t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
-}
-
 static inline void
 setup_pad_and_pack_boundaries(struct adapter *sc)
 {
@@ -1315,7 +1415,7 @@ service_iq(struct sge_iq *iq, int budget
 				KASSERT(d->rss.opcode < NUM_CPL_CMDS,
 				    ("%s: bad opcode %02x.", __func__,
 				    d->rss.opcode));
-				sc->cpl_handler[d->rss.opcode](iq, &d->rss, m0);
+				t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
 				break;
 
 			case X_RSPD_TYPE_INTR:
@@ -1337,7 +1437,7 @@ service_iq(struct sge_iq *iq, int budget
 				 * iWARP async notification.
 				 */
 				if (lq >= 1024) {
-                                        sc->an_handler(iq, &d->rsp);
+                                        t4_an_handler(iq, &d->rsp);
                                         break;
                                 }
 
@@ -2789,6 +2889,8 @@ alloc_fwq(struct adapter *sc)
 	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE);
 	fwq->flags |= IQ_INTR;	/* always */
 	intr_idx = sc->intr_count > 1 ? 1 : 0;
+	fwq->set_tcb_rpl = t4_filter_rpl;
+	fwq->l2t_write_rpl = do_l2t_write_rpl;
 	rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1);
 	if (rc != 0) {
 		device_printf(sc->dev,
@@ -4674,10 +4776,10 @@ handle_fw_msg(struct sge_iq *iq, const s
 		const struct rss_header *rss2;
 
 		rss2 = (const struct rss_header *)&cpl->data[0];
-		return (sc->cpl_handler[rss2->opcode](iq, rss2, m));
+		return (t4_cpl_handler[rss2->opcode](iq, rss2, m));
 	}
 
-	return (sc->fw_msg_handler[cpl->type](sc, &cpl->data[0]));
+	return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0]));
 }
 
 static int

Modified: stable/10/sys/dev/cxgbe/tom/t4_connect.c
==============================================================================
--- stable/10/sys/dev/cxgbe/tom/t4_connect.c	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/tom/t4_connect.c	Fri Dec  2 21:29:52 2016	(r309442)
@@ -261,11 +261,11 @@ calc_opt2a(struct socket *so, struct toe
 }
 
 void
-t4_init_connect_cpl_handlers(struct adapter *sc)
+t4_init_connect_cpl_handlers(void)
 {
 
-	t4_register_cpl_handler(sc, CPL_ACT_ESTABLISH, do_act_establish);
-	t4_register_cpl_handler(sc, CPL_ACT_OPEN_RPL, do_act_open_rpl);
+	t4_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
+	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
 }
 
 #define DONT_OFFLOAD_ACTIVE_OPEN(x)	do { \

Modified: stable/10/sys/dev/cxgbe/tom/t4_cpl_io.c
==============================================================================
--- stable/10/sys/dev/cxgbe/tom/t4_cpl_io.c	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/tom/t4_cpl_io.c	Fri Dec  2 21:29:52 2016	(r309442)
@@ -1710,7 +1710,7 @@ do_fw4_ack(struct sge_iq *iq, const stru
 	return (0);
 }
 
-static int
+int
 do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
 {
 	struct adapter *sc = iq->adapter;
@@ -1723,9 +1723,7 @@ do_set_tcb_rpl(struct sge_iq *iq, const 
 	KASSERT(opcode == CPL_SET_TCB_RPL,
 	    ("%s: unexpected opcode 0x%x", __func__, opcode));
 	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
-
-	if (is_ftid(sc, tid))
-		return (t4_filter_rpl(iq, rss, m)); /* TCB is a filter */
+	MPASS(iq != &sc->sge.fwq);
 
 	/*
 	 * TOM and/or other ULPs don't request replies for CPL_SET_TCB or
@@ -1744,23 +1742,27 @@ do_set_tcb_rpl(struct sge_iq *iq, const 
 }
 
 void
-t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl,
-    uint16_t word, uint64_t mask, uint64_t val)
+t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, int tid,
+    uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie, int iqid)
 {
 	struct wrqe *wr;
 	struct cpl_set_tcb_field *req;
 
-	wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq);
+	MPASS((cookie & ~M_COOKIE) == 0);
+	MPASS((iqid & ~M_QUEUENO) == 0);
+
+	wr = alloc_wrqe(sizeof(*req), wrq);
 	if (wr == NULL) {
 		/* XXX */
 		panic("%s: allocation failure.", __func__);
 	}
 	req = wrtod(wr);
 
-	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
-	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
-	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
-	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
+	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
+	req->reply_ctrl = htobe16(V_QUEUENO(iqid));
+	if (reply == 0)
+		req->reply_ctrl |= htobe16(F_NO_REPLY);
+	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
 	req->mask = htobe64(mask);
 	req->val = htobe64(val);
 
@@ -1768,22 +1770,26 @@ t4_set_tcb_field(struct adapter *sc, str
 }
 
 void
-t4_init_cpl_io_handlers(struct adapter *sc)
+t4_init_cpl_io_handlers(void)
 {
 
-	t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close);
-	t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl);
-	t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req);
-	t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl);
-	t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data);
-	t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack);
-	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl);
+	t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
+	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
+	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
+	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl);
+	t4_register_cpl_handler(CPL_RX_DATA, do_rx_data);
+	t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack);
 }
 
 void
-t4_uninit_cpl_io_handlers(struct adapter *sc)
+t4_uninit_cpl_io_handlers(void)
 {
 
-	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
+	t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
+	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
+	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
+	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl);
+	t4_register_cpl_handler(CPL_RX_DATA, do_rx_data);
+	t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack);
 }
 #endif

Modified: stable/10/sys/dev/cxgbe/tom/t4_ddp.c
==============================================================================
--- stable/10/sys/dev/cxgbe/tom/t4_ddp.c	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/tom/t4_ddp.c	Fri Dec  2 21:29:52 2016	(r309442)
@@ -480,6 +480,8 @@ handle_ddp_close(struct toepcb *toep, st
 	 F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
 	 F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)
 
+extern cpl_handler_t t4_cpl_handler[];
+
 static int
 do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
 {
@@ -501,7 +503,7 @@ do_rx_data_ddp(struct sge_iq *iq, const 
 	}
 
 	if (toep->ulp_mode == ULP_MODE_ISCSI) {
-		sc->cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
+		t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
 		return (0);
 	}
 
@@ -541,13 +543,14 @@ enable_ddp(struct adapter *sc, struct to
 	    __func__, toep->tid, time_uptime);
 
 	toep->ddp_flags |= DDP_SC_REQ;
-	t4_set_tcb_field(sc, toep, 1, W_TCB_RX_DDP_FLAGS,
+	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_RX_DDP_FLAGS,
 	    V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
 	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
 	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
-	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1));
-	t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS,
-	    V_TF_RCV_COALESCE_ENABLE(1), 0);
+	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1), 0, 0,
+	    toep->ofld_rxq->iq.abs_id);
+	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS,
+	    V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0, toep->ofld_rxq->iq.abs_id);
 }
 
 static inline void
@@ -562,10 +565,11 @@ disable_ddp(struct adapter *sc, struct t
 	    __func__, toep->tid, time_uptime);
 
 	toep->ddp_flags |= DDP_SC_REQ;
-	t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS,
-	    V_TF_RCV_COALESCE_ENABLE(1), V_TF_RCV_COALESCE_ENABLE(1));
-	t4_set_tcb_field(sc, toep, 1, W_TCB_RX_DDP_FLAGS, V_TF_DDP_OFF(1),
-	    V_TF_DDP_OFF(1));
+	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS,
+	    V_TF_RCV_COALESCE_ENABLE(1), V_TF_RCV_COALESCE_ENABLE(1), 0, 0,
+	    toep->ofld_rxq->iq.abs_id);
+	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_RX_DDP_FLAGS,
+	    V_TF_DDP_OFF(1), V_TF_DDP_OFF(1), 0, 0, toep->ofld_rxq->iq.abs_id);
 }
 
 static int
@@ -978,9 +982,6 @@ t4_init_ddp(struct adapter *sc, struct t
 	td->ppod_start = sc->vres.ddp.start;
 	td->ppod_arena = vmem_create("DDP page pods", sc->vres.ddp.start,
 	    sc->vres.ddp.size, 1, 32, M_FIRSTFIT | M_NOWAIT);
-
-	t4_register_cpl_handler(sc, CPL_RX_DATA_DDP, do_rx_data_ddp);
-	t4_register_cpl_handler(sc, CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
 }
 
 void
@@ -1276,4 +1277,20 @@ out:
 	return (error);
 }
 
+int
+t4_ddp_mod_load(void)
+{
+
+	t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
+	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
+	return (0);
+}
+
+void
+t4_ddp_mod_unload(void)
+{
+
+	t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL);
+	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL);
+}
 #endif

Modified: stable/10/sys/dev/cxgbe/tom/t4_listen.c
==============================================================================
--- stable/10/sys/dev/cxgbe/tom/t4_listen.c	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/tom/t4_listen.c	Fri Dec  2 21:29:52 2016	(r309442)
@@ -1641,12 +1641,12 @@ reset:
 }
 
 void
-t4_init_listen_cpl_handlers(struct adapter *sc)
+t4_init_listen_cpl_handlers(void)
 {
 
-	t4_register_cpl_handler(sc, CPL_PASS_OPEN_RPL, do_pass_open_rpl);
-	t4_register_cpl_handler(sc, CPL_CLOSE_LISTSRV_RPL, do_close_server_rpl);
-	t4_register_cpl_handler(sc, CPL_PASS_ACCEPT_REQ, do_pass_accept_req);
-	t4_register_cpl_handler(sc, CPL_PASS_ESTABLISH, do_pass_establish);
+	t4_register_cpl_handler(CPL_PASS_OPEN_RPL, do_pass_open_rpl);
+	t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_close_server_rpl);
+	t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_pass_accept_req);
+	t4_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish);
 }
 #endif

Modified: stable/10/sys/dev/cxgbe/tom/t4_tom.c
==============================================================================
--- stable/10/sys/dev/cxgbe/tom/t4_tom.c	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/tom/t4_tom.c	Fri Dec  2 21:29:52 2016	(r309442)
@@ -392,8 +392,9 @@ t4_ctloutput(struct toedev *tod, struct 
 
 	switch (name) {
 	case TCP_NODELAY:
-		t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS, V_TF_NAGLE(1),
-		    V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1));
+		t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS,
+		    V_TF_NAGLE(1), V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1),
+		    0, 0, toep->ofld_rxq->iq.abs_id);
 		break;
 	default:
 		break;
@@ -939,8 +940,6 @@ free_tom_data(struct adapter *sc, struct
 	KASSERT(td->lctx_count == 0,
 	    ("%s: lctx hash table is not empty.", __func__));
 
-	t4_uninit_l2t_cpl_handlers(sc);
-	t4_uninit_cpl_io_handlers(sc);
 	t4_uninit_ddp(sc, td);
 	destroy_clip_table(sc, td);
 
@@ -1006,7 +1005,8 @@ t4_tom_activate(struct adapter *sc)
 	struct tom_data *td;
 	struct toedev *tod;
 	struct vi_info *vi;
-	int i, rc, v;
+	struct sge_ofld_rxq *ofld_rxq;
+	int i, j, rc, v;
 
 	ASSERT_SYNCHRONIZED_OP(sc);
 
@@ -1040,12 +1040,6 @@ t4_tom_activate(struct adapter *sc)
 	/* CLIP table for IPv6 offload */
 	init_clip_table(sc, td);
 
-	/* CPL handlers */
-	t4_init_connect_cpl_handlers(sc);
-	t4_init_l2t_cpl_handlers(sc);
-	t4_init_listen_cpl_handlers(sc);
-	t4_init_cpl_io_handlers(sc);
-
 	/* toedev ops */
 	tod = &td->tod;
 	init_toedev(tod);
@@ -1068,6 +1062,10 @@ t4_tom_activate(struct adapter *sc)
 	for_each_port(sc, i) {
 		for_each_vi(sc->port[i], v, vi) {
 			TOEDEV(vi->ifp) = &td->tod;
+			for_each_ofld_rxq(vi, j, ofld_rxq) {
+				ofld_rxq->iq.set_tcb_rpl = do_set_tcb_rpl;
+				ofld_rxq->iq.l2t_write_rpl = do_l2t_write_rpl2;
+			}
 		}
 	}
 
@@ -1136,6 +1134,15 @@ t4_tom_mod_load(void)
 	int rc;
 	struct protosw *tcp_protosw, *tcp6_protosw;
 
+	/* CPL handlers */
+	t4_init_connect_cpl_handlers();
+	t4_init_listen_cpl_handlers();
+	t4_init_cpl_io_handlers();
+
+	rc = t4_ddp_mod_load();
+	if (rc != 0)
+		return (rc);
+
 	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
 	if (tcp_protosw == NULL)
 		return (ENOPROTOOPT);
@@ -1189,6 +1196,8 @@ t4_tom_mod_unload(void)
 		taskqueue_cancel_timeout(taskqueue_thread, &clip_task, NULL);
 	}
 
+	t4_ddp_mod_unload();
+
 	return (0);
 }
 #endif	/* TCP_OFFLOAD */

Modified: stable/10/sys/dev/cxgbe/tom/t4_tom.h
==============================================================================
--- stable/10/sys/dev/cxgbe/tom/t4_tom.h	Fri Dec  2 20:17:41 2016	(r309441)
+++ stable/10/sys/dev/cxgbe/tom/t4_tom.h	Fri Dec  2 21:29:52 2016	(r309442)
@@ -278,13 +278,13 @@ struct clip_entry *hold_lip(struct tom_d
 void release_lip(struct tom_data *, struct clip_entry *);
 
 /* t4_connect.c */
-void t4_init_connect_cpl_handlers(struct adapter *);
+void t4_init_connect_cpl_handlers(void);
 int t4_connect(struct toedev *, struct socket *, struct rtentry *,
     struct sockaddr *);
 void act_open_failure_cleanup(struct adapter *, u_int, u_int);
 
 /* t4_listen.c */

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


