Date:      Tue, 28 Mar 2023 21:29:32 GMT
From:      Eric Joyner <erj@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: 35105900c65b - main - irdma(4): Upgrade the driver to 1.1.11-k
Message-ID:  <202303282129.32SLTWoo016501@gitrepo.freebsd.org>

The branch main has been updated by erj:

URL: https://cgit.FreeBSD.org/src/commit/?id=35105900c65bb5adcde05d37ae34ad006970d4f9

commit 35105900c65bb5adcde05d37ae34ad006970d4f9
Author:     Bartosz Sobczak <bartosz.sobczak@intel.com>
AuthorDate: 2023-03-28 21:15:15 +0000
Commit:     Eric Joyner <erj@FreeBSD.org>
CommitDate: 2023-03-28 21:29:07 +0000

    irdma(4): Upgrade the driver to 1.1.11-k
    
    Summary of changes:
    - postpone MTU size assignment during load to avoid a race condition
    - refactor some of the debug prints
    - add a request reset handler
    - refactor the flush scheduler to increase efficiency and avoid racing
    - use the correct vlan_tag for UD traffic with PFC
    - suspend the QP before going to the ERROR state to avoid a CQP timeout
    - fix an arithmetic error in irdma_debug_buf
    - allow the debug flag to be set during driver load
    - introduce meaningful default values for the DCQCN algorithm
    - improve the interrupt naming convention
    - skip unsignaled completions in poll_cmpl
    
    Signed-off-by: Bartosz Sobczak <bartosz.sobczak@intel.com>
    Signed-off-by: Eric Joyner <erj@FreeBSD.org>
    
    Reviewed by:    hselasky@
    MFC after:      1 week
    Sponsored by:   Intel Corporation
    Differential Revision:  https://reviews.freebsd.org/D39173
---
 contrib/ofed/libirdma/abi.h          |   2 +
 contrib/ofed/libirdma/i40iw_hw.h     |   4 +-
 contrib/ofed/libirdma/irdma-abi.h    |   8 +-
 contrib/ofed/libirdma/irdma_defs.h   |  18 +-
 contrib/ofed/libirdma/irdma_uk.c     |  13 +-
 contrib/ofed/libirdma/irdma_umain.c  |   4 +-
 contrib/ofed/libirdma/irdma_umain.h  |   3 +-
 contrib/ofed/libirdma/irdma_user.h   |   2 -
 contrib/ofed/libirdma/irdma_uverbs.c |   7 +-
 contrib/ofed/libirdma/osdep.h        |   9 +-
 sys/dev/irdma/fbsd_kcompat.c         |  58 ++++-
 sys/dev/irdma/fbsd_kcompat.h         |  16 +-
 sys/dev/irdma/icrdma.c               |  30 ++-
 sys/dev/irdma/icrdma_hw.h            |   2 +-
 sys/dev/irdma/irdma-abi.h            |   6 +
 sys/dev/irdma/irdma_cm.c             | 402 ++++++++++++++++++++++-------------
 sys/dev/irdma/irdma_cm.h             |   3 -
 sys/dev/irdma/irdma_ctrl.c           |  49 +++--
 sys/dev/irdma/irdma_defs.h           |  15 ++
 sys/dev/irdma/irdma_hw.c             | 173 ++++++++-------
 sys/dev/irdma/irdma_kcompat.c        | 219 ++++++++++++-------
 sys/dev/irdma/irdma_main.h           |   8 +-
 sys/dev/irdma/irdma_pble.c           |  18 +-
 sys/dev/irdma/irdma_pble.h           |   4 +-
 sys/dev/irdma/irdma_protos.h         |   2 -
 sys/dev/irdma/irdma_puda.c           |  10 +-
 sys/dev/irdma/irdma_puda.h           |   6 +-
 sys/dev/irdma/irdma_type.h           |   8 +-
 sys/dev/irdma/irdma_uda.c            |  11 +-
 sys/dev/irdma/irdma_uk.c             | 127 +++++------
 sys/dev/irdma/irdma_user.h           |   4 -
 sys/dev/irdma/irdma_utils.c          | 131 +++++++-----
 sys/dev/irdma/irdma_verbs.c          | 225 ++++++++++----------
 sys/dev/irdma/irdma_verbs.h          |  22 +-
 sys/dev/irdma/irdma_ws.c             |   2 -
 sys/dev/irdma/irdma_ws.h             |   3 +-
 sys/dev/irdma/osdep.h                |  11 +-
 37 files changed, 994 insertions(+), 641 deletions(-)

diff --git a/contrib/ofed/libirdma/abi.h b/contrib/ofed/libirdma/abi.h
index e45a7b49caf8..6553ebcbcaca 100644
--- a/contrib/ofed/libirdma/abi.h
+++ b/contrib/ofed/libirdma/abi.h
@@ -115,6 +115,7 @@ struct irdma_get_context {
 	__u32 rsvd32;
 	__u8 userspace_ver;
 	__u8 rsvd8[3];
+	__aligned_u64 comp_mask;
 
 };
 struct irdma_get_context_resp {
@@ -136,6 +137,7 @@ struct irdma_get_context_resp {
 	__u16 max_hw_sq_chunk;
 	__u8 hw_rev;
 	__u8 rsvd2;
+	__aligned_u64 comp_mask;
 
 };
 struct irdma_ureg_mr {
diff --git a/contrib/ofed/libirdma/i40iw_hw.h b/contrib/ofed/libirdma/i40iw_hw.h
index c51d89a0fcb2..d04c37d689cb 100644
--- a/contrib/ofed/libirdma/i40iw_hw.h
+++ b/contrib/ofed/libirdma/i40iw_hw.h
@@ -41,8 +41,8 @@ enum i40iw_device_caps_const {
 	I40IW_MAX_SGE_RD			= 1,
 	I40IW_MAX_PUSH_PAGE_COUNT		= 0,
 	I40IW_MAX_INLINE_DATA_SIZE		= 48,
-	I40IW_MAX_IRD_SIZE			= 63,
-	I40IW_MAX_ORD_SIZE			= 127,
+	I40IW_MAX_IRD_SIZE			= 64,
+	I40IW_MAX_ORD_SIZE			= 64,
 	I40IW_MAX_WQ_ENTRIES			= 2048,
 	I40IW_MAX_WQE_SIZE_RQ			= 128,
 	I40IW_MAX_PDS				= 32768,
diff --git a/contrib/ofed/libirdma/irdma-abi.h b/contrib/ofed/libirdma/irdma-abi.h
index 8a06198608e2..b7d4b61c162d 100644
--- a/contrib/ofed/libirdma/irdma-abi.h
+++ b/contrib/ofed/libirdma/irdma-abi.h
@@ -2,7 +2,7 @@
  * SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB)
  *
  *
- * Copyright (c) 2006 - 2021 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2022 Intel Corporation.  All rights reserved.
  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
@@ -53,10 +53,15 @@ enum irdma_memreg_type {
 	IRDMA_MEMREG_TYPE_CQ   = 2,
 };
 
+enum {
+	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+};
+
 struct irdma_alloc_ucontext_req {
 	__u32 rsvd32;
 	__u8 userspace_ver;
 	__u8 rsvd8[3];
+	__aligned_u64 comp_mask;
 };
 
 struct irdma_alloc_ucontext_resp {
@@ -77,6 +82,7 @@ struct irdma_alloc_ucontext_resp {
 	__u16 max_hw_sq_chunk;
 	__u8 hw_rev;
 	__u8 rsvd2;
+	__aligned_u64 comp_mask;
 };
 
 struct irdma_alloc_pd_resp {
diff --git a/contrib/ofed/libirdma/irdma_defs.h b/contrib/ofed/libirdma/irdma_defs.h
index 932993fd44ce..3d8b59c4b78e 100644
--- a/contrib/ofed/libirdma/irdma_defs.h
+++ b/contrib/ofed/libirdma/irdma_defs.h
@@ -75,6 +75,7 @@
 #define IRDMA_CQE_QTYPE_RQ	0
 #define IRDMA_CQE_QTYPE_SQ	1
 
+#define IRDMA_QP_SW_MIN_WQSIZE	8 /* in WRs*/
 #define IRDMA_QP_WQE_MIN_SIZE	32
 #define IRDMA_QP_WQE_MAX_SIZE	256
 #define IRDMA_QP_WQE_MIN_QUANTA 1
@@ -304,6 +305,17 @@
 #define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
 #define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
 
+#define IRDMA_GET_RING_OFFSET(_ring, _i) \
+	( \
+		((_ring).head + (_i)) % (_ring).size \
+	)
+
+#define IRDMA_GET_CQ_ELEM_AT_OFFSET(_cq, _i, _cqe) \
+	{ \
+		register __u32 offset; \
+		offset = IRDMA_GET_RING_OFFSET((_cq)->cq_ring, _i); \
+		(_cqe) = (_cq)->cq_base[offset].buf; \
+	}
 #define IRDMA_GET_CURRENT_CQ_ELEM(_cq) \
 	( \
 		(_cq)->cq_base[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf  \
@@ -437,12 +449,6 @@
 		IRDMA_RING_MOVE_HEAD(_ring, _retcode); \
 	}
 
-enum irdma_protocol_used {
-	IRDMA_ANY_PROTOCOL = 0,
-	IRDMA_IWARP_PROTOCOL_ONLY = 1,
-	IRDMA_ROCE_PROTOCOL_ONLY = 2,
-};
-
 enum irdma_qp_wqe_size {
 	IRDMA_WQE_SIZE_32  = 32,
 	IRDMA_WQE_SIZE_64  = 64,
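
The two macros added to irdma_defs.h above let a caller look at a CQE some number of slots past the current ring head without advancing the head, presumably in support of the "skip unsignaled completions in poll_cmpl" item in the summary. Below is a minimal stand-alone sketch of the same modulo arithmetic; demo_ring and DEMO_GET_RING_OFFSET are stand-ins for the driver's struct irdma_ring and IRDMA_GET_RING_OFFSET, not driver code.

/*
 * Stand-alone sketch of the ring-offset arithmetic added above.  demo_ring
 * and DEMO_GET_RING_OFFSET are stand-ins for struct irdma_ring and
 * IRDMA_GET_RING_OFFSET; they are not part of the driver sources.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_ring {
	uint32_t head;	/* next entry to consume */
	uint32_t size;	/* number of entries; offsets wrap modulo size */
};

/* Mirrors IRDMA_GET_RING_OFFSET: slot _i positions past the current head. */
#define DEMO_GET_RING_OFFSET(_ring, _i) \
	(((_ring).head + (_i)) % (_ring).size)

int main(void)
{
	struct demo_ring cq_ring = { .head = 30, .size = 32 };
	unsigned int i;

	/* Peek three slots ahead of the head without moving the head. */
	for (i = 0; i < 3; i++)
		printf("lookahead %u -> slot %u\n", i,
		       (unsigned int)DEMO_GET_RING_OFFSET(cq_ring, i));
	/* Prints slots 30, 31 and 0: the offset wraps at the ring size. */
	return 0;
}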
diff --git a/contrib/ofed/libirdma/irdma_uk.c b/contrib/ofed/libirdma/irdma_uk.c
index 5201ad692dc1..97e3ac553c26 100644
--- a/contrib/ofed/libirdma/irdma_uk.c
+++ b/contrib/ofed/libirdma/irdma_uk.c
@@ -641,7 +641,7 @@ irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
 			sge_len -= bytes_copied;
 
 			if (!quanta_bytes_remaining) {
-				/* Remaining inline bytes reside after the hdr */
+				/* Remaining inline bytes reside after hdr */
 				wqe += 16;
 				quanta_bytes_remaining = 32;
 			}
@@ -710,7 +710,7 @@ irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list, u32 num_sges,
 			if (!quanta_bytes_remaining) {
 				quanta_bytes_remaining = 31;
 
-				/* Remaining inline bytes reside after the hdr */
+				/* Remaining inline bytes reside after hdr */
 				if (first_quanta) {
 					first_quanta = false;
 					wqe += 16;
@@ -1111,7 +1111,6 @@ irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
 	u8 arm_next = 0;
 	u8 arm_seq_num;
 
-	cq->armed = true;
 	get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
 	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
 	arm_seq_num++;
@@ -1338,6 +1337,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
 	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
 	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
 	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+	get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
+	qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
 	if (info->error) {
 		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
 		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
@@ -1366,10 +1367,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
 	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
 	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
 
-	get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
-
 	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
-	qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
 	if (!qp || qp->destroy_pending) {
 		ret_code = EFAULT;
 		goto exit;
@@ -1493,7 +1491,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
 				}
 			} while (1);
 
-			if (info->op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
+			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
+			    info->minor_err == FLUSH_PROT_ERR)
 				info->minor_err = FLUSH_MW_BIND_ERR;
 			qp->sq_flush_seen = true;
 			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
diff --git a/contrib/ofed/libirdma/irdma_umain.c b/contrib/ofed/libirdma/irdma_umain.c
index 8d27f648f969..6c823646b375 100644
--- a/contrib/ofed/libirdma/irdma_umain.c
+++ b/contrib/ofed/libirdma/irdma_umain.c
@@ -49,7 +49,7 @@
 /**
  *  Driver version
  */
-char libirdma_version[] = "1.1.5-k";
+char libirdma_version[] = "1.1.11-k";
 
 unsigned int irdma_dbg;
 
@@ -170,7 +170,7 @@ irdma_init_context(struct verbs_device *vdev,
 	iwvctx->uk_attrs.max_hw_sq_chunk = resp.max_hw_sq_chunk;
 	iwvctx->uk_attrs.max_hw_cq_size = resp.max_hw_cq_size;
 	iwvctx->uk_attrs.min_hw_cq_size = resp.min_hw_cq_size;
-	iwvctx->uk_attrs.min_hw_wq_size = IRDMA_MIN_WQ_SIZE_GEN2;
+	iwvctx->uk_attrs.min_hw_wq_size = IRDMA_QP_SW_MIN_WQSIZE;
 	iwvctx->abi_ver = IRDMA_ABI_VER;
 	mmap_key = resp.db_mmap_key;
 
diff --git a/contrib/ofed/libirdma/irdma_umain.h b/contrib/ofed/libirdma/irdma_umain.h
index f6dafc6bf39a..c67c5d7076f9 100644
--- a/contrib/ofed/libirdma/irdma_umain.h
+++ b/contrib/ofed/libirdma/irdma_umain.h
@@ -87,7 +87,8 @@ struct irdma_uvcontext {
 	struct irdma_uk_attrs uk_attrs;
 	void *db;
 	int abi_ver;
-	bool legacy_mode;
+	bool legacy_mode:1;
+	bool use_raw_attrs:1;
 };
 
 struct irdma_uqp;
diff --git a/contrib/ofed/libirdma/irdma_user.h b/contrib/ofed/libirdma/irdma_user.h
index 089619e1e3b1..8eb9ae7990e7 100644
--- a/contrib/ofed/libirdma/irdma_user.h
+++ b/contrib/ofed/libirdma/irdma_user.h
@@ -211,7 +211,6 @@ enum irdma_device_caps_const {
 	IRDMA_Q2_BUF_SIZE =			256,
 	IRDMA_QP_CTX_SIZE =			256,
 	IRDMA_MAX_PDS =				262144,
-	IRDMA_MIN_WQ_SIZE_GEN2 =		8,
 };
 
 enum irdma_addressing_type {
@@ -521,7 +520,6 @@ struct irdma_cq_uk {
 	u32 cq_size;
 	struct irdma_ring cq_ring;
 	u8 polarity;
-	bool armed:1;
 	bool avoid_mem_cflct:1;
 };
 
diff --git a/contrib/ofed/libirdma/irdma_uverbs.c b/contrib/ofed/libirdma/irdma_uverbs.c
index 58c5c6cf617c..14efab96a107 100644
--- a/contrib/ofed/libirdma/irdma_uverbs.c
+++ b/contrib/ofed/libirdma/irdma_uverbs.c
@@ -1566,11 +1566,10 @@ irdma_ucreate_qp(struct ibv_pd *pd,
 	info.sq_size = info.sq_depth >> info.sq_shift;
 	info.rq_size = info.rq_depth >> info.rq_shift;
 	/**
-	 * For older ABI version (less than 6) passes raw sq and rq
-	 * quanta in cap.max_send_wr and cap.max_recv_wr.
-	 * But then kernel had no way of calculating the actual qp size.
+	 * Maintain backward compatibility with older ABI which pass sq
+	 * and rq depth (in quanta) in cap.max_send_wr a cap.max_recv_wr
 	 */
-	if (iwvctx->abi_ver <= 5) {
+	if (!iwvctx->use_raw_attrs) {
 		attr->cap.max_send_wr = info.sq_size;
 		attr->cap.max_recv_wr = info.rq_size;
 	}
diff --git a/contrib/ofed/libirdma/osdep.h b/contrib/ofed/libirdma/osdep.h
index 2cb4f8d57cb5..1bbf6573b800 100644
--- a/contrib/ofed/libirdma/osdep.h
+++ b/contrib/ofed/libirdma/osdep.h
@@ -119,7 +119,7 @@ do {													\
 	irdma_debug(dev, mask, "%s\n", desc);								\
 	irdma_debug(dev, mask, "starting address virt=%p phy=%lxh\n", buf, irdma_get_virt_to_phy(buf));	\
 	for (i = 0; i < size ; i += 8)									\
-		irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)buf)[i / 8]);	\
+		irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)(buf))[i / 8]);	\
 } while(0)
 
 #define irdma_debug(h, m, s, ...)					\
@@ -137,11 +137,12 @@ do {                                                                    \
         if (irdma_dbg)                                                  \
                 printf("libirdma-%s: " fmt, __func__, ##args); \
 } while (0)
-#define irdma_dev_err(a, b, ...) printf(b, ##__VA_ARGS__)
-#define irdma_dev_warn(a, b, ...) printf(b, ##__VA_ARGS__) /*dev_warn(a, b)*/
+#define irdma_dev_err(ibdev, fmt, ...) \
+	pr_err("%s:%s:%d ERR "fmt, (ibdev)->name, __func__, __LINE__, ##__VA_ARGS__)
+#define irdma_dev_warn(ibdev, fmt, ...) \
+	pr_warn("%s:%s:%d WARN "fmt, (ibdev)->name, __func__, __LINE__, ##__VA_ARGS__)
 #define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
 #define irdma_pr_warn printf
-#define ibdev_err(ibdev, fmt, ...)  printf("%s:"fmt, (ibdev)->name, ##__VA_ARGS__)
 
 #define dump_struct(s, sz, name)	\
 do {				\
diff --git a/sys/dev/irdma/fbsd_kcompat.c b/sys/dev/irdma/fbsd_kcompat.c
index e0b3bce5ec4f..7a17b7e5f0f0 100644
--- a/sys/dev/irdma/fbsd_kcompat.c
+++ b/sys/dev/irdma/fbsd_kcompat.c
@@ -93,6 +93,18 @@ irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
 
 }
 
+void
+irdma_request_reset(struct irdma_pci_f *rf)
+{
+	struct ice_rdma_peer *peer = rf->peer_info;
+	struct ice_rdma_request req = {0};
+
+	req.type = ICE_RDMA_EVENT_RESET;
+
+	printf("%s:%d requesting pf-reset\n", __func__, __LINE__);
+	IRDMA_DI_REQ_HANDLER(peer, &req);
+}
+
 int
 irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
 {
@@ -611,32 +623,38 @@ irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
 		      &rf->dcqcn_params.min_rate, 0,
 		      "set minimum rate limit value, in MBits per second, default=0");
 
+	rf->dcqcn_params.dcqcn_f = 5;
 	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
 		      OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
-		      "set number of times to stay in each stage of bandwidth recovery, default=0");
+		      "set number of times to stay in each stage of bandwidth recovery, default=5");
 
+	rf->dcqcn_params.dcqcn_t = 0x37;
 	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
 		       OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
-		       "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0");
+		       "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0x37");
 
+	rf->dcqcn_params.dcqcn_b = 0x249f0;
 	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
 		       OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
-		       "set number of MSS to add to the congestion window in additive increase mode, default=0");
+		       "set number of MSS to add to the congestion window in additive increase mode, default=0x249f0");
 
+	rf->dcqcn_params.rai_factor = 1;
 	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
 		       OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
 		       &rf->dcqcn_params.rai_factor, 0,
-		       "set number of MSS to add to the congestion window in additive increase mode, default=0");
+		       "set number of MSS to add to the congestion window in additive increase mode, default=1");
 
+	rf->dcqcn_params.hai_factor = 5;
 	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
 		       OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
 		       &rf->dcqcn_params.hai_factor, 0,
-		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=0");
+		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=5");
 
+	rf->dcqcn_params.rreduce_mperiod = 50;
 	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
 		       OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
 		       &rf->dcqcn_params.rreduce_mperiod, 0,
-		       "set minimum time between 2 consecutive rate reductions for a single flow, default=0");
+		       "set minimum time between 2 consecutive rate reductions for a single flow, default=50");
 }
 
 /**
@@ -743,3 +761,31 @@ irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
 {
 	kfree(chunk->bitmapmem.va);
 }
+
+void
+irdma_cleanup_dead_qps(struct irdma_sc_vsi *vsi)
+{
+	struct irdma_sc_qp *qp = NULL;
+	struct irdma_qp *iwqp;
+	struct irdma_pci_f *rf;
+	u8 i;
+
+	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+		while (qp) {
+			if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_UDA) {
+				qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+				continue;
+			}
+			iwqp = qp->qp_uk.back_qp;
+			rf = iwqp->iwdev->rf;
+			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
+			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);
+
+			kfree(iwqp->kqp.sq_wrid_mem);
+			kfree(iwqp->kqp.rq_wrid_mem);
+			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+			kfree(iwqp);
+		}
+	}
+}
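
The dcqcn tunables hunk above pairs each CTLFLAG_RDTUN sysctl with a non-zero default written to the field just before registration, replacing the earlier implicit zeroes. The sketch below only collects those defaults in one place for reference; demo_dcqcn_params is a stand-in, not the driver's DCQCN parameter structure, and the field comments paraphrase the sysctl descriptions.

/*
 * Stand-in collection of the DCQCN defaults assigned in the hunk above;
 * demo_dcqcn_params is not the driver's parameter structure.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_dcqcn_params {
	uint8_t  dcqcn_f;         /* iterations per bandwidth-recovery stage */
	uint16_t dcqcn_t;         /* usecs to wait before increasing the CWND */
	uint32_t dcqcn_b;         /* MSS added in additive increase mode */
	uint16_t rai_factor;      /* MSS per additive-increase event */
	uint16_t hai_factor;      /* MSS per hyperactive-increase event */
	uint32_t rreduce_mperiod; /* min time between two rate reductions */
};

/* Defaults introduced by this commit, taken from the diff above. */
static void demo_dcqcn_set_defaults(struct demo_dcqcn_params *p)
{
	p->dcqcn_f = 5;
	p->dcqcn_t = 0x37;
	p->dcqcn_b = 0x249f0;
	p->rai_factor = 1;
	p->hai_factor = 5;
	p->rreduce_mperiod = 50;
}

int main(void)
{
	struct demo_dcqcn_params p;

	demo_dcqcn_set_defaults(&p);
	printf("dcqcn_F=%u dcqcn_T=0x%x dcqcn_B=0x%x rai=%u hai=%u rreduce=%u\n",
	       (unsigned int)p.dcqcn_f, (unsigned int)p.dcqcn_t,
	       (unsigned int)p.dcqcn_b, (unsigned int)p.rai_factor,
	       (unsigned int)p.hai_factor, (unsigned int)p.rreduce_mperiod);
	return 0;
}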
diff --git a/sys/dev/irdma/fbsd_kcompat.h b/sys/dev/irdma/fbsd_kcompat.h
index 1fb83f0ac663..0b264b763f7e 100644
--- a/sys/dev/irdma/fbsd_kcompat.h
+++ b/sys/dev/irdma/fbsd_kcompat.h
@@ -40,10 +40,11 @@
 #define TASKLET_DATA_TYPE	unsigned long
 #define TASKLET_FUNC_TYPE	void (*)(TASKLET_DATA_TYPE)
 
+#ifndef tasklet_setup
 #define tasklet_setup(tasklet, callback)				\
 	tasklet_init((tasklet), (TASKLET_FUNC_TYPE)(callback),		\
 		      (TASKLET_DATA_TYPE)(tasklet))
-
+#endif
 #ifndef from_tasklet
 #define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
 	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
@@ -176,8 +177,7 @@ int irdma_dereg_mr(struct ib_mr *ib_mr);
 #else
 int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 #endif
-void irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
-				   u8 *active_width);
+int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u8 *speed, u8 *width);
 enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
 					  u8 port_num);
 int irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -197,6 +197,7 @@ int irdma_get_hw_stats(struct ib_device *ibdev,
 		       struct rdma_hw_stats *stats, u8 port_num,
 		       int index);
 
+void irdma_request_reset(struct irdma_pci_f *rf);
 int irdma_register_qset(struct irdma_sc_vsi *vsi,
 			struct irdma_ws_node *tc_node);
 void irdma_unregister_qset(struct irdma_sc_vsi *vsi,
@@ -337,4 +338,13 @@ static inline size_t irdma_ib_umem_num_dma_blocks(struct ib_umem *umem, unsigned
 			 ALIGN_DOWN(iova, pgsz))) / pgsz;
 }
 
+static inline void addrconf_addr_eui48(u8 *deui, const char *const addr)
+{
+	memcpy(deui, addr, 3);
+	deui[3] = 0xFF;
+	deui[4] = 0xFE;
+	memcpy(deui + 5, addr + 3, 3);
+	deui[0] ^= 2;
+}
+
 #endif /* FBSD_KCOMPAT_H */
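
The new inline addrconf_addr_eui48() in fbsd_kcompat.h builds a modified EUI-64 identifier from a 6-byte MAC address by splitting the MAC around an FF:FE filler and flipping the universal/local bit. Below is a small user-space mirror of that mapping; demo_addr_eui48 is a stand-in name and the MAC value is only an example.

/* User-space mirror of the addrconf_addr_eui48() helper added above. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void demo_addr_eui48(uint8_t *deui, const uint8_t *addr)
{
	memcpy(deui, addr, 3);
	deui[3] = 0xFF;		/* FF:FE filler in the middle */
	deui[4] = 0xFE;
	memcpy(deui + 5, addr + 3, 3);
	deui[0] ^= 2;		/* flip the universal/local bit */
}

int main(void)
{
	/* Arbitrary example MAC address. */
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x0a, 0x0b, 0x0c };
	uint8_t eui64[8];
	int i;

	demo_addr_eui48(eui64, mac);
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui64[i], i < 7 ? ":" : "\n");
	/* Prints 02:1b:21:ff:fe:0a:0b:0c for the MAC above. */
	return 0;
}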
diff --git a/sys/dev/irdma/icrdma.c b/sys/dev/irdma/icrdma.c
index ed67dfdb8847..a3dee284adce 100644
--- a/sys/dev/irdma/icrdma.c
+++ b/sys/dev/irdma/icrdma.c
@@ -53,7 +53,7 @@
 /**
  *  Driver version
  */
-char irdma_driver_version[] = "1.1.5-k";
+char irdma_driver_version[] = "1.1.11-k";
 
 #define pf_if_d(peer) peer->ifp->if_dunit
 
@@ -223,9 +223,13 @@ static void
 irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
 {
 	if (mtu < IRDMA_MIN_MTU_IPV4)
-		irdma_dev_warn(dev, "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
+		irdma_dev_warn(to_ibdev(dev),
+			       "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n",
+			       mtu);
 	else if (mtu < IRDMA_MIN_MTU_IPV6)
-		irdma_dev_warn(dev, "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n", mtu);
+		irdma_dev_warn(to_ibdev(dev),
+			       "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n",
+			       mtu);
 }
 
 /**
@@ -336,22 +340,25 @@ irdma_finalize_task(void *context, int pending)
 	int status = 0;
 
 	if (iwdev->iw_status) {
-		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Starting deferred closing %d (%d)\n",
+		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
+			    "Starting deferred closing %d (%d)\n",
 			    rf->peer_info->pf_id, pf_if_d(peer));
 		irdma_dereg_ipaddr_event_cb(rf);
 		irdma_ib_unregister_device(iwdev);
 		req.type = ICE_RDMA_EVENT_VSI_FILTER_UPDATE;
 		req.enable_filter = false;
 		IRDMA_DI_REQ_HANDLER(peer, &req);
+		irdma_cleanup_dead_qps(&iwdev->vsi);
 		irdma_rt_deinit_hw(iwdev);
 	} else {
-		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Starting deferred opening %d (%d)\n",
+		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
+			    "Starting deferred opening %d (%d)\n",
 			    rf->peer_info->pf_id, pf_if_d(peer));
-		l2params.mtu = peer->mtu;
 		irdma_get_qos_info(&l2params, &peer->initial_qos_info);
 		if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
 			iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
 
+		l2params.mtu = peer->mtu;
 		status = irdma_rt_init_hw(iwdev, &l2params);
 		if (status) {
 			irdma_pr_err("RT init failed %d\n", status);
@@ -368,7 +375,8 @@ irdma_finalize_task(void *context, int pending)
 		req.enable_filter = true;
 		IRDMA_DI_REQ_HANDLER(peer, &req);
 		irdma_reg_ipaddr_event_cb(rf);
-		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Deferred opening finished %d (%d)\n",
+		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
+			    "Deferred opening finished %d (%d)\n",
 			    rf->peer_info->pf_id, pf_if_d(peer));
 	}
 }
@@ -459,6 +467,7 @@ irdma_fill_device_info(struct irdma_device *iwdev,
 	rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
 	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
 	rf->check_fc = irdma_check_fc_for_qp;
+	rf->gen_ops.request_reset = irdma_request_reset;
 	irdma_set_rf_user_cfg_params(rf);
 
 	rf->default_vsi.vsi_idx = peer->pf_vsi_num;
@@ -483,6 +492,7 @@ irdma_fill_device_info(struct irdma_device *iwdev,
 	iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
 	iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
 	iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
+	iwdev->roce_rtomin = 5;
 
 	if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY) {
 		iwdev->roce_mode = true;
@@ -583,7 +593,8 @@ irdma_remove(struct ice_rdma_peer *peer)
 	struct irdma_handler *hdl;
 	struct irdma_device *iwdev;
 
-	irdma_debug((struct irdma_sc_dev *)NULL, IRDMA_DEBUG_INIT, "removing %s\n", __FUNCTION__);
+	irdma_debug((struct irdma_sc_dev *)NULL, IRDMA_DEBUG_INIT,
+		    "removing %s irdma%d\n", __func__, pf_if_d(peer));
 
 	hdl = irdma_find_handler(peer);
 	if (!hdl)
@@ -614,7 +625,8 @@ irdma_remove(struct ice_rdma_peer *peer)
 	kfree(iwdev->hdl);
 	kfree(iwdev->rf);
 	ib_dealloc_device(&iwdev->ibdev);
-	irdma_pr_info("IRDMA hardware deinitialization complete\n");
+	irdma_pr_info("IRDMA hardware deinitialization complete irdma%d\n",
+		      pf_if_d(peer));
 
 	return 0;
 }
diff --git a/sys/dev/irdma/icrdma_hw.h b/sys/dev/irdma/icrdma_hw.h
index 0b0b46c0d567..bbaf13bde63c 100644
--- a/sys/dev/irdma/icrdma_hw.h
+++ b/sys/dev/irdma/icrdma_hw.h
@@ -122,7 +122,7 @@ enum icrdma_device_caps_const {
 	ICRDMA_MAX_STATS_COUNT = 128,
 
 	ICRDMA_MAX_IRD_SIZE			= 32,
-	ICRDMA_MAX_ORD_SIZE			= 64,
+	ICRDMA_MAX_ORD_SIZE			= 32,
 	ICRDMA_MIN_WQ_SIZE			= 8 /* WQEs */,
 
 };
diff --git a/sys/dev/irdma/irdma-abi.h b/sys/dev/irdma/irdma-abi.h
index 4e4d8e63a9d0..176c838fce95 100644
--- a/sys/dev/irdma/irdma-abi.h
+++ b/sys/dev/irdma/irdma-abi.h
@@ -53,10 +53,15 @@ enum irdma_memreg_type {
 	IRDMA_MEMREG_TYPE_CQ   = 2,
 };
 
+enum {
+	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+};
+
 struct irdma_alloc_ucontext_req {
 	__u32 rsvd32;
 	__u8 userspace_ver;
 	__u8 rsvd8[3];
+	__aligned_u64 comp_mask;
 };
 
 struct irdma_alloc_ucontext_resp {
@@ -77,6 +82,7 @@ struct irdma_alloc_ucontext_resp {
 	__u16 max_hw_sq_chunk;
 	__u8 hw_rev;
 	__u8 rsvd2;
+	__aligned_u64 comp_mask;
 };
 
 struct irdma_alloc_pd_resp {
diff --git a/sys/dev/irdma/irdma_cm.c b/sys/dev/irdma/irdma_cm.c
index 8cfd62a790ab..daf116065596 100644
--- a/sys/dev/irdma/irdma_cm.c
+++ b/sys/dev/irdma/irdma_cm.c
@@ -206,9 +206,10 @@ irdma_send_cm_event(struct irdma_cm_node *cm_node,
 	event.event = type;
 	event.status = status;
 
-	irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+	irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
 		    "cm_node %p cm_id=%p state=%d accel=%d event_type=%d status=%d\n",
-		    cm_node, cm_id, cm_node->accelerated, cm_node->state, type, status);
+		    cm_node, cm_id, cm_node->accelerated, cm_node->state, type,
+		    status);
 
 	switch (type) {
 	case IW_CM_EVENT_CONNECT_REQUEST:
@@ -288,8 +289,9 @@ irdma_create_event(struct irdma_cm_node *cm_node,
 	event->cm_info.rem_port = cm_node->rem_port;
 	event->cm_info.loc_port = cm_node->loc_port;
 	event->cm_info.cm_id = cm_node->cm_id;
-	irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-		    "node=%p event=%p type=%u dst=%pI4 src=%pI4\n", cm_node,
+	irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+		    "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
+		    cm_node,
 		    event, type, event->cm_info.loc_addr,
 		    event->cm_info.rem_addr);
 	irdma_cm_post_event(event);
@@ -356,15 +358,13 @@ irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node,
 	u32 hdr_len = 0;
 
 	if (!cm_node->ah || !cm_node->ah->ah_info.ah_valid) {
-		irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-			    "AH invalid\n");
+		irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "AH invalid\n");
 		return NULL;
 	}
 
 	sqbuf = irdma_puda_get_bufpool(vsi->ilq);
 	if (!sqbuf) {
-		irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-			    "SQ buf NULL\n");
+		irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "SQ buf NULL\n");
 		return NULL;
 	}
 
@@ -645,7 +645,7 @@ irdma_send_reset(struct irdma_cm_node *cm_node)
 	if (!sqbuf)
 		return -ENOMEM;
 
-	irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+	irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
 		    "caller: %pS cm_node %p cm_id=%p accel=%d state=%d rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
 		    __builtin_return_address(0), cm_node, cm_node->cm_id,
 		    cm_node->accelerated, cm_node->state, cm_node->rem_port,
@@ -666,8 +666,9 @@ irdma_active_open_err(struct irdma_cm_node *cm_node, bool reset)
 	irdma_cleanup_retrans_entry(cm_node);
 	cm_node->cm_core->stats_connect_errs++;
 	if (reset) {
-		irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-			    "cm_node=%p state=%d\n", cm_node, cm_node->state);
+		irdma_debug(&cm_node->iwdev->rf->sc_dev,
+			    IRDMA_DEBUG_CM, "cm_node=%p state=%d\n", cm_node,
+			    cm_node->state);
 		atomic_inc(&cm_node->refcnt);
 		irdma_send_reset(cm_node);
 	}
@@ -687,8 +688,9 @@ irdma_passive_open_err(struct irdma_cm_node *cm_node, bool reset)
 	irdma_cleanup_retrans_entry(cm_node);
 	cm_node->cm_core->stats_passive_errs++;
 	cm_node->state = IRDMA_CM_STATE_CLOSED;
-	irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-		    "cm_node=%p state =%d\n", cm_node, cm_node->state);
+	irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+		    "cm_node=%p state =%d\n",
+		    cm_node, cm_node->state);
 	if (reset)
 		irdma_send_reset(cm_node);
 	else
@@ -747,8 +749,7 @@ irdma_process_options(struct irdma_cm_node *cm_node, u8 *optionsloc,
 			offset += 1;
 			continue;
 		case OPTION_NUM_MSS:
-			irdma_debug(iwdev_to_idev(cm_node->iwdev),
-				    IRDMA_DEBUG_CM,
+			irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
 				    "MSS Length: %d Offset: %d Size: %d\n",
 				    all_options->mss.len, offset, optionsize);
 			got_mss_option = 1;
@@ -768,8 +769,7 @@ irdma_process_options(struct irdma_cm_node *cm_node, u8 *optionsloc,
 			    all_options->windowscale.shiftcount;
 			break;
 		default:
-			irdma_debug(iwdev_to_idev(cm_node->iwdev),
-				    IRDMA_DEBUG_CM,
+			irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
 				    "Unsupported TCP Option: %x\n",
 				    all_options->base.optionnum);
 			break;
@@ -801,9 +801,9 @@ irdma_handle_tcp_options(struct irdma_cm_node *cm_node,
 		ret = irdma_process_options(cm_node, optionsloc, optionsize,
 					    (u32)tcph->th_flags & TH_SYN);
 		if (ret) {
-			irdma_debug(iwdev_to_idev(cm_node->iwdev),
-				    IRDMA_DEBUG_CM,
-				    "Node %p, Sending Reset\n", cm_node);
+			irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+				    "Node %p, Sending Reset\n",
+				    cm_node);
 			if (passive)
 				irdma_passive_open_err(cm_node, true);
 			else
@@ -950,8 +950,9 @@ irdma_send_mpa_request(struct irdma_cm_node *cm_node)
 							 &cm_node->mpa_hdr,
 							 MPA_KEY_REQUEST);
 	if (!cm_node->mpa_hdr.size) {
-		irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-			    "mpa size = %d\n", cm_node->mpa_hdr.size);
+		irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+			    "mpa size = %d\n",
+			    cm_node->mpa_hdr.size);
 		return -EINVAL;
 	}
 
@@ -1061,9 +1062,9 @@ negotiate_done:
 		/* Not supported RDMA0 operation */
 		return -EOPNOTSUPP;
 
-	irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-		    "MPAV2 Negotiated ORD: %d, IRD: %d\n", cm_node->ord_size,
-		    cm_node->ird_size);
+	irdma_debug(&cm_node->iwdev->rf->sc_dev,
+		    IRDMA_DEBUG_CM, "MPAV2 Negotiated ORD: %d, IRD: %d\n",
+		    cm_node->ord_size, cm_node->ird_size);
 	return 0;
 }
 
@@ -1084,8 +1085,8 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
 	*type = IRDMA_MPA_REQUEST_ACCEPT;
 
 	if (len < sizeof(struct ietf_mpa_v1)) {
-		irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-			    "ietf buffer small (%x)\n", len);
+		irdma_debug(&cm_node->iwdev->rf->sc_dev,
+			    IRDMA_DEBUG_CM, "ietf buffer small (%x)\n", len);
 		return -EINVAL;
 	}
 
@@ -1094,20 +1095,23 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
 	priv_data_len = ntohs(mpa_frame->priv_data_len);
 
 	if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
-		irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-			    "private_data too big %d\n", priv_data_len);
+		irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+			    "private_data too big %d\n",
+			    priv_data_len);
 		return -EOVERFLOW;
 	}
 
 	if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
-		irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-			    "unsupported mpa rev = %d\n", mpa_frame->rev);
+		irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+			    "unsupported mpa rev = %d\n",
+			    mpa_frame->rev);
 		return -EINVAL;
 	}
 
 	if (mpa_frame->rev > cm_node->mpa_frame_rev) {
-		irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-			    "rev %d\n", mpa_frame->rev);
+		irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+			    "rev %d\n",
+			    mpa_frame->rev);
 		return -EINVAL;
 	}
 
@@ -1115,31 +1119,29 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
 	if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
 		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ,
 			   IETF_MPA_KEY_SIZE)) {
-			irdma_debug(iwdev_to_idev(cm_node->iwdev),
-				    IRDMA_DEBUG_CM,
-				    "Unexpected MPA Key received\n");
+			irdma_debug(&cm_node->iwdev->rf->sc_dev,
+				    IRDMA_DEBUG_CM, "Unexpected MPA Key received\n");
 			return -EINVAL;
 		}
 	} else {
 		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP,
 			   IETF_MPA_KEY_SIZE)) {
-			irdma_debug(iwdev_to_idev(cm_node->iwdev),
-				    IRDMA_DEBUG_CM,
-				    "Unexpected MPA Key received\n");
+			irdma_debug(&cm_node->iwdev->rf->sc_dev,
+				    IRDMA_DEBUG_CM, "Unexpected MPA Key received\n");
 			return -EINVAL;
 		}
 	}
 
 	if (priv_data_len + mpa_hdr_len > len) {
-		irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-			    "ietf buffer len(%x + %x != %x)\n", priv_data_len,
-			    mpa_hdr_len, len);
+		irdma_debug(&cm_node->iwdev->rf->sc_dev,
+			    IRDMA_DEBUG_CM, "ietf buffer len(%x + %x != %x)\n",
+			    priv_data_len, mpa_hdr_len, len);
 		return -EOVERFLOW;
 	}
 
 	if (len > IRDMA_MAX_CM_BUF) {
-		irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
-			    "ietf buffer large len = %d\n", len);
+		irdma_debug(&cm_node->iwdev->rf->sc_dev,
+			    IRDMA_DEBUG_CM, "ietf buffer large len = %d\n", len);
 		return -EOVERFLOW;
 	}
 
@@ -1211,7 +1213,7 @@ irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
 		new_send->timetosend += (HZ / 10);
 		if (cm_node->close_entry) {
 			kfree(new_send);
-			irdma_debug(iwdev_to_idev(cm_node->iwdev),
+			irdma_debug(&cm_node->iwdev->rf->sc_dev,
 				    IRDMA_DEBUG_CM, "already close entry\n");
 			return -EINVAL;
 		}
@@ -1520,12 +1522,13 @@ irdma_send_fin(struct irdma_cm_node *cm_node)
  * irdma_find_listener - find a cm node listening on this addr-port pair
  * @cm_core: cm's core
  * @dst_addr: listener ip addr
+ * @ipv4: flag indicating IPv4 when true
  * @dst_port: listener tcp port num
  * @vlan_id: virtual LAN ID
  * @listener_state: state to match with listen node's
  */
 static struct irdma_cm_listener *
-irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
+irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4, u16 dst_port,
 		    u16 vlan_id, enum irdma_cm_listener_state listener_state)
 {
 	struct irdma_cm_listener *listen_node;
@@ -1539,7 +1542,7 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
 	list_for_each_entry(listen_node, &cm_core->listen_list, list) {
 		memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
 		listen_port = listen_node->loc_port;
-		if (listen_port != dst_port ||
+		if (listen_node->ipv4 != ipv4 || listen_port != dst_port ||
 		    !(listener_state & listen_node->listener_state))
 			continue;
 		/* compare node pair, return node handle if a match */
@@ -1579,13 +1582,13 @@ irdma_del_multiple_qhash(struct irdma_device *iwdev,
 		child_listen_node = list_entry(pos, struct irdma_cm_listener,
 					       child_listen_list);
 		if (child_listen_node->ipv4)
-			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+			irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
 				    "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
 				    child_listen_node->loc_addr,
 				    child_listen_node->loc_port,
 				    child_listen_node->vlan_id);
 		else
-			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+			irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
 				    "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
 				    child_listen_node->loc_addr,
 				    child_listen_node->loc_port,
@@ -1603,8 +1606,8 @@ irdma_del_multiple_qhash(struct irdma_device *iwdev,
 		} else {
 			ret = 0;
 		}
-		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
-			    "Child listen node freed = %p\n",
+		irdma_debug(&iwdev->rf->sc_dev,
+			    IRDMA_DEBUG_CM, "Child listen node freed = %p\n",
 			    child_listen_node);
 		kfree(child_listen_node);
 		cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
@@ -1614,6 +1617,10 @@ irdma_del_multiple_qhash(struct irdma_device *iwdev,
 	return ret;
 }
 
+static u8 irdma_get_egress_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4){
+	return prio;
+}
+
 /**
  * irdma_netdev_vlan_ipv6 - Gets the netdev and mac
  * @addr: local IPv6 address
@@ -1702,20 +1709,18 @@ irdma_add_mqh_6(struct irdma_device *iwdev,
 
 		if_addr_rlock(ip_dev);
 		IRDMA_TAILQ_FOREACH(ifp, &ip_dev->if_addrhead, ifa_link) {
-			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+			irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
 				    "IP=%pI6, vlan_id=%d, MAC=%pM\n",
 				    &((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_addr, rdma_vlan_dev_vlan_id(ip_dev),
 				    IF_LLADDR(ip_dev));
 			if (((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_family != AF_INET6)
 				continue;
 			child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
-			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+			irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
 				    "Allocating child listener %p\n",
 				    child_listen_node);
 			if (!child_listen_node) {
-				irdma_debug(iwdev_to_idev(iwdev),
-					    IRDMA_DEBUG_CM,
-					    "listener memory allocation\n");
+				irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "listener memory allocation\n");
 				ret = -ENOMEM;
 				if_addr_runlock(ip_dev);
 				goto exit;
@@ -1729,6 +1734,11 @@ irdma_add_mqh_6(struct irdma_device *iwdev,
 					    ((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_addr.__u6_addr.__u6_addr32);
 			memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
 			       sizeof(cm_info->loc_addr));
+			if (!iwdev->vsi.dscp_mode)
+				cm_info->user_pri =
+				    irdma_get_egress_vlan_prio(child_listen_node->loc_addr,
+							       cm_info->user_pri,
+							       false);
 			ret = irdma_manage_qhash(iwdev, cm_info,
 						 IRDMA_QHASH_TYPE_TCP_SYN,
 						 IRDMA_QHASH_MANAGE_TYPE_ADD,
@@ -1785,20 +1795,19 @@ irdma_add_mqh_4(struct irdma_device *iwdev,
 
 		if_addr_rlock(ip_dev);
 		IRDMA_TAILQ_FOREACH(ifa, &ip_dev->if_addrhead, ifa_link) {
-			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+			irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
 				    "Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
-				    &ifa->ifa_addr,
-				    rdma_vlan_dev_vlan_id(ip_dev), IF_LLADDR(ip_dev));
+				    &ifa->ifa_addr, rdma_vlan_dev_vlan_id(ip_dev),
+				    IF_LLADDR(ip_dev));
 			if (((struct sockaddr_in *)ifa->ifa_addr)->sin_family != AF_INET)
 				continue;
 			child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
 			cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
*** 3438 LINES SKIPPED ***


