Date: Wed, 8 Feb 2023 00:26:20 GMT
From: Eric Joyner <erj@FreeBSD.org>
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-branches@FreeBSD.org
Subject: git: e675a260ceba - stable/13 - irdma(4): Add code for compilation on stable/13 branch
Message-ID: <202302080026.3180QKwY096224@gitrepo.freebsd.org>
The branch stable/13 has been updated by erj:

URL: https://cgit.FreeBSD.org/src/commit/?id=e675a260ceba9b110d66fa9dd78d60ef23fc4439

commit e675a260ceba9b110d66fa9dd78d60ef23fc4439
Author:     Bartosz Sobczak <bartosz.sobczak@intel.com>
AuthorDate: 2023-02-06 22:37:39 +0000
Commit:     Eric Joyner <erj@FreeBSD.org>
CommitDate: 2023-02-08 00:24:11 +0000

    irdma(4): Add code for compilation on stable/13 branch

    The current content of the irdma(4) driver consists only of code that
    is compilable on the 14-CURRENT branch, which makes it impossible to
    merge into the stable/13 branch because of missing dependencies in
    the ofed tree.  This patch adds the missing code that allows merging
    into the stable branch.  Once it is there, code relating only to
    version 14 or higher should be removed.

    Signed-off-by: Eric Joyner <erj@FreeBSD.org>

    Reviewed by:    erj@
    MFC after:      1 day
    Sponsored by:   Intel Corporation
    Differential Revision:  https://reviews.freebsd.org/D38170

    (cherry picked from commit a527c18cd7ba5d4afacbfb85817ca7b8b2487b52)
---
 contrib/ofed/libirdma/osdep.h |   3 +
 sys/dev/irdma/fbsd_kcompat.h  |  70 +++++
 sys/dev/irdma/irdma_kcompat.c | 609 +++++++++++++++++++++++++++++++++++++++++-
 sys/dev/irdma/irdma_main.h    |   4 +
 sys/dev/irdma/irdma_verbs.c   | 194 +++++++++++++-
 sys/dev/irdma/irdma_verbs.h   |  24 ++
 sys/dev/irdma/osdep.h         |   3 +
 7 files changed, 893 insertions(+), 14 deletions(-)

diff --git a/contrib/ofed/libirdma/osdep.h b/contrib/ofed/libirdma/osdep.h
index 4c226c8e5992..2cb4f8d57cb5 100644
--- a/contrib/ofed/libirdma/osdep.h
+++ b/contrib/ofed/libirdma/osdep.h
@@ -102,6 +102,9 @@
 #define __aligned_u64 uint64_t __aligned(8)

 #define VLAN_PRIO_SHIFT 13
+#if __FreeBSD_version < 1400000
+#define IB_USER_VERBS_EX_CMD_MODIFY_QP IB_USER_VERBS_CMD_MODIFY_QP
+#endif

 /*
  * debug definition section
diff --git a/sys/dev/irdma/fbsd_kcompat.h b/sys/dev/irdma/fbsd_kcompat.h
index 3b03189e41af..1e0544738d7e 100644
--- a/sys/dev/irdma/fbsd_kcompat.h
+++ b/sys/dev/irdma/fbsd_kcompat.h
@@ -50,12 +50,14 @@
 	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
 #endif

+#if __FreeBSD_version >= 1400000
 #define IRDMA_SET_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
 	(sizeof(struct drv_struct) + \
 	 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
 	 BUILD_BUG_ON_ZERO( \
 		 !__same_type(((struct drv_struct *)NULL)->member, \
 			      struct ib_struct)))
+#endif /* __FreeBSD_version > 1400000 */

 #define set_ibdev_dma_device(ibdev, dev) \
 	ibdev.dma_device = (dev)
@@ -69,11 +71,20 @@
 #define kmap_local_page(pg) page_address(pg)
 #define kunmap(pg)
 #define kunmap_local(pg)
+#if __FreeBSD_version >= 1400026
 #define kc_free_lsmm_dereg_mr(iwdev, iwqp) \
 	((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr, NULL))
+#else
+#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \
+	((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr))
+#endif

 #define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
+#if __FreeBSD_version < 1400026
+#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp)
+#else
 #define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp, udata)
+#endif

 #ifndef IB_QP_ATTR_STANDARD_BITS
 #define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0)
 #endif
@@ -119,24 +130,53 @@ static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
 	return (*pinfo)->addr;
 }

+#if __FreeBSD_version < 1400026
+struct ib_cq *irdma_create_cq(struct ib_device *ibdev,
+			      const struct ib_cq_init_attr *attr,
+			      struct ib_ucontext *context,
+			      struct ib_udata *udata);
+#else
 int irdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct
ib_udata *udata); +#endif struct ib_qp *irdma_create_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata); +#if __FreeBSD_version >= 1400026 int irdma_create_ah(struct ib_ah *ib_ah, struct ib_ah_attr *attr, u32 flags, struct ib_udata *udata); int irdma_create_ah_stub(struct ib_ah *ib_ah, struct ib_ah_attr *attr, u32 flags, struct ib_udata *udata); +#else +struct ib_ah *irdma_create_ah(struct ib_pd *ibpd, + struct ib_ah_attr *attr, + struct ib_udata *udata); +struct ib_ah *irdma_create_ah_stub(struct ib_pd *ibpd, + struct ib_ah_attr *attr, + struct ib_udata *udata); +#endif void irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr); +#if __FreeBSD_version >= 1400026 void irdma_destroy_ah(struct ib_ah *ibah, u32 flags); void irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags); +#else +int irdma_destroy_ah(struct ib_ah *ibah); +int irdma_destroy_ah_stub(struct ib_ah *ibah); +#endif +#if __FreeBSD_version < 1400026 +int irdma_destroy_qp(struct ib_qp *ibqp); +#else int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); +#endif +#if __FreeBSD_version < 1400026 +int irdma_dereg_mr(struct ib_mr *ib_mr); +#else int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); +#endif void irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed, u8 *active_width); enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev, @@ -163,6 +203,10 @@ int irdma_register_qset(struct irdma_sc_vsi *vsi, void irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node); void ib_unregister_device(struct ib_device *ibdev); +#if __FreeBSD_version < 1400026 +int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, + unsigned long pfn, unsigned long size, pgprot_t prot); +#endif void irdma_disassociate_ucontext(struct ib_ucontext *context); int kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr, @@ -196,8 +240,13 @@ int irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len, struct irdma_mr; struct irdma_cq; struct irdma_cq_buf; +#if __FreeBSD_version < 1400026 +struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); +#else struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg, struct ib_udata *udata); +#endif int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr, u16 access); struct ib_mr *irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len, @@ -230,11 +279,32 @@ void irdma_dealloc_push_page(struct irdma_pci_f *rf, struct irdma_sc_qp *qp); int irdma_process_resize_list(struct irdma_cq *iwcq, struct irdma_device *iwdev, struct irdma_cq_buf *lcqe_buf); +#if __FreeBSD_version < 1400026 +int irdma_destroy_cq(struct ib_cq *ib_cq); +#else void irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); +#endif +#if __FreeBSD_version < 1400026 +struct ib_ucontext *irdma_alloc_ucontext(struct ib_device *, struct ib_udata *); +#else int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); +#endif +#if __FreeBSD_version < 1400026 +int irdma_dealloc_ucontext(struct ib_ucontext *); +#else void irdma_dealloc_ucontext(struct ib_ucontext *context); +#endif +#if __FreeBSD_version < 1400026 +struct ib_pd *irdma_alloc_pd(struct ib_device *, struct ib_ucontext *, + struct ib_udata *); +#else int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#endif +#if __FreeBSD_version < 1400026 +int irdma_dealloc_pd(struct ib_pd *); +#else void irdma_dealloc_pd(struct ib_pd *ibpd, struct 
ib_udata *udata); +#endif int irdma_add_gid(struct ib_device *, u8, unsigned int, const union ib_gid *, const struct ib_gid_attr *, void **); int irdma_del_gid(struct ib_device *, u8, unsigned int, void **); diff --git a/sys/dev/irdma/irdma_kcompat.c b/sys/dev/irdma/irdma_kcompat.c index d0cf960b5147..d49293aedd78 100644 --- a/sys/dev/irdma/irdma_kcompat.c +++ b/sys/dev/irdma/irdma_kcompat.c @@ -97,6 +97,7 @@ irdma_del_gid(struct ib_device *device, return 0; } +#if __FreeBSD_version >= 1400026 /** * irdma_alloc_mr - register stag for fast memory registration * @pd: ibpd pointer @@ -108,6 +109,18 @@ struct ib_mr * irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg, struct ib_udata *udata) { +#else +/** + * irdma_alloc_mr - register stag for fast memory registration + * @pd: ibpd pointer + * @mr_type: memory for stag registrion + * @max_num_sg: man number of pages + */ +struct ib_mr * +irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg) +{ +#endif struct irdma_device *iwdev = to_iwdev(pd->device); struct irdma_pble_alloc *palloc; struct irdma_pbl *iwpbl; @@ -162,6 +175,7 @@ err: #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8) #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd) +#if __FreeBSD_version >= 1400026 /** * irdma_alloc_ucontext - Allocate the user context data structure * @uctx: context @@ -253,7 +267,116 @@ ver_error: req.userspace_ver, IRDMA_ABI_VER); return -EINVAL; } +#endif +#if __FreeBSD_version < 1400026 +/** + * irdma_alloc_ucontext - Allocate the user context data structure + * @ibdev: ib device pointer + * @udata: user data + * + * This keeps track of all objects associated with a particular + * user-mode client. 
+ */ +struct ib_ucontext * +irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) +{ + struct irdma_device *iwdev = to_iwdev(ibdev); + struct irdma_alloc_ucontext_req req = {0}; + struct irdma_alloc_ucontext_resp uresp = {0}; + struct irdma_ucontext *ucontext; + struct irdma_uk_attrs *uk_attrs; + + if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN || + udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN) + return ERR_PTR(-EINVAL); + + if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) + return ERR_PTR(-EINVAL); + + if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER) + goto ver_error; + + ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL); + if (!ucontext) + return ERR_PTR(-ENOMEM); + + ucontext->iwdev = iwdev; + ucontext->abi_ver = req.userspace_ver; + + uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; + /* GEN_1 legacy support with libi40iw */ + if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) { + if (uk_attrs->hw_rev != IRDMA_GEN_1) { + kfree(ucontext); + return ERR_PTR(-EOPNOTSUPP); + } + + ucontext->legacy_mode = true; + uresp.max_qps = iwdev->rf->max_qp; + uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds; + uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2; + uresp.kernel_ver = req.userspace_ver; + if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) { + kfree(ucontext); + return ERR_PTR(-EFAULT); + } + } else { + u64 bar_off; + + uresp.kernel_ver = IRDMA_ABI_VER; + uresp.feature_flags = uk_attrs->feature_flags; + uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; + uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; + uresp.max_hw_inline = uk_attrs->max_hw_inline; + uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; + uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; + uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; + uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; + uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; + uresp.hw_rev = uk_attrs->hw_rev; + + bar_off = + (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; + + spin_lock_init(&ucontext->mmap_tbl_lock); + ucontext->db_mmap_entry = + irdma_user_mmap_entry_add_hash(ucontext, bar_off, + IRDMA_MMAP_IO_NC, + &uresp.db_mmap_key); + if (!ucontext->db_mmap_entry) { + spin_lock_destroy(&ucontext->mmap_tbl_lock); + kfree(ucontext); + return ERR_PTR(-ENOMEM); + } + + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { + irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry); + spin_lock_destroy(&ucontext->mmap_tbl_lock); + kfree(ucontext); + return ERR_PTR(-EFAULT); + } + } + + INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); + spin_lock_init(&ucontext->cq_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); + spin_lock_init(&ucontext->qp_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->vma_list); + mutex_init(&ucontext->vma_list_mutex); + + return &ucontext->ibucontext; + +ver_error: + ibdev_err(&iwdev->ibdev, + "Invalid userspace driver version detected. 
Detected version %d, should be %d\n", + req.userspace_ver, IRDMA_ABI_VER); + return ERR_PTR(-EINVAL); +} +#endif + +#if __FreeBSD_version >= 1400026 /** * irdma_dealloc_ucontext - deallocate the user context data structure * @context: user context created during alloc @@ -267,8 +390,28 @@ irdma_dealloc_ucontext(struct ib_ucontext *context) return; } +#endif + +#if __FreeBSD_version < 1400026 +/** + * irdma_dealloc_ucontext - deallocate the user context data structure + * @context: user context created during alloc + */ +int +irdma_dealloc_ucontext(struct ib_ucontext *context) +{ + struct irdma_ucontext *ucontext = to_ucontext(context); + + irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry); + spin_lock_destroy(&ucontext->mmap_tbl_lock); + kfree(ucontext); + + return 0; +} +#endif #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd) +#if __FreeBSD_version >= 1400026 /** * irdma_alloc_pd - allocate protection domain * @pd: protection domain @@ -319,7 +462,67 @@ error: return err; } +#endif + +#if __FreeBSD_version < 1400026 +/** + * irdma_alloc_pd - allocate protection domain + * @ibdev: IB device + * @context: user context + * @udata: user data + */ +struct ib_pd * +irdma_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) +{ + struct irdma_pd *iwpd; + struct irdma_device *iwdev = to_iwdev(ibdev); + struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; + struct irdma_pci_f *rf = iwdev->rf; + struct irdma_alloc_pd_resp uresp = {0}; + struct irdma_sc_pd *sc_pd; + u32 pd_id = 0; + int err; + + err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, + &rf->next_pd); + if (err) + return ERR_PTR(err); + + iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL); + if (!iwpd) { + err = -ENOMEM; + goto free_res; + } + + sc_pd = &iwpd->sc_pd; + if (udata) { + struct irdma_ucontext *ucontext = to_ucontext(context); + + irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); + uresp.pd_id = pd_id; + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { + err = -EFAULT; + goto error; + } + } else { + irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER); + } + + return &iwpd->ibpd; + +error: + kfree(iwpd); +free_res: + + irdma_free_rsrc(rf, rf->allocated_pds, pd_id); + return ERR_PTR(err); +} + +#endif + +#if __FreeBSD_version >= 1400026 void irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { @@ -329,6 +532,21 @@ irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); } +#endif + +#if __FreeBSD_version < 1400026 +int +irdma_dealloc_pd(struct ib_pd *ibpd) +{ + struct irdma_pd *iwpd = to_iwpd(ibpd); + struct irdma_device *iwdev = to_iwdev(ibpd->device); + + irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); + kfree(iwpd); + return 0; +} +#endif + static void irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info, const struct ib_gid_attr *sgid_attr, @@ -416,6 +634,7 @@ irdma_create_ah_wait(struct irdma_pci_f *rf, #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd) +#if __FreeBSD_version >= 1400026 /** * irdma_create_ah - create address handle * @ib_ah: ptr to AH @@ -539,6 +758,7 @@ err_gid_l2: return err; } +#endif void irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr) @@ -546,19 +766,172 @@ irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr) ether_addr_copy(dmac, attr->dmac); } +#if __FreeBSD_version < 1400026 +struct ib_ah * +irdma_create_ah_stub(struct ib_pd *ibpd, + 
struct ib_ah_attr *attr, + struct ib_udata *udata) +#else int irdma_create_ah_stub(struct ib_ah *ib_ah, struct ib_ah_attr *attr, u32 flags, struct ib_udata *udata) +#endif { +#if __FreeBSD_version >= 1400026 return -ENOSYS; +#else + return ERR_PTR(-ENOSYS); +#endif } +#if __FreeBSD_version >= 1400026 void irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags) { return; } +#else +int +irdma_destroy_ah_stub(struct ib_ah *ibah) +{ + return -ENOSYS; +} +#endif + +#if __FreeBSD_version < 1400026 +/** + * irdma_create_ah - create address handle + * @ibpd: ptr to pd + * @attr: address handle attributes + * @udata: user data + * + * returns a pointer to an address handle + */ +struct ib_ah * +irdma_create_ah(struct ib_pd *ibpd, + struct ib_ah_attr *attr, + struct ib_udata *udata) +{ + struct irdma_pd *pd = to_iwpd(ibpd); + struct irdma_device *iwdev = to_iwdev(ibpd->device); + struct irdma_ah *ah; + union ib_gid sgid; + struct ib_gid_attr sgid_attr; + struct irdma_pci_f *rf = iwdev->rf; + struct irdma_sc_ah *sc_ah; + u32 ah_id = 0; + struct irdma_ah_info *ah_info; + struct irdma_create_ah_resp uresp; + union { + struct sockaddr saddr; + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; + } sgid_addr, dgid_addr; + int err; + u8 dmac[ETH_ALEN]; + bool sleep = udata ? true : false; + + if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN) + return ERR_PTR(-EINVAL); + + err = irdma_alloc_rsrc(rf, rf->allocated_ahs, + rf->max_ah, &ah_id, &rf->next_ah); + + if (err) + return ERR_PTR(err); + + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); + if (!ah) { + irdma_free_rsrc(rf, rf->allocated_ahs, ah_id); + return ERR_PTR(-ENOMEM); + } + + ah->pd = pd; + sc_ah = &ah->sc_ah; + sc_ah->ah_info.ah_idx = ah_id; + sc_ah->ah_info.vsi = &iwdev->vsi; + irdma_sc_init_ah(&rf->sc_dev, sc_ah); + ah->sgid_index = attr->grh.sgid_index; + memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); + rcu_read_lock(); + err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num, + attr->grh.sgid_index, &sgid, &sgid_attr); + rcu_read_unlock(); + if (err) { + irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS, + "GID lookup at idx=%d with port=%d failed\n", + attr->grh.sgid_index, attr->port_num); + err = -EINVAL; + goto err_gid_l2; + } + rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); + ah->av.attrs = *attr; + ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr, + sgid_attr.gid_type, + &sgid); + + if (sgid_attr.ndev) + dev_put(sgid_attr.ndev); + + ah->av.sgid_addr.saddr = sgid_addr.saddr; + ah->av.dgid_addr.saddr = dgid_addr.saddr; + ah_info = &sc_ah->ah_info; + ah_info->ah_idx = ah_id; + ah_info->pd_idx = pd->sc_pd.pd_id; + + ether_addr_copy(ah_info->mac_addr, IF_LLADDR(iwdev->netdev)); + if (attr->ah_flags & IB_AH_GRH) { + ah_info->flow_label = attr->grh.flow_label; + ah_info->hop_ttl = attr->grh.hop_limit; + ah_info->tc_tos = attr->grh.traffic_class; + } + + if (udata) + ib_resolve_eth_dmac(ibpd->device, attr); + irdma_ether_copy(dmac, attr); + + irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr, + dmac, ah->av.net_type); + + err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac); + if (err) + goto err_gid_l2; + + err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE, + sleep, irdma_gsi_ud_qp_ah_cb, sc_ah); + if (err) { + irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS, + "CQP-OP Create AH fail"); + goto err_gid_l2; + } + + err = irdma_create_ah_wait(rf, sc_ah, sleep); + if (err) { + 
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS, + "CQP create AH timed out"); + goto err_gid_l2; + } + + if (udata) { + uresp.ah_id = ah->sc_ah.ah_info.ah_idx; + err = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (err) { + irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, + IRDMA_OP_AH_DESTROY, false, NULL, ah); + goto err_gid_l2; + } + } + + return &ah->ibah; +err_gid_l2: + kfree(ah); + irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); + + return ERR_PTR(err); +} +#endif /** * irdma_free_qp_rsrc - free up memory resources for qp @@ -797,8 +1170,13 @@ error: * @ibqp: qp's ib pointer also to get to device's qp address * @udata: user data */ +#if __FreeBSD_version >= 1400026 int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) +#else +int +irdma_destroy_qp(struct ib_qp *ibqp) +#endif { struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_device *iwdev = iwqp->iwdev; @@ -835,17 +1213,31 @@ free_rsrc: * @attr: attributes for cq * @udata: user data */ +#if __FreeBSD_version >= 1400026 int irdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) +#else +struct ib_cq * +irdma_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, + struct ib_udata *udata) +#endif { #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf) #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size) +#if __FreeBSD_version >= 1400026 struct ib_device *ibdev = ibcq->device; +#endif struct irdma_device *iwdev = to_iwdev(ibdev); struct irdma_pci_f *rf = iwdev->rf; +#if __FreeBSD_version >= 1400026 struct irdma_cq *iwcq = to_iwcq(ibcq); +#else + struct irdma_cq *iwcq; +#endif u32 cq_num = 0; struct irdma_sc_cq *cq; struct irdma_sc_dev *dev = &rf->sc_dev; @@ -859,6 +1251,7 @@ irdma_create_cq(struct ib_cq *ibcq, int entries = attr->cqe; bool cqe_64byte_ena; +#if __FreeBSD_version >= 1400026 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); if (err_code) return err_code; @@ -866,10 +1259,27 @@ irdma_create_cq(struct ib_cq *ibcq, if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN || udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN)) return -EINVAL; +#else + err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); + if (err_code) + return ERR_PTR(err_code); + + if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN || + udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN)) + return ERR_PTR(-EINVAL); + + iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL); + if (!iwcq) + return ERR_PTR(-ENOMEM); +#endif err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num, &rf->next_cq); if (err_code) +#if __FreeBSD_version >= 1400026 return err_code; +#else + goto error; +#endif cq = &iwcq->sc_cq; cq->back_cq = iwcq; atomic_set(&iwcq->refcnt, 1); @@ -898,7 +1308,11 @@ irdma_create_cq(struct ib_cq *ibcq, struct irdma_cq_mr *cqmr_shadow; iwcq->user_mode = true; +#if __FreeBSD_version >= 1400026 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); +#else + ucontext = to_ucontext(context); +#endif if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { @@ -1030,12 +1444,22 @@ irdma_create_cq(struct ib_cq *ibcq, rf->cq_table[cq_num] = iwcq; init_completion(&iwcq->free_cq); +#if __FreeBSD_version >= 1400026 return 0; +#else + return &iwcq->ibcq; +#endif cq_destroy: irdma_cq_wq_destroy(rf, cq); cq_free_rsrc: irdma_cq_free_rsrc(rf, iwcq); +#if __FreeBSD_version >= 1400026 return err_code; +#else 
+error: + kfree(iwcq); + return ERR_PTR(err_code); +#endif } /** @@ -1085,6 +1509,7 @@ irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl, * @ah_flags: destroy flags */ +#if __FreeBSD_version >= 1400026 void irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags) { @@ -1097,9 +1522,33 @@ irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags) irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx); } +#endif +#if __FreeBSD_version < 1400026 +int +irdma_destroy_ah(struct ib_ah *ibah) +{ + struct irdma_device *iwdev = to_iwdev(ibah->device); + struct irdma_ah *ah = to_iwah(ibah); + + irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY, + false, NULL, ah); + + irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah->sc_ah.ah_info.ah_idx); + + kfree(ah); + return 0; +} +#endif + +#if __FreeBSD_version >= 1400026 int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) +#else +int +irdma_dereg_mr(struct ib_mr *ib_mr) +#endif { struct irdma_mr *iwmr = to_iwmr(ib_mr); struct irdma_device *iwdev = to_iwdev(ib_mr->device); @@ -1109,10 +1558,15 @@ irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) { if (iwmr->region) { struct irdma_ucontext *ucontext; +#if __FreeBSD_version >= 1400026 + + ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); + +#else + struct ib_pd *ibpd = ib_mr->pd; - ucontext = rdma_udata_to_drv_context(udata, - struct irdma_ucontext, - ibucontext); + ucontext = to_ucontext(ibpd->uobject->context); +#endif irdma_del_memlist(iwmr, ucontext); } goto done; @@ -1219,6 +1673,7 @@ kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr, return 0; } +#if __FreeBSD_version >= 1400026 /** * irdma_destroy_cq - destroy cq * @ib_cq: cq pointer @@ -1253,6 +1708,46 @@ irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) irdma_cq_free_rsrc(iwdev->rf, iwcq); } +#endif +#if __FreeBSD_version < 1400026 +/** + * irdma_destroy_cq - destroy cq + * @ib_cq: cq pointer + */ +int +irdma_destroy_cq(struct ib_cq *ib_cq) +{ + struct irdma_device *iwdev = to_iwdev(ib_cq->device); + struct irdma_cq *iwcq = to_iwcq(ib_cq); + struct irdma_sc_cq *cq = &iwcq->sc_cq; + struct irdma_sc_dev *dev = cq->dev; + struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id]; + struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq); + unsigned long flags; + + spin_lock_irqsave(&iwcq->lock, flags); + if (!list_empty(&iwcq->cmpl_generated)) + irdma_remove_cmpls_list(iwcq); + if (!list_empty(&iwcq->resize_list)) + irdma_process_resize_list(iwcq, iwdev, NULL); + spin_unlock_irqrestore(&iwcq->lock, flags); + + irdma_cq_rem_ref(ib_cq); + wait_for_completion(&iwcq->free_cq); + + irdma_cq_wq_destroy(iwdev->rf, cq); + + spin_lock_irqsave(&iwceq->ce_lock, flags); + irdma_sc_cleanup_ceqes(cq, ceq); + spin_unlock_irqrestore(&iwceq->ce_lock, flags); + + irdma_cq_free_rsrc(iwdev->rf, iwcq); + kfree(iwcq); + + return 0; +} + +#endif /** * irdma_alloc_mw - Allocate memory window * @pd: Protection domain @@ -1316,6 +1811,75 @@ kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node) (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6); } +#if __FreeBSD_version < 1400026 +struct irdma_vma_data { + struct list_head list; + struct vm_area_struct *vma; + struct mutex *vma_list_mutex; /* protect the vma_list */ +}; + +/** + * irdma_vma_open - + * @vma: User VMA + */ +static void +irdma_vma_open(struct vm_area_struct *vma) +{ + vma->vm_ops = NULL; +} + +/** + * irdma_vma_close - Remove vma data from vma list + * 
@vma: User VMA + */ +static void +irdma_vma_close(struct vm_area_struct *vma) +{ + struct irdma_vma_data *vma_data; + + vma_data = vma->vm_private_data; + vma->vm_private_data = NULL; + vma_data->vma = NULL; + mutex_lock(vma_data->vma_list_mutex); + list_del(&vma_data->list); + mutex_unlock(vma_data->vma_list_mutex); + kfree(vma_data); +} + +static const struct vm_operations_struct irdma_vm_ops = { + .open = irdma_vma_open, + .close = irdma_vma_close +}; + +/** + * irdma_set_vma_data - Save vma data in context list + * @vma: User VMA + * @context: ib user context + */ +static int +irdma_set_vma_data(struct vm_area_struct *vma, + struct irdma_ucontext *context) +{ + struct list_head *vma_head = &context->vma_list; + struct irdma_vma_data *vma_entry; + + vma_entry = kzalloc(sizeof(*vma_entry), GFP_KERNEL); + if (!vma_entry) + return -ENOMEM; + + vma->vm_private_data = vma_entry; + vma->vm_ops = &irdma_vm_ops; + + vma_entry->vma = vma; + vma_entry->vma_list_mutex = &context->vma_list_mutex; + + mutex_lock(&context->vma_list_mutex); + list_add(&vma_entry->list, vma_head); + mutex_unlock(&context->vma_list_mutex); + + return 0; +} + /** * irdma_disassociate_ucontext - Disassociate user context * @context: ib user context @@ -1323,8 +1887,47 @@ kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node) void irdma_disassociate_ucontext(struct ib_ucontext *context) { + struct irdma_ucontext *ucontext = to_ucontext(context); + + struct irdma_vma_data *vma_data, *n; + struct vm_area_struct *vma; + *** 504 LINES SKIPPED ***
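
[Illustrative note, not part of the commit.] The diff above is truncated by the
archive, so for readers who only skim it, the short C sketch below shows the
general __FreeBSD_version gating pattern the patch applies throughout irdma(4):
one code path for the newer OFED KPI on 14-CURRENT (where destroy/dealloc
callbacks take a struct ib_udata pointer) and one for the older stable/13 KPI
(where they do not). Every identifier in the sketch (example_pd,
example_dealloc_pd) is a hypothetical placeholder, not a symbol from the driver.

/*
 * Minimal sketch of the version-gating pattern used by the patch.
 * Not from the commit; all identifiers here are placeholders.
 */
#include <sys/param.h>		/* defines __FreeBSD_version */

struct ib_udata;		/* opaque; supplied by the OFED headers */

struct example_pd {
	int	pd_id;
};

#if __FreeBSD_version >= 1400026
/* Newer KPI (14-CURRENT): the callback receives udata and returns void. */
static void
example_dealloc_pd(struct example_pd *pd, struct ib_udata *udata)
{
	(void)udata;		/* unused in this sketch */
	pd->pd_id = -1;
}
#else
/* Older KPI (stable/13): no udata argument; an int status is returned. */
static int
example_dealloc_pd(struct example_pd *pd)
{
	pd->pd_id = -1;
	return (0);
}
#endif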