Date:      Thu, 30 May 2019 13:01:46 +0000 (UTC)
From:      Marcin Wojtas <mw@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r348385 - in head/sys: contrib/ena-com contrib/ena-com/ena_defs dev/ena
Message-ID:  <201905301301.x4UD1kMB055054@repo.freebsd.org>

Author: mw
Date: Thu May 30 13:01:46 2019
New Revision: 348385
URL: https://svnweb.freebsd.org/changeset/base/348385

Log:
  Adjust ENA driver to the new ena-com
  
  A recent HAL change preparing for ENAv2 support required minor driver
  modifications.
  
  ena_com_sq_empty_space() is not available in this ena-com, so it had to
  be replaced with ena_com_free_desc() (see the sketch below).
  
  Moreover, ena_com_admin_init() no longer takes a third argument indicating
  whether the spin lock should be initialized, so that argument was removed
  from the callers (also sketched below).
  
  Submitted by:  Michal Krawczyk <mk@semihalf.com>
  Obtained from: Semihalf
  Sponsored by:  Amazon, Inc.
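
  A minimal sketch of the first change, assuming a TX ring that keeps an
  ena_com_io_sq pointer; the variable names (tx_ring, avail) are
  illustrative, not the exact FreeBSD driver code:

	/* Old ena-com: query how much empty space is left in the SQ. */
	avail = ena_com_sq_empty_space(tx_ring->ena_com_io_sq);

	/* New ena-com: the equivalent query is the free descriptor count. */
	avail = ena_com_free_desc(tx_ring->ena_com_io_sq);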

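  And a sketch of the corresponding call-site adjustment for
  ena_com_admin_init(); rc, ena_dev and aenq_handlers are assumed
  caller-side names:

	/* Old prototype: a third argument told ena_com_admin_init() whether
	 * to initialize the admin queue spin lock.
	 */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);

	/* New prototype: the spin lock is always initialized internally,
	 * so the flag is gone.
	 */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
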
Modified:
  head/sys/contrib/ena-com/ena_com.c
  head/sys/contrib/ena-com/ena_com.h
  head/sys/contrib/ena-com/ena_defs/ena_admin_defs.h
  head/sys/contrib/ena-com/ena_defs/ena_common_defs.h
  head/sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h
  head/sys/contrib/ena-com/ena_defs/ena_gen_info.h
  head/sys/contrib/ena-com/ena_defs/ena_regs_defs.h
  head/sys/contrib/ena-com/ena_eth_com.c
  head/sys/contrib/ena-com/ena_eth_com.h
  head/sys/contrib/ena-com/ena_plat.h
  head/sys/dev/ena/ena.c
Directory Properties:
  head/sys/contrib/ena-com/   (props changed)

Modified: head/sys/contrib/ena-com/ena_com.c
==============================================================================
--- head/sys/contrib/ena-com/ena_com.c	Thu May 30 12:31:51 2019	(r348384)
+++ head/sys/contrib/ena-com/ena_com.c	Thu May 30 13:01:46 2019	(r348385)
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -32,9 +32,6 @@
  */
 
 #include "ena_com.h"
-#ifdef ENA_INTERNAL
-#include "ena_gen_info.h"
-#endif
 
 /*****************************************************************************/
 /*****************************************************************************/
@@ -52,9 +49,6 @@
 #define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
 
 #endif /* ENA_EXTENDED_STATS */
-#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
-		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
-		| (ENA_COMMON_SPEC_VERSION_MINOR))
 
 #define ENA_CTRL_MAJOR		0
 #define ENA_CTRL_MINOR		0
@@ -76,6 +70,8 @@
 
 #define ENA_REGS_ADMIN_INTR_MASK 1
 
+#define ENA_POLL_MS	5
+
 /*****************************************************************************/
 /*****************************************************************************/
 /*****************************************************************************/
@@ -112,8 +108,8 @@ static inline int ena_com_mem_addr_set(struct ena_com_
 		return ENA_COM_INVAL;
 	}
 
-	ena_addr->mem_addr_low = (u32)addr;
-	ena_addr->mem_addr_high = (u16)((u64)addr >> 32);
+	ena_addr->mem_addr_low = lower_32_bits(addr);
+	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
 
 	return 0;
 }
@@ -127,7 +123,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_
 			       sq->mem_handle);
 
 	if (!sq->entries) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -149,7 +145,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_
 			       cq->mem_handle);
 
 	if (!cq->entries)  {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -174,7 +170,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev 
 			aenq->mem_handle);
 
 	if (!aenq->entries) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -249,7 +245,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd
 	tail_masked = admin_queue->sq.tail & queue_size_mask;
 
 	/* In case of queue FULL */
-	cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
+	cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
 	if (cnt >= admin_queue->q_depth) {
 		ena_trc_dbg("admin queue is full.\n");
 		admin_queue->stats.out_of_space++;
@@ -301,7 +297,7 @@ static inline int ena_com_init_comp_ctxt(struct ena_co
 
 	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
 	if (unlikely(!queue->comp_ctx)) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -320,7 +316,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(s
 						     struct ena_admin_acq_entry *comp,
 						     size_t comp_size_in_bytes)
 {
-	unsigned long flags;
+	unsigned long flags = 0;
 	struct ena_comp_ctx *comp_ctx;
 
 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
@@ -332,7 +328,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(s
 					      cmd_size_in_bytes,
 					      comp,
 					      comp_size_in_bytes);
-	if (unlikely(IS_ERR(comp_ctx)))
+	if (IS_ERR(comp_ctx))
 		admin_queue->running_state = false;
 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 
@@ -348,6 +344,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_
 
 	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 
+	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
 	io_sq->desc_entry_size =
 		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 		sizeof(struct ena_eth_io_tx_desc) :
@@ -373,18 +370,21 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_
 		}
 
 		if (!io_sq->desc_addr.virt_addr) {
-			ena_trc_err("memory allocation failed");
+			ena_trc_err("memory allocation failed\n");
 			return ENA_COM_NO_MEM;
 		}
 	}
 
 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 		/* Allocate bounce buffers */
-		io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
-		io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+		io_sq->bounce_buf_ctrl.buffer_size =
+			ena_dev->llq_info.desc_list_entry_size;
+		io_sq->bounce_buf_ctrl.buffers_num =
+			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
 		io_sq->bounce_buf_ctrl.next_to_use = 0;
 
-		size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;
+		size = io_sq->bounce_buf_ctrl.buffer_size *
+			io_sq->bounce_buf_ctrl.buffers_num;
 
 		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
 				   size,
@@ -395,11 +395,12 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_
 			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
 
 		if (!io_sq->bounce_buf_ctrl.base_buffer) {
-			ena_trc_err("bounce buffer memory allocation failed");
+			ena_trc_err("bounce buffer memory allocation failed\n");
 			return ENA_COM_NO_MEM;
 		}
 
-		memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));
+		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
+		       sizeof(io_sq->llq_info));
 
 		/* Initiate the first bounce buffer */
 		io_sq->llq_buf_ctrl.curr_bounce_buf =
@@ -408,6 +409,10 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_
 		       0x0, io_sq->llq_info.desc_list_entry_size);
 		io_sq->llq_buf_ctrl.descs_left_in_line =
 			io_sq->llq_info.descs_num_before_header;
+
+		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
+			io_sq->entries_in_tx_burst_left =
+				io_sq->llq_info.max_entries_in_tx_burst;
 	}
 
 	io_sq->tail = 0;
@@ -451,7 +456,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_
 	}
 
 	if (!io_cq->cdesc_addr.virt_addr) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -500,12 +505,12 @@ static void ena_com_handle_admin_completion(struct ena
 	cqe = &admin_queue->cq.entries[head_masked];
 
 	/* Go over all the completions */
-	while ((cqe->acq_common_descriptor.flags &
+	while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
 			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
 		/* Do not read the rest of the completion entry before the
 		 * phase bit was validated
 		 */
-		rmb();
+		dma_rmb();
 		ena_com_handle_single_admin_completion(admin_queue, cqe);
 
 		head_masked++;
@@ -552,7 +557,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 						     struct ena_com_admin_queue *admin_queue)
 {
-	unsigned long flags, timeout;
+	unsigned long flags = 0;
+	unsigned long timeout;
 	int ret;
 
 	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
@@ -577,7 +583,7 @@ static int ena_com_wait_and_process_admin_cq_polling(s
 			goto err;
 		}
 
-		ENA_MSLEEP(100);
+		ENA_MSLEEP(ENA_POLL_MS);
 	}
 
 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -598,42 +604,113 @@ err:
 	return ret;
 }
 
+/**
+ * Set the LLQ configurations of the firmware
+ *
+ * The driver provides only the enabled feature values to the device,
+ * which in turn, checks if they are supported.
+ */
+static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+{
+	struct ena_com_admin_queue *admin_queue;
+	struct ena_admin_set_feat_cmd cmd;
+	struct ena_admin_set_feat_resp resp;
+	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+	int ret;
+
+	memset(&cmd, 0x0, sizeof(cmd));
+	admin_queue = &ena_dev->admin_queue;
+
+	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
+
+	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
+	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
+	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
+	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+
+	ret = ena_com_execute_admin_command(admin_queue,
+					    (struct ena_admin_aq_entry *)&cmd,
+					    sizeof(cmd),
+					    (struct ena_admin_acq_entry *)&resp,
+					    sizeof(resp));
+
+	if (unlikely(ret))
+		ena_trc_err("Failed to set LLQ configurations: %d\n", ret);
+
+	return ret;
+}
+
 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
-				   struct ena_admin_feature_llq_desc *llq_desc)
+				   struct ena_admin_feature_llq_desc *llq_features,
+				   struct ena_llq_configurations *llq_default_cfg)
 {
 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+	u16 supported_feat;
+	int rc;
 
 	memset(llq_info, 0, sizeof(*llq_info));
 
-	switch (llq_desc->header_location_ctrl) {
-	case ENA_ADMIN_INLINE_HEADER:
-		llq_info->inline_header = true;
-		break;
-	case ENA_ADMIN_HEADER_RING:
-		llq_info->inline_header = false;
-		break;
-	default:
-		ena_trc_err("Invalid header location control\n");
+	supported_feat = llq_features->header_location_ctrl_supported;
+
+	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
+		llq_info->header_location_ctrl =
+			llq_default_cfg->llq_header_location;
+	} else {
+		ena_trc_err("Invalid header location control, supported: 0x%x\n",
+			    supported_feat);
 		return -EINVAL;
 	}
 
-	switch (llq_desc->entry_size_ctrl) {
-	case ENA_ADMIN_LIST_ENTRY_SIZE_128B:
-		llq_info->desc_list_entry_size = 128;
-		break;
-	case ENA_ADMIN_LIST_ENTRY_SIZE_192B:
-		llq_info->desc_list_entry_size = 192;
-		break;
-	case ENA_ADMIN_LIST_ENTRY_SIZE_256B:
-		llq_info->desc_list_entry_size = 256;
-		break;
-	default:
-		ena_trc_err("Invalid entry_size_ctrl %d\n",
-			    llq_desc->entry_size_ctrl);
-		return -EINVAL;
+	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
+		supported_feat = llq_features->descriptors_stride_ctrl_supported;
+		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
+			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
+		} else	{
+			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
+				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
+				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+			} else {
+				ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
+					    supported_feat);
+				return -EINVAL;
+			}
+
+			ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+				    llq_default_cfg->llq_stride_ctrl,
+				    supported_feat,
+				    llq_info->desc_stride_ctrl);
+		}
+	} else {
+		llq_info->desc_stride_ctrl = 0;
 	}
 
-	if ((llq_info->desc_list_entry_size & 0x7)) {
+	supported_feat = llq_features->entry_size_ctrl_supported;
+	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
+		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
+		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
+	} else {
+		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
+			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+			llq_info->desc_list_entry_size = 128;
+		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
+			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
+			llq_info->desc_list_entry_size = 192;
+		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+			llq_info->desc_list_entry_size = 256;
+		} else {
+			ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
+			return -EINVAL;
+		}
+
+		ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+			    llq_default_cfg->llq_ring_entry_size,
+			    supported_feat,
+			    llq_info->desc_list_entry_size);
+	}
+	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
 		/* The desc list entry size should be whole multiply of 8
 		 * This requirement comes from __iowrite64_copy()
 		 */
@@ -642,35 +719,50 @@ static int ena_com_config_llq_info(struct ena_com_dev 
 		return -EINVAL;
 	}
 
-	if (llq_info->inline_header) {
-		llq_info->desc_stride_ctrl = llq_desc->descriptors_stride_ctrl;
-		if ((llq_info->desc_stride_ctrl != ENA_ADMIN_SINGLE_DESC_PER_ENTRY) &&
-		    (llq_info->desc_stride_ctrl != ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)) {
-			ena_trc_err("Invalid desc_stride_ctrl %d\n",
-				    llq_info->desc_stride_ctrl);
-			return -EINVAL;
-		}
-	} else {
-		llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
-	}
-
-	if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
+	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
 		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
 			sizeof(struct ena_eth_io_tx_desc);
 	else
 		llq_info->descs_per_entry = 1;
 
-	llq_info->descs_num_before_header = llq_desc->desc_num_before_header_ctrl;
+	supported_feat = llq_features->desc_num_before_header_supported;
+	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
+		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
+	} else {
+		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
+		} else {
+			ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
+				    supported_feat);
+			return -EINVAL;
+		}
 
-	return 0;
-}
+		ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+			    llq_default_cfg->llq_num_decs_before_header,
+			    supported_feat,
+			    llq_info->descs_num_before_header);
+	}
 
+	llq_info->max_entries_in_tx_burst =
+		(u16)(llq_features->max_tx_burst_size /	llq_default_cfg->llq_ring_entry_size_value);
 
+	rc = ena_com_set_llq(ena_dev);
+	if (rc)
+		ena_trc_err("Cannot set LLQ configuration: %d\n", rc);
 
+	return 0;
+}
+
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
 							struct ena_com_admin_queue *admin_queue)
 {
-	unsigned long flags;
+	unsigned long flags = 0;
 	int ret;
 
 	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
@@ -715,7 +807,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *
 	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
 		mmio_read->read_resp;
 	u32 mmio_read_reg, ret, i;
-	unsigned long flags;
+	unsigned long flags = 0;
 	u32 timeout = mmio_read->reg_read_to;
 
 	ENA_MIGHT_SLEEP();
@@ -736,15 +828,11 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *
 	mmio_read_reg |= mmio_read->seq_num &
 			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 
-	/* make sure read_resp->req_id get updated before the hw can write
-	 * there
-	 */
-	wmb();
+	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
+			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 
-	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
-
 	for (i = 0; i < timeout; i++) {
-		if (read_resp->req_id == mmio_read->seq_num)
+		if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
 			break;
 
 		ENA_UDELAY(1);
@@ -761,7 +849,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *
 	}
 
 	if (read_resp->reg_off != offset) {
-		ena_trc_err("Read failure: wrong offset provided");
+		ena_trc_err("Read failure: wrong offset provided\n");
 		ret = ENA_MMIO_READ_TIMEOUT;
 	} else {
 		ret = read_resp->reg_val;
@@ -856,7 +944,6 @@ static void ena_com_io_queue_free(struct ena_com_dev *
 	}
 
 	if (io_sq->bounce_buf_ctrl.base_buffer) {
-		size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
 		ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
 		io_sq->bounce_buf_ctrl.base_buffer = NULL;
 	}
@@ -867,6 +954,9 @@ static int wait_for_reset_state(struct ena_com_dev *en
 {
 	u32 val, i;
 
+	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
+	timeout = (timeout * 100) / ENA_POLL_MS;
+
 	for (i = 0; i < timeout; i++) {
 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 
@@ -879,8 +969,7 @@ static int wait_for_reset_state(struct ena_com_dev *en
 			exp_state)
 			return 0;
 
-		/* The resolution of the timeout is 100ms */
-		ENA_MSLEEP(100);
+		ENA_MSLEEP(ENA_POLL_MS);
 	}
 
 	return ENA_COM_TIMER_EXPIRED;
@@ -903,7 +992,8 @@ static int ena_com_get_feature_ex(struct ena_com_dev *
 				  struct ena_admin_get_feat_resp *get_resp,
 				  enum ena_admin_aq_feature_id feature_id,
 				  dma_addr_t control_buf_dma_addr,
-				  u32 control_buff_size)
+				  u32 control_buff_size,
+				  u8 feature_ver)
 {
 	struct ena_com_admin_queue *admin_queue;
 	struct ena_admin_get_feat_cmd get_cmd;
@@ -934,7 +1024,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *
 	}
 
 	get_cmd.control_buffer.length = control_buff_size;
-
+	get_cmd.feat_common.feature_version = feature_ver;
 	get_cmd.feat_common.feature_id = feature_id;
 
 	ret = ena_com_execute_admin_command(admin_queue,
@@ -954,13 +1044,15 @@ static int ena_com_get_feature_ex(struct ena_com_dev *
 
 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
 			       struct ena_admin_get_feat_resp *get_resp,
-			       enum ena_admin_aq_feature_id feature_id)
+			       enum ena_admin_aq_feature_id feature_id,
+			       u8 feature_ver)
 {
 	return ena_com_get_feature_ex(ena_dev,
 				      get_resp,
 				      feature_id,
 				      0,
-				      0);
+				      0,
+				      feature_ver);
 }
 
 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
@@ -1030,7 +1122,7 @@ static int ena_com_indirect_table_allocate(struct ena_
 	int ret;
 
 	ret = ena_com_get_feature(ena_dev, &get_resp,
-				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
 	if (unlikely(ret))
 		return ret;
 
@@ -1269,7 +1361,7 @@ int ena_com_execute_admin_command(struct ena_com_admin
 
 	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
 					    comp, comp_size);
-	if (unlikely(IS_ERR(comp_ctx))) {
+	if (IS_ERR(comp_ctx)) {
 		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
 			ena_trc_dbg("Failed to submit command [%ld]\n",
 				    PTR_ERR(comp_ctx));
@@ -1389,12 +1481,12 @@ void ena_com_abort_admin_commands(struct ena_com_dev *
 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-	unsigned long flags;
+	unsigned long flags = 0;
 
 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
-		ENA_MSLEEP(20);
+		ENA_MSLEEP(ENA_POLL_MS);
 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	}
 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -1433,7 +1525,7 @@ bool ena_com_get_admin_running_state(struct ena_com_de
 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-	unsigned long flags;
+	unsigned long flags = 0;
 
 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	ena_dev->admin_queue.running_state = state;
@@ -1460,14 +1552,14 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_de
 	struct ena_admin_get_feat_resp get_resp;
 	int ret;
 
-	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
 	if (ret) {
 		ena_trc_info("Can't get aenq configuration\n");
 		return ret;
 	}
 
 	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
-		ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+		ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
 			     get_resp.u.aenq.supported_groups,
 			     groups_flag);
 		return ENA_COM_UNSUPPORTED;
@@ -1542,11 +1634,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_d
 		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
 		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
 
-	if (ver < MIN_ENA_VER) {
-		ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
-		return -1;
-	}
-
 	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
 		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
@@ -1579,9 +1666,6 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev
 	u16 size;
 
 	ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
-
-	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
-
 	if (admin_queue->comp_ctx)
 		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
 	admin_queue->comp_ctx = NULL;
@@ -1602,6 +1686,7 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
 				      aenq->dma_addr, aenq->mem_handle);
 	aenq->entries = NULL;
+	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
 }
 
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
@@ -1611,7 +1696,8 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev
 	if (polling)
 		mask_value = ENA_REGS_ADMIN_INTR_MASK;
 
-	ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
+	ENA_REG_WRITE32(ena_dev->bus, mask_value,
+			ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
 	ena_dev->admin_queue.polling = polling;
 }
 
@@ -1626,7 +1712,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_
 			       mmio_read->read_resp_dma_addr,
 			       mmio_read->read_resp_mem_handle);
 	if (unlikely(!mmio_read->read_resp))
-		return ENA_COM_NO_MEM;
+		goto err;
 
 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
 
@@ -1635,6 +1721,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_
 	mmio_read->readless_supported = true;
 
 	return 0;
+
+err:
+		ENA_SPINLOCK_DESTROY(mmio_read->lock);
+		return ENA_COM_NO_MEM;
 }
 
 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
@@ -1658,7 +1748,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_
 			      mmio_read->read_resp_mem_handle);
 
 	mmio_read->read_resp = NULL;
-
 	ENA_SPINLOCK_DESTROY(mmio_read->lock);
 }
 
@@ -1675,17 +1764,12 @@ void ena_com_mmio_reg_read_request_write_dev_addr(stru
 }
 
 int ena_com_admin_init(struct ena_com_dev *ena_dev,
-		       struct ena_aenq_handlers *aenq_handlers,
-		       bool init_spinlock)
+		       struct ena_aenq_handlers *aenq_handlers)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
 	int ret;
 
-#ifdef ENA_INTERNAL
-	ena_trc_info("ena_defs : Version:[%s] Build date [%s]",
-		     ENA_GEN_COMMIT, ENA_GEN_DATE);
-#endif
 	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 
 	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
@@ -1707,8 +1791,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
 
 	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
 
-	if (init_spinlock)
-		ENA_SPINLOCK_INIT(admin_queue->q_lock);
+	ENA_SPINLOCK_INIT(admin_queue->q_lock);
 
 	ret = ena_com_init_comp_ctxt(admin_queue);
 	if (ret)
@@ -1848,9 +1931,65 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_
 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
 			    struct ena_admin_get_feat_resp *resp)
 {
-	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
+	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
 }
 
+int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
+{
+	struct ena_admin_get_feat_resp resp;
+	struct ena_extra_properties_strings *extra_properties_strings =
+			&ena_dev->extra_properties_strings;
+	u32 rc;
+	extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
+		ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;
+
+	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+			       extra_properties_strings->size,
+			       extra_properties_strings->virt_addr,
+			       extra_properties_strings->dma_addr,
+			       extra_properties_strings->dma_handle);
+	if (unlikely(!extra_properties_strings->virt_addr)) {
+		ena_trc_err("Failed to allocate extra properties strings\n");
+		return 0;
+	}
+
+	rc = ena_com_get_feature_ex(ena_dev, &resp,
+				    ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
+				    extra_properties_strings->dma_addr,
+				    extra_properties_strings->size, 0);
+	if (rc) {
+		ena_trc_dbg("Failed to get extra properties strings\n");
+		goto err;
+	}
+
+	return resp.u.extra_properties_strings.count;
+err:
+	ena_com_delete_extra_properties_strings(ena_dev);
+	return 0;
+}
+
+void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
+{
+	struct ena_extra_properties_strings *extra_properties_strings =
+				&ena_dev->extra_properties_strings;
+
+	if (extra_properties_strings->virt_addr) {
+		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+				      extra_properties_strings->size,
+				      extra_properties_strings->virt_addr,
+				      extra_properties_strings->dma_addr,
+				      extra_properties_strings->dma_handle);
+		extra_properties_strings->virt_addr = NULL;
+	}
+}
+
+int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
+				       struct ena_admin_get_feat_resp *resp)
+{
+	return ena_com_get_feature(ena_dev, resp,
+				   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
+}
+
 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
@@ -1858,7 +1997,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
 	int rc;
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_DEVICE_ATTRIBUTES);
+				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
 	if (rc)
 		return rc;
 
@@ -1866,17 +2005,34 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
 	       sizeof(get_resp.u.dev_attr));
 	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
 
-	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_MAX_QUEUES_NUM);
-	if (rc)
-		return rc;
+	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+		rc = ena_com_get_feature(ena_dev, &get_resp,
+					 ENA_ADMIN_MAX_QUEUES_EXT,
+					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
+		if (rc)
+			return rc;
 
-	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
-	       sizeof(get_resp.u.max_queue));
-	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
+		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
+			return -EINVAL;
 
+		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
+		       sizeof(get_resp.u.max_queue_ext));
+		ena_dev->tx_max_header_size =
+			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
+	} else {
+		rc = ena_com_get_feature(ena_dev, &get_resp,
+					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
+		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
+		       sizeof(get_resp.u.max_queue));
+		ena_dev->tx_max_header_size =
+			get_resp.u.max_queue.max_header_size;
+
+		if (rc)
+			return rc;
+	}
+
 	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_AENQ_CONFIG);
+				 ENA_ADMIN_AENQ_CONFIG, 0);
 	if (rc)
 		return rc;
 
@@ -1884,7 +2040,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
 	       sizeof(get_resp.u.aenq));
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
 	if (rc)
 		return rc;
 
@@ -1894,7 +2050,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
 	/* Driver hints isn't mandatory admin command. So in case the
 	 * command isn't supported set driver hints to 0
 	 */
-	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
 
 	if (!rc)
 		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
@@ -1904,7 +2060,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
 	else
 		return rc;
 
-	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
 	if (!rc)
 		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
 		       sizeof(get_resp.u.llq));
@@ -1913,6 +2069,17 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
 	else
 		return rc;
 
+	rc = ena_com_get_feature(ena_dev, &get_resp,
+				 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
+	if (!rc)
+		memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
+		       sizeof(get_resp.u.ind_table));
+	else if (rc == ENA_COM_UNSUPPORTED)
+		memset(&get_feat_ctx->ind_table, 0x0,
+		       sizeof(get_feat_ctx->ind_table));
+	else
+		return rc;
+
 	return 0;
 }
 
@@ -1944,8 +2111,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev
 	struct ena_admin_aenq_entry *aenq_e;
 	struct ena_admin_aenq_common_desc *aenq_common;
 	struct ena_com_aenq *aenq  = &dev->aenq;
-	ena_aenq_handler handler_cb;
 	unsigned long long timestamp;
+	ena_aenq_handler handler_cb;
 	u16 masked_head, processed = 0;
 	u8 phase;
 
@@ -1955,8 +2122,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev
 	aenq_common = &aenq_e->aenq_common_desc;
 
 	/* Go over all the events */
-	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
-		phase) {
+	while ((READ_ONCE8(aenq_common->flags) &
+		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+		/* Make sure the phase bit (ownership) is as expected before
+		 * reading the rest of the descriptor.
+		 */
+		dma_rmb();
+
 		timestamp = (unsigned long long)aenq_common->timestamp_low |
 			((unsigned long long)aenq_common->timestamp_high << 32);
 		ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
@@ -1990,7 +2162,9 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev
 
 	/* write the aenq doorbell after all AENQ descriptors were read */
 	mb();
-	ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+	ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
+				dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+	mmiowb();
 }
 #ifdef ENA_EXTENDED_STATS
 /*
@@ -2141,7 +2315,7 @@ int ena_com_get_dev_extended_stats(struct ena_com_dev 
 				   phys_addr);
 	if (unlikely(ret)) {
 		ena_trc_err("memory address set failed\n");
-		return ret;
+		goto free_ext_stats_mem;
 	}
 	get_cmd->u.control_buffer.length = len;
 
@@ -2202,7 +2376,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *e
 	struct ena_admin_get_feat_resp resp;
 
 	ret = ena_com_get_feature(ena_dev, &resp,
-				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
 	if (unlikely(ret)) {
 		ena_trc_err("Failed to get offload capabilities %d\n", ret);
 		return ret;
@@ -2231,11 +2405,11 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_
 
 	/* Validate hash function is supported */
 	ret = ena_com_get_feature(ena_dev, &get_resp,
-				  ENA_ADMIN_RSS_HASH_FUNCTION);
+				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
 	if (unlikely(ret))
 		return ret;
 
-	if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
+	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
 		ena_trc_err("Func hash %d isn't supported by device, abort\n",
 			    rss->hash_func);
 		return ENA_COM_UNSUPPORTED;
@@ -2291,7 +2465,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_HASH_FUNCTION,
 				    rss->hash_key_dma_addr,
-				    sizeof(*rss->hash_key));
+				    sizeof(*rss->hash_key), 0);
 	if (unlikely(rc))
 		return rc;
 
@@ -2320,6 +2494,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena
 		return ENA_COM_INVAL;
 	}
 
+	rss->hash_func = func;
 	rc = ena_com_set_hash_function(ena_dev);
 
 	/* Restore the old function */
@@ -2342,7 +2517,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_HASH_FUNCTION,
 				    rss->hash_key_dma_addr,
-				    sizeof(*rss->hash_key));
+				    sizeof(*rss->hash_key), 0);
 	if (unlikely(rc))
 		return rc;
 
@@ -2367,7 +2542,7 @@ int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_HASH_INPUT,
 				    rss->hash_ctrl_dma_addr,
-				    sizeof(*rss->hash_ctrl));
+				    sizeof(*rss->hash_ctrl), 0);
 	if (unlikely(rc))
 		return rc;
 
@@ -2603,7 +2778,7 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
 				    rss->rss_ind_tbl_dma_addr,
-				    tbl_size);
+				    tbl_size, 0);
 	if (unlikely(rc))
 		return rc;
 
@@ -2670,6 +2845,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena
 	if (unlikely(!host_attr->host_info))
 		return ENA_COM_NO_MEM;
 
+	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
+		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
+		(ENA_COMMON_SPEC_VERSION_MINOR));
+
 	return 0;
 }
 
@@ -2822,7 +3001,7 @@ int ena_com_init_interrupt_moderation(struct ena_com_d
 	int rc;
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_INTERRUPT_MODERATION);
+				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
 
 	if (rc) {
 		if (rc == ENA_COM_UNSUPPORTED) {
@@ -2950,17 +3129,18 @@ void ena_com_get_intr_moderation_entry(struct ena_com_
 }
 
 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
-			    struct ena_admin_feature_llq_desc *llq)
+			    struct ena_admin_feature_llq_desc *llq_features,
+			    struct ena_llq_configurations *llq_default_cfg)
 {
 	int rc;
 	int size;
 
-	if (llq->max_llq_num == 0) {
+	if (!llq_features->max_llq_num) {
 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 		return 0;
 	}
 
-	rc = ena_com_config_llq_info(ena_dev, llq);
+	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
 	if (rc)
 		return rc;

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***