Date:      Thu, 2 Feb 2017 00:20:53 +0000 (UTC)
From:      David C Somayajulu <davidcs@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r313072 - stable/10/sys/dev/qlxgbe
Message-ID:  <201702020020.v120KrDY068063@repo.freebsd.org>

Author: davidcs
Date: Thu Feb  2 00:20:53 2017
New Revision: 313072
URL: https://svnweb.freebsd.org/changeset/base/313072

Log:
  MFC r312728
      Added support for if_transmit and if_qflush
      Removed if_start
      Updated version to 3.10.33

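For readers skimming the MFC: the change replaces the single if_start transmit
queue with one buf_ring per ring, each drained by its own fast taskqueue. The
sketch below illustrates that if_transmit pattern; it is simplified from the
qla_transmit()/qla_fp_taskqueue() hunks further down, the xx_* names are
placeholders rather than actual driver symbols, and locking, error handling,
and the iSCSI TLV ring selection are omitted.

	#include <sys/param.h>
	#include <sys/mbuf.h>
	#include <sys/buf_ring.h>
	#include <sys/taskqueue.h>
	#include <net/if.h>
	#include <net/if_var.h>

	/* Hypothetical, cut-down per-ring transmit fastpath and softc. */
	struct xx_tx_fp {
		struct buf_ring		*tx_br;		/* per-ring transmit queue */
		struct task		fp_task;	/* drain task */
		struct taskqueue	*fp_taskqueue;	/* per-ring taskqueue */
	};

	struct xx_softc {
		int			num_tx_queues;
		struct xx_tx_fp		tx_fp[16];
	};

	static int
	xx_transmit(struct ifnet *ifp, struct mbuf *mp)
	{
		struct xx_softc *sc = ifp->if_softc;
		struct xx_tx_fp *fp;
		int qid = 0;
		int ret;

		/* Spread flows across rings using the RSS hash, if present. */
		if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
			qid = mp->m_pkthdr.flowid % sc->num_tx_queues;
		fp = &sc->tx_fp[qid];

		/* Queue the mbuf on this ring's buf_ring ... */
		ret = drbr_enqueue(ifp, fp->tx_br, mp);

		/*
		 * ... and kick the ring's taskqueue, whose handler dequeues
		 * with drbr_peek()/drbr_advance() and hands frames to the
		 * hardware (see qla_fp_taskqueue() in the diff below).
		 */
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);

		return (ret);
	}

if_qflush is the complement: it walks every fastpath and m_freem()s any mbufs
still queued on the buf_rings, as qla_qflush() does below.
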
Modified:
  stable/10/sys/dev/qlxgbe/ql_def.h
  stable/10/sys/dev/qlxgbe/ql_glbl.h
  stable/10/sys/dev/qlxgbe/ql_hw.c
  stable/10/sys/dev/qlxgbe/ql_hw.h
  stable/10/sys/dev/qlxgbe/ql_isr.c
  stable/10/sys/dev/qlxgbe/ql_os.c
  stable/10/sys/dev/qlxgbe/ql_os.h
  stable/10/sys/dev/qlxgbe/ql_ver.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/dev/qlxgbe/ql_def.h
==============================================================================
--- stable/10/sys/dev/qlxgbe/ql_def.h	Thu Feb  2 00:20:18 2017	(r313071)
+++ stable/10/sys/dev/qlxgbe/ql_def.h	Thu Feb  2 00:20:53 2017	(r313072)
@@ -112,6 +112,16 @@ typedef struct _qla_tx_ring {
 	uint64_t	count;
 } qla_tx_ring_t;
 
+typedef struct _qla_tx_fp {
+	struct mtx		tx_mtx;
+	char			tx_mtx_name[32];
+	struct buf_ring		*tx_br;
+	struct task		fp_task;
+	struct taskqueue	*fp_taskqueue;
+	void			*ha;
+	uint32_t		txr_idx;
+} qla_tx_fp_t;
+
 /*
  * Adapter structure contains the hardware independant information of the
  * pci function.
@@ -178,10 +188,9 @@ struct qla_host {
 	qla_tx_ring_t		tx_ring[NUM_TX_RINGS];
 						
 	bus_dma_tag_t		tx_tag;
-	struct task		tx_task;
-	struct taskqueue	*tx_tq;
 	struct callout		tx_callout;
-	struct mtx		tx_lock;
+
+	qla_tx_fp_t		tx_fp[MAX_SDS_RINGS];
 
 	qla_rx_ring_t		rx_ring[MAX_RDS_RINGS];
 	bus_dma_tag_t		rx_tag;

Modified: stable/10/sys/dev/qlxgbe/ql_glbl.h
==============================================================================
--- stable/10/sys/dev/qlxgbe/ql_glbl.h	Thu Feb  2 00:20:18 2017	(r313071)
+++ stable/10/sys/dev/qlxgbe/ql_glbl.h	Thu Feb  2 00:20:53 2017	(r313072)
@@ -39,6 +39,7 @@
  */
 extern void ql_mbx_isr(void *arg);
 extern void ql_isr(void *arg);
+extern uint32_t ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count);
 
 /*
  * from ql_os.c
@@ -66,7 +67,7 @@ extern void qla_reset_promisc(qla_host_t
 extern int ql_set_allmulti(qla_host_t *ha);
 extern void qla_reset_allmulti(qla_host_t *ha);
 extern void ql_update_link_state(qla_host_t *ha);
-extern void ql_hw_tx_done(qla_host_t *ha);
+extern void ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
 extern int ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id);
 extern void ql_hw_stop_rcv(qla_host_t *ha);
 extern void ql_get_stats(qla_host_t *ha);
@@ -76,7 +77,7 @@ extern void qla_hw_async_event(qla_host_
 extern int qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
 		uint32_t *num_rcvq);
 
-extern int qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp);
+extern int ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp);
 
 extern void ql_minidump(qla_host_t *ha);
 extern int ql_minidump_init(qla_host_t *ha);

Modified: stable/10/sys/dev/qlxgbe/ql_hw.c
==============================================================================
--- stable/10/sys/dev/qlxgbe/ql_hw.c	Thu Feb  2 00:20:18 2017	(r313071)
+++ stable/10/sys/dev/qlxgbe/ql_hw.c	Thu Feb  2 00:20:53 2017	(r313072)
@@ -51,7 +51,6 @@ static void qla_del_rcv_cntxt(qla_host_t
 static int qla_init_rcv_cntxt(qla_host_t *ha);
 static void qla_del_xmt_cntxt(qla_host_t *ha);
 static int qla_init_xmt_cntxt(qla_host_t *ha);
-static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
 	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
@@ -2047,7 +2046,7 @@ ql_hw_send(qla_host_t *ha, bus_dma_segme
 		ha->hw.iscsi_pkt_count++;
 
 	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
-		qla_hw_tx_done_locked(ha, txr_idx);
+		ql_hw_tx_done_locked(ha, txr_idx);
 		if (hw->tx_cntxt[txr_idx].txr_free <=
 				(num_tx_cmds + QLA_TX_MIN_FREE)) {
         		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
@@ -2552,15 +2551,8 @@ qla_init_rcv_cntxt(qla_host_t *ha)
 			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
 		rcntxt->sds[i].size =
 			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
-		if (ha->msix_count == 2) {
-			rcntxt->sds[i].intr_id =
-				qla_host_to_le16(hw->intr_id[0]);
-			rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
-		} else {
-			rcntxt->sds[i].intr_id =
-				qla_host_to_le16(hw->intr_id[i]);
-			rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
-		}
+		rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
+		rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
 	}
 
 	for (i = 0; i <  rcntxt_rds_rings; i++) {
@@ -2672,17 +2664,11 @@ qla_add_rcv_rings(qla_host_t *ha, uint32
                 add_rcv->sds[i].size =
                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
 
-                if (ha->msix_count == 2) {
-                        add_rcv->sds[i].intr_id =
-                                qla_host_to_le16(hw->intr_id[0]);
-                        add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
-                } else {
-                        add_rcv->sds[i].intr_id =
-                                qla_host_to_le16(hw->intr_id[j]);
-                        add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
-                }
+                add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
+                add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
 
         }
+
         for (i = 0; (i <  nsds); i++) {
                 j = i + sds_idx;
 
@@ -2803,6 +2789,7 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uin
 	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
 	uint32_t		err;
 	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
+	uint32_t		intr_idx;
 
 	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
 
@@ -2818,6 +2805,8 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uin
 	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
 
+	intr_idx = txr_idx;
+
 #ifdef QL_ENABLE_ISCSI_TLV
 
 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
@@ -2827,8 +2816,9 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uin
 		tcntxt->traffic_class = 1;
 	}
 
-#else
+	intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
 
+#else
 	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
 
 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
@@ -2841,10 +2831,9 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uin
 		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
 	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
 
-	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
+	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
 	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
 
-
 	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
 	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
 
@@ -3166,11 +3155,11 @@ ql_hw_set_multi(qla_host_t *ha, uint8_t 
 }
 
 /*
- * Name: qla_hw_tx_done_locked
+ * Name: ql_hw_tx_done_locked
  * Function: Handle Transmit Completions
  */
-static void
-qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
+void
+ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
 {
 	qla_tx_buf_t *txb;
         qla_hw_t *hw = &ha->hw;
@@ -3208,34 +3197,6 @@ qla_hw_tx_done_locked(qla_host_t *ha, ui
 	return;
 }
 
-/*
- * Name: ql_hw_tx_done
- * Function: Handle Transmit Completions
- */
-void
-ql_hw_tx_done(qla_host_t *ha)
-{
-	int i;
-	uint32_t flag = 0;
-
-	if (!mtx_trylock(&ha->tx_lock)) {
-       		QL_DPRINT8(ha, (ha->pci_dev,
-			"%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
-		return;
-	}
-	for (i = 0; i < ha->hw.num_tx_rings; i++) {
-		qla_hw_tx_done_locked(ha, i);
-		if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
-			flag = 1;
-	}
-
-	if (!flag)
-		ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
-	QLA_TX_UNLOCK(ha);
-	return;
-}
-
 void
 ql_update_link_state(qla_host_t *ha)
 {
@@ -3655,7 +3616,7 @@ qla_get_port_config(qla_host_t *ha, uint
 }
 
 int
-qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
+ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
 {
         struct ether_vlan_header        *eh;
         uint16_t                        etype;

Modified: stable/10/sys/dev/qlxgbe/ql_hw.h
==============================================================================
--- stable/10/sys/dev/qlxgbe/ql_hw.h	Thu Feb  2 00:20:18 2017	(r313071)
+++ stable/10/sys/dev/qlxgbe/ql_hw.h	Thu Feb  2 00:20:53 2017	(r313072)
@@ -1543,7 +1543,6 @@ typedef struct _qla_hw_tx_cntxt {
 
 	uint32_t        tx_prod_reg;
 	uint16_t	tx_cntxt_id;
-	uint8_t		frame_hdr[QL_FRAME_HDR_SIZE];
 
 } qla_hw_tx_cntxt_t;
 

Modified: stable/10/sys/dev/qlxgbe/ql_isr.c
==============================================================================
--- stable/10/sys/dev/qlxgbe/ql_isr.c	Thu Feb  2 00:20:18 2017	(r313071)
+++ stable/10/sys/dev/qlxgbe/ql_isr.c	Thu Feb  2 00:20:53 2017	(r313072)
@@ -159,7 +159,12 @@ qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_
 	ifp->if_ipackets++;
 
 	mpf->m_pkthdr.flowid = sgc->rss_hash;
-	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
+
+#if __FreeBSD_version >= 1100000
+	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
+#else
+	M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
+#endif /* #if __FreeBSD_version >= 1100000 */
 
 	(*ifp->if_input)(ifp, mpf);
 
@@ -450,11 +455,11 @@ qla_rcv_cont_sds(qla_host_t *ha, uint32_
 }
 
 /*
- * Name: qla_rcv_isr
+ * Name: ql_rcv_isr
  * Function: Main Interrupt Service Routine
  */
-static uint32_t
-qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
+uint32_t
+ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
 {
 	device_t dev;
 	qla_hw_t *hw;
@@ -704,7 +709,7 @@ qla_rcv_isr(qla_host_t *ha, uint32_t sds
 	}
 
 	if (ha->flags.stop_rcv)
-		goto qla_rcv_isr_exit;
+		goto ql_rcv_isr_exit;
 
 	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
 		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
@@ -727,7 +732,7 @@ qla_rcv_isr(qla_host_t *ha, uint32_t sds
 	if (opcode)
 		ret = -1;
 
-qla_rcv_isr_exit:
+ql_rcv_isr_exit:
 	hw->sds[sds_idx].rcv_active = 0;
 
 	return (ret);
@@ -931,7 +936,7 @@ ql_isr(void *arg)
 	int idx;
 	qla_hw_t *hw;
 	struct ifnet *ifp;
-	uint32_t ret = 0;
+	qla_tx_fp_t *fp;
 
 	ha = ivec->ha;
 	hw = &ha->hw;
@@ -940,17 +945,12 @@ ql_isr(void *arg)
 	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
 		return;
 
-	if (idx == 0)
-		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
-	
-	ret = qla_rcv_isr(ha, idx, -1);
 
-	if (idx == 0)
-		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+	fp = &ha->tx_fp[idx];
+
+	if (fp->fp_taskqueue != NULL)
+		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
 
-	if (!ha->flags.stop_rcv) {
-		QL_ENABLE_INTERRUPTS(ha, idx);
-	}
 	return;
 }
 

Modified: stable/10/sys/dev/qlxgbe/ql_os.c
==============================================================================
--- stable/10/sys/dev/qlxgbe/ql_os.c	Thu Feb  2 00:20:18 2017	(r313071)
+++ stable/10/sys/dev/qlxgbe/ql_os.c	Thu Feb  2 00:20:53 2017	(r313072)
@@ -76,11 +76,11 @@ static void qla_release(qla_host_t *ha);
 static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
 		int error);
 static void qla_stop(qla_host_t *ha);
-static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
-static void qla_tx_done(void *context, int pending);
 static void qla_get_peer(qla_host_t *ha);
 static void qla_error_recovery(void *context, int pending);
 static void qla_async_event(void *context, int pending);
+static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
+		uint32_t iscsi_pdu);
 
 /*
  * Hooks to the Operating Systems
@@ -93,7 +93,14 @@ static void qla_init(void *arg);
 static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
 static int qla_media_change(struct ifnet *ifp);
 static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
-static void qla_start(struct ifnet *ifp);
+
+static int qla_transmit(struct ifnet *ifp, struct mbuf  *mp);
+static void qla_qflush(struct ifnet *ifp);
+static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
+static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
+static int qla_create_fp_taskqueues(qla_host_t *ha);
+static void qla_destroy_fp_taskqueues(qla_host_t *ha);
+static void qla_drain_fp_taskqueues(qla_host_t *ha);
 
 static device_method_t qla_pci_methods[] = {
 	/* Device interface */
@@ -225,7 +232,6 @@ qla_watchdog(void *arg)
 	qla_hw_t *hw;
 	struct ifnet *ifp;
 	uint32_t i;
-	qla_hw_tx_cntxt_t *hw_tx_cntxt;
 
 	hw = &ha->hw;
 	ifp = ha->ifp;
@@ -254,19 +260,14 @@ qla_watchdog(void *arg)
                                         &ha->async_event_task);
                         }
 
-			for (i = 0; i < ha->hw.num_tx_rings; i++) {
-				hw_tx_cntxt = &hw->tx_cntxt[i];
-				if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) !=
-					hw_tx_cntxt->txr_comp) {
-					taskqueue_enqueue(ha->tx_tq,
-						&ha->tx_task);
-					break;
-				}
-			}
+			for (i = 0; i < ha->hw.num_sds_rings; i++) {
+				qla_tx_fp_t *fp = &ha->tx_fp[i];
 
-			if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
-				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+				if (fp->fp_taskqueue != NULL)
+					taskqueue_enqueue(fp->fp_taskqueue,
+						&fp->fp_task);
 			}
+
 			ha->qla_watchdog_paused = 0;
 		} else {
 			ha->qla_watchdog_paused = 0;
@@ -322,9 +323,7 @@ qla_pci_attach(device_t dev)
 	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
 					ha->reg_rid);
 
-	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_SPIN);
-
-	mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
+	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
 
 	qla_add_sysctls(ha);
 	ql_hw_add_sysctls(ha);
@@ -344,8 +343,9 @@ qla_pci_attach(device_t dev)
 	}
 
 	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
-		" msix_count 0x%x pci_reg %p\n", __func__, ha,
-		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));
+		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
+		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
+		ha->pci_reg1));
 
         /* initialize hardware */
         if (ql_init_hw(ha)) {
@@ -366,14 +366,15 @@ qla_pci_attach(device_t dev)
                 goto qla_pci_attach_err;
         }
         device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
-                " msix_count 0x%x pci_reg %p num_rcvq = %d\n", __func__, ha,
-                ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, num_rcvq);
+                " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
+		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
+		ha->pci_reg, ha->pci_reg1, num_rcvq);
 
 
 #ifdef QL_ENABLE_ISCSI_TLV
         if ((ha->msix_count  < 64) || (num_rcvq != 32)) {
                 ha->hw.num_sds_rings = 15;
-                ha->hw.num_tx_rings = 32;
+                ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
         }
 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
 	ha->hw.num_rds_rings = ha->hw.num_sds_rings;
@@ -421,8 +422,20 @@ qla_pci_attach(device_t dev)
 			device_printf(dev, "could not setup interrupt\n");
 			goto qla_pci_attach_err;
 		}
+
+		ha->tx_fp[i].ha = ha;
+		ha->tx_fp[i].txr_idx = i;
+
+		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
+			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
+				__func__, i);
+			goto qla_pci_attach_err;
+		}
 	}
 
+	if (qla_create_fp_taskqueues(ha) != 0)
+		goto qla_pci_attach_err;
+
 	printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
 		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);
 
@@ -452,13 +465,6 @@ qla_pci_attach(device_t dev)
 	ha->flags.qla_watchdog_active = 1;
 	ha->flags.qla_watchdog_pause = 0;
 
-
-	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
-	ha->tx_tq = taskqueue_create("qla_txq", M_NOWAIT,
-			taskqueue_thread_enqueue, &ha->tx_tq);
-	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
-		device_get_nameunit(ha->pci_dev));
-	
 	callout_init(&ha->tx_callout, TRUE);
 	ha->flags.qla_callout_init = 1;
 
@@ -584,11 +590,6 @@ qla_release(qla_host_t *ha)
 		taskqueue_free(ha->err_tq);
 	}
 
-	if (ha->tx_tq) {
-		taskqueue_drain(ha->tx_tq, &ha->tx_task);
-		taskqueue_free(ha->tx_tq);
-	}
-
 	ql_del_cdev(ha);
 
 	if (ha->flags.qla_watchdog_active) {
@@ -626,13 +627,15 @@ qla_release(qla_host_t *ha)
 				ha->irq_vec[i].irq_rid,
 				ha->irq_vec[i].irq);
 		}
+
+		qla_free_tx_br(ha, &ha->tx_fp[i]);
 	}
+	qla_destroy_fp_taskqueues(ha);
 
 	if (ha->msix_count)
 		pci_release_msi(dev);
 
 	if (ha->flags.lock_init) {
-		mtx_destroy(&ha->tx_lock);
 		mtx_destroy(&ha->hw_lock);
 	}
 
@@ -812,7 +815,9 @@ qla_init_ifnet(device_t dev, qla_host_t 
 	ifp->if_softc = ha;
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 	ifp->if_ioctl = qla_ioctl;
-	ifp->if_start = qla_start;
+
+	ifp->if_transmit = qla_transmit;
+	ifp->if_qflush = qla_qflush;
 
 	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
 	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
@@ -822,12 +827,13 @@ qla_init_ifnet(device_t dev, qla_host_t 
 
 	ether_ifattach(ifp, qla_get_mac_addr(ha));
 
-	ifp->if_capabilities = IFCAP_HWCSUM |
+	ifp->if_capabilities |= IFCAP_HWCSUM |
 				IFCAP_TSO4 |
-				IFCAP_JUMBO_MTU;
-
-	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
-	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
+				IFCAP_JUMBO_MTU |
+				IFCAP_VLAN_HWTAGGING |
+				IFCAP_VLAN_MTU |
+				IFCAP_VLAN_HWTSO |
+				IFCAP_LRO;
 
 	ifp->if_capenable = ifp->if_capabilities;
 
@@ -922,10 +928,13 @@ qla_set_multi(qla_host_t *ha, uint32_t a
 
 	if_maddr_runlock(ifp);
 
-	if (QLA_LOCK(ha, __func__, 1) == 0) {
-		ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
-		QLA_UNLOCK(ha, __func__);
-	}
+	//if (QLA_LOCK(ha, __func__, 1) == 0) {
+	//	ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
+	//	QLA_UNLOCK(ha, __func__);
+	//}
+	QLA_LOCK(ha, __func__, 1);
+	ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
+	QLA_UNLOCK(ha, __func__);
 
 	return (ret);
 }
@@ -1130,64 +1139,10 @@ qla_media_status(struct ifnet *ifp, stru
 	return;
 }
 
-static void
-qla_start(struct ifnet *ifp)
-{
-	struct mbuf    *m_head;
-	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
-
-	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
-
-	if (!mtx_trylock(&ha->tx_lock)) {
-		QL_DPRINT8(ha, (ha->pci_dev,
-			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
-		return;
-	}
-
-	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 
-		IFF_DRV_RUNNING) {
-		QL_DPRINT8(ha,
-			(ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
-		QLA_TX_UNLOCK(ha);
-		return;
-	}
-
-	if (!ha->hw.link_up || !ha->watchdog_ticks)
-		ql_update_link_state(ha);
-
-	if (!ha->hw.link_up) {
-		QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__));
-		QLA_TX_UNLOCK(ha);
-		return;
-	}
-
-	while (ifp->if_snd.ifq_head != NULL) {
-		IF_DEQUEUE(&ifp->if_snd, m_head);
-
-		if (m_head == NULL) {
-			QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n",
-				__func__));
-			break;
-		}
-
-		if (qla_send(ha, &m_head)) {
-			if (m_head == NULL)
-				break;
-			QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__));
-			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-			IF_PREPEND(&ifp->if_snd, m_head);
-			break;
-		}
-		/* Send a copy of the frame to the BPF listener */
-		ETHER_BPF_MTAP(ifp, m_head);
-	}
-	QLA_TX_UNLOCK(ha);
-	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
-	return;
-}
 
 static int
-qla_send(qla_host_t *ha, struct mbuf **m_headp)
+qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
+	uint32_t iscsi_pdu)
 {
 	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
 	bus_dmamap_t		map;
@@ -1195,29 +1150,9 @@ qla_send(qla_host_t *ha, struct mbuf **m
 	int			ret = -1;
 	uint32_t		tx_idx;
 	struct mbuf		*m_head = *m_headp;
-	uint32_t		txr_idx = ha->txr_idx;
-	uint32_t		iscsi_pdu = 0;
 
 	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
 
-	/* check if flowid is set */
-
-	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) {
-#ifdef QL_ENABLE_ISCSI_TLV
-		if (qla_iscsi_pdu(ha, m_head) == 0) {
-			iscsi_pdu = 1;
-			txr_idx = m_head->m_pkthdr.flowid &
-					((ha->hw.num_tx_rings >> 1) - 1);
-		} else {
-			txr_idx = m_head->m_pkthdr.flowid &
-					(ha->hw.num_tx_rings - 1);
-		}
-#else
-		txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);
-#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
-	}
-
-
 	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
 	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
 
@@ -1295,16 +1230,302 @@ qla_send(qla_host_t *ha, struct mbuf **m
 	return (ret);
 }
 
+static int
+qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
+{
+        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
+                "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);
+
+        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
+
+        fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
+                                   M_NOWAIT, &fp->tx_mtx);
+        if (fp->tx_br == NULL) {
+            QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
+                " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
+            return (-ENOMEM);
+        }
+        return 0;
+}
+
+static void
+qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
+{
+        struct mbuf *mp;
+        struct ifnet *ifp = ha->ifp;
+
+        if (mtx_initialized(&fp->tx_mtx)) {
+
+                if (fp->tx_br != NULL) {
+
+                        mtx_lock(&fp->tx_mtx);
+
+                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
+                                m_freem(mp);
+                        }
+
+                        mtx_unlock(&fp->tx_mtx);
+
+                        buf_ring_free(fp->tx_br, M_DEVBUF);
+                        fp->tx_br = NULL;
+                }
+                mtx_destroy(&fp->tx_mtx);
+        }
+        return;
+}
+
+static void
+qla_fp_taskqueue(void *context, int pending)
+{
+        qla_tx_fp_t *fp;
+        qla_host_t *ha;
+        struct ifnet *ifp;
+        struct mbuf  *mp;
+        int ret;
+	uint32_t txr_idx;
+	uint32_t iscsi_pdu = 0;
+	uint32_t rx_pkts_left;
+
+        fp = context;
+
+        if (fp == NULL)
+                return;
+
+        ha = (qla_host_t *)fp->ha;
+
+        ifp = ha->ifp;
+
+	txr_idx = fp->txr_idx;
+
+        mtx_lock(&fp->tx_mtx);
+
+        if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+                IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
+                mtx_unlock(&fp->tx_mtx);
+                goto qla_fp_taskqueue_exit;
+        }
+
+	rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);
+
+#ifdef QL_ENABLE_ISCSI_TLV
+	ql_hw_tx_done_locked(ha, fp->txr_idx);
+	ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
+	txr_idx = txr_idx + (ha->hw.num_tx_rings >> 1);
+#else
+	ql_hw_tx_done_locked(ha, fp->txr_idx);
+#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
+
+        mp = drbr_peek(ifp, fp->tx_br);
+
+        while (mp != NULL) {
+
+		if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
+#ifdef QL_ENABLE_ISCSI_TLV
+			if (ql_iscsi_pdu(ha, mp) == 0) {
+				iscsi_pdu = 1;
+			}
+#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
+		}
+
+		ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);
+
+                if (ret) {
+                        if (mp != NULL)
+                                drbr_putback(ifp, fp->tx_br, mp);
+                        else {
+                                drbr_advance(ifp, fp->tx_br);
+                        }
+
+                        mtx_unlock(&fp->tx_mtx);
+
+                        goto qla_fp_taskqueue_exit0;
+                } else {
+                        drbr_advance(ifp, fp->tx_br);
+                }
+
+                mp = drbr_peek(ifp, fp->tx_br);
+        }
+
+        mtx_unlock(&fp->tx_mtx);
+
+qla_fp_taskqueue_exit0:
+
+	if (rx_pkts_left || ((mp != NULL) && ret)) {
+		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
+	} else {
+		if (!ha->flags.stop_rcv) {
+			QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
+		}
+	}
+
+qla_fp_taskqueue_exit:
+
+        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
+        return;
+}
+
+static int
+qla_create_fp_taskqueues(qla_host_t *ha)
+{
+        int     i;
+        uint8_t tq_name[32];
+
+        for (i = 0; i < ha->hw.num_sds_rings; i++) {
+
+                qla_tx_fp_t *fp = &ha->tx_fp[i];
+
+                bzero(tq_name, sizeof (tq_name));
+                snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
+
+                TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
+
+                fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
+                                        taskqueue_thread_enqueue,
+                                        &fp->fp_taskqueue);
+
+                if (fp->fp_taskqueue == NULL)
+                        return (-1);
+
+                taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
+                        tq_name);
+
+                QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
+                        fp->fp_taskqueue));
+        }
+
+        return (0);
+}
+
+static void
+qla_destroy_fp_taskqueues(qla_host_t *ha)
+{
+        int     i;
+
+        for (i = 0; i < ha->hw.num_sds_rings; i++) {
+
+                qla_tx_fp_t *fp = &ha->tx_fp[i];
+
+                if (fp->fp_taskqueue != NULL) {
+                        taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
+                        taskqueue_free(fp->fp_taskqueue);
+                        fp->fp_taskqueue = NULL;
+                }
+        }
+        return;
+}
+
+static void
+qla_drain_fp_taskqueues(qla_host_t *ha)
+{
+        int     i;
+
+        for (i = 0; i < ha->hw.num_sds_rings; i++) {
+                qla_tx_fp_t *fp = &ha->tx_fp[i];
+
+                if (fp->fp_taskqueue != NULL) {
+                        taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
+                }
+        }
+        return;
+}
+
+static int
+qla_transmit(struct ifnet *ifp, struct mbuf  *mp)
+{
+	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
+        qla_tx_fp_t *fp;
+        int rss_id = 0;
+        int ret = 0;
+
+        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
+
+#if __FreeBSD_version >= 1100000
+        if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
+#else
+        if (mp->m_flags & M_FLOWID)
+#endif
+                rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
+                                        ha->hw.num_sds_rings;
+        fp = &ha->tx_fp[rss_id];
+
+        if (fp->tx_br == NULL) {
+                ret = EINVAL;
+                goto qla_transmit_exit;
+        }
+
+        if (mp != NULL) {
+                ret = drbr_enqueue(ifp, fp->tx_br, mp);
+        }
+
+        if (fp->fp_taskqueue != NULL)
+                taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
+
+        ret = 0;
+
+qla_transmit_exit:
+
+        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
+        return ret;
+}
+
+static void
+qla_qflush(struct ifnet *ifp)
+{
+        int                     i;
+        qla_tx_fp_t		*fp;
+        struct mbuf             *mp;
+        qla_host_t              *ha;
+
+        ha = (qla_host_t *)ifp->if_softc;
+
+        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
+
+        for (i = 0; i < ha->hw.num_sds_rings; i++) {
+
+                fp = &ha->tx_fp[i];
+
+                if (fp == NULL)
+                        continue;
+
+                if (fp->tx_br) {
+                        mtx_lock(&fp->tx_mtx);
+
+                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
+                                m_freem(mp);
+                        }
+                        mtx_unlock(&fp->tx_mtx);
+                }
+        }
+        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
+
+        return;
+}
+
+
 static void
 qla_stop(qla_host_t *ha)
 {
 	struct ifnet *ifp = ha->ifp;
 	device_t	dev;
+	int i = 0;
 
 	dev = ha->pci_dev;
 
 	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
-	QLA_TX_LOCK(ha); QLA_TX_UNLOCK(ha);
+
+        for (i = 0; i < ha->hw.num_sds_rings; i++) {
+        	qla_tx_fp_t *fp;
+
+		fp = &ha->tx_fp[i];
+
+                if (fp == NULL)
+                        continue;
+
+		if (fp->tx_br != NULL) {
+                        mtx_lock(&fp->tx_mtx);
+                        mtx_unlock(&fp->tx_mtx);
+		}
+	}
 
 	ha->flags.qla_watchdog_pause = 1;
 
@@ -1313,6 +1534,8 @@ qla_stop(qla_host_t *ha)
 
 	ha->flags.qla_interface_up = 0;
 
+	qla_drain_fp_taskqueues(ha);
+
 	ql_hw_stop_rcv(ha);
 
 	ql_del_hw_if(ha);
@@ -1653,25 +1876,6 @@ exit_ql_get_mbuf:
 	return (ret);
 }
 
-static void
-qla_tx_done(void *context, int pending)
-{
-	qla_host_t *ha = context;
-	struct ifnet   *ifp;
-
-	ifp = ha->ifp;
-
-	if (!ifp) 
-		return;
-
-	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
-		QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
-		return;
-	}
-	ql_hw_tx_done(ha);
-
-	qla_start(ha->ifp);
-}
 
 static void
 qla_get_peer(qla_host_t *ha)
@@ -1714,18 +1918,32 @@ qla_error_recovery(void *context, int pe
 	qla_host_t *ha = context;
 	uint32_t msecs_100 = 100;
 	struct ifnet *ifp = ha->ifp;
+	int i = 0;
 
         (void)QLA_LOCK(ha, __func__, 0);
 
 	if (ha->flags.qla_interface_up) {
 
-	ha->hw.imd_compl = 1;
-	qla_mdelay(__func__, 300);
+		ha->hw.imd_compl = 1;
+		qla_mdelay(__func__, 300);
 
-        ql_hw_stop_rcv(ha);
+		ql_hw_stop_rcv(ha);
 
-        ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
-		QLA_TX_LOCK(ha); QLA_TX_UNLOCK(ha);
+	        ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
+
+		for (i = 0; i < ha->hw.num_sds_rings; i++) {
+	        	qla_tx_fp_t *fp;
+
+			fp = &ha->tx_fp[i];
+
+			if (fp == NULL)
+				continue;
+
+			if (fp->tx_br != NULL) {
+				mtx_lock(&fp->tx_mtx);
+				mtx_unlock(&fp->tx_mtx);
+			}
+		}
 	}
 

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


