Date:      Mon, 6 Jun 2011 22:26:02 +0000 (UTC)
From:      Navdeep Parhar <np@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject:   svn commit: r222794 - in stable/8/sys: conf dev/cxgbe dev/cxgbe/common modules/cxgbe/if_cxgbe
Message-ID:  <201106062226.p56MQ292051814@svn.freebsd.org>

Author: np
Date: Mon Jun  6 22:26:02 2011
New Revision: 222794
URL: http://svn.freebsd.org/changeset/base/222794

Log:
  MFC r222509, r222510, r222513, r222551, r222552, r222701, r222703,
  and some direct modifications to minimize diffs with head.
  
  r222509:
  L2 table code.  This is enough to get the T4's switch + L2 rewrite
  filters working.  (All other filters - switch without L2 info rewrite,
  steer, and drop - were already fully-functional).
  
  r222510:
  - Specialized ingress queues that take interrupts for other ingress
    queues.  Try to have a set of these per port when possible, fall back
    to sharing a common pool between all ports otherwise.
  - One control queue per port (used to be one per hardware channel).
  - t4_eth_rx now handles Ethernet rx only.
  - sysctls to display pidx/cidx for some queues.
  
  r222513:
  Update to firmware interface 1.3.10
  
  r222551:
  Firmware device log.
  
  r222552:
  Provide hit-count with rest of the information about a filter.
  
  r222701:
  Allow lazy fill up of freelists.
  
  r222703:
  Cause backpressure (instead of dropping frames) on congestion.

Added:
  stable/8/sys/dev/cxgbe/common/jhash.h
     - copied unchanged from r222509, head/sys/dev/cxgbe/common/jhash.h
  stable/8/sys/dev/cxgbe/t4_l2t.c
     - copied unchanged from r222509, head/sys/dev/cxgbe/t4_l2t.c
  stable/8/sys/dev/cxgbe/t4_l2t.h
     - copied unchanged from r222509, head/sys/dev/cxgbe/t4_l2t.h
Modified:
  stable/8/sys/conf/files
  stable/8/sys/dev/cxgbe/adapter.h
  stable/8/sys/dev/cxgbe/common/common.h
  stable/8/sys/dev/cxgbe/common/t4fw_interface.h
  stable/8/sys/dev/cxgbe/offload.h
  stable/8/sys/dev/cxgbe/osdep.h
  stable/8/sys/dev/cxgbe/t4_ioctl.h
  stable/8/sys/dev/cxgbe/t4_main.c
  stable/8/sys/dev/cxgbe/t4_sge.c
  stable/8/sys/modules/cxgbe/if_cxgbe/Makefile
Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)

Modified: stable/8/sys/conf/files
==============================================================================
--- stable/8/sys/conf/files	Mon Jun  6 22:18:40 2011	(r222793)
+++ stable/8/sys/conf/files	Mon Jun  6 22:26:02 2011	(r222794)
@@ -826,6 +826,8 @@ dev/cxgbe/t4_main.c		optional cxgbe pci 
 	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
 dev/cxgbe/t4_sge.c		optional cxgbe pci \
 	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
+dev/cxgbe/t4_l2t.c		optional cxgbe pci \
+	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
 dev/cxgbe/common/t4_hw.c	optional cxgbe pci \
 	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
 dev/cy/cy.c			optional cy

Modified: stable/8/sys/dev/cxgbe/adapter.h
==============================================================================
--- stable/8/sys/dev/cxgbe/adapter.h	Mon Jun  6 22:18:40 2011	(r222793)
+++ stable/8/sys/dev/cxgbe/adapter.h	Mon Jun  6 22:26:02 2011	(r222794)
@@ -64,6 +64,16 @@ prefetch(void *x)
 #define prefetch(x)
 #endif
 
+#ifndef SYSCTL_ADD_UQUAD
+#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
+#define sysctl_handle_64 sysctl_handle_quad
+#define CTLTYPE_U64 CTLTYPE_QUAD
+#endif
+
+#if __FreeBSD_version >= 802507
+#define T4_DEVLOG 1
+#endif
+
 #ifdef __amd64__
 /* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
 static __inline uint64_t
@@ -110,6 +120,9 @@ enum {
 	FW_IQ_QSIZE = 256,
 	FW_IQ_ESIZE = 64,	/* At least 64 mandated by the firmware spec */
 
+	INTR_IQ_QSIZE = 64,
+	INTR_IQ_ESIZE = 64,	/* Handles some CPLs too, do not reduce */
+
 	CTRL_EQ_QSIZE = 128,
 	CTRL_EQ_ESIZE = 64,
 
@@ -141,7 +154,7 @@ enum {
 	/* adapter flags */
 	FULL_INIT_DONE	= (1 << 0),
 	FW_OK		= (1 << 1),
-	INTR_FWD	= (1 << 2),
+	INTR_SHARED	= (1 << 2),	/* one set of intrq's for all ports */
 
 	CXGBE_BUSY	= (1 << 9),
 
@@ -384,17 +397,16 @@ struct sge_ctrlq {
 
 	/* stats for common events first */
 
-	uint64_t total_wrs;	/* # of work requests sent down this queue */
 
 	/* stats for not-that-common events */
 
 	uint32_t no_desc;	/* out of hardware descriptors */
-	uint32_t too_long;	/* WR longer than hardware max */
 } __aligned(CACHE_LINE_SIZE);
 
 struct sge {
 	uint16_t timer_val[SGE_NTIMERS];
 	uint8_t  counter_val[SGE_NCOUNTERS];
+	int fl_starve_threshold;
 
 	int nrxq;	/* total rx queues (all ports and the rest) */
 	int ntxq;	/* total tx queues (all ports and the rest) */
@@ -403,7 +415,7 @@ struct sge {
 
 	struct sge_iq fwq;	/* Firmware event queue */
 	struct sge_ctrlq *ctrlq;/* Control queues */
-	struct sge_iq *fiq;	/* Forwarded interrupt queues (INTR_FWD) */
+	struct sge_iq *intrq;	/* Interrupt queues */
 	struct sge_txq *txq;	/* NIC tx queues */
 	struct sge_rxq *rxq;	/* NIC rx queues */
 
@@ -445,6 +457,7 @@ struct adapter {
 	struct port_info *port[MAX_NPORTS];
 	uint8_t chan_map[NCHAN];
 
+	struct l2t_data *l2t;	/* L2 table */
 	struct tid_info tids;
 
 	int registered_device_map;
@@ -456,7 +469,9 @@ struct adapter {
 	struct t4_virt_res vres;
 
 	struct sysctl_ctx_list ctx; /* from first_port_up to last_port_down */
+	struct sysctl_oid *oid_fwq;
 	struct sysctl_oid *oid_ctrlq;
+	struct sysctl_oid *oid_intrq;
 
 	struct mtx sc_lock;
 	char lockname[16];
@@ -502,7 +517,10 @@ struct adapter {
 	rxq = &pi->adapter->sge.rxq[pi->first_rxq]; \
 	for (iter = 0; iter < pi->nrxq; ++iter, ++rxq)
 
-#define NFIQ(sc) ((sc)->intr_count > 1 ? (sc)->intr_count - 1 : 1)
+/* One for errors, one for firmware events */
+#define T4_EXTRA_INTR 2
+#define NINTRQ(sc) ((sc)->intr_count > T4_EXTRA_INTR ? \
+    (sc)->intr_count - T4_EXTRA_INTR : 1)
 
 static inline uint32_t
 t4_read_reg(struct adapter *sc, uint32_t reg)
@@ -599,12 +617,9 @@ int t4_teardown_adapter_queues(struct ad
 int t4_setup_eth_queues(struct port_info *);
 int t4_teardown_eth_queues(struct port_info *);
 void t4_intr_all(void *);
-void t4_intr_fwd(void *);
+void t4_intr(void *);
 void t4_intr_err(void *);
 void t4_intr_evt(void *);
-void t4_intr_data(void *);
-void t4_evt_rx(void *);
-void t4_eth_rx(void *);
 int t4_mgmt_tx(struct adapter *, struct mbuf *);
 int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
 void t4_update_fl_bufsize(struct ifnet *);
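
The NINTRQ/T4_EXTRA_INTR macros above encode the new vector accounting: two
vectors are reserved for the error and firmware-event interrupts, and whatever
remains drives the interrupt queues, with a floor of one when only a single
vector (INTx or one MSI) is available.  A minimal standalone sketch of that
arithmetic; the struct below is a stand-in, not the driver's struct adapter:

#include <stdio.h>

#define T4_EXTRA_INTR 2    /* one for errors, one for firmware events */
#define NINTRQ(sc) ((sc)->intr_count > T4_EXTRA_INTR ? \
    (sc)->intr_count - T4_EXTRA_INTR : 1)

struct toy_adapter {
    int intr_count;    /* interrupt vectors actually allocated */
};

int
main(void)
{
    struct toy_adapter sc;

    /* Plenty of MSI-X vectors: 2 are reserved, the rest become intrq's. */
    sc.intr_count = 16;
    printf("%d vectors -> %d interrupt queues\n", sc.intr_count, NINTRQ(&sc));

    /* A single vector: still report one interrupt queue. */
    sc.intr_count = 1;
    printf("%d vector  -> %d interrupt queue\n", sc.intr_count, NINTRQ(&sc));

    return (0);
}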

Modified: stable/8/sys/dev/cxgbe/common/common.h
==============================================================================
--- stable/8/sys/dev/cxgbe/common/common.h	Mon Jun  6 22:18:40 2011	(r222793)
+++ stable/8/sys/dev/cxgbe/common/common.h	Mon Jun  6 22:26:02 2011	(r222794)
@@ -54,7 +54,7 @@ enum {
 
 #define FW_VERSION_MAJOR 1
 #define FW_VERSION_MINOR 3
-#define FW_VERSION_MICRO 8
+#define FW_VERSION_MICRO 10
 
 struct port_stats {
 	u64 tx_octets;            /* total # of octets in good frames */

Copied: stable/8/sys/dev/cxgbe/common/jhash.h (from r222509, head/sys/dev/cxgbe/common/jhash.h)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/8/sys/dev/cxgbe/common/jhash.h	Mon Jun  6 22:26:02 2011	(r222794, copy of r222509, head/sys/dev/cxgbe/common/jhash.h)
@@ -0,0 +1,140 @@
+#ifndef _JHASH_H
+#define _JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose.  It has no warranty.
+ *
+ * $FreeBSD$
+ */
+
+/* NOTE: Arguments are modified. */
+#define __jhash_mix(a, b, c) \
+{ \
+  a -= b; a -= c; a ^= (c>>13); \
+  b -= c; b -= a; b ^= (a<<8); \
+  c -= a; c -= b; c ^= (b>>13); \
+  a -= b; a -= c; a ^= (c>>12);  \
+  b -= c; b -= a; b ^= (a<<16); \
+  c -= a; c -= b; c ^= (b>>5); \
+  a -= b; a -= c; a ^= (c>>3);  \
+  b -= c; b -= a; b ^= (a<<10); \
+  c -= a; c -= b; c ^= (b>>15); \
+}
+
+/* The golden ration: an arbitrary value */
+#define JHASH_GOLDEN_RATIO	0x9e3779b9
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes.  No alignment or length assumptions are made about
+ * the input key.
+ */
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+	u32 a, b, c, len;
+	const u8 *k = key;
+
+	len = length;
+	a = b = JHASH_GOLDEN_RATIO;
+	c = initval;
+
+	while (len >= 12) {
+		a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
+		b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
+		c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
+
+		__jhash_mix(a,b,c);
+
+		k += 12;
+		len -= 12;
+	}
+
+	c += length;
+	switch (len) {
+	case 11: c += ((u32)k[10]<<24);
+	case 10: c += ((u32)k[9]<<16);
+	case 9 : c += ((u32)k[8]<<8);
+	case 8 : b += ((u32)k[7]<<24);
+	case 7 : b += ((u32)k[6]<<16);
+	case 6 : b += ((u32)k[5]<<8);
+	case 5 : b += k[4];
+	case 4 : a += ((u32)k[3]<<24);
+	case 3 : a += ((u32)k[2]<<16);
+	case 2 : a += ((u32)k[1]<<8);
+	case 1 : a += k[0];
+	};
+
+	__jhash_mix(a,b,c);
+
+	return c;
+}
+
+/* A special optimized version that handles 1 or more of u32s.
+ * The length parameter here is the number of u32s in the key.
+ */
+static inline u32 jhash2(u32 *k, u32 length, u32 initval)
+{
+	u32 a, b, c, len;
+
+	a = b = JHASH_GOLDEN_RATIO;
+	c = initval;
+	len = length;
+
+	while (len >= 3) {
+		a += k[0];
+		b += k[1];
+		c += k[2];
+		__jhash_mix(a, b, c);
+		k += 3; len -= 3;
+	}
+
+	c += length * 4;
+
+	switch (len) {
+	case 2 : b += k[1];
+	case 1 : a += k[0];
+	};
+
+	__jhash_mix(a,b,c);
+
+	return c;
+}
+
+
+/* A special ultra-optimized versions that knows they are hashing exactly
+ * 3, 2 or 1 word(s).
+ *
+ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
+ *       done at the end is not done here.
+ */
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+	a += JHASH_GOLDEN_RATIO;
+	b += JHASH_GOLDEN_RATIO;
+	c += initval;
+
+	__jhash_mix(a, b, c);
+
+	return c;
+}
+
+static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+	return jhash_3words(a, b, 0, initval);
+}
+
+static inline u32 jhash_1word(u32 a, u32 initval)
+{
+	return jhash_3words(a, 0, 0, initval);
+}
+
+#endif /* _JHASH_H */
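
jhash.h above is a kernel-style header (it expects the u32/u8 typedefs that
osdep.h provides inside the driver), but the hash itself is plain C.  A small
userland sketch, supplying the typedefs locally, of the two entry points the
new L2 table code leans on: hashing an arbitrary byte string and hashing a
pair of 32-bit words.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t u32;    /* provided by osdep.h inside the driver */
typedef uint8_t  u8;

#include "jhash.h"       /* the header added above */

int
main(void)
{
    const char key[] = "02:00:00:00:00:01";
    u32 ip = 0x0a000001;    /* 10.0.0.1, host order */
    u32 ifindex = 3;

    /* Arbitrary byte string; jhash() makes no alignment assumptions. */
    printf("jhash:        0x%08x\n", jhash(key, (u32)strlen(key), 0));

    /* Two-word form, the one arp_hash() in t4_l2t.c builds on. */
    printf("jhash_2words: 0x%08x\n", jhash_2words(ip, ifindex, 0));

    return (0);
}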

Modified: stable/8/sys/dev/cxgbe/common/t4fw_interface.h
==============================================================================
--- stable/8/sys/dev/cxgbe/common/t4fw_interface.h	Mon Jun  6 22:18:40 2011	(r222793)
+++ stable/8/sys/dev/cxgbe/common/t4fw_interface.h	Mon Jun  6 22:26:02 2011	(r222794)
@@ -43,6 +43,7 @@ enum fw_retval {
 	FW_ENOMEM		= 12,	/* out of memory */
 	FW_EFAULT		= 14,	/* bad address; fw bad */
 	FW_EBUSY		= 16,	/* resource busy */
+	FW_EEXIST		= 17,	/* File exists */
 	FW_EINVAL		= 22,	/* invalid argument */
 	FW_ENOSYS		= 38,	/* functionality not implemented */
 	FW_EPROTO		= 71,	/* protocol error */
@@ -59,6 +60,8 @@ enum fw_retval {
 	FW_FCOE_NO_XCHG		= 136,	/* */
 	FW_SCSI_RSP_ERR		= 137,	/* */
 	FW_ERR_RDEV_IMPL_LOGO	= 138,	/* */
+	FW_SCSI_UNDER_FLOW_ERR  = 139,	/* */
+	FW_SCSI_OVER_FLOW_ERR   = 140,	/* */
 };
 
 /******************************************************************************
@@ -85,7 +88,8 @@ enum fw_wr_opcodes {
 	FW_RI_FR_NSMR_WR	= 0x19,
 	FW_RI_INV_LSTAG_WR	= 0x1a,
 	FW_RI_WR		= 0x0d,
-	FW_LASTC2E_WR		= 0x4a
+	FW_ISCSI_NODE_WR	= 0x4a,
+	FW_LASTC2E_WR		= 0x4b
 };
 
 /*
@@ -514,7 +518,7 @@ struct fw_eth_tx_pkts_wr {
 	__be32 r3;
 	__be16 plen;
 	__u8   npkt;
-	__u8   r4;
+	__u8   type;
 };
 
 struct fw_eq_flush_wr {
@@ -1465,6 +1469,65 @@ struct fw_ri_wr {
 #define G_FW_RI_WR_P2PTYPE(x)	\
     (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
 
+#ifdef FOISCSI
+struct fw_iscsi_node_wr {
+	__u8   opcode;
+	__u8   subop;
+	__u8   node_attr_to_compl;
+	__u8   len16;
+	__u8   status;
+	__u8   r2;
+	__be16 immd_len;
+	__be64 cookie;
+	__be32 node_id;
+	__be32 ctrl_handle;
+	__be32 io_handle;
+	__be32 r3;
+};
+
+#define S_FW_ISCSI_NODE_WR_NODE_ATTR	7
+#define M_FW_ISCSI_NODE_WR_NODE_ATTR	0x1
+#define V_FW_ISCSI_NODE_WR_NODE_ATTR(x)	((x) << S_FW_ISCSI_NODE_WR_NODE_ATTR)
+#define G_FW_ISCSI_NODE_WR_NODE_ATTR(x)	\
+    (((x) >> S_FW_ISCSI_NODE_WR_NODE_ATTR) & M_FW_ISCSI_NODE_WR_NODE_ATTR)
+#define F_FW_ISCSI_NODE_WR_NODE_ATTR	V_FW_ISCSI_NODE_WR_NODE_ATTR(1U)
+
+#define S_FW_ISCSI_NODE_WR_SESS_ATTR	6
+#define M_FW_ISCSI_NODE_WR_SESS_ATTR	0x1
+#define V_FW_ISCSI_NODE_WR_SESS_ATTR(x)	((x) << S_FW_ISCSI_NODE_WR_SESS_ATTR)
+#define G_FW_ISCSI_NODE_WR_SESS_ATTR(x)	\
+    (((x) >> S_FW_ISCSI_NODE_WR_SESS_ATTR) & M_FW_ISCSI_NODE_WR_SESS_ATTR)
+#define F_FW_ISCSI_NODE_WR_SESS_ATTR	V_FW_ISCSI_NODE_WR_SESS_ATTR(1U)
+
+#define S_FW_ISCSI_NODE_WR_CONN_ATTR	5
+#define M_FW_ISCSI_NODE_WR_CONN_ATTR	0x1
+#define V_FW_ISCSI_NODE_WR_CONN_ATTR(x)	((x) << S_FW_ISCSI_NODE_WR_CONN_ATTR)
+#define G_FW_ISCSI_NODE_WR_CONN_ATTR(x)	\
+    (((x) >> S_FW_ISCSI_NODE_WR_CONN_ATTR) & M_FW_ISCSI_NODE_WR_CONN_ATTR)
+#define F_FW_ISCSI_NODE_WR_CONN_ATTR	V_FW_ISCSI_NODE_WR_CONN_ATTR(1U)
+
+#define S_FW_ISCSI_NODE_WR_TGT_ATTR	4
+#define M_FW_ISCSI_NODE_WR_TGT_ATTR	0x1
+#define V_FW_ISCSI_NODE_WR_TGT_ATTR(x)	((x) << S_FW_ISCSI_NODE_WR_TGT_ATTR)
+#define G_FW_ISCSI_NODE_WR_TGT_ATTR(x)	\
+    (((x) >> S_FW_ISCSI_NODE_WR_TGT_ATTR) & M_FW_ISCSI_NODE_WR_TGT_ATTR)
+#define F_FW_ISCSI_NODE_WR_TGT_ATTR	V_FW_ISCSI_NODE_WR_TGT_ATTR(1U)
+
+#define S_FW_ISCSI_NODE_WR_NODE_TYPE	3
+#define M_FW_ISCSI_NODE_WR_NODE_TYPE	0x1
+#define V_FW_ISCSI_NODE_WR_NODE_TYPE(x)	((x) << S_FW_ISCSI_NODE_WR_NODE_TYPE)
+#define G_FW_ISCSI_NODE_WR_NODE_TYPE(x)	\
+    (((x) >> S_FW_ISCSI_NODE_WR_NODE_TYPE) & M_FW_ISCSI_NODE_WR_NODE_TYPE)
+#define F_FW_ISCSI_NODE_WR_NODE_TYPE	V_FW_ISCSI_NODE_WR_NODE_TYPE(1U)
+
+#define S_FW_ISCSI_NODE_WR_COMPL	0
+#define M_FW_ISCSI_NODE_WR_COMPL	0x1
+#define V_FW_ISCSI_NODE_WR_COMPL(x)	((x) << S_FW_ISCSI_NODE_WR_COMPL)
+#define G_FW_ISCSI_NODE_WR_COMPL(x)	\
+    (((x) >> S_FW_ISCSI_NODE_WR_COMPL) & M_FW_ISCSI_NODE_WR_COMPL)
+#define F_FW_ISCSI_NODE_WR_COMPL	V_FW_ISCSI_NODE_WR_COMPL(1U)
+
+#endif
 
 /******************************************************************************
  *  C O M M A N D s
@@ -1511,6 +1574,7 @@ enum fw_cmd_opcodes {
 	FW_RSS_VI_CONFIG_CMD           = 0x23,
 	FW_SCHED_CMD                   = 0x24,
 	FW_DEVLOG_CMD                  = 0x25,
+	FW_NETIF_CMD                   = 0x26,
 	FW_LASTC2E_CMD                 = 0x40,
 	FW_ERROR_CMD                   = 0x80,
 	FW_DEBUG_CMD                   = 0x81,
@@ -1941,6 +2005,8 @@ enum fw_caps_config_iscsi {
 	FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002,
 	FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004,
 	FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
+	FW_CAPS_CONFIG_ISCSI_INITIATOR_SSNOFLD = 0x00000010,
+	FW_CAPS_CONFIG_ISCSI_TARGET_SSNOFLD = 0x00000020,
 };
 
 enum fw_caps_config_fcoe {
@@ -3941,6 +4007,39 @@ enum fw_port_cap {
 	FW_PORT_CAP_TECHKX4		= 0x2000,
 };
 
+#define S_FW_PORT_AUXLINFO_MDI		3
+#define M_FW_PORT_AUXLINFO_MDI		0x3
+#define V_FW_PORT_AUXLINFO_MDI(x)	((x) << S_FW_PORT_AUXLINFO_MDI)
+#define G_FW_PORT_AUXLINFO_MDI(x) \
+    (((x) >> S_FW_PORT_AUXLINFO_MDI) & M_FW_PORT_AUXLINFO_MDI)
+
+#define S_FW_PORT_AUXLINFO_KX4		2
+#define M_FW_PORT_AUXLINFO_KX4		0x1
+#define V_FW_PORT_AUXLINFO_KX4(x)	((x) << S_FW_PORT_AUXLINFO_KX4)
+#define G_FW_PORT_AUXLINFO_KX4(x) \
+    (((x) >> S_FW_PORT_AUXLINFO_KX4) & M_FW_PORT_AUXLINFO_KX4)
+#define F_FW_PORT_AUXLINFO_KX4		V_FW_PORT_AUXLINFO_KX4(1U)
+
+#define S_FW_PORT_AUXLINFO_KR		1
+#define M_FW_PORT_AUXLINFO_KR		0x1
+#define V_FW_PORT_AUXLINFO_KR(x)	((x) << S_FW_PORT_AUXLINFO_KR)
+#define G_FW_PORT_AUXLINFO_KR(x) \
+    (((x) >> S_FW_PORT_AUXLINFO_KR) & M_FW_PORT_AUXLINFO_KR)
+#define F_FW_PORT_AUXLINFO_KR		V_FW_PORT_AUXLINFO_KR(1U)
+
+#define S_FW_PORT_AUXLINFO_FEC		0
+#define M_FW_PORT_AUXLINFO_FEC		0x1
+#define V_FW_PORT_AUXLINFO_FEC(x)	((x) << S_FW_PORT_AUXLINFO_FEC)
+#define G_FW_PORT_AUXLINFO_FEC(x) \
+    (((x) >> S_FW_PORT_AUXLINFO_FEC) & M_FW_PORT_AUXLINFO_FEC) 
+#define F_FW_PORT_AUXLINFO_FEC		V_FW_PORT_AUXLINFO_FEC(1U)
+
+#define S_FW_PORT_RCAP_AUX	11
+#define M_FW_PORT_RCAP_AUX	0x7
+#define V_FW_PORT_RCAP_AUX(x)	((x) << S_FW_PORT_RCAP_AUX)
+#define G_FW_PORT_RCAP_AUX(x) \
+    (((x) >> S_FW_PORT_RCAP_AUX) & M_FW_PORT_RCAP_AUX)
+
 #define S_FW_PORT_CAP_SPEED	0
 #define M_FW_PORT_CAP_SPEED	0x3f
 #define V_FW_PORT_CAP_SPEED(x)	((x) << S_FW_PORT_CAP_SPEED)
@@ -4002,11 +4101,23 @@ enum fw_port_l2cfg_ctlbf {
 	FW_PORT_L2_CTLBF_MTU	= 0x40
 };
 
+enum fw_port_dcb_cfg {
+	FW_PORT_DCB_CFG_PG	= 0x01,
+	FW_PORT_DCB_CFG_PFC	= 0x02,
+	FW_PORT_DCB_CFG_APPL	= 0x04
+};
+
+enum fw_port_dcb_cfg_rc {
+	FW_PORT_DCB_CFG_SUCCESS	= 0x0,
+	FW_PORT_DCB_CFG_ERROR	= 0x1
+};
+
 enum fw_port_dcb_type {
 	FW_PORT_DCB_TYPE_PGID		= 0x00,
 	FW_PORT_DCB_TYPE_PGRATE		= 0x01,
 	FW_PORT_DCB_TYPE_PRIORATE	= 0x02,
-	FW_PORT_DCB_TYPE_PFC		= 0x03
+	FW_PORT_DCB_TYPE_PFC		= 0x03,
+	FW_PORT_DCB_TYPE_APP_ID		= 0x04,
 };
 
 struct fw_port_cmd {
@@ -4038,7 +4149,7 @@ struct fw_port_cmd {
 			__be16 acap;
 			__be16 mtu;
 			__u8   cbllen;
-			__u8   r7;
+			__u8   auxlinfo;
 			__be32 r8;
 			__be64 r9;
 		} info;
@@ -4068,6 +4179,14 @@ struct fw_port_cmd {
 				__be16 r10[3];
 				__be64 r11;
 			} pfc;
+			struct fw_port_app_priority {
+				__u8   type;
+				__u8   r10_lo[3];
+				__u8   prio;
+				__u8   sel;
+				__be16 protocolid;
+				__u8   r12[8];
+			} app_priority;
 		} dcb;
 	} u;
 };
@@ -5232,6 +5351,116 @@ struct fw_devlog_cmd {
     (((x) >> S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG) & \
      M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG)
 
+struct fw_netif_cmd {
+	__be32 op_portid;
+	__be32 retval_to_len16;
+	__be32 add_to_ipv4gw;
+	__be32 vlanid_mtuval;
+	__be32 gwaddr;
+	__be32 addr;
+	__be32 nmask;
+	__be32 bcaddr;
+};
+
+#define S_FW_NETIF_CMD_PORTID		0
+#define M_FW_NETIF_CMD_PORTID		0xf
+#define V_FW_NETIF_CMD_PORTID(x)	((x) << S_FW_NETIF_CMD_PORTID)
+#define G_FW_NETIF_CMD_PORTID(x)	\
+    (((x) >> S_FW_NETIF_CMD_PORTID) & M_FW_NETIF_CMD_PORTID)
+
+#define S_FW_NETIF_CMD_RETVAL		24
+#define M_FW_NETIF_CMD_RETVAL		0xff
+#define V_FW_NETIF_CMD_RETVAL(x)	((x) << S_FW_NETIF_CMD_RETVAL)
+#define G_FW_NETIF_CMD_RETVAL(x)	\
+    (((x) >> S_FW_NETIF_CMD_RETVAL) & M_FW_NETIF_CMD_RETVAL)
+
+#define S_FW_NETIF_CMD_IFIDX	16
+#define M_FW_NETIF_CMD_IFIDX	0xff
+#define V_FW_NETIF_CMD_IFIDX(x)	((x) << S_FW_NETIF_CMD_IFIDX)
+#define G_FW_NETIF_CMD_IFIDX(x)	\
+    (((x) >> S_FW_NETIF_CMD_IFIDX) & M_FW_NETIF_CMD_IFIDX)
+
+#define S_FW_NETIF_CMD_LEN16	0
+#define M_FW_NETIF_CMD_LEN16	0xff
+#define V_FW_NETIF_CMD_LEN16(x)	((x) << S_FW_NETIF_CMD_LEN16)
+#define G_FW_NETIF_CMD_LEN16(x)	\
+    (((x) >> S_FW_NETIF_CMD_LEN16) & M_FW_NETIF_CMD_LEN16)
+
+#define S_FW_NETIF_CMD_ADD	31
+#define M_FW_NETIF_CMD_ADD	0x1
+#define V_FW_NETIF_CMD_ADD(x)	((x) << S_FW_NETIF_CMD_ADD)
+#define G_FW_NETIF_CMD_ADD(x)	\
+    (((x) >> S_FW_NETIF_CMD_ADD) & M_FW_NETIF_CMD_ADD)
+#define F_FW_NETIF_CMD_ADD	V_FW_NETIF_CMD_ADD(1U)
+
+#define S_FW_NETIF_CMD_LINK	30
+#define M_FW_NETIF_CMD_LINK	0x1
+#define V_FW_NETIF_CMD_LINK(x)	((x) << S_FW_NETIF_CMD_LINK)
+#define G_FW_NETIF_CMD_LINK(x)	\
+    (((x) >> S_FW_NETIF_CMD_LINK) & M_FW_NETIF_CMD_LINK)
+#define F_FW_NETIF_CMD_LINK	V_FW_NETIF_CMD_LINK(1U)
+
+#define S_FW_NETIF_CMD_VLAN	29
+#define M_FW_NETIF_CMD_VLAN	0x1
+#define V_FW_NETIF_CMD_VLAN(x)	((x) << S_FW_NETIF_CMD_VLAN)
+#define G_FW_NETIF_CMD_VLAN(x)	\
+    (((x) >> S_FW_NETIF_CMD_VLAN) & M_FW_NETIF_CMD_VLAN)
+#define F_FW_NETIF_CMD_VLAN	V_FW_NETIF_CMD_VLAN(1U)
+
+#define S_FW_NETIF_CMD_MTU	28
+#define M_FW_NETIF_CMD_MTU	0x1
+#define V_FW_NETIF_CMD_MTU(x)	((x) << S_FW_NETIF_CMD_MTU)
+#define G_FW_NETIF_CMD_MTU(x)	\
+    (((x) >> S_FW_NETIF_CMD_MTU) & M_FW_NETIF_CMD_MTU)
+#define F_FW_NETIF_CMD_MTU	V_FW_NETIF_CMD_MTU(1U)
+
+#define S_FW_NETIF_CMD_DHCP	27
+#define M_FW_NETIF_CMD_DHCP	0x1
+#define V_FW_NETIF_CMD_DHCP(x)	((x) << S_FW_NETIF_CMD_DHCP)
+#define G_FW_NETIF_CMD_DHCP(x)	\
+    (((x) >> S_FW_NETIF_CMD_DHCP) & M_FW_NETIF_CMD_DHCP)
+#define F_FW_NETIF_CMD_DHCP	V_FW_NETIF_CMD_DHCP(1U)
+
+#define S_FW_NETIF_CMD_IPV4BCADDR	3
+#define M_FW_NETIF_CMD_IPV4BCADDR	0x1
+#define V_FW_NETIF_CMD_IPV4BCADDR(x)	((x) << S_FW_NETIF_CMD_IPV4BCADDR)
+#define G_FW_NETIF_CMD_IPV4BCADDR(x)	\
+    (((x) >> S_FW_NETIF_CMD_IPV4BCADDR) & M_FW_NETIF_CMD_IPV4BCADDR)
+#define F_FW_NETIF_CMD_IPV4BCADDR	V_FW_NETIF_CMD_IPV4BCADDR(1U)
+
+#define S_FW_NETIF_CMD_IPV4NMASK	2
+#define M_FW_NETIF_CMD_IPV4NMASK	0x1
+#define V_FW_NETIF_CMD_IPV4NMASK(x)	((x) << S_FW_NETIF_CMD_IPV4NMASK)
+#define G_FW_NETIF_CMD_IPV4NMASK(x)	\
+    (((x) >> S_FW_NETIF_CMD_IPV4NMASK) & M_FW_NETIF_CMD_IPV4NMASK)
+#define F_FW_NETIF_CMD_IPV4NMASK	V_FW_NETIF_CMD_IPV4NMASK(1U)
+
+#define S_FW_NETIF_CMD_IPV4ADDR		1
+#define M_FW_NETIF_CMD_IPV4ADDR		0x1
+#define V_FW_NETIF_CMD_IPV4ADDR(x)	((x) << S_FW_NETIF_CMD_IPV4ADDR)
+#define G_FW_NETIF_CMD_IPV4ADDR(x)	\
+    (((x) >> S_FW_NETIF_CMD_IPV4ADDR) & M_FW_NETIF_CMD_IPV4ADDR)
+#define F_FW_NETIF_CMD_IPV4ADDR	V_FW_NETIF_CMD_IPV4ADDR(1U)
+
+#define S_FW_NETIF_CMD_IPV4GW		0
+#define M_FW_NETIF_CMD_IPV4GW		0x1
+#define V_FW_NETIF_CMD_IPV4GW(x)	((x) << S_FW_NETIF_CMD_IPV4GW)
+#define G_FW_NETIF_CMD_IPV4GW(x)	\
+    (((x) >> S_FW_NETIF_CMD_IPV4GW) & M_FW_NETIF_CMD_IPV4GW)
+#define F_FW_NETIF_CMD_IPV4GW	V_FW_NETIF_CMD_IPV4GW(1U)
+
+#define S_FW_NETIF_CMD_VLANID		16
+#define M_FW_NETIF_CMD_VLANID		0xfff
+#define V_FW_NETIF_CMD_VLANID(x)	((x) << S_FW_NETIF_CMD_VLANID)
+#define G_FW_NETIF_CMD_VLANID(x)	\
+    (((x) >> S_FW_NETIF_CMD_VLANID) & M_FW_NETIF_CMD_VLANID)
+
+#define S_FW_NETIF_CMD_MTUVAL		0
+#define M_FW_NETIF_CMD_MTUVAL		0xffff
+#define V_FW_NETIF_CMD_MTUVAL(x)	((x) << S_FW_NETIF_CMD_MTUVAL)
+#define G_FW_NETIF_CMD_MTUVAL(x)	\
+    (((x) >> S_FW_NETIF_CMD_MTUVAL) & M_FW_NETIF_CMD_MTUVAL)
+
 enum fw_error_type {
 	FW_ERROR_TYPE_EXCEPTION		= 0x0,
 	FW_ERROR_TYPE_HWMODULE		= 0x1,
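
All of the new field definitions here follow the firmware interface's usual
convention: S_* is the field's bit shift, M_* its mask, V_*() places a value
at the right position, and G_*() pulls it back out.  A self-contained sketch
using the fw_netif_cmd VLANID/MTUVAL macros added above (repeated verbatim so
the snippet compiles on its own):

#include <stdint.h>
#include <stdio.h>

/* Copied from the t4fw_interface.h hunk above. */
#define S_FW_NETIF_CMD_VLANID		16
#define M_FW_NETIF_CMD_VLANID		0xfff
#define V_FW_NETIF_CMD_VLANID(x)	((x) << S_FW_NETIF_CMD_VLANID)
#define G_FW_NETIF_CMD_VLANID(x)	\
    (((x) >> S_FW_NETIF_CMD_VLANID) & M_FW_NETIF_CMD_VLANID)

#define S_FW_NETIF_CMD_MTUVAL		0
#define M_FW_NETIF_CMD_MTUVAL		0xffff
#define V_FW_NETIF_CMD_MTUVAL(x)	((x) << S_FW_NETIF_CMD_MTUVAL)
#define G_FW_NETIF_CMD_MTUVAL(x)	\
    (((x) >> S_FW_NETIF_CMD_MTUVAL) & M_FW_NETIF_CMD_MTUVAL)

int
main(void)
{
    /* Pack VLAN 100 and an MTU of 9000 into the one 32-bit word. */
    uint32_t vlanid_mtuval = V_FW_NETIF_CMD_VLANID(100) |
        V_FW_NETIF_CMD_MTUVAL(9000);

    /* In the real command this word would also be byte-swapped to BE. */
    printf("vlanid = %u, mtuval = %u\n",
        G_FW_NETIF_CMD_VLANID(vlanid_mtuval),
        G_FW_NETIF_CMD_MTUVAL(vlanid_mtuval));

    return (0);
}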

Modified: stable/8/sys/dev/cxgbe/offload.h
==============================================================================
--- stable/8/sys/dev/cxgbe/offload.h	Mon Jun  6 22:18:40 2011	(r222793)
+++ stable/8/sys/dev/cxgbe/offload.h	Mon Jun  6 22:26:02 2011	(r222794)
@@ -31,6 +31,24 @@
 #ifndef __T4_OFFLOAD_H__
 #define __T4_OFFLOAD_H__
 
+/* CPL message priority levels */
+enum {
+	CPL_PRIORITY_DATA     = 0,  /* data messages */
+	CPL_PRIORITY_SETUP    = 1,  /* connection setup messages */
+	CPL_PRIORITY_TEARDOWN = 0,  /* connection teardown messages */
+	CPL_PRIORITY_LISTEN   = 1,  /* listen start/stop messages */
+	CPL_PRIORITY_ACK      = 1,  /* RX ACK messages */
+	CPL_PRIORITY_CONTROL  = 1   /* control messages */
+};
+
+#define INIT_TP_WR(w, tid) do { \
+	(w)->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | \
+                              V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+	(w)->wr.wr_mid = htonl(V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+                               V_FW_WR_FLOWID(tid)); \
+	(w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
 /*
  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
  */

Modified: stable/8/sys/dev/cxgbe/osdep.h
==============================================================================
--- stable/8/sys/dev/cxgbe/osdep.h	Mon Jun  6 22:18:40 2011	(r222793)
+++ stable/8/sys/dev/cxgbe/osdep.h	Mon Jun  6 22:26:02 2011	(r222794)
@@ -82,6 +82,7 @@ typedef boolean_t bool;
 #define DIV_ROUND_UP(x, y) howmany(x, y)
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define container_of(p, s, f) ((s *)(((uint8_t *)(p)) - offsetof(s, f)))
 
 #define swab16(x) bswap16(x) 
 #define swab32(x) bswap32(x) 
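
The container_of() macro recovers a pointer to an enclosing structure from a
pointer to one of its members; t4_l2e_free() further down uses it to get from
an l2t_entry back to its l2t_data.  A tiny standalone illustration with
made-up types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same definition as the one added to osdep.h above. */
#define container_of(p, s, f) ((s *)(((uint8_t *)(p)) - offsetof(s, f)))

struct inner {
    int x;
};

struct outer {
    int tag;
    struct inner member;
};

int
main(void)
{
    struct outer o = { .tag = 42, .member = { .x = 7 } };
    struct inner *ip = &o.member;

    /* Walk back from the member pointer to the containing struct. */
    struct outer *op = container_of(ip, struct outer, member);

    printf("tag = %d (op == &o: %d)\n", op->tag, op == &o);
    return (0);
}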

Modified: stable/8/sys/dev/cxgbe/t4_ioctl.h
==============================================================================
--- stable/8/sys/dev/cxgbe/t4_ioctl.h	Mon Jun  6 22:18:40 2011	(r222793)
+++ stable/8/sys/dev/cxgbe/t4_ioctl.h	Mon Jun  6 22:26:02 2011	(r222794)
@@ -178,6 +178,8 @@ struct t4_filter_specification {
 
 struct t4_filter {
 	uint32_t idx;
+	uint16_t l2tidx;
+	uint16_t smtidx;
 	uint64_t hits;
 	struct t4_filter_specification fs;
 };
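
With the hit count (and the L2T/SMT indices) now part of struct t4_filter,
userland can read them back with the same ioctl it uses to retrieve the
filter specification.  A hedged sketch of such a query; CHELSIO_T4_GET_FILTER
and the /dev/t4nex0 nexus node come from the rest of t4_ioctl.h and the
driver, not from the hunk above, so treat them as assumptions:

#include <sys/types.h>
#include <sys/ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "t4_ioctl.h"    /* struct t4_filter, CHELSIO_T4_GET_FILTER */

int
main(int argc, char **argv)
{
    struct t4_filter t;
    int fd;

    if (argc != 2)
        errx(1, "usage: %s <filter-index>", argv[0]);

    /* Nexus device node; the name is assumed, adjust for your adapter. */
    fd = open("/dev/t4nex0", O_RDWR);
    if (fd < 0)
        err(1, "open");

    memset(&t, 0, sizeof(t));
    t.idx = atoi(argv[1]);

    if (ioctl(fd, CHELSIO_T4_GET_FILTER, &t) < 0)
        err(1, "CHELSIO_T4_GET_FILTER");

    printf("filter %u: hits %ju, l2tidx %u, smtidx %u\n",
        t.idx, (uintmax_t)t.hits, t.l2tidx, t.smtidx);

    close(fd);
    return (0);
}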

Copied: stable/8/sys/dev/cxgbe/t4_l2t.c (from r222509, head/sys/dev/cxgbe/t4_l2t.c)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/8/sys/dev/cxgbe/t4_l2t.c	Mon Jun  6 22:26:02 2011	(r222794, copy of r222509, head/sys/dev/cxgbe/t4_l2t.c)
@@ -0,0 +1,361 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_inet.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/rwlock.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_vlan_var.h>
+#include <net/if_dl.h>
+#include <net/if_llatbl.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <netinet/if_ether.h>
+
+#include "common/common.h"
+#include "common/jhash.h"
+#include "common/t4_msg.h"
+#include "offload.h"
+#include "t4_l2t.h"
+
+/* identifies sync vs async L2T_WRITE_REQs */
+#define S_SYNC_WR    12
+#define V_SYNC_WR(x) ((x) << S_SYNC_WR)
+#define F_SYNC_WR    V_SYNC_WR(1)
+
+enum {
+	L2T_STATE_VALID,	/* entry is up to date */
+	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
+	L2T_STATE_RESOLVING,	/* entry needs address resolution */
+	L2T_STATE_SYNC_WRITE,	/* synchronous write of entry underway */
+
+	/* when state is one of the below the entry is not hashed */
+	L2T_STATE_SWITCHING,	/* entry is being used by a switching filter */
+	L2T_STATE_UNUSED	/* entry not in use */
+};
+
+struct l2t_data {
+	struct rwlock lock;
+	volatile int nfree;	/* number of free entries */
+	struct l2t_entry *rover;/* starting point for next allocation */
+	struct l2t_entry l2tab[L2T_SIZE];
+};
+
+/*
+ * Module locking notes:  There is a RW lock protecting the L2 table as a
+ * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
+ * under the protection of the table lock, individual entry changes happen
+ * while holding that entry's spinlock.  The table lock nests outside the
+ * entry locks.  Allocations of new entries take the table lock as writers so
+ * no other lookups can happen while allocating new entries.  Entry updates
+ * take the table lock as readers so multiple entries can be updated in
+ * parallel.  An L2T entry can be dropped by decrementing its reference count
+ * and therefore can happen in parallel with entry allocation but no entry
+ * can change state or increment its ref count during allocation as both of
+ * these perform lookups.
+ *
+ * Note: We do not take references to ifnets in this module because both
+ * the TOE and the sockets already hold references to the interfaces and the
+ * lifetime of an L2T entry is fully contained in the lifetime of the TOE.
+ */
+static inline unsigned int
+vlan_prio(const struct l2t_entry *e)
+{
+	return e->vlan >> 13;
+}
+
+static inline void
+l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+	if (atomic_fetchadd_int(&e->refcnt, 1) == 0)  /* 0 -> 1 transition */
+		atomic_add_int(&d->nfree, -1);
+}
+
+/*
+ * To avoid having to check address families we do not allow v4 and v6
+ * neighbors to be on the same hash chain.  We keep v4 entries in the first
+ * half of available hash buckets and v6 in the second.
+ */
+enum {
+	L2T_SZ_HALF = L2T_SIZE / 2,
+	L2T_HASH_MASK = L2T_SZ_HALF - 1
+};
+
+static inline unsigned int
+arp_hash(const uint32_t *key, int ifindex)
+{
+	return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+}
+
+static inline unsigned int
+ipv6_hash(const uint32_t *key, int ifindex)
+{
+	uint32_t xor = key[0] ^ key[1] ^ key[2] ^ key[3];
+
+	return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+}
+
+static inline unsigned int
+addr_hash(const uint32_t *addr, int addr_len, int ifindex)
+{
+	return addr_len == 4 ? arp_hash(addr, ifindex) :
+			       ipv6_hash(addr, ifindex);
+}
+
+/*
+ * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
+ * whether the L2T entry and the address are of the same address family.
+ * Callers ensure an address is only checked against L2T entries of the same
+ * family, something made trivial by the separation of IP and IPv6 hash chains
+ * mentioned above.  Returns 0 if there's a match, non-zero otherwise.
+ */
+static inline int
+addreq(const struct l2t_entry *e, const uint32_t *addr)
+{
+	if (e->v6)
+		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
+		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
+	return e->addr[0] ^ addr[0];
+}
+
+/*
+ * Write an L2T entry.  Must be called with the entry locked (XXX: really?).
+ * The write may be synchronous or asynchronous.
+ */
+static int
+write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
+{
+	struct mbuf *m;
+	struct cpl_l2t_write_req *req;
+
+	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
+		return (ENOMEM);
+
+	req = mtod(m, struct cpl_l2t_write_req *);
+	m->m_pkthdr.len = m->m_len = sizeof(*req);
+
+	INIT_TP_WR(req, 0);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx |
+	    V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id)));
+	req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
+	req->l2t_idx = htons(e->idx);
+	req->vlan = htons(e->vlan);
+	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+
+	t4_mgmt_tx(sc, m);
+
+	if (sync && e->state != L2T_STATE_SWITCHING)
+		e->state = L2T_STATE_SYNC_WRITE;
+
+	return (0);
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void
+arpq_enqueue(struct l2t_entry *e, struct mbuf *m)
+{
+	mtx_assert(&e->lock, MA_OWNED);
+
+	m->m_next = NULL;
+	if (e->arpq_head)
+		e->arpq_tail->m_next = m;
+	else
+		e->arpq_head = m;
+	e->arpq_tail = m;
+}
+
+/*
+ * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *
+alloc_l2e(struct l2t_data *d)
+{
+	struct l2t_entry *end, *e, **p;
+
+	rw_assert(&d->lock, RA_WLOCKED);
+
+	if (!atomic_load_acq_int(&d->nfree))
+		return (NULL);
+
+	/* there's definitely a free entry */
+	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+		if (atomic_load_acq_int(&e->refcnt) == 0)
+			goto found;
+
+	for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e) ;
+found:
+	d->rover = e + 1;
+	atomic_add_int(&d->nfree, -1);
+
+	/*
+	 * The entry we found may be an inactive entry that is
+	 * presently in the hash table.  We need to remove it.
+	 */
+	if (e->state < L2T_STATE_SWITCHING) {
+		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
+			if (*p == e) {
+				*p = e->next;
+				e->next = NULL;
+				break;
+			}
+		}
+	}
+
+	e->state = L2T_STATE_UNUSED;
+	return e;
+}
+
+/*
+ * Called when an L2T entry has no more users.  The entry is left in the hash
+ * table since it is likely to be reused but we also bump nfree to indicate
+ * that the entry can be reallocated for a different neighbor.  We also drop
+ * the existing neighbor reference in case the neighbor is going away and is
+ * waiting on our reference.
+ *
+ * Because entries can be reallocated to other neighbors once their ref count
+ * drops to 0 we need to take the entry's lock to avoid races with a new
+ * incarnation.
+ */
+static void
+t4_l2e_free(struct l2t_entry *e)
+{
+	struct llentry *lle = NULL;
+	struct l2t_data *d;
+
+	mtx_lock(&e->lock);
+	if (atomic_load_acq_int(&e->refcnt) == 0) {  /* hasn't been recycled */
+		lle = e->lle;
+		e->lle = NULL;
+		/*
+		 * Don't need to worry about the arpq, an L2T entry can't be
+		 * released if any packets are waiting for resolution as we
+		 * need to be able to communicate with the device to close a
+		 * connection.
+		 */
+	}
+	mtx_unlock(&e->lock);
+
+	d = container_of(e, struct l2t_data, l2tab[e->idx]);
+	atomic_add_int(&d->nfree, 1);
+
+	if (lle)
+		LLE_FREE(lle);
+}
+
+void
+t4_l2t_release(struct l2t_entry *e)
+{
+	if (atomic_fetchadd_int(&e->refcnt, -1) == 1)
+		t4_l2e_free(e);
+}
+
+/*
+ * Allocate an L2T entry for use by a switching rule.  Such entries need to be
+ * explicitly freed and while busy they are not on any hash chain, so normal
+ * address resolution updates do not see them.
+ */
+struct l2t_entry *
+t4_l2t_alloc_switching(struct l2t_data *d)
+{
+	struct l2t_entry *e;
+
+	rw_rlock(&d->lock);
+	e = alloc_l2e(d);
+	if (e) {
+		mtx_lock(&e->lock);          /* avoid race with t4_l2t_free */
+		e->state = L2T_STATE_SWITCHING;
+		atomic_store_rel_int(&e->refcnt, 1);
+		mtx_unlock(&e->lock);
+	}
+	rw_runlock(&d->lock);
+	return e;
+}
+
+/*
+ * Sets/updates the contents of a switching L2T entry that has been allocated
+ * with an earlier call to @t4_l2t_alloc_switching.
+ */
+int
+t4_l2t_set_switching(struct adapter *sc, struct l2t_entry *e, uint16_t vlan,
+    uint8_t port, uint8_t *eth_addr)
+{
+	e->vlan = vlan;
+	e->lport = port;
+	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
+	return write_l2e(sc, e, 0);
+}
+
+struct l2t_data *
+t4_init_l2t(int flags)
+{
+	int i;
+	struct l2t_data *d;
+
+	d = malloc(sizeof(*d), M_CXGBE, M_ZERO | flags);
+	if (!d)
+		return (NULL);
+
+	d->rover = d->l2tab;
+	atomic_store_rel_int(&d->nfree, L2T_SIZE);
+	rw_init(&d->lock, "L2T");
+
+	for (i = 0; i < L2T_SIZE; i++) {
+		d->l2tab[i].idx = i;
+		d->l2tab[i].state = L2T_STATE_UNUSED;
+		mtx_init(&d->l2tab[i].lock, "L2T_E", NULL, MTX_DEF);
+		atomic_store_rel_int(&d->l2tab[i].refcnt, 0);
+	}
+
+	return (d);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
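
Even truncated, the t4_l2t.c portion above shows the whole lifecycle a
switching filter's L2T entry goes through: allocate an unhashed entry,
program it, and drop the reference when the filter is removed.  A sketch of
that sequence as it might look inside the driver; the helper name and the way
the VLAN, port and destination MAC arrive are illustrative, not code from
this commit:

/* Sketch only: compiles in the driver's context (mirrors t4_l2t.c's includes),
 * not standalone. */
#include <sys/param.h>
#include <sys/systm.h>

#include "common/common.h"
#include "offload.h"
#include "t4_l2t.h"

static int
set_up_switching_dest(struct adapter *sc, uint16_t vlan, uint8_t port,
    uint8_t *dmac, struct l2t_entry **pe)
{
    struct l2t_entry *e;
    int rc;

    /* Grab an entry reserved for switching use; it is not hashed. */
    e = t4_l2t_alloc_switching(sc->l2t);
    if (e == NULL)
        return (ENOMEM);

    /* Program VLAN, egress port, and destination MAC into the entry. */
    rc = t4_l2t_set_switching(sc, e, vlan, port, dmac);
    if (rc != 0) {
        t4_l2t_release(e);    /* drop the reference taken at allocation */
        return (rc);
    }

    *pe = e;    /* the filter records e->idx as its L2T index */
    return (0);
}

/* ... and when the filter is deleted: t4_l2t_release(e). */

This is essentially the pattern the filter-programming path in t4_main.c
follows for filters that rewrite L2 info.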


