Date:      Mon, 1 Jun 2015 17:43:35 +0000 (UTC)
From:      Jack F Vogel <jfv@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r283883 - in head/sys: dev/ixgbe modules/ixv
Message-ID:  <201506011743.t51HhZUP011379@svn.freebsd.org>

Author: jfv
Date: Mon Jun  1 17:43:34 2015
New Revision: 283883
URL: https://svnweb.freebsd.org/changeset/base/283883

Log:
  Delta D2489 - Add SR-IOV support to the Intel 10G driver.
  
  NOTE: This is a technology preview: while it has undergone
        development testing, Intel has not yet completed full
        validation of the feature. It is being integrated for
        early access and customer testing.
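  
  For context, the if_ix.c attach hunk below registers an SR-IOV schema
  whose per-VF parameters are "mac-addr", "mac-anti-spoof",
  "allow-set-mac" and "allow-promisc".  On a kernel built with
  "options PCI_IOV", VFs could then be created with iovctl(8).  The
  following iovctl.conf-style sketch is illustrative only; the
  interface name ix0, the VF count and the MAC address are assumptions
  and are not part of this commit:
  
  	PF {
  		device : "ix0";		# PF interface created by this driver
  		num_vfs : 2;		# number of VFs to instantiate
  	}
  	DEFAULT {
  		# per-VF settings, matching the schema defaults added below
  		mac-anti-spoof : true;
  		allow-set-mac : false;
  		allow-promisc : false;
  	}
  	VF-0 {
  		# example unicast MAC for the first VF (hypothetical value)
  		mac-addr : "02:01:02:03:04:05";
  	}
  
  Creating the VFs would then typically be something like
  "iovctl -C -f /etc/iovctl.conf" once the PF has attached.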

Modified:
  head/sys/dev/ixgbe/if_ix.c
  head/sys/dev/ixgbe/if_ixv.c
  head/sys/dev/ixgbe/ix_txrx.c
  head/sys/dev/ixgbe/ixgbe.h
  head/sys/dev/ixgbe/ixgbe_mbx.h
  head/sys/dev/ixgbe/ixgbe_vf.c
  head/sys/modules/ixv/Makefile

Modified: head/sys/dev/ixgbe/if_ix.c
==============================================================================
--- head/sys/dev/ixgbe/if_ix.c	Mon Jun  1 17:35:29 2015	(r283882)
+++ head/sys/dev/ixgbe/if_ix.c	Mon Jun  1 17:43:34 2015	(r283883)
@@ -54,7 +54,7 @@ int             ixgbe_display_debug_stat
 /*********************************************************************
  *  Driver version
  *********************************************************************/
-char ixgbe_driver_version[] = "2.8.3";
+char ixgbe_driver_version[] = "3.1.0";
 
 /*********************************************************************
  *  PCI Device ID Table
@@ -138,6 +138,7 @@ static int	ixgbe_setup_msix(struct adapt
 static void	ixgbe_free_pci_resources(struct adapter *);
 static void	ixgbe_local_timer(void *);
 static int	ixgbe_setup_interface(device_t, struct adapter *);
+static void	ixgbe_config_gpie(struct adapter *);
 static void	ixgbe_config_dmac(struct adapter *);
 static void	ixgbe_config_delay_values(struct adapter *);
 static void	ixgbe_config_link(struct adapter *);
@@ -204,6 +205,18 @@ static void	ixgbe_handle_phy(void *, int
 static void	ixgbe_reinit_fdir(void *, int);
 #endif
 
+#ifdef PCI_IOV
+static void	ixgbe_ping_all_vfs(struct adapter *);
+static void	ixgbe_handle_mbx(void *, int);
+static int	ixgbe_init_iov(device_t, u16, const nvlist_t *);
+static void	ixgbe_uninit_iov(device_t);
+static int	ixgbe_add_vf(device_t, u16, const nvlist_t *);
+static void	ixgbe_initialize_iov(struct adapter *);
+static void	ixgbe_recalculate_max_frame(struct adapter *);
+static void	ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
+#endif /* PCI_IOV */
+
+
 /*********************************************************************
  *  FreeBSD Device Interface Entry Points
  *********************************************************************/
@@ -216,6 +229,11 @@ static device_method_t ix_methods[] = {
 	DEVMETHOD(device_shutdown, ixgbe_shutdown),
 	DEVMETHOD(device_suspend, ixgbe_suspend),
 	DEVMETHOD(device_resume, ixgbe_resume),
+#ifdef PCI_IOV
+	DEVMETHOD(pci_init_iov, ixgbe_init_iov),
+	DEVMETHOD(pci_uninit_iov, ixgbe_uninit_iov),
+	DEVMETHOD(pci_add_vf, ixgbe_add_vf),
+#endif /* PCI_IOV */
 	DEVMETHOD_END
 };
 
@@ -341,6 +359,8 @@ static int fdir_pballoc = 1;
 #include <dev/netmap/ixgbe_netmap.h>
 #endif /* DEV_NETMAP */
 
+static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
+
 /*********************************************************************
  *  Device identification routine
  *
@@ -472,7 +492,7 @@ ixgbe_attach(device_t dev)
 	}
 
 	/* Allocate multicast array memory. */
-	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
+	adapter->mta = malloc(sizeof(*adapter->mta) *
 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
 	if (adapter->mta == NULL) {
 		device_printf(dev, "Can not allocate multicast setup array\n");
@@ -558,6 +578,28 @@ ixgbe_attach(device_t dev)
 	/* Set an initial default flow control value */
 	adapter->fc = ixgbe_fc_full;
 
+#ifdef PCI_IOV
+	if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
+		nvlist_t *pf_schema, *vf_schema;
+
+		hw->mbx.ops.init_params(hw);
+		pf_schema = pci_iov_schema_alloc_node();
+		vf_schema = pci_iov_schema_alloc_node();
+		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
+		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
+		    IOV_SCHEMA_HASDEFAULT, TRUE);
+		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
+		    IOV_SCHEMA_HASDEFAULT, FALSE);
+		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
+		    IOV_SCHEMA_HASDEFAULT, FALSE);
+		error = pci_iov_attach(dev, pf_schema, vf_schema);
+		if (error != 0) {
+			device_printf(dev,
+			    "Error %d setting up SR-IOV\n", error);
+		}
+	}
+#endif /* PCI_IOV */
+
 	/* Check for certain supported features */
 	ixgbe_check_wol_support(adapter);
 	ixgbe_check_eee_support(adapter);
@@ -614,6 +656,13 @@ ixgbe_detach(device_t dev)
 		return (EBUSY);
 	}
 
+#ifdef PCI_IOV
+	if (pci_iov_detach(dev) != 0) {
+		device_printf(dev, "SR-IOV in use; detach first.\n");
+		return (EBUSY);
+	}
+#endif /* PCI_IOV */
+
 	/* Stop the adapter */
 	IXGBE_CORE_LOCK(adapter);
 	ixgbe_setup_low_power_mode(adapter);
@@ -634,6 +683,9 @@ ixgbe_detach(device_t dev)
 		taskqueue_drain(adapter->tq, &adapter->link_task);
 		taskqueue_drain(adapter->tq, &adapter->mod_task);
 		taskqueue_drain(adapter->tq, &adapter->msf_task);
+#ifdef PCI_IOV
+		taskqueue_drain(adapter->tq, &adapter->mbx_task);
+#endif
 		taskqueue_drain(adapter->tq, &adapter->phy_task);
 #ifdef IXGBE_FDIR
 		taskqueue_drain(adapter->tq, &adapter->fdir_task);
@@ -810,6 +862,9 @@ ixgbe_ioctl(struct ifnet * ifp, u_long c
 			adapter->max_frame_size =
 				ifp->if_mtu + IXGBE_MTU_HDR;
 			ixgbe_init_locked(adapter);
+#ifdef PCI_IOV
+			ixgbe_recalculate_max_frame(adapter);
+#endif
 			IXGBE_CORE_UNLOCK(adapter);
 		}
 		break;
@@ -925,22 +980,36 @@ ixgbe_init_locked(struct adapter *adapte
 	struct ifnet   *ifp = adapter->ifp;
 	device_t 	dev = adapter->dev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32		k, txdctl, mhadd, gpie;
+	struct tx_ring  *txr;
+	struct rx_ring  *rxr;
+	u32		txdctl, mhadd;
 	u32		rxdctl, rxctrl;
+#ifdef PCI_IOV
+	enum ixgbe_iov_mode mode;
+#endif
 
 	mtx_assert(&adapter->core_mtx, MA_OWNED);
 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
+
 	hw->adapter_stopped = FALSE;
 	ixgbe_stop_adapter(hw);
         callout_stop(&adapter->timer);
 
+#ifdef PCI_IOV
+	mode = ixgbe_get_iov_mode(adapter);
+	adapter->pool = ixgbe_max_vfs(mode);
+	/* Queue indices may change with IOV mode */
+	for (int i = 0; i < adapter->num_queues; i++) {
+		adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
+		adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
+	}
+#endif
         /* reprogram the RAR[0] in case user changed it. */
-        ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
 
 	/* Get the latest mac address, User can use a LAA */
-	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
-	      IXGBE_ETH_LENGTH_OF_ADDRESS);
-	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
+	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
 	hw->addr_ctrl.rar_used_count = 1;
 
 	/* Set the various hardware offload abilities */
@@ -963,6 +1032,9 @@ ixgbe_init_locked(struct adapter *adapte
 	}
 
 	ixgbe_init_hw(hw);
+#ifdef PCI_IOV
+	ixgbe_initialize_iov(adapter);
+#endif
 	ixgbe_initialize_transmit_units(adapter);
 
 	/* Setup Multicast table */
@@ -972,14 +1044,10 @@ ixgbe_init_locked(struct adapter *adapte
 	** Determine the correct mbuf pool
 	** for doing jumbo frames
 	*/
-	if (adapter->max_frame_size <= 2048)
+	if (adapter->max_frame_size <= MCLBYTES)
 		adapter->rx_mbuf_sz = MCLBYTES;
-	else if (adapter->max_frame_size <= 4096)
-		adapter->rx_mbuf_sz = MJUMPAGESIZE;
-	else if (adapter->max_frame_size <= 9216)
-		adapter->rx_mbuf_sz = MJUM9BYTES;
 	else
-		adapter->rx_mbuf_sz = MJUM16BYTES;
+		adapter->rx_mbuf_sz = MJUMPAGESIZE;
 
 	/* Prepare receive descriptors and buffers */
 	if (ixgbe_setup_receive_structures(adapter)) {
@@ -991,31 +1059,8 @@ ixgbe_init_locked(struct adapter *adapte
 	/* Configure RX settings */
 	ixgbe_initialize_receive_units(adapter);
 
-	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
-
-	/* Enable Fan Failure Interrupt */
-	gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
-
-	/* Add for Module detection */
-	if (hw->mac.type == ixgbe_mac_82599EB)
-		gpie |= IXGBE_SDP2_GPIEN;
-
-	/*
-	 * Thermal Failure Detection (X540)
-	 * Link Detection (X552)
-	 */
-	if (hw->mac.type == ixgbe_mac_X540 ||
-	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
-	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
-		gpie |= IXGBE_SDP0_GPIEN_X540;
-
-	if (adapter->msix > 1) {
-		/* Enable Enhanced MSIX mode */
-		gpie |= IXGBE_GPIE_MSIX_MODE;
-		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
-		    IXGBE_GPIE_OCD;
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+	/* Enable SDP & MSIX interrupts based on adapter */
+	ixgbe_config_gpie(adapter);
 
 	/* Set MTU size */
 	if (ifp->if_mtu > ETHERMTU) {
@@ -1028,7 +1073,8 @@ ixgbe_init_locked(struct adapter *adapte
 	
 	/* Now enable all the queues */
 	for (int i = 0; i < adapter->num_queues; i++) {
-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+		txr = &adapter->tx_rings[i];
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
 		txdctl |= IXGBE_TXDCTL_ENABLE;
 		/* Set WTHRESH to 8, burst writeback */
 		txdctl |= (8 << 16);
@@ -1040,11 +1086,12 @@ ixgbe_init_locked(struct adapter *adapte
 		 * Prefetching enables tx line rate even with 1 queue.
 		 */
 		txdctl |= (32 << 0) | (1 << 8);
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
 	}
 
-	for (int i = 0; i < adapter->num_queues; i++) {
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
+		rxr = &adapter->rx_rings[i];
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
 		if (hw->mac.type == ixgbe_mac_82598EB) {
 			/*
 			** PTHRESH = 21
@@ -1055,9 +1102,9 @@ ixgbe_init_locked(struct adapter *adapte
 			rxdctl |= 0x080420;
 		}
 		rxdctl |= IXGBE_RXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
-		for (k = 0; k < 10; k++) {
-			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
+		for (; j < 10; j++) {
+			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
 			    IXGBE_RXDCTL_ENABLE)
 				break;
 			else
@@ -1086,10 +1133,10 @@ ixgbe_init_locked(struct adapter *adapte
 			struct netmap_kring *kring = &na->rx_rings[i];
 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
 
-			IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
+			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
 		} else
 #endif /* DEV_NETMAP */
-		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
 	}
 
 	/* Enable Receive engine */
@@ -1128,9 +1175,9 @@ ixgbe_init_locked(struct adapter *adapte
 #endif
 
 	/*
-	** Check on any SFP devices that
-	** need to be kick-started
-	*/
+	 * Check on any SFP devices that
+	 * need to be kick-started
+	 */
 	if (hw->phy.type == ixgbe_phy_none) {
 		int err = hw->phy.ops.identify(hw);
 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
@@ -1144,8 +1191,7 @@ ixgbe_init_locked(struct adapter *adapte
 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
 
 	/* Configure Energy Efficient Ethernet for supported devices */
-	if (adapter->eee_support)
-		ixgbe_setup_eee(hw, adapter->eee_enabled);
+	ixgbe_setup_eee(hw, adapter->eee_enabled);
 
 	/* Config/Enable Link */
 	ixgbe_config_link(adapter);
@@ -1165,6 +1211,15 @@ ixgbe_init_locked(struct adapter *adapte
 	/* And now turn on interrupts */
 	ixgbe_enable_intr(adapter);
 
+#ifdef PCI_IOV
+	/* Enable the use of the MBX by the VFs */
+	{
+		u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+		reg |= IXGBE_CTRL_EXT_PFRSTD;
+		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
+	}
+#endif
+
 	/* Now inform the stack we're ready */
 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
 
@@ -1183,6 +1238,51 @@ ixgbe_init(void *arg)
 }
 
 static void
+ixgbe_config_gpie(struct adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 gpie;
+
+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+
+	/* Fan Failure Interrupt */
+	if (hw->device_id == IXGBE_DEV_ID_82598AT)
+		gpie |= IXGBE_SDP1_GPIEN;
+
+	/*
+	 * Module detection (SDP2)
+	 * Media ready (SDP1)
+	 */
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+		gpie |= IXGBE_SDP2_GPIEN;
+		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
+			gpie |= IXGBE_SDP1_GPIEN;
+	}
+
+	/*
+	 * Thermal Failure Detection (X540)
+	 * Link Detection (X557)
+	 */
+	if (hw->mac.type == ixgbe_mac_X540 ||
+	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
+		gpie |= IXGBE_SDP0_GPIEN_X540;
+
+	if (adapter->msix > 1) {
+		/* Enable Enhanced MSIX mode */
+		gpie |= IXGBE_GPIE_MSIX_MODE;
+		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
+		    IXGBE_GPIE_OCD;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+	return;
+}
+
+/*
+ * Requires adapter->max_frame_size to be set.
+ */
+static void
 ixgbe_config_delay_values(struct adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -1276,10 +1376,9 @@ ixgbe_handle_que(void *context, int pend
 	struct adapter  *adapter = que->adapter;
 	struct tx_ring  *txr = que->txr;
 	struct ifnet    *ifp = adapter->ifp;
-	bool		more;
 
 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-		more = ixgbe_rxeof(que);
+		ixgbe_rxeof(que);
 		IXGBE_TX_LOCK(txr);
 		ixgbe_txeof(txr);
 #ifndef IXGBE_LEGACY_TX
@@ -1341,8 +1440,8 @@ ixgbe_legacy_irq(void *arg)
 	IXGBE_TX_UNLOCK(txr);
 
 	/* Check for fan failure */
-	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-	    (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
+	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
+	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
 		    "REPLACE IMMEDIATELY!!\n");
 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
@@ -1381,6 +1480,7 @@ ixgbe_msix_que(void *arg)
 	bool		more;
 	u32		newitr = 0;
 
+
 	/* Protect against spurious interrupts */
 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 		return;
@@ -1504,6 +1604,10 @@ ixgbe_msix_link(void *arg)
 			device_printf(adapter->dev, "System shutdown required!\n");
 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
 		}
+#ifdef PCI_IOV
+		if (reg_eicr & IXGBE_EICR_MAILBOX)
+			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
+#endif
 	}
 
 	/* Pluggable optics-related interrupt */
@@ -1569,7 +1673,7 @@ ixgbe_media_status(struct ifnet * ifp, s
 	}
 
 	ifmr->ifm_status |= IFM_ACTIVE;
-	layer = ixgbe_get_supported_physical_layer(hw);
+	layer = adapter->phy_layer;
 
 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
@@ -1802,18 +1906,17 @@ ixgbe_set_promisc(struct adapter *adapte
 static void
 ixgbe_set_multi(struct adapter *adapter)
 {
-	u32	fctrl;
-	u8	*mta;
-	u8	*update_ptr;
-	struct	ifmultiaddr *ifma;
-	int	mcnt = 0;
-	struct ifnet   *ifp = adapter->ifp;
+	u32			fctrl;
+	u8			*update_ptr;
+	struct ifmultiaddr	*ifma;
+	struct ixgbe_mc_addr	*mta;
+	int			mcnt = 0;
+	struct ifnet		*ifp = adapter->ifp;
 
 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
 
 	mta = adapter->mta;
-	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
-	    MAX_NUM_MULTICAST_ADDRESSES);
+	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
 
 #if __FreeBSD_version < 800000
 	IF_ADDR_LOCK(ifp);
@@ -1826,8 +1929,8 @@ ixgbe_set_multi(struct adapter *adapter)
 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
 			break;
 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
-		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
-		    IXGBE_ETH_LENGTH_OF_ADDRESS);
+		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+		mta[mcnt].vmdq = adapter->pool;
 		mcnt++;
 	}
 #if __FreeBSD_version < 800000
@@ -1850,7 +1953,7 @@ ixgbe_set_multi(struct adapter *adapter)
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
 
 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
-		update_ptr = mta;
+		update_ptr = (u8 *)mta;
 		ixgbe_update_mc_addr_list(&adapter->hw,
 		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
 	}
@@ -1866,13 +1969,13 @@ ixgbe_set_multi(struct adapter *adapter)
 static u8 *
 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
 {
-	u8 *addr = *update_ptr;
-	u8 *newptr;
-	*vmdq = 0;
-
-	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
-	*update_ptr = newptr;
-	return addr;
+	struct ixgbe_mc_addr *mta;
+
+	mta = (struct ixgbe_mc_addr *)*update_ptr;
+	*vmdq = mta->vmdq;
+
+	*update_ptr = (u8*)(mta + 1);
+	return (mta->addr);
 }
 
 
@@ -1954,6 +2057,7 @@ watchdog:
 	ixgbe_init_locked(adapter);
 }
 
+
 /*
 ** Note: this routine updates the OS on the link state
 **	the real check of the hardware only happens with
@@ -1977,6 +2081,9 @@ ixgbe_update_link_status(struct adapter 
 			/* Update DMA coalescing config */
 			ixgbe_config_dmac(adapter);
 			if_link_state_change(ifp, LINK_STATE_UP);
+#ifdef PCI_IOV
+			ixgbe_ping_all_vfs(adapter);
+#endif
 		}
 	} else { /* Link down */
 		if (adapter->link_active == TRUE) {
@@ -1984,6 +2091,9 @@ ixgbe_update_link_status(struct adapter 
 				device_printf(dev,"Link is Down\n");
 			if_link_state_change(ifp, LINK_STATE_DOWN);
 			adapter->link_active = FALSE;
+#ifdef PCI_IOV
+			ixgbe_ping_all_vfs(adapter);
+#endif
 		}
 	}
 
@@ -2083,7 +2193,7 @@ ixgbe_setup_optics(struct adapter *adapt
 	struct ixgbe_hw *hw = &adapter->hw;
 	int		layer;
 
-	layer = ixgbe_get_supported_physical_layer(hw);
+	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
 
 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
 		adapter->optics = IFM_10G_T;
@@ -2281,17 +2391,19 @@ ixgbe_allocate_msix(struct adapter *adap
 #endif
 		if (adapter->num_queues > 1)
 			bus_bind_intr(dev, que->res, cpu_id);
-
+#ifdef IXGBE_DEBUG
 #ifdef	RSS
 		device_printf(dev,
 		    "Bound RSS bucket %d to CPU %d\n",
 		    i, cpu_id);
 #else
-		if (bootverbose)
-			device_printf(dev,
-			    "Bound queue %d to cpu %d\n",
-			    i, cpu_id);
+		device_printf(dev,
+		    "Bound queue %d to cpu %d\n",
+		    i, cpu_id);
 #endif
+#endif /* IXGBE_DEBUG */
+
+
 #ifndef IXGBE_LEGACY_TX
 		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
 #endif
@@ -2337,6 +2449,9 @@ ixgbe_allocate_msix(struct adapter *adap
 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
+#ifdef PCI_IOV
+	TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
+#endif
 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
 #ifdef IXGBE_FDIR
 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
@@ -2630,7 +2745,7 @@ ixgbe_add_media_types(struct adapter *ad
 	device_t dev = adapter->dev;
 	int layer;
 
-	layer = ixgbe_get_supported_physical_layer(hw);
+	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
 
 	/* Media types with matching FreeBSD media defines */
 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
@@ -2741,19 +2856,20 @@ ixgbe_initialize_transmit_units(struct a
 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
 		u64	tdba = txr->txdma.dma_paddr;
 		u32	txctrl = 0;
+		int	j = txr->me;
 
-		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
 		       (tdba & 0x00000000ffffffffULL));
-		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
 
 		/* Setup the HW Tx Head and Tail descriptor pointers */
-		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
 
 		/* Cache the tail address */
-		txr->tail = IXGBE_TDT(txr->me);
+		txr->tail = IXGBE_TDT(j);
 
 		/* Set the processing limit */
 		txr->process_limit = ixgbe_tx_process_limit;
@@ -2761,23 +2877,23 @@ ixgbe_initialize_transmit_units(struct a
 		/* Disable Head Writeback */
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
-			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
 			break;
 		case ixgbe_mac_82599EB:
 		case ixgbe_mac_X540:
 		default:
-			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
 			break;
                 }
 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
-			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
 			break;
 		case ixgbe_mac_82599EB:
 		case ixgbe_mac_X540:
 		default:
-			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
 			break;
 		}
 
@@ -2785,6 +2901,9 @@ ixgbe_initialize_transmit_units(struct a
 
 	if (hw->mac.type != ixgbe_mac_82598EB) {
 		u32 dmatxctl, rttdcs;
+#ifdef PCI_IOV
+		enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
+#endif
 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
 		dmatxctl |= IXGBE_DMATXCTL_TE;
 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
@@ -2792,7 +2911,11 @@ ixgbe_initialize_transmit_units(struct a
 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+#ifdef PCI_IOV
+		IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
+#else
 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+#endif
 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 	}
@@ -2804,17 +2927,14 @@ static void
 ixgbe_initialise_rss_mapping(struct adapter *adapter)
 {
 	struct ixgbe_hw	*hw = &adapter->hw;
-	uint32_t reta;
-	int i, j, queue_id, table_size;
-	int index_mult;
-	uint32_t rss_key[10];
-	uint32_t mrqc;
+	u32 reta = 0, mrqc, rss_key[10];
+	int queue_id, table_size, index_mult;
 #ifdef	RSS
-	uint32_t rss_hash_config;
+	u32 rss_hash_config;
+#endif
+#ifdef PCI_IOV
+	enum ixgbe_iov_mode mode;
 #endif
-
-	/* Setup RSS */
-	reta = 0;
 
 #ifdef	RSS
 	/* Fetch the configured RSS key */
@@ -2840,7 +2960,7 @@ ixgbe_initialise_rss_mapping(struct adap
 	}
 
 	/* Set up the redirection table */
-	for (i = 0, j = 0; i < table_size; i++, j++) {
+	for (int i = 0, j = 0; i < table_size; i++, j++) {
 		if (j == adapter->num_queues) j = 0;
 #ifdef	RSS
 		/*
@@ -2907,19 +3027,16 @@ ixgbe_initialise_rss_mapping(struct adap
 	mrqc = IXGBE_MRQC_RSSEN
 	     | IXGBE_MRQC_RSS_FIELD_IPV4
 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
-#if 0
-	     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
-#endif
 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
 	     | IXGBE_MRQC_RSS_FIELD_IPV6
 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
-#if 0
-	     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
-	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
-#endif
 	;
 #endif /* RSS */
+#ifdef PCI_IOV
+	mode = ixgbe_get_iov_mode(adapter);
+	mrqc |= ixgbe_get_mrqc(mode);
+#endif
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 }
 
@@ -2978,16 +3095,17 @@ ixgbe_initialize_receive_units(struct ad
 
 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
 		u64 rdba = rxr->rxdma.dma_paddr;
+		int j = rxr->me;
 
 		/* Setup the Base and Length of the Rx Descriptor Ring */
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
 			       (rdba & 0x00000000ffffffffULL));
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
 
 		/* Set up the SRRCTL register */
-		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
 		srrctl |= bufsz;
@@ -3006,11 +3124,11 @@ ixgbe_initialize_receive_units(struct ad
 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
 		}
 
-		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
 
 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
-		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
 
 		/* Set the processing limit */
 		rxr->process_limit = ixgbe_rx_process_limit;
@@ -3126,9 +3244,9 @@ ixgbe_setup_vlan_hw_support(struct adapt
 		rxr = &adapter->rx_rings[i];
 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
 		if (hw->mac.type != ixgbe_mac_82598EB) {
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
 			ctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
 		}
 		rxr->vtag_strip = TRUE;
 	}
@@ -3178,6 +3296,9 @@ ixgbe_enable_intr(struct adapter *adapte
 #ifdef IXGBE_FDIR
 			mask |= IXGBE_EIMS_FLOW_DIR;
 #endif
+#ifdef PCI_IOV
+			mask |= IXGBE_EIMS_MAILBOX;
+#endif
 			break;
 		case ixgbe_mac_X540:
 			/* Detect if Thermal Sensor is enabled */
@@ -3201,6 +3322,9 @@ ixgbe_enable_intr(struct adapter *adapte
 #ifdef IXGBE_FDIR
 			mask |= IXGBE_EIMS_FLOW_DIR;
 #endif
+#ifdef PCI_IOV
+			mask |= IXGBE_EIMS_MAILBOX;
+#endif
 		/* falls through */
 		default:
 			break;
@@ -3214,6 +3338,9 @@ ixgbe_enable_intr(struct adapter *adapte
 		/* Don't autoclear Link */
 		mask &= ~IXGBE_EIMS_OTHER;
 		mask &= ~IXGBE_EIMS_LSC;
+#ifdef PCI_IOV
+		mask &= ~IXGBE_EIMS_MAILBOX;
+#endif
 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
 	}
 
@@ -3412,8 +3539,8 @@ ixgbe_set_ivar(struct adapter *adapter, 
 static void
 ixgbe_configure_ivars(struct adapter *adapter)
 {
-	struct  ix_queue *que = adapter->queues;
-	u32 newitr;
+	struct  ix_queue	*que = adapter->queues;
+	u32			newitr;
 
 	if (ixgbe_max_interrupt_rate > 0)
 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
@@ -3427,10 +3554,12 @@ ixgbe_configure_ivars(struct adapter *ad
 	}
 
         for (int i = 0; i < adapter->num_queues; i++, que++) {
+		struct rx_ring *rxr = &adapter->rx_rings[i];
+		struct tx_ring *txr = &adapter->tx_rings[i];
 		/* First the RX queue entry */
-                ixgbe_set_ivar(adapter, i, que->msix, 0);
+                ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
 		/* ... and the TX */
-		ixgbe_set_ivar(adapter, i, que->msix, 1);
+		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
 		/* Set an Initial EITR value */
                 IXGBE_WRITE_REG(&adapter->hw,
                     IXGBE_EITR(que->msix), newitr);
@@ -3444,7 +3573,8 @@ ixgbe_configure_ivars(struct adapter *ad
 ** ixgbe_sfp_probe - called in the local timer to
 ** determine if a port had optics inserted.
 */  
-static bool ixgbe_sfp_probe(struct adapter *adapter)
+static bool
+ixgbe_sfp_probe(struct adapter *adapter)
 {
 	struct ixgbe_hw	*hw = &adapter->hw;
 	device_t	dev = adapter->dev;
@@ -3504,6 +3634,7 @@ ixgbe_handle_mod(void *context, int pend
 		    "Unsupported SFP+ module type was detected.\n");
 		return;
 	}
+
 	err = hw->mac.ops.setup_sfp(hw);
 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 		device_printf(dev,
@@ -3626,9 +3757,7 @@ ixgbe_check_eee_support(struct adapter *
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	adapter->eee_support = adapter->eee_enabled =
-	    (hw->device_id == IXGBE_DEV_ID_X550T ||
-	        hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
+	adapter->eee_enabled = !!(hw->mac.ops.setup_eee);
 }
 
 /*
@@ -4004,8 +4133,7 @@ ixgbe_add_device_sysctls(struct adapter 
 				ixgbe_sysctl_dmac, "I", "DMA Coalesce");
 
 	/* for X550T and X550EM backplane devices */
-	if (hw->device_id == IXGBE_DEV_ID_X550T ||
-	    hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
+	if (hw->mac.ops.setup_eee) {
 		struct sysctl_oid *eee_node;
 		struct sysctl_oid_list *eee_list;
 
@@ -4625,6 +4753,7 @@ static int
 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
 {
 	struct adapter *adapter = (struct adapter *) arg1;
+	struct ixgbe_hw *hw = &adapter->hw;
 	struct ifnet *ifp = adapter->ifp;
 	int new_eee_enabled, error = 0;
 
@@ -4635,7 +4764,7 @@ ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_A
 	if (new_eee_enabled == adapter->eee_enabled)
 		return (0);
 
-	if (new_eee_enabled > 0 && !adapter->eee_support)
+	if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
 		return (ENODEV);
 	else
 		adapter->eee_enabled = !!(new_eee_enabled);
@@ -4751,10 +4880,19 @@ ixgbe_enable_rx_drop(struct adapter *ada
         struct ixgbe_hw *hw = &adapter->hw;
 
 	for (int i = 0; i < adapter->num_queues; i++) {
-        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+		struct rx_ring *rxr = &adapter->rx_rings[i];
+        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
         	srrctl |= IXGBE_SRRCTL_DROP_EN;
-        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
+	}
+#ifdef PCI_IOV
+	/* enable drop for each vf */
+	for (int i = 0; i < adapter->num_vfs; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_QDE,
+		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
+		    IXGBE_QDE_ENABLE));
 	}
+#endif
 }
 
 static void
@@ -4763,10 +4901,18 @@ ixgbe_disable_rx_drop(struct adapter *ad
         struct ixgbe_hw *hw = &adapter->hw;
 
 	for (int i = 0; i < adapter->num_queues; i++) {
-        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+		struct rx_ring *rxr = &adapter->rx_rings[i];
+        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
         	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
-        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
+	}
+#ifdef PCI_IOV
+	/* disable drop for each vf */
+	for (int i = 0; i < adapter->num_vfs; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_QDE,
+		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
 	}
+#endif
 }
 
 static void
@@ -4793,4 +4939,722 @@ ixgbe_rearm_queues(struct adapter *adapt
 	}
 }
 
+#ifdef PCI_IOV
+
+/*
+** Support functions for SRIOV/VF management
+*/
+
+static void
+ixgbe_ping_all_vfs(struct adapter *adapter)
+{
+	struct ixgbe_vf *vf;
+
+	for (int i = 0; i < adapter->num_vfs; i++) {
+		vf = &adapter->vfs[i];
+		if (vf->flags & IXGBE_VF_ACTIVE)
+			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
+	}
+}
+
+
+static void
+ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
+    uint16_t tag)
+{
+	struct ixgbe_hw *hw;
+	uint32_t vmolr, vmvir;
+
+	hw = &adapter->hw;
+
+	vf->vlan_tag = tag;
+	
+	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
+
+	/* Do not receive packets that pass inexact filters. */
+	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
+
+	/* Disable Multicast Promiscuous Mode. */
+	vmolr &= ~IXGBE_VMOLR_MPE;
+
+	/* Accept broadcasts. */
+	vmolr |= IXGBE_VMOLR_BAM;
+
+	if (tag == 0) {
+		/* Accept non-vlan tagged traffic. */
+		//vmolr |= IXGBE_VMOLR_AUPE;
+
+		/* Allow VM to tag outgoing traffic; no default tag. */
+		vmvir = 0;
+	} else {
+		/* Require vlan-tagged traffic. */
+		vmolr &= ~IXGBE_VMOLR_AUPE;
+
+		/* Tag all traffic with provided vlan tag. */
+		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
+}
+
+
+static boolean_t
+ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
+{
+
+	/*
+	 * Frame size compatibility between PF and VF is only a problem on
+	 * 82599-based cards.  X540 and later support any combination of jumbo
+	 * frames on PFs and VFs.
+	 */
+	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
+		return (TRUE);
+
+	switch (vf->api_ver) {
+	case IXGBE_API_VER_1_0:
+	case IXGBE_API_VER_UNKNOWN:
+		/*
+		 * On legacy (1.0 and older) VF versions, we don't support jumbo
+		 * frames on either the PF or the VF.
+		 */
+		if (adapter->max_frame_size > ETHER_MAX_LEN ||
+		    vf->max_frame_size > ETHER_MAX_LEN)
+		    return (FALSE);
+

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


