Date:      Wed, 4 May 2016 20:06:20 +0000 (UTC)
From:      Jared McNeill <jmcneill@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r299084 - head/sys/arm/allwinner
Message-ID:  <201605042006.u44K6K7l008072@repo.freebsd.org>

Author: jmcneill
Date: Wed May  4 20:06:20 2016
New Revision: 299084
URL: https://svnweb.freebsd.org/changeset/base/299084

Log:
  Add driver for Allwinner A83T/H3/A64 Gigabit Ethernet.
  
  The datasheets refer to this controller as EMAC; it should not be confused
  with the Fast Ethernet controller (also named EMAC) found in the A10/A20
  SoCs.
  
  Tested on a BananaPi M3 (A83T), which uses an external RGMII PHY (RTL8211E).
  
  Reviewed by:		adrian
  Differential Revision:	https://reviews.freebsd.org/D6169
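
A brief usage sketch, assuming the usual config(8) and loader.conf(5)
conventions rather than anything stated in the commit itself: the driver is
hooked up as "optional awg" in files.allwinner below, so a kernel
configuration would pull it in with a device line, and the device attaches
against the "allwinner,sun8i-a83t-emac" FDT compatible string.  The
TUNABLE_INT knobs declared in if_awg.c can be set from the loader; the
values shown are the driver defaults.

	device awg			# Allwinner Gigabit Ethernet (EMAC)

	# /boot/loader.conf
	hw.awg.burst_len=8		# RX/TX DMA burst length
	hw.awg.tx_interval=64		# request a TX interrupt every N descriptors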

Added:
  head/sys/arm/allwinner/if_awg.c   (contents, props changed)
  head/sys/arm/allwinner/if_awgreg.h   (contents, props changed)
Modified:
  head/sys/arm/allwinner/files.allwinner

Modified: head/sys/arm/allwinner/files.allwinner
==============================================================================
--- head/sys/arm/allwinner/files.allwinner	Wed May  4 18:08:38 2016	(r299083)
+++ head/sys/arm/allwinner/files.allwinner	Wed May  4 20:06:20 2016	(r299084)
@@ -18,6 +18,7 @@ arm/allwinner/a20/a20_cpu_cfg.c 	standar
 arm/allwinner/allwinner_machdep.c	standard
 arm/allwinner/aw_mp.c			optional	smp
 arm/allwinner/axp209.c			optional	axp209
+arm/allwinner/if_awg.c			optional	awg
 arm/allwinner/if_emac.c			optional	emac
 arm/allwinner/sunxi_dma_if.m		standard
 dev/iicbus/twsi/a10_twsi.c		optional	twsi

Added: head/sys/arm/allwinner/if_awg.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/arm/allwinner/if_awg.c	Wed May  4 20:06:20 2016	(r299084)
@@ -0,0 +1,1418 @@
+/*-
+ * Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Allwinner Gigabit Ethernet MAC (EMAC) controller
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/module.h>
+#include <sys/taskqueue.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <arm/allwinner/if_awgreg.h>
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/hwreset/hwreset.h>
+#include <dev/extres/regulator/regulator.h>
+
+#include "miibus_if.h"
+
+#define	RD4(sc, reg)		bus_read_4((sc)->res[0], (reg))
+#define	WR4(sc, reg, val)	bus_write_4((sc)->res[0], (reg), (val))
+
+#define	AWG_LOCK(sc)		mtx_lock(&(sc)->mtx)
+#define	AWG_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
+#define	AWG_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
+#define	AWG_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)
+
+#define	DESC_ALIGN		4
+#define	TX_DESC_COUNT		256
+#define	TX_DESC_SIZE		(sizeof(struct emac_desc) * TX_DESC_COUNT)
+#define	RX_DESC_COUNT		256
+#define	RX_DESC_SIZE		(sizeof(struct emac_desc) * RX_DESC_COUNT)
+
+#define	DESC_OFF(n)		((n) * sizeof(struct emac_desc))
+#define	TX_NEXT(n)		(((n) + 1) & (TX_DESC_COUNT - 1))
+#define	TX_SKIP(n, o)		(((n) + (o)) & (TX_DESC_COUNT - 1))
+#define	RX_NEXT(n)		(((n) + 1) & (RX_DESC_COUNT - 1))
+
+#define	TX_MAX_SEGS		10
+
+#define	SOFT_RST_RETRY		1000
+#define	MII_BUSY_RETRY		1000
+#define	MDIO_FREQ		2500000
+
+#define	BURST_LEN_DEFAULT	8
+#define	RX_TX_PRI_DEFAULT	0
+#define	PAUSE_TIME_DEFAULT	0x400
+#define	TX_INTERVAL_DEFAULT	64
+
+/* Burst length of RX and TX DMA transfers */
+static int awg_burst_len = BURST_LEN_DEFAULT;
+TUNABLE_INT("hw.awg.burst_len", &awg_burst_len);
+
+/* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. */
+static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT;
+TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri);
+
+/* Pause time field in the transmitted control frame */
+static int awg_pause_time = PAUSE_TIME_DEFAULT;
+TUNABLE_INT("hw.awg.pause_time", &awg_pause_time);
+
+/* Request a TX interrupt every <n> descriptors */
+static int awg_tx_interval = TX_INTERVAL_DEFAULT;
+TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval);
+
+static struct ofw_compat_data compat_data[] = {
+	{ "allwinner,sun8i-a83t-emac",		1 },
+	{ NULL,					0 }
+};
+
+struct awg_bufmap {
+	bus_dmamap_t		map;
+	struct mbuf		*mbuf;
+};
+
+struct awg_txring {
+	bus_dma_tag_t		desc_tag;
+	bus_dmamap_t		desc_map;
+	struct emac_desc	*desc_ring;
+	bus_addr_t		desc_ring_paddr;
+	bus_dma_tag_t		buf_tag;
+	struct awg_bufmap	buf_map[TX_DESC_COUNT];
+	u_int			cur, next, queued;
+};
+
+struct awg_rxring {
+	bus_dma_tag_t		desc_tag;
+	bus_dmamap_t		desc_map;
+	struct emac_desc	*desc_ring;
+	bus_addr_t		desc_ring_paddr;
+	bus_dma_tag_t		buf_tag;
+	struct awg_bufmap	buf_map[RX_DESC_COUNT];
+	u_int			cur;
+};
+
+struct awg_softc {
+	struct resource		*res[2];
+	struct mtx		mtx;
+	if_t			ifp;
+	device_t		miibus;
+	struct callout		stat_ch;
+	struct task		link_task;
+	void			*ih;
+	u_int			mdc_div_ratio_m;
+	int			link;
+	int			if_flags;
+
+	struct awg_txring	tx;
+	struct awg_rxring	rx;
+};
+
+static struct resource_spec awg_spec[] = {
+	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
+	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
+	{ -1, 0 }
+};
+
+static int
+awg_miibus_readreg(device_t dev, int phy, int reg)
+{
+	struct awg_softc *sc;
+	int retry, val;
+
+	sc = device_get_softc(dev);
+	val = 0;
+
+	WR4(sc, EMAC_MII_CMD,
+	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
+	    (phy << PHY_ADDR_SHIFT) |
+	    (reg << PHY_REG_ADDR_SHIFT) |
+	    MII_BUSY);
+	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
+		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) {
+			val = RD4(sc, EMAC_MII_DATA);
+			break;
+		}
+		DELAY(10);
+	}
+
+	if (retry == 0)
+		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
+		    phy, reg);
+
+	return (val);
+}
+
+static int
+awg_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+	struct awg_softc *sc;
+	int retry;
+
+	sc = device_get_softc(dev);
+
+	WR4(sc, EMAC_MII_DATA, val);
+	WR4(sc, EMAC_MII_CMD,
+	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
+	    (phy << PHY_ADDR_SHIFT) |
+	    (reg << PHY_REG_ADDR_SHIFT) |
+	    MII_WR | MII_BUSY);
+	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
+		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0)
+			break;
+		DELAY(10);
+	}
+
+	if (retry == 0)
+		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
+		    phy, reg);
+
+	return (0);
+}
+
+static void
+awg_update_link_locked(struct awg_softc *sc)
+{
+	struct mii_data *mii;
+	uint32_t val;
+
+	AWG_ASSERT_LOCKED(sc);
+
+	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
+		return;
+	mii = device_get_softc(sc->miibus);
+
+	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
+	    (IFM_ACTIVE | IFM_AVALID)) {
+		switch (IFM_SUBTYPE(mii->mii_media_active)) {
+		case IFM_1000_T:
+		case IFM_1000_SX:
+		case IFM_100_TX:
+		case IFM_10_T:
+			sc->link = 1;
+			break;
+		default:
+			sc->link = 0;
+			break;
+		}
+	} else
+		sc->link = 0;
+
+	if (sc->link == 0)
+		return;
+
+	val = RD4(sc, EMAC_BASIC_CTL_0);
+	val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX);
+
+	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
+	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
+		val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT;
+	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
+		val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT;
+	else
+		val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT;
+
+	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
+		val |= BASIC_CTL_DUPLEX;
+
+	WR4(sc, EMAC_BASIC_CTL_0, val);
+
+	val = RD4(sc, EMAC_RX_CTL_0);
+	val &= ~RX_FLOW_CTL_EN;
+	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
+		val |= RX_FLOW_CTL_EN;
+	WR4(sc, EMAC_RX_CTL_0, val);
+
+	val = RD4(sc, EMAC_TX_FLOW_CTL);
+	val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN);
+	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
+		val |= TX_FLOW_CTL_EN;
+	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
+		val |= awg_pause_time << PAUSE_TIME_SHIFT;
+	WR4(sc, EMAC_TX_FLOW_CTL, val);
+}
+
+static void
+awg_link_task(void *arg, int pending)
+{
+	struct awg_softc *sc;
+
+	sc = arg;
+
+	AWG_LOCK(sc);
+	awg_update_link_locked(sc);
+	AWG_UNLOCK(sc);
+}
+
+static void
+awg_miibus_statchg(device_t dev)
+{
+	struct awg_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
+}
+
+static void
+awg_media_status(if_t ifp, struct ifmediareq *ifmr)
+{
+	struct awg_softc *sc;
+	struct mii_data *mii;
+
+	sc = if_getsoftc(ifp);
+	mii = device_get_softc(sc->miibus);
+
+	AWG_LOCK(sc);
+	mii_pollstat(mii);
+	ifmr->ifm_active = mii->mii_media_active;
+	ifmr->ifm_status = mii->mii_media_status;
+	AWG_UNLOCK(sc);
+}
+
+static int
+awg_media_change(if_t ifp)
+{
+	struct awg_softc *sc;
+	struct mii_data *mii;
+	int error;
+
+	sc = if_getsoftc(ifp);
+	mii = device_get_softc(sc->miibus);
+
+	AWG_LOCK(sc);
+	error = mii_mediachg(mii);
+	AWG_UNLOCK(sc);
+
+	return (error);
+}
+
+static void
+awg_setup_txdesc(struct awg_softc *sc, int index, int flags, bus_addr_t paddr,
+    u_int len)
+{
+	uint32_t status, size;
+
+	if (paddr == 0 || len == 0) {
+		status = 0;
+		size = 0;
+		--sc->tx.queued;
+	} else {
+		status = TX_DESC_CTL;
+		size = flags | len;
+		if ((index & (awg_tx_interval - 1)) == 0)
+			size |= TX_INT_CTL;
+		++sc->tx.queued;
+	}
+
+	sc->tx.desc_ring[index].addr = htole32((uint32_t)paddr);
+	sc->tx.desc_ring[index].size = htole32(size);
+	sc->tx.desc_ring[index].status = htole32(status);
+}
+
+static int
+awg_setup_txbuf(struct awg_softc *sc, int index, struct mbuf **mp)
+{
+	bus_dma_segment_t segs[TX_MAX_SEGS];
+	int error, nsegs, cur, i, flags;
+	u_int csum_flags;
+	struct mbuf *m;
+
+	m = *mp;
+	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
+	    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
+	if (error == EFBIG) {
+		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
+		if (m == NULL)
+			return (0);
+		*mp = m;
+		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
+		    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
+	}
+	if (error != 0)
+		return (0);
+
+	bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[index].map,
+	    BUS_DMASYNC_PREWRITE);
+
+	flags = TX_FIR_DESC;
+	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
+		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
+			csum_flags = TX_CHECKSUM_CTL_FULL;
+		else
+			csum_flags = TX_CHECKSUM_CTL_IP;
+		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
+	}
+
+	for (cur = index, i = 0; i < nsegs; i++) {
+		sc->tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
+		if (i == nsegs - 1)
+			flags |= TX_LAST_DESC;
+		awg_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
+		    segs[i].ds_len);
+		flags &= ~TX_FIR_DESC;
+		cur = TX_NEXT(cur);
+	}
+
+	return (nsegs);
+}
+
+static void
+awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr)
+{
+	uint32_t status, size;
+
+	status = RX_DESC_CTL;
+	size = MCLBYTES - 1;
+
+	sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr);
+	sc->rx.desc_ring[index].size = htole32(size);
+	sc->rx.desc_ring[index].next =
+	    htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(index)));
+	sc->rx.desc_ring[index].status = htole32(status);
+}
+
+static int
+awg_setup_rxbuf(struct awg_softc *sc, int index, struct mbuf *m)
+{
+	bus_dma_segment_t seg;
+	int error, nsegs;
+
+	m_adj(m, ETHER_ALIGN);
+
+	error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
+	    sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
+	if (error != 0)
+		return (error);
+
+	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
+	    BUS_DMASYNC_PREREAD);
+
+	sc->rx.buf_map[index].mbuf = m;
+	awg_setup_rxdesc(sc, index, seg.ds_addr);
+
+	return (0);
+}
+
+static struct mbuf *
+awg_alloc_mbufcl(struct awg_softc *sc)
+{
+	struct mbuf *m;
+
+	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+	if (m != NULL)
+		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
+
+	return (m);
+}
+
+static void
+awg_start_locked(struct awg_softc *sc)
+{
+	struct mbuf *m;
+	uint32_t val;
+	if_t ifp;
+	int cnt, nsegs;
+
+	AWG_ASSERT_LOCKED(sc);
+
+	if (!sc->link)
+		return;
+
+	ifp = sc->ifp;
+
+	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
+	    IFF_DRV_RUNNING)
+		return;
+
+	for (cnt = 0; ; cnt++) {
+		if (sc->tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
+			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
+			break;
+		}
+
+		m = if_dequeue(ifp);
+		if (m == NULL)
+			break;
+
+		nsegs = awg_setup_txbuf(sc, sc->tx.cur, &m);
+		if (nsegs == 0) {
+			if_sendq_prepend(ifp, m);
+			break;
+		}
+		if_bpfmtap(ifp, m);
+		sc->tx.cur = TX_SKIP(sc->tx.cur, nsegs);
+	}
+
+	if (cnt != 0) {
+		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
+		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+		/* Start and run TX DMA */
+		val = RD4(sc, EMAC_TX_CTL_1);
+		WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
+	}
+}
+
+static void
+awg_start(if_t ifp)
+{
+	struct awg_softc *sc;
+
+	sc = if_getsoftc(ifp);
+
+	AWG_LOCK(sc);
+	awg_start_locked(sc);
+	AWG_UNLOCK(sc);
+}
+
+static void
+awg_tick(void *softc)
+{
+	struct awg_softc *sc;
+	struct mii_data *mii;
+	if_t ifp;
+	int link;
+
+	sc = softc;
+	ifp = sc->ifp;
+	mii = device_get_softc(sc->miibus);
+
+	AWG_ASSERT_LOCKED(sc);
+
+	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
+		return;
+
+	link = sc->link;
+	mii_tick(mii);
+	if (sc->link && !link)
+		awg_start_locked(sc);
+
+	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
+}
+
+/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
+static uint32_t
+bitrev32(uint32_t x)
+{
+	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
+	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
+	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
+	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
+
+	return (x >> 16) | (x << 16);
+}
+
+static void
+awg_setup_rxfilter(struct awg_softc *sc)
+{
+	uint32_t val, crc, hashreg, hashbit, hash[2], machi, maclo;
+	int mc_count, mcnt, i;
+	uint8_t *eaddr, *mta;
+	if_t ifp;
+
+	AWG_ASSERT_LOCKED(sc);
+
+	ifp = sc->ifp;
+	val = 0;
+	hash[0] = hash[1] = 0;
+
+	mc_count = if_multiaddr_count(ifp, -1);
+
+	if (if_getflags(ifp) & IFF_PROMISC)
+		val |= DIS_ADDR_FILTER;
+	else if (if_getflags(ifp) & IFF_ALLMULTI) {
+		val |= RX_ALL_MULTICAST;
+		hash[0] = hash[1] = ~0;
+	} else if (mc_count > 0) {
+		val |= HASH_MULTICAST;
+
+		mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count,
+		    M_DEVBUF, M_NOWAIT);
+		if (mta == NULL) {
+			if_printf(ifp,
+			    "failed to allocate temporary multicast list\n");
+			return;
+		}
+
+		if_multiaddr_array(ifp, mta, &mcnt, mc_count);
+		for (i = 0; i < mcnt; i++) {
+			crc = ether_crc32_le(mta + (i * ETHER_ADDR_LEN),
+			    ETHER_ADDR_LEN) & 0x7f;
+			crc = bitrev32(~crc) >> 26;
+			hashreg = (crc >> 5);
+			hashbit = (crc & 0x1f);
+			hash[hashreg] |= (1 << hashbit);
+		}
+
+		free(mta, M_DEVBUF);
+	}
+
+	/* Write our unicast address */
+	eaddr = IF_LLADDR(ifp);
+	machi = (eaddr[5] << 8) | eaddr[4];
+	maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) |
+	   (eaddr[0] << 0);
+	WR4(sc, EMAC_ADDR_HIGH(0), machi);
+	WR4(sc, EMAC_ADDR_LOW(0), maclo);
+
+	/* Multicast hash filters */
+	WR4(sc, EMAC_RX_HASH_0, hash[1]);
+	WR4(sc, EMAC_RX_HASH_1, hash[0]);
+
+	/* RX frame filter config */
+	WR4(sc, EMAC_RX_FRM_FLT, val);
+}
+
+static void
+awg_init_locked(struct awg_softc *sc)
+{
+	struct mii_data *mii;
+	uint32_t val;
+	if_t ifp;
+
+	mii = device_get_softc(sc->miibus);
+	ifp = sc->ifp;
+
+	AWG_ASSERT_LOCKED(sc);
+
+	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
+		return;
+
+	awg_setup_rxfilter(sc);
+
+	/* Configure DMA burst length and priorities */
+	val = awg_burst_len << BASIC_CTL_BURST_LEN_SHIFT;
+	if (awg_rx_tx_pri)
+		val |= BASIC_CTL_RX_TX_PRI;
+	WR4(sc, EMAC_BASIC_CTL_1, val);
+
+	/* Enable interrupts */
+	WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN);
+
+	/* Enable transmit DMA */
+	val = RD4(sc, EMAC_TX_CTL_1);
+	WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD);
+
+	/* Enable receive DMA */
+	val = RD4(sc, EMAC_RX_CTL_1);
+	WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD);
+
+	/* Enable transmitter */
+	val = RD4(sc, EMAC_TX_CTL_0);
+	WR4(sc, EMAC_TX_CTL_0, val | TX_EN);
+
+	/* Enable receiver */
+	val = RD4(sc, EMAC_RX_CTL_0);
+	WR4(sc, EMAC_RX_CTL_0, val | RX_EN | CHECK_CRC);
+
+	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
+
+	mii_mediachg(mii);
+	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
+}
+
+static void
+awg_init(void *softc)
+{
+	struct awg_softc *sc;
+
+	sc = softc;
+
+	AWG_LOCK(sc);
+	awg_init_locked(sc);
+	AWG_UNLOCK(sc);
+}
+
+static void
+awg_stop(struct awg_softc *sc)
+{
+	if_t ifp;
+	uint32_t val;
+
+	AWG_ASSERT_LOCKED(sc);
+
+	ifp = sc->ifp;
+
+	callout_stop(&sc->stat_ch);
+
+	/* Stop transmit DMA and flush data in the TX FIFO */
+	val = RD4(sc, EMAC_TX_CTL_1);
+	val &= ~TX_DMA_EN;
+	val |= FLUSH_TX_FIFO;
+	WR4(sc, EMAC_TX_CTL_1, val);
+
+	/* Disable transmitter */
+	val = RD4(sc, EMAC_TX_CTL_0);
+	WR4(sc, EMAC_TX_CTL_0, val & ~TX_EN);
+
+	/* Disable receiver */
+	val = RD4(sc, EMAC_RX_CTL_0);
+	WR4(sc, EMAC_RX_CTL_0, val & ~RX_EN);
+
+	/* Disable interrupts */
+	WR4(sc, EMAC_INT_EN, 0);
+
+	/* Disable transmit DMA */
+	val = RD4(sc, EMAC_TX_CTL_1);
+	WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN);
+
+	/* Disable receive DMA */
+	val = RD4(sc, EMAC_RX_CTL_1);
+	WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN);
+
+	sc->link = 0;
+
+	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+}
+
+static void
+awg_rxintr(struct awg_softc *sc)
+{
+	if_t ifp;
+	struct mbuf *m, *m0;
+	int error, index, len;
+	uint32_t status;
+
+	ifp = sc->ifp;
+
+	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
+	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+	for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
+		status = le32toh(sc->rx.desc_ring[index].status);
+		if ((status & RX_DESC_CTL) != 0)
+			break;
+
+		bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
+		    BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);
+
+		len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT;
+		if (len != 0) {
+			m = sc->rx.buf_map[index].mbuf;
+			m->m_pkthdr.rcvif = ifp;
+			m->m_pkthdr.len = len;
+			m->m_len = len;
+			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+
+			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
+			    (status & RX_FRM_TYPE) != 0) {
+				m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+				if ((status & RX_HEADER_ERR) == 0)
+					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+				if ((status & RX_PAYLOAD_ERR) == 0) {
+					m->m_pkthdr.csum_flags |=
+					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+					m->m_pkthdr.csum_data = 0xffff;
+				}
+			}
+
+			AWG_UNLOCK(sc);
+			if_input(ifp, m);
+			AWG_LOCK(sc);
+		}
+
+		if ((m0 = awg_alloc_mbufcl(sc)) != NULL) {
+			error = awg_setup_rxbuf(sc, index, m0);
+			if (error != 0) {
+				/* XXX hole in RX ring */
+			}
+		} else
+			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+	}
+
+	if (index != sc->rx.cur) {
+		bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
+		    BUS_DMASYNC_PREWRITE);
+	}
+
+	sc->rx.cur = index;
+}
+
+static void
+awg_txintr(struct awg_softc *sc)
+{
+	struct awg_bufmap *bmap;
+	struct emac_desc *desc;
+	uint32_t status;
+	if_t ifp;
+	int i;
+
+	AWG_ASSERT_LOCKED(sc);
+
+	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
+	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+	ifp = sc->ifp;
+	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
+		desc = &sc->tx.desc_ring[i];
+		status = le32toh(desc->status);
+		if ((status & TX_DESC_CTL) != 0)
+			break;
+		bmap = &sc->tx.buf_map[i];
+		if (bmap->mbuf != NULL) {
+			bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
+			    BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
+			m_freem(bmap->mbuf);
+			bmap->mbuf = NULL;
+		}
+		awg_setup_txdesc(sc, i, 0, 0, 0);
+		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
+		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+	}
+
+	sc->tx.next = i;
+
+	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
+	    BUS_DMASYNC_PREWRITE);
+}
+
+static void
+awg_intr(void *arg)
+{
+	struct awg_softc *sc;
+	uint32_t val;
+
+	sc = arg;
+
+	AWG_LOCK(sc);
+	val = RD4(sc, EMAC_INT_STA);
+	WR4(sc, EMAC_INT_STA, val);
+
+	if (val & RX_INT)
+		awg_rxintr(sc);
+
+	if (val & (TX_INT|TX_BUF_UA_INT)) {
+		awg_txintr(sc);
+		if (!if_sendq_empty(sc->ifp))
+			awg_start_locked(sc);
+	}
+
+	AWG_UNLOCK(sc);
+}
+
+static int
+awg_ioctl(if_t ifp, u_long cmd, caddr_t data)
+{
+	struct awg_softc *sc;
+	struct mii_data *mii;
+	struct ifreq *ifr;
+	int flags, mask, error;
+
+	sc = if_getsoftc(ifp);
+	mii = device_get_softc(sc->miibus);
+	ifr = (struct ifreq *)data;
+	error = 0;
+
+	switch (cmd) {
+	case SIOCSIFFLAGS:
+		AWG_LOCK(sc);
+		if (if_getflags(ifp) & IFF_UP) {
+			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
+				flags = if_getflags(ifp) ^ sc->if_flags;
+				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
+					awg_setup_rxfilter(sc);
+			} else
+				awg_init_locked(sc);
+		} else {
+			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
+				awg_stop(sc);
+		}
+		sc->if_flags = if_getflags(ifp);
+		AWG_UNLOCK(sc);
+		break;
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
+			AWG_LOCK(sc);
+			awg_setup_rxfilter(sc);
+			AWG_UNLOCK(sc);
+		}
+		break;
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
+		break;
+	case SIOCSIFCAP:
+		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
+		if (mask & IFCAP_VLAN_MTU)
+			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
+		if (mask & IFCAP_RXCSUM)
+			if_togglecapenable(ifp, IFCAP_RXCSUM);
+		if (mask & IFCAP_TXCSUM)
+			if_togglecapenable(ifp, IFCAP_TXCSUM);
+		if ((if_getcapenable(ifp) & (IFCAP_RXCSUM|IFCAP_TXCSUM)) != 0)
+			if_sethwassistbits(ifp, CSUM_IP, 0);
+		else
+			if_sethwassistbits(ifp, 0, CSUM_IP);
+		break;
+	default:
+		error = ether_ioctl(ifp, cmd, data);
+		break;
+	}
+
+	return (error);
+}
+
+static int
+awg_setup_extres(device_t dev)
+{
+	struct awg_softc *sc;
+	hwreset_t rst_ahb;
+	clk_t clk_ahb, clk_tx, clk_tx_parent;
+	regulator_t reg;
+	const char *tx_parent_name;
+	char *phy_type;
+	phandle_t node;
+	uint64_t freq;
+	int error, div;
+
+	sc = device_get_softc(dev);
+	node = ofw_bus_get_node(dev);
+	rst_ahb = NULL;
+	clk_ahb = NULL;
+	clk_tx = NULL;
+	clk_tx_parent = NULL;
+	reg = NULL;
+	phy_type = NULL;
+
+	/* Get AHB clock and reset resources */
+	error = hwreset_get_by_ofw_name(dev, "ahb", &rst_ahb);
+	if (error != 0) {
+		device_printf(dev, "cannot get ahb reset\n");
+		goto fail;
+	}
+	error = clk_get_by_ofw_name(dev, "ahb", &clk_ahb);
+	if (error != 0) {
+		device_printf(dev, "cannot get ahb clock\n");
+		goto fail;
+	}
+
+	/* Configure PHY for MII or RGMII mode */
+	if (OF_getprop_alloc(node, "phy-mode", 1, (void **)&phy_type)) {
+		if (bootverbose)
+			device_printf(dev, "PHY type: %s\n", phy_type);
+
+		if (strcmp(phy_type, "rgmii") == 0)
+			tx_parent_name = "emac_int_tx";
+		else
+			tx_parent_name = "mii_phy_tx";
+		free(phy_type, M_OFWPROP);
+
+		/* Get the TX clock */
+		error = clk_get_by_ofw_name(dev, "tx", &clk_tx);
+		if (error != 0) {
+			device_printf(dev, "cannot get tx clock\n");
+			goto fail;
+		}
+
+		/* Find the desired parent clock based on phy-mode property */
+		error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent);
+		if (error != 0) {
+			device_printf(dev, "cannot get clock '%s'\n",
+			    tx_parent_name);
+			goto fail;
+		}
+
+		/* Set TX clock parent */
+		error = clk_set_parent_by_clk(clk_tx, clk_tx_parent);
+		if (error != 0) {
+			device_printf(dev, "cannot set tx clock parent\n");
+			goto fail;
+		}
+
+		/* Enable TX clock */
+		error = clk_enable(clk_tx);
+		if (error != 0) {
+			device_printf(dev, "cannot enable tx clock\n");
+			goto fail;
+		}
+	}
+
+	/* Enable AHB clock */
+	error = clk_enable(clk_ahb);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


