Date:      Thu, 30 Jun 2011 15:22:49 +0300
From:      Aleksandr Rybalko <ray@freebsd.org>
To:        freebsd-net@freebsd.org
Subject:   Ralink Ethernet MAC support patch
Message-ID:  <20110630152249.f369822f.ray@freebsd.org>

Hi folks,

I have a patch that enables support for the Ethernet MAC found on most
Ralink system-on-chips.

I have been using it for more than half a year and it works well, but
some problems remain. One of them: I still have not found why the
driver stops receiving frames after "ifconfig rt0 down; ifconfig rt0
up" (reproduction steps below). Maybe somebody wants to test it and/or
help me with development.
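
To reproduce the Rx stall (addresses are just an example):

# ifconfig rt0 inet 192.168.1.1/24 up
# ping 192.168.1.2      <- replies arrive
# ifconfig rt0 down
# ifconfig rt0 up
# ping 192.168.1.2      <- no replies: driver no longer receives frames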

The driver does not support VLAN/PPPoE offload yet, but it can transmit
at 88 Mbit/s on a 384 MHz MIPS32 SoC.

Usage:
# Enable if_rt
device		rt

# Enable debug messages
options		IF_RT_DEBUG
# Enable PHY support (untested: I have not yet seen such devices with
# PHYs attached to this MAC; in RT305xF SoCs it is attached only to
# the internal switch)
options		IF_RT_PHY_SUPPORT
# Number of DMA ring buffers to allocate (default 128)
options		IF_RT_RING_DATA_COUNT=128
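
The debug level can also be changed at run time; the bits correspond to
the RT_DEBUG_* flags in if_rtvar.h. The MAC address can be overridden
via device hints (read by ether_request_mac()). For example (values are
only illustrations, sysctl paths as they should appear after attach):

# sysctl hw.rt.debug=0x3        # RT_DEBUG_RX|RT_DEBUG_TX
# sysctl dev.rt.0.debug=0x3     # per-device knob
# echo 'hint.rt.0.macaddr="00:18:e7:d5:83:90"' >> /boot/device.hints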

I will be glad to see any comments/feedback about it.

URL: http://my.ddteam.net/files/2011-06-30_if_rt.patch
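
To apply (against a -CURRENT tree, from the source root):

# cd /usr/src
# fetch http://my.ddteam.net/files/2011-06-30_if_rt.patch
# patch -p0 < 2011-06-30_if_rt.patch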

-- 
Aleksandr Rybalko <ray@freebsd.org>

Index: sys/conf/options.mips
===================================================================
--- sys/conf/options.mips	(revision 223691)
+++ sys/conf/options.mips	(working copy)
@@ -70,3 +70,11 @@
 # Options that control the Atheros SoC peripherals
 #
 ARGE_DEBUG			opt_global.h
+
+#
+# Options that control the Ralink RT305xF Ethernet MAC.
+#
+IF_RT_DEBUG			opt_if_rt.h
+IF_RT_PHY_SUPPORT		opt_if_rt.h
+IF_RT_RING_DATA_COUNT		opt_if_rt.h
+
Index: sys/conf/files.mips
===================================================================
--- sys/conf/files.mips	(revision 223691)
+++ sys/conf/files.mips	(working copy)
@@ -106,4 +106,5 @@
 dev/hwpmc/hwpmc_mips.c		optional hwpmc
 dev/hwpmc/hwpmc_mips24k.c	optional hwpmc
 
+dev/rt/if_rt.c			optional	rt
 dev/nvram2env/nvram2env.c	optional	nvram2env
Index: sys/dev/rt/if_rtvar.h
===================================================================
--- sys/dev/rt/if_rtvar.h	(revision 0)
+++ sys/dev/rt/if_rtvar.h	(revision 0)
@@ -0,0 +1,276 @@
+
+/*-
+ * Copyright (c) 2010-2011 Aleksandr Rybalko <ray@ddteam.net>
+ * Copyright (c) 2009-2010 Alexander Egorenkov <egorenar@gmail.com>
+ * Copyright (c) 2009 Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _RT_SOFTC_H_
+#define _RT_SOFTC_H_
+
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/kernel.h>
+#include <sys/socket.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/taskqueue.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+
+#include "opt_if_rt.h"
+
+#define RT_SOFTC_LOCK(sc)		mtx_lock(&(sc)->lock)
+#define RT_SOFTC_UNLOCK(sc)		mtx_unlock(&(sc)->lock)
+#define	RT_SOFTC_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->lock, MA_OWNED)
+
+#define RT_SOFTC_TX_RING_LOCK(ring)		mtx_lock(&(ring)->lock)
+#define RT_SOFTC_TX_RING_UNLOCK(ring)		mtx_unlock(&(ring)->lock)
+#define	RT_SOFTC_TX_RING_ASSERT_LOCKED(ring) mtx_assert(&(ring)->lock, MA_OWNED)
+
+#define RT_SOFTC_TX_RING_COUNT		4
+
+#ifndef IF_RT_RING_DATA_COUNT
+#define IF_RT_RING_DATA_COUNT	128
+#endif
+
+#define RT_SOFTC_RX_RING_DATA_COUNT	IF_RT_RING_DATA_COUNT
+
+#define RT_SOFTC_MAX_SCATTER		10
+
+#define RT_SOFTC_TX_RING_DATA_COUNT	(IF_RT_RING_DATA_COUNT/4)
+#define RT_SOFTC_TX_RING_DESC_COUNT				\
+	(RT_SOFTC_TX_RING_DATA_COUNT * RT_SOFTC_MAX_SCATTER)
+
+
+#define RT_TXDESC_SDL1_BURST		(1 << 15)
+#define RT_TXDESC_SDL1_LASTSEG		(1 << 14)
+#define RT_TXDESC_SDL0_DDONE		(1 << 15)
+#define RT_TXDESC_SDL0_LASTSEG		(1 << 14)
+struct rt_txdesc
+{
+	uint32_t sdp0;
+	uint16_t sdl1;
+	uint16_t sdl0;
+	uint32_t sdp1;
+	uint8_t vid;
+#define TXDSCR_INS_VLAN_TAG	0x80
+#define TXDSCR_VLAN_PRIO_MASK	0x70
+#define TXDSCR_VLAN_IDX_MASK	0x0f
+	uint8_t	pppoe;
+#define TXDSCR_USR_DEF_FLD	0x80
+#define TXDSCR_INS_PPPOE_HDR	0x10
+#define TXDSCR_PPPOE_SID_MASK	0x0f
+	uint8_t qn;
+#define TXDSCR_QUEUE_MASK	0x07
+	uint8_t	dst;
+#define TXDSCR_IP_CSUM_GEN	0x80
+#define TXDSCR_UDP_CSUM_GEN	0x40
+#define TXDSCR_TCP_CSUM_GEN	0x20
+#define TXDSCR_DST_PORT_MASK	0x07
+#define TXDSCR_DST_PORT_CPU	0x00
+#define TXDSCR_DST_PORT_GDMA1	0x01
+#define TXDSCR_DST_PORT_GDMA2	0x02
+#define TXDSCR_DST_PORT_PPE	0x06
+#define TXDSCR_DST_PORT_DISC	0x07
+} __packed;
+
+
+#define RT_RXDESC_SDL0_DDONE		(1 << 15)
+struct rt_rxdesc
+{
+	uint32_t sdp0;
+	uint16_t sdl1;
+	uint16_t sdl0;
+	uint32_t sdp1;
+	uint16_t foe;
+#define RXDSXR_FOE_ENTRY_VALID		0x40
+#define RXDSXR_FOE_ENTRY_MASK		0x3f
+	uint8_t ai;
+#define RXDSXR_AI_COU_REASON		0xff
+#define RXDSXR_AI_PARSER_RSLT_MASK	0xff
+	uint8_t src;
+#define RXDSXR_SRC_IPFVLD		0x80
+#define RXDSXR_SRC_L4FVLD		0x40
+#define RXDSXR_SRC_IP_CSUM_FAIL	0x20
+#define RXDSXR_SRC_L4_CSUM_FAIL	0x10
+#define RXDSXR_SRC_AIS			0x08
+#define RXDSXR_SRC_PORT_MASK		0x07
+} __packed;
+
+
+
+struct rt_softc_rx_data
+{
+	bus_dmamap_t dma_map;
+	struct mbuf *m;
+};
+
+struct rt_softc_rx_ring
+{
+	bus_dma_tag_t desc_dma_tag;
+	bus_dmamap_t desc_dma_map;
+	bus_addr_t desc_phys_addr;
+	struct rt_rxdesc *desc;
+	bus_dma_tag_t data_dma_tag;
+	bus_dmamap_t spare_dma_map;
+	struct rt_softc_rx_data data[RT_SOFTC_RX_RING_DATA_COUNT];
+	int cur;
+};
+
+struct rt_softc_tx_data
+{
+	bus_dmamap_t dma_map;
+	struct mbuf *m;
+};
+
+struct rt_softc_tx_ring
+{
+	struct mtx lock;
+	bus_dma_tag_t desc_dma_tag;
+	bus_dmamap_t desc_dma_map;
+	bus_addr_t desc_phys_addr;
+	struct rt_txdesc *desc;
+	int desc_queued;
+	int desc_cur;
+	int desc_next;
+	bus_dma_tag_t seg0_dma_tag;
+	bus_dmamap_t seg0_dma_map;
+	bus_addr_t seg0_phys_addr;
+	uint8_t *seg0;
+	bus_dma_tag_t data_dma_tag;
+	struct rt_softc_tx_data data[RT_SOFTC_TX_RING_DATA_COUNT];
+	int data_queued;
+	int data_cur;
+	int data_next;
+	int qid;
+};
+
+struct rt_softc
+{
+	device_t 	 dev;
+	struct mtx 	 lock;
+	uint32_t 	 flags;
+
+	int		 mem_rid;
+	struct resource	*mem;
+	int		 irq_rid;
+	struct resource *irq;
+	void		*irqh;
+
+	bus_space_tag_t	 bst;
+	bus_space_handle_t bsh;
+
+	struct ifnet	*ifp;
+	int 		 if_flags;
+	struct ifmedia	 rt_ifmedia;
+
+	uint32_t	 mac_rev;
+	uint8_t		 mac_addr[ETHER_ADDR_LEN];
+	device_t	 rt_miibus;
+
+	uint32_t	 intr_enable_mask;
+	uint32_t	 intr_disable_mask;
+	uint32_t	 intr_pending_mask;
+
+	struct task	 rx_done_task;
+	int		 rx_process_limit;
+	struct task	 tx_done_task;
+	struct task	 periodic_task;
+	struct callout	 periodic_ch;
+	unsigned long	 periodic_round;
+	struct taskqueue *taskqueue;
+
+	struct rt_softc_rx_ring rx_ring;
+	struct rt_softc_tx_ring tx_ring[RT_SOFTC_TX_RING_COUNT];
+	int		 tx_ring_mgtqid;
+
+	struct callout	 tx_watchdog_ch;
+	int		 tx_timer;
+
+	/* statistic counters */
+
+	unsigned long	 interrupts;
+	unsigned long	 tx_coherent_interrupts;
+	unsigned long	 rx_coherent_interrupts;
+	unsigned long	 rx_interrupts;
+	unsigned long	 rx_delay_interrupts;
+	unsigned long	 tx_interrupts[RT_SOFTC_TX_RING_COUNT];
+	unsigned long	 tx_delay_interrupts;
+	unsigned long	 tx_data_queue_full[RT_SOFTC_TX_RING_COUNT];
+	unsigned long	 tx_watchdog_timeouts;
+	unsigned long	 tx_defrag_packets;
+	unsigned long	 no_tx_desc_avail;
+	unsigned long	 rx_mbuf_alloc_errors;
+	unsigned long	 rx_mbuf_dmamap_errors;
+	unsigned long	 tx_queue_not_empty[2];
+
+	unsigned long	 rx_bytes;
+	unsigned long	 rx_packets;
+	unsigned long	 rx_crc_err;
+	unsigned long	 rx_phy_err;
+	unsigned long	 rx_dup_packets;
+	unsigned long	 rx_fifo_overflows;
+	unsigned long	 rx_short_err;
+	unsigned long	 rx_long_err;
+	unsigned long	 tx_bytes;
+	unsigned long	 tx_packets;
+	unsigned long	 tx_skip;
+	unsigned long	 tx_collision;
+
+	int		 phy_addr;
+
+#ifdef IF_RT_DEBUG
+	int		 debug;
+#endif
+};
+
+#ifdef IF_RT_DEBUG
+
+enum
+{
+	RT_DEBUG_RX = 0x00000001,
+	RT_DEBUG_TX = 0x00000002,
+	RT_DEBUG_INTR = 0x00000004,
+	RT_DEBUG_STATE = 0x00000008,
+	RT_DEBUG_STATS = 0x00000010,
+	RT_DEBUG_PERIODIC = 0x00000020,
+	RT_DEBUG_WATCHDOG = 0x00000040,
+	RT_DEBUG_ANY = 0xffffffff
+};
+
+#define RT_DPRINTF(sc, m, fmt, ...)		\
+	do { if ((sc)->debug & (m)) 		\
+	    device_printf(sc->dev, fmt, __VA_ARGS__); } while (0)
+#else
+#define RT_DPRINTF(sc, m, fmt, ...)
+#endif /* #ifdef IF_RT_DEBUG */
+
+#endif /* #ifndef _RT_SOFTC_H_ */

Property changes on: sys/dev/rt/if_rtvar.h
___________________________________________________________________
Added: svn:mime-type
   + text/plain
Added: svn:keywords
   + FreeBSD=%H
Added: svn:eol-style
   + native

Index: sys/dev/rt/if_rt.c
===================================================================
--- sys/dev/rt/if_rt.c	(revision 0)
+++ sys/dev/rt/if_rt.c	(revision 0)
@@ -0,0 +1,2625 @@
+/*-
+ * Copyright (c) 2011, Aleksandr Rybalko
+ * based on hard work 
+ * by Alexander Egorenkov <egorenar@gmail.com>
+ * and by Damien Bergamini <damien.bergamini@free.fr>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "if_rtvar.h"
+#include "if_rtreg.h"
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <net/bpf.h>
+
+#include <machine/bus.h>
+#include <machine/cache.h>
+#include <machine/cpufunc.h>
+#include <machine/resource.h>
+#include <vm/vm_param.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/pmap.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <mips/rt305x/rt305x_sysctlvar.h>
+#include <mips/rt305x/rt305xreg.h>
+
+#ifdef IF_RT_PHY_SUPPORT
+#include "miibus_if.h"
+#endif
+
+/*
+ * Defines and macros
+ */
+
+#define RT_MAX_AGG_SIZE			3840
+
+#define RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE
+
+
+#define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
+#define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)
+
+#define RT_TX_WATCHDOG_TIMEOUT		5
+
+#define RT_WCID_RESERVED		0xff
+#define RT_WCID_MCAST			0xf7
+
+/*
+ * Data structures and types
+ */
+
+/*
+ * Static function prototypes
+ */
+
+static int rt_probe(device_t dev);
+static int rt_attach(device_t dev);
+static int rt_detach(device_t dev);
+static int rt_shutdown(device_t dev);
+static int rt_suspend(device_t dev);
+static int rt_resume(device_t dev);
+static void rt_init_locked(void *priv);
+static void rt_init(void *priv);
+static void rt_stop_locked(void *priv);
+static void rt_stop(void *priv);
+static void rt_start(struct ifnet *ifp);
+static int rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
+static void rt_periodic(void *arg);
+static void rt_tx_watchdog(void *arg);
+static void rt_intr(void *arg);
+static void rt_tx_coherent_intr(struct rt_softc *sc);
+static void rt_rx_coherent_intr(struct rt_softc *sc);
+static void rt_rx_delay_intr(struct rt_softc *sc);
+static void rt_tx_delay_intr(struct rt_softc *sc);
+static void rt_rx_intr(struct rt_softc *sc);
+static void rt_tx_intr(struct rt_softc *sc, int qid);
+static void rt_rx_done_task(void *context, int pending);
+static void rt_tx_done_task(void *context, int pending);
+static void rt_periodic_task(void *context, int pending);
+static int rt_rx_eof(struct rt_softc *sc, int limit);
+static void rt_tx_eof(struct rt_softc *sc,
+	struct rt_softc_tx_ring *ring);
+static void rt_update_stats(struct rt_softc *sc);
+static void rt_watchdog(struct rt_softc *sc);
+static void rt_update_raw_counters(struct rt_softc *sc);
+static void rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
+static void rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
+static int rt_txrx_enable(struct rt_softc *sc);
+static int rt_alloc_rx_ring(struct rt_softc *sc,
+	struct rt_softc_rx_ring *ring);
+static void rt_reset_rx_ring(struct rt_softc *sc,
+	struct rt_softc_rx_ring *ring);
+static void rt_free_rx_ring(struct rt_softc *sc,
+	struct rt_softc_rx_ring *ring);
+static int rt_alloc_tx_ring(struct rt_softc *sc,
+	struct rt_softc_tx_ring *ring, int qid);
+static void rt_reset_tx_ring(struct rt_softc *sc,
+	struct rt_softc_tx_ring *ring);
+static void rt_free_tx_ring(struct rt_softc *sc,
+	struct rt_softc_tx_ring *ring);
+static void rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
+	int nseg, int error);
+static void rt_sysctl_attach(struct rt_softc *sc);
+#ifdef IF_RT_PHY_SUPPORT
+void		rt_miibus_statchg(device_t);
+static int	rt_miibus_readreg(device_t, int, int);
+static int	rt_miibus_writereg(device_t, int, int, int);
+#endif
+static int	rt_ifmedia_upd(struct ifnet *);
+static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+
+SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
+
+#ifdef IF_RT_DEBUG
+static int rt_debug = 0;
+SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RW, &rt_debug, 0, "RT debug level");
+TUNABLE_INT("hw.rt.debug", &rt_debug);
+#endif
+
+/*
+ * rt_probe
+ */
+static int
+rt_probe(device_t dev)
+{
+	device_set_desc(dev, "Ralink RT305XF onChip Ethernet MAC");
+	return 0;
+}
+
+/*
+ * macaddr_atoi - translate string MAC address to uint8_t array
+ */
+static int
+macaddr_atoi(const char *str, uint8_t *mac)
+{
+	int count, i;
+	unsigned int amac[6];	/* Aligned version */
+
+	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
+	    &amac[0], &amac[1], &amac[2], 
+	    &amac[3], &amac[4], &amac[5]);
+	if (count < 6) {
+		memset(mac, 0, 6);
+		return (1);
+	}
+
+	/* Copy aligned to result */
+	for (i = 0; i < count; i ++)
+		mac[i] = (amac[i] & 0xff);
+
+	return (0);
+}
+
+#ifdef USE_GENERATED_MAC_ADDRESS
+static char *
+kernenv_next(char *cp)
+{
+
+	if (cp != NULL) {
+		while (*cp != 0)
+			cp++;
+		cp++;
+		if (*cp == 0)
+			cp = NULL;
+	}
+	return (cp);
+}
+
+/*
+ * generate_mac(uint8_t *mac) implements a MAC address generator.
+ *
+ * It uses the 'b','s','d' signature and 3 octets from a CRC32 over the kenv.
+ *
+ * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
+ *
+ * As a result we get a MAC address that does not change between reboots,
+ * as long as we do not change the hints or bootloader info.
+ */
+static void
+generate_mac(uint8_t *mac)
+{
+	unsigned char *cp;
+	int i = 0;
+	uint32_t crc = 0xffffffff;
+
+	/* Generate CRC32 on kenv */
+	if (dynamic_kenv) {
+		for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
+			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
+		}
+
+	} else {
+		for (cp = kern_envp; cp != NULL; cp = kernenv_next(cp)) {
+			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
+		}
+	}
+	crc = ~crc;
+
+	mac[0] = 'b';
+	mac[1] = 's';
+	mac[2] = 'd';
+	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
+	mac[4] = (crc >> 8) & 0xff;
+	mac[5] = crc & 0xff;
+}
+#endif
+
+static int
+ether_request_mac(device_t dev, uint8_t *mac)
+{
+	char *var;
+
+	/*
+	 * "ethaddr" is passed via envp on RedBoot platforms
+	 * "kmac" is passed via argv on RouterBOOT platforms
+	 */
+#if defined(__U_BOOT__) || defined(__REDBOOT__) || defined(__ROUTERBOOT__)
+	if ((var = getenv("ethaddr")) != NULL ||
+	    (var = getenv("kmac")) != NULL ) {
+
+		if(!macaddr_atoi(var, mac)) {
+			printf("%s: use %s macaddr from KENV\n",
+			    device_get_nameunit(dev), var);
+			freeenv(var);
+			return (0);
+		}
+		freeenv(var);
+	}
+#endif
+
+	/*
+	 * Try from hints
+	 * hint.[dev].[unit].macaddr
+	 */
+	if (!resource_string_value(device_get_name(dev), device_get_unit(dev),
+	    "macaddr", (const char **)&var)) {
+
+		if(!macaddr_atoi(var, mac)) {
+			printf("%s: use %s macaddr from hints\n",
+			    device_get_nameunit(dev), var);
+			return (0);
+		}
+	}
+
+#ifdef USE_GENERATED_MAC_ADDRESS
+	generate_mac(mac);
+
+	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
+	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+#else
+	/* Hardcoded */
+	mac[0] = 0x00;
+	mac[1] = 0x18;
+	mac[2] = 0xe7;
+	mac[3] = 0xd5;
+	mac[4] = 0x83;
+	mac[5] = 0x90;
+
+	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
+#endif
+
+	return (0);
+}
+
+/*
+ * rt_attach
+ */
+static int
+rt_attach(device_t dev)
+{
+	struct rt_softc *sc;
+	struct ifnet *ifp;
+	int error, i;
+
+	sc = device_get_softc(dev);
+
+	sc->dev = dev;
+
+	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
+	    MTX_DEF | MTX_RECURSE);
+
+	sc->mem_rid = 0;
+	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
+	    RF_ACTIVE);
+	if (sc->mem == NULL) {
+		device_printf(dev, "could not allocate memory resource\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+	sc->bst = rman_get_bustag(sc->mem);
+	sc->bsh = rman_get_bushandle(sc->mem);
+
+	sc->irq_rid = 0;
+	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
+	    RF_ACTIVE);
+	if (sc->irq == NULL) {
+		device_printf(dev, "could not allocate interrupt resource\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+#ifdef IF_RT_DEBUG
+	sc->debug = rt_debug;
+
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
+#endif
+
+	device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n", sc->mac_rev);
+
+	/* Reset hardware */
+	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
+
+	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG, 
+	    (
+	    GDM_ICS_EN | /* Enable IP Csum */
+	    GDM_TCS_EN | /* Enable TCP Csum */
+	    GDM_UCS_EN | /* Enable UDP Csum */
+	    GDM_STRPCRC | /* Strip CRC from packet */
+	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
+	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
+	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
+	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
+	    ));
+
+
+	/* allocate Tx and Rx rings */
+	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
+		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
+		if (error != 0) {
+			device_printf(dev, "could not allocate Tx ring #%d\n", 
+			    i);
+			goto fail;
+		}
+	}
+
+	sc->tx_ring_mgtqid = 5;
+
+	error = rt_alloc_rx_ring(sc, &sc->rx_ring);
+	if (error != 0) {
+		device_printf(dev, "could not allocate Rx ring\n");
+		goto fail;
+	}
+
+	callout_init(&sc->periodic_ch, 0);
+	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);
+
+	ifp = sc->ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(dev, "could not if_alloc()\n");
+		error = ENOMEM;
+		goto fail;
+	}
+
+	ifp->if_softc = sc;
+	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_init = rt_init;
+	ifp->if_ioctl = rt_ioctl;
+	ifp->if_start = rt_start;
+	ifp->if_mtu = ETHERMTU;
+#define RT_TX_QLEN	256
+
+	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
+	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
+	IFQ_SET_READY(&ifp->if_snd);
+
+#ifdef IF_RT_PHY_SUPPORT
+	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd, 
+	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
+	if (error != 0) {
+		device_printf(dev, "attaching PHYs failed\n");
+		error = ENXIO;
+		goto fail;
+	}
+#else
+	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
+	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
+	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
+
+#endif /* IF_RT_PHY_SUPPORT */
+
+	ether_request_mac(dev, sc->mac_addr);
+	ether_ifattach(ifp, sc->mac_addr);
+
+	/*
+	 * Tell the upper layer(s) we support long frames.
+	 */
+	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+	ifp->if_capabilities |= IFCAP_VLAN_MTU;
+	ifp->if_capenable |= IFCAP_VLAN_MTU;
+	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
+	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
+
+	/* init task queue */
+	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
+	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
+	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);
+
+	sc->rx_process_limit = 100;
+
+	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
+	    taskqueue_thread_enqueue, &sc->taskqueue);
+
+	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
+	    device_get_nameunit(sc->dev));
+
+	rt_sysctl_attach(sc);
+
+	/* set up interrupt */
+	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
+	    NULL, rt_intr, sc, &sc->irqh);
+	if (error != 0) {
+		printf("%s: could not set up interrupt\n",
+			device_get_nameunit(dev));
+		goto fail;
+	}
+#ifdef IF_RT_DEBUG
+	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
+#endif
+
+	return 0;
+
+fail:
+
+	/* free Tx and Rx rings */
+	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
+		rt_free_tx_ring(sc, &sc->tx_ring[i]);
+
+	rt_free_rx_ring(sc, &sc->rx_ring);
+
+	mtx_destroy(&sc->lock);
+
+	if (sc->mem != NULL)
+		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
+
+	if (sc->irq != NULL)
+		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
+
+	return error;
+}
+
+/*
+ * Set media options.
+ */
+static int
+rt_ifmedia_upd(struct ifnet *ifp)
+{
+	struct rt_softc *sc;
+#ifdef IF_RT_PHY_SUPPORT
+	struct mii_data *mii;
+	int error = 0;
+
+	sc = ifp->if_softc;
+	RT_SOFTC_LOCK(sc);
+
+	mii = device_get_softc(sc->rt_miibus);
+	if (mii->mii_instance) {
+		struct mii_softc *miisc;
+		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
+				miisc = LIST_NEXT(miisc, mii_list))
+			mii_phy_reset(miisc);
+	}
+	if (mii)
+		error = mii_mediachg(mii);
+	RT_SOFTC_UNLOCK(sc);
+
+	return (error);
+
+#else /* !IF_RT_PHY_SUPPORT */
+
+	struct ifmedia *ifm;
+	struct ifmedia_entry *ife;
+
+	sc = ifp->if_softc;
+	ifm = &sc->rt_ifmedia;
+	ife = ifm->ifm_cur;
+
+	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+		return (EINVAL);
+
+	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
+		device_printf(sc->dev,
+		    "AUTO is not supported for multiphy MAC\n");
+		return (EINVAL);
+	}
+
+	/*
+	 * Ignore everything
+	 */
+	return (0);
+#endif /* IF_RT_PHY_SUPPORT */
+}
+
+/*
+ * Report current media status.
+ */
+static void
+rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+#ifdef IF_RT_PHY_SUPPORT
+	struct rt_softc *sc;
+	struct mii_data *mii;
+
+	sc = ifp->if_softc;
+
+	RT_SOFTC_LOCK(sc);
+	mii = device_get_softc(sc->rt_miibus);
+	mii_pollstat(mii);
+	ifmr->ifm_active = mii->mii_media_active;
+	ifmr->ifm_status = mii->mii_media_status;
+	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
+	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+	RT_SOFTC_UNLOCK(sc);
+#else /* !IF_RT_PHY_SUPPORT */
+
+	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
+#endif /* IF_RT_PHY_SUPPORT */
+}
+
+
+/*
+ * rt_detach
+ */
+static int
+rt_detach(device_t dev)
+{
+	struct rt_softc *sc;
+	struct ifnet *ifp;
+	int i;
+
+	sc = device_get_softc(dev);
+	ifp = sc->ifp;
+
+	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");
+
+	RT_SOFTC_LOCK(sc);
+
+	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+	callout_stop(&sc->periodic_ch);
+	callout_stop(&sc->tx_watchdog_ch);
+
+	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
+	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
+	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
+
+	/* free Tx and Rx rings */
+	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
+		rt_free_tx_ring(sc, &sc->tx_ring[i]);
+
+	rt_free_rx_ring(sc, &sc->rx_ring);
+
+	RT_SOFTC_UNLOCK(sc);
+
+#ifdef IF_RT_PHY_SUPPORT
+	if (sc->rt_miibus != NULL)
+		device_delete_child(dev, sc->rt_miibus);
+#endif
+
+	ether_ifdetach(ifp);
+	if_free(ifp);
+
+	taskqueue_free(sc->taskqueue);
+
+	mtx_destroy(&sc->lock);
+
+	bus_generic_detach(dev);
+	bus_teardown_intr(dev, sc->irq, sc->irqh);
+	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
+	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
+
+	return 0;
+}
+
+/*
+ * rt_shutdown
+ */
+static int
+rt_shutdown(device_t dev)
+{
+	struct rt_softc *sc;
+
+	sc = device_get_softc(dev);
+	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
+	rt_stop(sc);
+
+	return 0;
+}
+
+/*
+ * rt_suspend
+ */
+static int
+rt_suspend(device_t dev)
+{
+	struct rt_softc *sc;
+
+	sc = device_get_softc(dev);
+	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
+	rt_stop(sc);
+
+	return 0;
+}
+
+/*
+ * rt_resume
+ */
+static int
+rt_resume(device_t dev)
+{
+	struct rt_softc *sc;
+	struct ifnet *ifp;
+
+	sc = device_get_softc(dev);
+	ifp = sc->ifp;
+
+	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");
+
+	if (ifp->if_flags & IFF_UP)
+		rt_init(sc);
+
+	return 0;
+}
+
+/*
+ * rt_init_locked
+ */
+static void
+rt_init_locked(void *priv)
+{
+	struct rt_softc *sc;
+	struct ifnet *ifp;
+#ifdef IF_RT_PHY_SUPPORT
+	struct mii_data *mii;
+#endif
+	int i, ntries;
+	uint32_t tmp;
+
+	sc = priv;
+	ifp = sc->ifp;
+#ifdef IF_RT_PHY_SUPPORT
+	mii = device_get_softc(sc->rt_miibus);
+#endif
+
+	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");
+
+	RT_SOFTC_ASSERT_LOCKED(sc);
+
+	/* hardware reset */
+	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
+	rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);
+
+	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
+	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG, 
+	    (
+	    GDM_ICS_EN | /* Enable IP Csum */
+	    GDM_TCS_EN | /* Enable TCP Csum */
+	    GDM_UCS_EN | /* Enable UDP Csum */
+	    GDM_STRPCRC | /* Strip CRC from packet */
+	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
+	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
+	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
+	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
+	    ));
+
+	/* disable DMA engine */
+	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, 0);
+
+	RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, 0xffffffff);
+
+
+	/* wait while DMA engine is busy */
+	for (ntries = 0; ntries < 100; ntries++) {
+		tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
+		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
+			break;
+
+		DELAY(1000);
+	}
+
+	if (ntries == 100) {
+		device_printf(sc->dev, "timeout waiting for DMA engine\n");
+		goto fail;
+	}
+
+
+	/* reset Rx and Tx rings */
+	tmp = FE_RST_DRX_IDX0 |
+		FE_RST_DTX_IDX3 |
+		FE_RST_DTX_IDX2 |
+		FE_RST_DTX_IDX1 |
+		FE_RST_DTX_IDX0;
+
+	RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, tmp);
+
+	/* XXX switch set mac address */
+
+	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
+		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
+
+	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
+		/* update TX_BASE_PTRx */
+		RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
+			sc->tx_ring[i].desc_phys_addr);
+		RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
+			RT_SOFTC_TX_RING_DESC_COUNT);
+		RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
+	}
+
+	/* init Rx ring */
+	rt_reset_rx_ring(sc, &sc->rx_ring);
+
+	/* update RX_BASE_PTR0 */
+	RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
+		sc->rx_ring.desc_phys_addr);
+	RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
+		RT_SOFTC_RX_RING_DATA_COUNT);
+	RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
+		RT_SOFTC_RX_RING_DATA_COUNT - 1);
+
+	/* write back DDONE, 16byte burst enable RX/TX DMA */
+	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, 
+	    FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN);
+
+	/* disable interrupts mitigation */
+	RT_WRITE(sc, PDMA_BASE + DELAY_INT_CFG, 0);
+
+	/* clear pending interrupts */
+	RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, 0xffffffff);
+
+	/* enable interrupts */
+	tmp = 	CNT_PPE_AF |
+		CNT_GDM_AF |
+		PSE_P2_FC |
+		GDM_CRC_DROP |
+		PSE_BUF_DROP |
+		GDM_OTHER_DROP |
+		PSE_P1_FC |
+		PSE_P0_FC |
+		PSE_FQ_EMPTY |
+		INT_TX_COHERENT |
+		INT_RX_COHERENT |
+		INT_TXQ3_DONE |
+		INT_TXQ2_DONE |
+		INT_TXQ1_DONE |
+		INT_TXQ0_DONE |
+		INT_RX_DONE;
+
+	sc->intr_enable_mask = tmp;
+
+	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
+
+	if (rt_txrx_enable(sc) != 0)
+		goto fail;
+
+#ifdef IF_RT_PHY_SUPPORT
+	if (mii) mii_mediachg(mii);
+#endif /* IF_RT_PHY_SUPPORT */
+
+	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+
+	sc->periodic_round = 0;
+
+	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
+
+	return;
+
+fail:
+
+	rt_stop_locked(sc);
+}
+
+/*
+ * rt_init
+ */
+static void
+rt_init(void *priv)
+{
+	struct rt_softc *sc;
+
+	sc = priv;
+	RT_SOFTC_LOCK(sc);
+	rt_init_locked(sc);
+	RT_SOFTC_UNLOCK(sc);
+}
+
+/*
+ * rt_stop_locked
+ */
+static void
+rt_stop_locked(void *priv)
+{
+	struct rt_softc *sc;
+	struct ifnet *ifp;
+
+	sc = priv;
+	ifp = sc->ifp;
+
+	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");
+
+	RT_SOFTC_ASSERT_LOCKED(sc);
+	sc->tx_timer = 0;
+	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+	callout_stop(&sc->periodic_ch);
+	callout_stop(&sc->tx_watchdog_ch);
+	RT_SOFTC_UNLOCK(sc);
+	taskqueue_block(sc->taskqueue);
+
+	/*
+	 * Sometimes rt_stop_locked is called from the ISR and we panic;
+	 * re-enable this drain once the cause is found.
+	 */
+#ifdef notyet
+	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
+	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
+	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
+#endif
+	RT_SOFTC_LOCK(sc);
+
+	/* disable interrupts */
+	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, 0);
+
+
+	/* reset adapter */
+	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
+
+	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
+	    (
+	    GDM_ICS_EN | /* Enable IP Csum */
+	    GDM_TCS_EN | /* Enable TCP Csum */
+	    GDM_UCS_EN | /* Enable UDP Csum */
+	    GDM_STRPCRC | /* Strip CRC from packet */
+	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
+	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
+	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
+	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
+	    ));
+
+}
+
+/*
+ * rt_stop
+ */
+static void
+rt_stop(void *priv)
+{
+	struct rt_softc *sc;
+
+	sc = priv;
+	RT_SOFTC_LOCK(sc);
+	rt_stop_locked(sc);
+	RT_SOFTC_UNLOCK(sc);
+}
+
+/*
+ * rt_tx_data
+ */
+static int
+rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
+{
+	struct ifnet *ifp;
+	struct rt_softc_tx_ring *ring;
+	struct rt_softc_tx_data *data;
+	struct rt_txdesc *desc;
+	struct mbuf *m_d;
+	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
+
+	int error, ndmasegs, ndescs, i;
+
+	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
+		("%s: Tx data: invalid qid=%d\n",
+		 device_get_nameunit(sc->dev), qid));
+
+	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);
+
+	ifp = sc->ifp;
+	ring = &sc->tx_ring[qid];
+	desc = &ring->desc[ring->desc_cur];
+	data = &ring->data[ring->data_cur];
+
+	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
+	    dma_seg, &ndmasegs, 0);
+	if (error != 0)	{
+		/* too many fragments, linearize */
+
+		RT_DPRINTF(sc, RT_DEBUG_TX,
+			"could not load mbuf DMA map, trying to linearize "
+			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
+			ndmasegs, m->m_pkthdr.len, error);
+
+		m_d = m_collapse(m, M_DONTWAIT, 16);
+		if (m_d == NULL) {
+			m_freem(m);
+			m = NULL;
+			return (ENOMEM);
+		}
+		m = m_d;
+
+		sc->tx_defrag_packets++;
+
+		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, 
+		    data->dma_map, m, dma_seg, &ndmasegs, 0);
+		if (error != 0)	{
+			device_printf(sc->dev, "could not load mbuf DMA map: "
+			    "ndmasegs=%d, len=%d, error=%d\n",
+			    ndmasegs, m->m_pkthdr.len, error);
+			m_freem(m);
+			return error;
+		}
+	}
+
+	if (m->m_pkthdr.len == 0)
+		ndmasegs = 0;
+
+	/* determine how many Tx descs are required */
+
+	ndescs = 1 + ndmasegs / 2;
+	if ((ring->desc_queued + ndescs) > (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
+		RT_DPRINTF(sc, RT_DEBUG_TX, "there are not enough Tx descs\n");
+
+		sc->no_tx_desc_avail++;
+
+		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
+		m_freem(m);
+		return EFBIG;
+	}
+
+	data->m = m;
+
+	/* set up Tx descs */
+	for (i = 0; i < ndmasegs; i += 2) {
+
+		/* Set destination */
+		desc->dst = (TXDSCR_DST_PORT_GDMA1);
+		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
+			desc->dst |= (TXDSCR_IP_CSUM_GEN|TXDSCR_UDP_CSUM_GEN|
+			    TXDSCR_TCP_CSUM_GEN);
+		/* Set queue id */
+		desc->qn = qid;
+		/* No PPPoE */
+		desc->pppoe = 0;
+		/* No VLAN */
+		desc->vid = 0;
+
+		desc->sdp0 = htole32(dma_seg[i].ds_addr);
+		desc->sdl0 = htole16(dma_seg[i].ds_len | 
+		    ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));
+
+		if ((i+1) < ndmasegs) {
+			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
+			desc->sdl1 = htole16(dma_seg[i+1].ds_len | 
+			    ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
+		} else {
+			desc->sdp1 = 0;
+			desc->sdl1 = 0;
+		}
+
+		if ((i+2) < ndmasegs) {
+			ring->desc_queued++;
+			ring->desc_cur = (ring->desc_cur + 1) % 
+			    RT_SOFTC_TX_RING_DESC_COUNT;
+		}
+		desc = &ring->desc[ring->desc_cur];
+	}
+
+	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
+	    "DMA ds_len=%d/%d/%d/%d/%d\n",
+	    m->m_pkthdr.len, ndmasegs,
+	    (int) dma_seg[0].ds_len,
+	    (int) dma_seg[1].ds_len,
+	    (int) dma_seg[2].ds_len,
+	    (int) dma_seg[3].ds_len,
+	    (int) dma_seg[4].ds_len);
+
+	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
+		BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
+		BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
+		BUS_DMASYNC_PREWRITE);
+
+	ring->desc_queued++;
+	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;
+
+	ring->data_queued++;
+	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;
+
+	/* kick Tx */
+	RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(qid), ring->desc_cur);
+
+	return 0;
+}
+
+/*
+ * rt_start
+ */
+static void
+rt_start(struct ifnet *ifp)
+{
+	struct rt_softc *sc;
+	struct mbuf *m;
+	int qid = 0 /* XXX must check QoS priority */;
+
+	sc = ifp->if_softc;
+
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+		return;
+
+	for (;;) {
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+		if (m == NULL)
+			break;
+
+		m->m_pkthdr.rcvif = NULL;
+
+		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);
+
+		if (sc->tx_ring[qid].data_queued >= RT_SOFTC_TX_RING_DATA_COUNT) {
+			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
+
+			RT_DPRINTF(sc, RT_DEBUG_TX,
+				"if_start: Tx ring with qid=%d is full\n", qid);
+
+			m_freem(m);
+
+			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+			ifp->if_oerrors++;
+
+			sc->tx_data_queue_full[qid]++;
+
+			break;
+		}
+
+		if (rt_tx_data(sc, m, qid) != 0) {
+			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
+
+			ifp->if_oerrors++;
+
+			break;
+		}
+
+		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
+		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
+		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
+	}
+}
+
+/*
+ * rt_update_promisc
+ */
+static void 
+rt_update_promisc(struct ifnet *ifp)
+{
+	struct rt_softc *sc;
+
+	sc = ifp->if_softc;
+	printf("%s: %s promiscuous mode\n",
+		device_get_nameunit(sc->dev),
+		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
+}
+
+/*
+ * rt_ioctl
+ */
+static int
+rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+	struct rt_softc *sc;
+	struct ifreq *ifr;
+#ifdef IF_RT_PHY_SUPPORT
+	struct mii_data *mii;
+#endif /* IF_RT_PHY_SUPPORT */
+	int error, startall;
+
+	sc = ifp->if_softc;
+	ifr = (struct ifreq *) data;
+
+	error = 0;
+
+	switch (cmd) {
+	case SIOCSIFFLAGS:
+		startall = 0;
+		RT_SOFTC_LOCK(sc);
+		if (ifp->if_flags & IFF_UP) {
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+				if ((ifp->if_flags ^ sc->if_flags) & 
+				    IFF_PROMISC)
+					rt_update_promisc(ifp);
+			} else {
+				rt_init_locked(sc);
+				startall = 1;
+			}
+		} else {
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+				rt_stop_locked(sc);
+		}
+
+		sc->if_flags = ifp->if_flags;
+
+		RT_SOFTC_UNLOCK(sc);
+		break;
+
+	case SIOCGIFMEDIA:
+	case SIOCSIFMEDIA:
+#ifdef IF_RT_PHY_SUPPORT
+		mii = device_get_softc(sc->rt_miibus);
+		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
+#else
+		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
+#endif /* IF_RT_PHY_SUPPORT */
+		break;
+
+	default:
+		error = ether_ioctl(ifp, cmd, data);
+		break;
+	}
+
+	return error;
+}
+
+/*
+ * rt_periodic
+ */
+static void
+rt_periodic(void *arg)
+{
+	struct rt_softc *sc;
+
+	sc = arg;
+	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
+	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
+}
+
+/*
+ * rt_tx_watchdog
+ */
+static void
+rt_tx_watchdog(void *arg)
+{
+	struct rt_softc *sc;
+	struct ifnet *ifp;
+
+	sc = arg;
+	ifp = sc->ifp;
+
+	if (sc->tx_timer == 0)
+		return;
+
+	if (--sc->tx_timer == 0) {
+		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
+
+#ifdef notyet
+		/*
+		 * XXX: Commented out, because reset breaks input.
+		 */
+		rt_stop_locked(sc);
+		rt_init_locked(sc);
+#endif
+
+		ifp->if_oerrors++;
+		sc->tx_watchdog_timeouts++;
+	}
+
+	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
+}
+
+static void
+rt_cnt_ppe_af(struct rt_softc *sc)
+{
+
+	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
+}
+
+static void
+rt_cnt_gdm_af(struct rt_softc *sc)
+{
+
+	RT_DPRINTF(sc, RT_DEBUG_INTR,
+	    "GDMA 1 & 2 Counter Table Almost Full\n");
+}
+
+static void
+rt_pse_p2_fc(struct rt_softc *sc)
+{
+	RT_DPRINTF(sc, RT_DEBUG_INTR,
+	    "PSE port2 (GDMA 2) flow control asserted.\n");
+}
+
+static void
+rt_gdm_crc_drop(struct rt_softc *sc)
+{
+	RT_DPRINTF(sc, RT_DEBUG_INTR,
+	    "GDMA 1 & 2 discard a packet due to CRC error\n");
+}
+
+static void
+rt_pse_buf_drop(struct rt_softc *sc)
+{
+	RT_DPRINTF(sc, RT_DEBUG_INTR,
+	    "PSE discards a packet due to buffer sharing limitation\n");
+}
+
+static void
+rt_gdm_other_drop(struct rt_softc *sc)
+{
+	RT_DPRINTF(sc, RT_DEBUG_INTR,
+	    "GDMA 1 & 2 discard a packet due to other reason\n");
+}
+
+static void
+rt_pse_p1_fc(struct rt_softc *sc)
+{
+	RT_DPRINTF(sc, RT_DEBUG_INTR,
+	    "PSE port1 (GDMA 1) flow control asserted.\n");
+}
+
+static void
+rt_pse_p0_fc(struct rt_softc *sc)
+{
+	RT_DPRINTF(sc, RT_DEBUG_INTR,
+	    "PSE port0 (CDMA) flow control asserted.\n");
+}
+
+static void
+rt_pse_fq_empty(struct rt_softc *sc)
+{
+	RT_DPRINTF(sc, RT_DEBUG_INTR,
+	    "PSE free Q empty threshold reached & forced drop "
+		    "condition occurred.\n");
+}
+
+/*
+ * rt_intr
+ */
+static void
+rt_intr(void *arg)
+{
+	struct rt_softc *sc;
+	struct ifnet *ifp;
+	uint32_t status;
+
+	sc = arg;
+	ifp = sc->ifp;
+
+	/* acknowledge interrupts */
+
+	status = RT_READ(sc, GE_PORT_BASE + FE_INT_STATUS);
+	RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, status);
+
+	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status = 0x%08x\n", status);
+
+	if (status == 0xffffffff ||		/* device likely went away */
+		status == 0)				/* not for us */
+		return;
+
+	sc->interrupts++;
+
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+		return;
+
+	if (status & CNT_PPE_AF)
+		rt_cnt_ppe_af(sc);
+
+	if (status & CNT_GDM_AF)
+		rt_cnt_gdm_af(sc);
+
+	if (status & PSE_P2_FC)
+		rt_pse_p2_fc(sc);
+
+	if (status & GDM_CRC_DROP)
+		rt_gdm_crc_drop(sc);
+
+	if (status & PSE_BUF_DROP)
+		rt_pse_buf_drop(sc);
+
+	if (status & GDM_OTHER_DROP)
+		rt_gdm_other_drop(sc);
+
+	if (status & PSE_P1_FC)
+		rt_pse_p1_fc(sc);
+
+	if (status & PSE_P0_FC)
+		rt_pse_p0_fc(sc);
+
+	if (status & PSE_FQ_EMPTY)
+		rt_pse_fq_empty(sc);
+
+	if (status & INT_TX_COHERENT)
+		rt_tx_coherent_intr(sc);
+
+	if (status & INT_RX_COHERENT)
+		rt_rx_coherent_intr(sc);
+
+	if (status & RX_DLY_INT)
+		rt_rx_delay_intr(sc);
+
+	if (status & TX_DLY_INT)
+		rt_tx_delay_intr(sc);
+
+	if (status & INT_RX_DONE)
+		rt_rx_intr(sc);
+
+	if (status & INT_TXQ3_DONE)
+		rt_tx_intr(sc, 3);
+
+	if (status & INT_TXQ2_DONE)
+		rt_tx_intr(sc, 2);
+
+	if (status & INT_TXQ1_DONE)
+		rt_tx_intr(sc, 1);
+
+	if (status & INT_TXQ0_DONE)
+		rt_tx_intr(sc, 0);
+}
+
+/*
+ * rt_tx_coherent_intr
+ */
+static void
+rt_tx_coherent_intr(struct rt_softc *sc)
+{
+	uint32_t tmp;
+	int i;
+
+	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");
+
+	sc->tx_coherent_interrupts++;
+
+	/* restart DMA engine */
+	tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
+
+	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
+
+	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
+
+	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
+		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
+
+	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
+		RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
+			sc->tx_ring[i].desc_phys_addr);
+		RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
+			RT_SOFTC_TX_RING_DESC_COUNT);
+		RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
+	}
+
+	rt_txrx_enable(sc);
+}
+
+/*
+ * rt_rx_coherent_intr
+ */
+static void
+rt_rx_coherent_intr(struct rt_softc *sc)
+{
+	uint32_t tmp;
+
+	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");
+
+	sc->rx_coherent_interrupts++;
+
+	/* restart DMA engine */
+	tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
+	tmp &= ~(FE_RX_DMA_EN);
+	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
+
+	/* init Rx ring */
+	rt_reset_rx_ring(sc, &sc->rx_ring);
+	RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
+		sc->rx_ring.desc_phys_addr);
+	RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
+		RT_SOFTC_RX_RING_DATA_COUNT);
+	RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
+		RT_SOFTC_RX_RING_DATA_COUNT - 1);
+
+	rt_txrx_enable(sc);
+}
+
+/*
+ * rt_rx_intr
+ */
+static void
+rt_rx_intr(struct rt_softc *sc)
+{
+
+	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
+	sc->rx_interrupts++;
+	RT_SOFTC_LOCK(sc);
+
+	if (!(sc->intr_disable_mask & INT_RX_DONE)) {
+		rt_intr_disable(sc, INT_RX_DONE);
+		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
+	}
+
+	sc->intr_pending_mask |= INT_RX_DONE;
+	RT_SOFTC_UNLOCK(sc);
+
+}
+
+/*
+ * rt_rx_delay_intr
+ */
+static void
+rt_rx_delay_intr(struct rt_softc *sc)
+{
+
+	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
+	sc->rx_delay_interrupts++;
+}
+
+/*
+ * rt_tx_delay_intr
+ */
+static void
+rt_tx_delay_intr(struct rt_softc *sc)
+{
+
+	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
+	sc->tx_delay_interrupts++;
+}
+
+
+/*
+ * rt_tx_intr
+ */
+static void
+rt_tx_intr(struct rt_softc *sc, int qid)
+{
+	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
+		("%s: Tx interrupt: invalid qid=%d\n",
+		 device_get_nameunit(sc->dev), qid));
+
+	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);
+
+	sc->tx_interrupts[qid]++;
+	RT_SOFTC_LOCK(sc);
+
+	if (!(sc->intr_disable_mask & (INT_TXQ0_DONE << qid))) {
+		rt_intr_disable(sc, (INT_TXQ0_DONE << qid));
+		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
+	}
+
+	sc->intr_pending_mask |= (INT_TXQ0_DONE << qid);
+	RT_SOFTC_UNLOCK(sc);
+}
+
+/*
+ * rt_rx_done_task
+ */
+static void
+rt_rx_done_task(void *context, int pending)
+{
+	struct rt_softc *sc;
+	struct ifnet *ifp;
+	int again;
+
+	sc = context;
+	ifp = sc->ifp;
+
+	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");
+
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+		return;
+
+	sc->intr_pending_mask &= ~INT_RX_DONE;
+
+	again = rt_rx_eof(sc, sc->rx_process_limit);
+
+	RT_SOFTC_LOCK(sc);
+
+	if ((sc->intr_pending_mask & INT_RX_DONE) || again) {
+		RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task: scheduling again\n");
+		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
+	} else {
+		rt_intr_enable(sc, INT_RX_DONE);
+	}
+
+	RT_SOFTC_UNLOCK(sc);
+}
+
+/*
+ * rt_tx_done_task
+ */
+static void
+rt_tx_done_task(void *context, int pending)
+{
+	struct rt_softc *sc;
+	struct ifnet *ifp;
+	uint32_t intr_mask;
+	int i;
+
+	sc = context;
+	ifp = sc->ifp;
+
+	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");
+
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+		return;
+
+	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
+		if (sc->intr_pending_mask & (INT_TXQ0_DONE << i)) {
+			sc->intr_pending_mask &= ~(INT_TXQ0_DONE << i);
+			rt_tx_eof(sc, &sc->tx_ring[i]);
+		}
+	}
+
+	sc->tx_timer = 0;
+
+	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+	intr_mask = (
+		INT_TXQ3_DONE |
+		INT_TXQ2_DONE |
+		INT_TXQ1_DONE |
+		INT_TXQ0_DONE);
+
+	RT_SOFTC_LOCK(sc);
+
+	rt_intr_enable(sc, ~sc->intr_pending_mask &
+	    (sc->intr_disable_mask & intr_mask));
+
+	if (sc->intr_pending_mask & intr_mask) {
+		RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task: scheduling again\n");
+		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
+	}
+
+	RT_SOFTC_UNLOCK(sc);
+
+	if (!IFQ_IS_EMPTY(&ifp->if_snd))
+		rt_start(ifp);
+}
+
+/*
+ * rt_periodic_task
+ */
+static void
+rt_periodic_task(void *context, int pending)
+{
+	struct rt_softc *sc;
+	struct ifnet *ifp;
+
+	sc = context;
+	ifp = sc->ifp;
+
+	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
+	    sc->periodic_round);
+
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+		return;
+
+	RT_SOFTC_LOCK(sc);
+	sc->periodic_round++;
+	rt_update_stats(sc);
+
+	if ((sc->periodic_round % 10) == 0) {
+		rt_update_raw_counters(sc);
+		rt_watchdog(sc);
+	}
+
+	RT_SOFTC_UNLOCK(sc);
+	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
+}
+
+/*
+ * rt_rx_eof
+ */
+static int
+rt_rx_eof(struct rt_softc *sc, int limit)
+{
+	struct ifnet *ifp;
+	struct rt_softc_rx_ring *ring;
+	struct rt_rxdesc *desc;
+	struct rt_softc_rx_data *data;
+	struct mbuf *m, *mnew;
+	bus_dma_segment_t segs[1];
+	bus_dmamap_t dma_map;
+	uint32_t index, desc_flags;
+	int error, nsegs, len, nframes;
+
+	ifp = sc->ifp;
+	ring = &sc->rx_ring;
+
+	nframes = 0;
+
+	while (limit != 0) {
+		index = RT_READ(sc, PDMA_BASE + RX_DRX_IDX0);
+		if (ring->cur == index)
+			break;
+
+		desc = &ring->desc[ring->cur];
+		data = &ring->data[ring->cur];
+
+		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+#ifdef IF_RT_DEBUG
+		if (sc->debug & RT_DEBUG_RX) {
+			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
+			hexdump(desc, 16, 0, 0);
+			printf("-----------------------------------\n");
+		}
+#endif
+
+		/* XXX Sometimes the device doesn't set the DDONE bit */
+#ifdef DDONE_FIXED
+		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
+			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
+			break;
+		}
+#endif
+
+		len = le16toh(desc->sdl0) & 0x3fff;
+		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);
+
+		nframes++;
+
+		mnew = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
+		if (mnew == NULL) {
+			sc->rx_mbuf_alloc_errors++;
+			ifp->if_ierrors++;
+			goto skip;
+		}
+
+		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;
+
+		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, ring->spare_dma_map,
+			mnew, segs, &nsegs, BUS_DMA_NOWAIT);
+		if (error != 0) {
+			RT_DPRINTF(sc, RT_DEBUG_RX,
+			    "could not load Rx mbuf DMA map: error=%d, "
+			    "nsegs=%d\n",
+			    error, nsegs);
+
+			m_freem(mnew);
+
+			sc->rx_mbuf_dmamap_errors++;
+			ifp->if_ierrors++;
+
+			goto skip;
+		}
+
+		KASSERT(nsegs == 1, ("%s: too many DMA segments",
+			device_get_nameunit(sc->dev)));
+
+		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
+			BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
+
+		dma_map = data->dma_map;
+		data->dma_map = ring->spare_dma_map;
+		ring->spare_dma_map = dma_map;
+
+		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
+			BUS_DMASYNC_PREREAD);
+
+		m = data->m;
+		desc_flags = desc->src;
+
+		data->m = mnew;
+		/* Add 2 for proper alignment of the RX IP header */
+		desc->sdp0 = htole32(segs[0].ds_addr+2);
+		desc->sdl0 = htole16(segs[0].ds_len-2);
+		desc->src = 0;
+		desc->ai = 0;
+		desc->foe = 0;
+
+		RT_DPRINTF(sc, RT_DEBUG_RX, "Rx frame: rxdesc flags=0x%08x\n",
+		    desc_flags);
+
+		m->m_pkthdr.rcvif = ifp;
+		/* Add 2 to fix data alignment, since sdp0 = addr + 2 */
+		m->m_data += 2;
+		m->m_pkthdr.len = m->m_len = len;
+
+		/* check for checksum errors */
+		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
+			/* check for a failed checksum */
+			if (desc_flags & (RXDSXR_SRC_IP_CSUM_FAIL|RXDSXR_SRC_L4_CSUM_FAIL)) {
+				RT_DPRINTF(sc, RT_DEBUG_RX,
+				    "rxdesc: checksum error\n");
+
+				ifp->if_ierrors++;
+
+				if (!(ifp->if_flags & IFF_PROMISC)) {
+				    m_freem(m);
+				    goto skip;
+				}
+			}
+			if ((desc_flags & RXDSXR_SRC_IP_CSUM_FAIL) == 0) {
+				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+				m->m_pkthdr.csum_data = 0xffff;
+			}
+			m->m_flags &= ~M_HASFCS;
+		}
+
+		(*ifp->if_input)(ifp, m);
+skip:
+		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
+
+		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
+			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;
+
+		limit--;
+	}
+
+	if (ring->cur == 0)
+		RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
+			RT_SOFTC_RX_RING_DATA_COUNT - 1);
+	else
+		RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
+			ring->cur - 1);
+
+	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);
+
+	sc->rx_packets += nframes;
+
+	return (limit == 0);
+}
+
+/*
+ * rt_tx_eof
+ */
+static void
+rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
+{
+	struct ifnet *ifp;
+	struct rt_txdesc *desc;
+	struct rt_softc_tx_data *data;
+	uint32_t index;
+	int ndescs, nframes;
+
+	ifp = sc->ifp;
+
+	ndescs = 0;
+	nframes = 0;
+
+	for (;;) {
+		index = RT_READ(sc, PDMA_BASE + TX_DTX_IDX(ring->qid));
+		if (ring->desc_next == index)
+			break;
+
+		ndescs++;
+
+		desc = &ring->desc[ring->desc_next];
+
+		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
+			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
+			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
+			nframes++;
+
+			data = &ring->data[ring->data_next];
+
+			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
+				BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
+
+			m_freem(data->m);
+
+			data->m = NULL;
+
+			ifp->if_opackets++;
+
+			RT_SOFTC_TX_RING_LOCK(ring);
+			ring->data_queued--;
+			ring->data_next = (ring->data_next + 1) % RT_SOFTC_TX_RING_DATA_COUNT;
+			RT_SOFTC_TX_RING_UNLOCK(ring);
+		}
+
+		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);
+
+		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
+			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+		RT_SOFTC_TX_RING_LOCK(ring);
+		ring->desc_queued--;
+		ring->desc_next = (ring->desc_next + 1) % RT_SOFTC_TX_RING_DESC_COUNT;
+		RT_SOFTC_TX_RING_UNLOCK(ring);
+	}
+
+	RT_DPRINTF(sc, RT_DEBUG_TX,
+	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs, 
+	    nframes);
+}
+
+/*
+ * rt_update_stats
+ */
+static void
+rt_update_stats(struct rt_softc *sc)
+{
+	struct ifnet *ifp;
+
+	ifp = sc->ifp;
+	RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistics\n");
+	/* XXX do update stats here */
+
+}
+
+/*
+ * rt_watchdog
+ */
+static void
+rt_watchdog(struct rt_softc *sc)
+{
+	uint32_t tmp;
+#ifdef notyet
+	int ntries;
+#endif
+
+	tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);
+
+	RT_DPRINTF(sc, RT_DEBUG_WATCHDOG, "watchdog: CDMA_OQ_STA=0x%08x\n", tmp);
+
+	/* XXX: do not reset */
+#ifdef notyet
+	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
+		sc->tx_queue_not_empty[0]++;
+
+		for (ntries = 0; ntries < 10; ntries++) {
+			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
+			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
+				break;
+
+			DELAY(1);
+		}
+	}
+
+	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
+		sc->tx_queue_not_empty[1]++;
+
+		for (ntries = 0; ntries < 10; ntries++) {
+			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
+			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
+				break;
+
+			DELAY(1);
+		}
+	}
+#endif
+}
+
+
+/*
+ * rt_update_raw_counters
+ */
+static void
+rt_update_raw_counters(struct rt_softc *sc)
+{
+
+	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
+	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
+	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
+	sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
+
+	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
+	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
+	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
+	sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
+	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
+	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
+	sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
+}
+
+/*
+ * rt_intr_enable
+ */
+static void
+rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
+{
+	uint32_t tmp;
+
+	sc->intr_disable_mask &= ~intr_mask;
+	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
+	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
+}
+
+/*
+ * rt_intr_disable
+ */
+static void
+rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
+{
+	uint32_t tmp;
+
+	sc->intr_disable_mask |= intr_mask;
+	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
+	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
+}
+
+/*
+ * rt_txrx_enable
+ */
+static int
+rt_txrx_enable(struct rt_softc *sc)
+{
+	struct ifnet *ifp;
+	uint32_t tmp;
+	int ntries;
+
+	ifp = sc->ifp;
+
+	/* enable Tx/Rx DMA engine */
+	for (ntries = 0; ntries < 200; ntries++) {
+		tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
+		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
+			break;
+
+		DELAY(1000);
+	}
+
+	if (ntries == 200) {
+		device_printf(sc->dev, "timeout waiting for DMA engine\n");
+		return -1;
+	}
+
+	DELAY(50);
+
+	tmp |= FE_TX_WB_DDONE |	FE_RX_DMA_EN | FE_TX_DMA_EN;
+	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
+
+	/* XXX set Rx filter */
+	return 0;
+}
+
+/*
+ * rt_alloc_rx_ring
+ */
+static int
+rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
+{
+	struct rt_rxdesc *desc;
+	struct rt_softc_rx_data *data;
+	bus_dma_segment_t segs[1];
+	int i, nsegs, error;
+
+	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, 
+		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
+		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
+		0, NULL, NULL, &ring->desc_dma_tag);
+	if (error != 0)	{
+		device_printf(sc->dev, "could not create Rx desc DMA tag\n");
+		goto fail;
+	}
+
+	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
+	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
+	if (error != 0) {
+		device_printf(sc->dev, "could not allocate Rx desc DMA memory\n");
+		goto fail;
+	}
+
+	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
+		ring->desc,
+		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
+		rt_dma_map_addr, &ring->desc_phys_addr, 0);
+	if (error != 0) {
+		device_printf(sc->dev, "could not load Rx desc DMA map\n");
+		goto fail;
+	}
+
+	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, 
+	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
+		&ring->data_dma_tag);
+	if (error != 0)	{
+		device_printf(sc->dev, "could not create Rx data DMA tag\n");
+		goto fail;
+	}
+
+	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
+		desc = &ring->desc[i];
+		data = &ring->data[i];
+
+		error = bus_dmamap_create(ring->data_dma_tag, 0, &data->dma_map);
+		if (error != 0)	{
+			device_printf(sc->dev, "could not create Rx data DMA "
+			    "map\n");
+			goto fail;
+		}
+
+		data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
+		if (data->m == NULL) {
+			device_printf(sc->dev, "could not allocate Rx mbuf\n");
+			error = ENOMEM;
+			goto fail;
+		}
+
+		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
+
+		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map,
+			data->m, segs, &nsegs, BUS_DMA_NOWAIT);
+		if (error != 0)	{
+			device_printf(sc->dev, "could not load Rx mbuf DMA map\n");
+			goto fail;
+		}
+
+		KASSERT(nsegs == 1, ("%s: too many DMA segments",
+			device_get_nameunit(sc->dev)));
+
+		/*
+		 * Reserve 2 bytes in front of the frame so that the IP
+		 * header is 4-byte aligned after the Ethernet header.
+		 */
+		desc->sdp0 = htole32(segs[0].ds_addr + 2);
+		desc->sdl0 = htole32(segs[0].ds_len - 2);
+	}
+
+	error = bus_dmamap_create(ring->data_dma_tag, 0, &ring->spare_dma_map);
+	if (error != 0) {
+		device_printf(sc->dev, "could not create Rx spare DMA map\n");
+		goto fail;
+	}
+
+	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
+		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+	return 0;
+
+fail:
+	rt_free_rx_ring(sc, ring);
+
+	return error;
+}
+
+/*
+ * rt_reset_rx_ring
+ */
+static void
+rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
+{
+	struct rt_rxdesc *desc;
+	int i;
+
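+	/* Clear DDONE in every descriptor to hand them back to the MAC. */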
+	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
+		desc = &ring->desc[i];
+		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
+	}
+
+	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
+		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+	ring->cur = 0;
+}
+
+/*
+ * rt_free_rx_ring
+ */
+static void
+rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
+{
+	struct rt_softc_rx_data *data;
+	int i;
+
+	if (ring->desc != NULL) {
+		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
+			BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
+		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
+			ring->desc_dma_map);
+	}
+
+	if (ring->desc_dma_tag != NULL)
+		bus_dma_tag_destroy(ring->desc_dma_tag);
+
+	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
+		data = &ring->data[i];
+
+		if (data->m != NULL) {
+			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
+				BUS_DMASYNC_POSTREAD);
+			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
+			m_freem(data->m);
+		}
+
+		if (data->dma_map != NULL)
+			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
+	}
+
+	if (ring->spare_dma_map != NULL)
+		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
+
+	if (ring->data_dma_tag != NULL)
+		bus_dma_tag_destroy(ring->data_dma_tag);
+}
+
+/*
+ * rt_alloc_tx_ring
+ */
+static int
+rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
+{
+	struct rt_softc_tx_data *data;
+	int error, i;
+
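+	/*
+	 * Each Tx ring carries three DMA areas: the descriptor ring, a
+	 * fixed "seg0" region (apparently a per-packet staging buffer for
+	 * the first segment), and per-packet maps for mbuf chains.
+	 */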
+	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
+
+	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, 
+		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
+		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
+		0, NULL, NULL, &ring->desc_dma_tag);
+	if (error != 0) {
+		device_printf(sc->dev, "could not create Tx desc DMA tag\n");
+		goto fail;
+	}
+
+	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
+	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
+	if (error != 0)	{
+		device_printf(sc->dev, "could not allocate Tx desc DMA memory\n");
+		goto fail;
+	}
+
+	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
+	    ring->desc,	RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
+	    rt_dma_map_addr, &ring->desc_phys_addr, 0);
+	if (error != 0) {
+		device_printf(sc->dev, "could not load Tx desc DMA map\n");
+		goto fail;
+	}
+
+	ring->desc_queued = 0;
+	ring->desc_cur = 0;
+	ring->desc_next = 0;
+
+	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, 
+	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
+	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
+	    0, NULL, NULL, &ring->seg0_dma_tag);
+	if (error != 0) {
+		device_printf(sc->dev, "could not create Tx seg0 DMA tag\n");
+		goto fail;
+	}
+
+	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
+	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
+	if (error != 0) {
+		device_printf(sc->dev, "could not allocate Tx seg0 DMA memory\n");
+		goto fail;
+	}
+
+	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
+	    ring->seg0,	RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
+	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
+	if (error != 0) {
+		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
+		goto fail;
+	}
+
+	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, 
+	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
+	    &ring->data_dma_tag);
+	if (error != 0) {
+		device_printf(sc->dev, "could not create Tx data DMA tag\n");
+		goto fail;
+	}
+
+	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
+		data = &ring->data[i];
+
+		error = bus_dmamap_create(ring->data_dma_tag, 0, &data->dma_map);
+		if (error != 0) {
+			device_printf(sc->dev, "could not create Tx data DMA "
+			    "map\n");
+			goto fail;
+		}
+	}
+
+	ring->data_queued = 0;
+	ring->data_cur = 0;
+	ring->data_next = 0;
+
+	ring->qid = qid;
+
+	return 0;
+
+fail:
+	rt_free_tx_ring(sc, ring);
+
+	return error;
+}
+
+/*
+ * rt_reset_tx_ring
+ */
+static void
+rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
+{
+	struct rt_softc_tx_data *data;
+	struct rt_txdesc *desc;
+	int i;
+
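+	/* Zero sdl0/sdl1 so the DMA engine sees no pending work. */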
+	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
+		desc = &ring->desc[i];
+
+		desc->sdl0 = 0;
+		desc->sdl1 = 0;
+	}
+
+	ring->desc_queued = 0;
+	ring->desc_cur = 0;
+	ring->desc_next = 0;
+
+	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
+		BUS_DMASYNC_PREWRITE);
+
+	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
+		BUS_DMASYNC_PREWRITE);
+
+	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
+		data = &ring->data[i];
+
+		if (data->m != NULL) {
+			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
+				BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
+			m_freem(data->m);
+			data->m = NULL;
+		}
+	}
+
+	ring->data_queued = 0;
+	ring->data_cur = 0;
+	ring->data_next = 0;
+}
+
+/*
+ * rt_free_tx_ring
+ */
+static void
+rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
+{
+	struct rt_softc_tx_data *data;
+	int i;
+
+	if (ring->desc != NULL) {
+		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
+			BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
+		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
+			ring->desc_dma_map);
+	}
+
+	if (ring->desc_dma_tag != NULL)
+		bus_dma_tag_destroy(ring->desc_dma_tag);
+
+	if (ring->seg0 != NULL) {
+		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
+			BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
+		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
+			ring->seg0_dma_map);
+	}
+
+	if (ring->seg0_dma_tag != NULL)
+		bus_dma_tag_destroy(ring->seg0_dma_tag);
+
+	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
+		data = &ring->data[i];
+
+		if (data->m != NULL) {
+			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
+				BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
+			m_freem(data->m);
+		}
+
+		if (data->dma_map != NULL)
+			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
+	}
+
+	if (ring->data_dma_tag != NULL)
+		bus_dma_tag_destroy(ring->data_dma_tag);
+
+	mtx_destroy(&ring->lock);
+}
+
+/*
+ * rt_dma_map_addr
+ */
+static void
+rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
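+	/*
+	 * bus_dmamap_load() callback: the tags used with it allow exactly
+	 * one segment, so just record that segment's bus address.
+	 */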
+	if (error != 0)
+		return;
+
+	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
+
+	*(bus_addr_t *) arg = segs[0].ds_addr;
+}
+
+/*
+ * rt_sysctl_attach
+ */
+static void
+rt_sysctl_attach(struct rt_softc *sc)
+{
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid *tree;
+	struct sysctl_oid *stats;
+
+	ctx = device_get_sysctl_ctx(sc->dev);
+	tree = device_get_sysctl_tree(sc->dev);
+
+	/* statistics counters */
+	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+		"stats", CTLFLAG_RD, 0, "statistics");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"interrupts", CTLFLAG_RD, &sc->interrupts, 0,
+		"all interrupts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts, 0,
+		"Tx coherent interrupts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts, 0,
+		"Rx coherent interrupts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts, 0,
+		"Rx interrupts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts, 0,
+		"Rx delay interrupts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3], 0,
+		"Tx AC3 interrupts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2], 0,
+		"Tx AC2 interrupts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1], 0,
+		"Tx AC1 interrupts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0], 0,
+		"Tx AC0 interrupts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts, 0,
+		"Tx delay interrupts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued, 0,
+		"Tx AC3 descriptors queued");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued, 0,
+		"Tx AC3 data queued");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued, 0,
+		"Tx AC2 descriptors queued");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued, 0,
+		"Tx AC2 data queued");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued, 0,
+		"Tx AC1 descriptors queued");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued, 0,
+		"Tx AC1 data queued");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued, 0,
+		"Tx AC0 descriptors queued");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued, 0,
+		"Tx AC0 data queued");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3], 0,
+		"Tx AC3 data queue full");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2], 0,
+		"Tx AC2 data queue full");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1], 0,
+		"Tx AC1 data queue full");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0], 0,
+		"Tx AC0 data queue full");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts, 0,
+		"Tx watchdog timeouts");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets, 0,
+		"Tx defragmented packets");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail, 0,
+		"no Tx descriptors available");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors, 0,
+		"Rx mbuf allocation errors");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors, 0,
+		"Rx mbuf DMA mapping errors");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0], 0,
+		"Tx queue 0 not empty");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1], 0,
+		"Tx queue 1 not empty");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_packets", CTLFLAG_RD, &sc->rx_packets, 0,
+		"Rx packets");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err, 0,
+		"Rx CRC errors");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err, 0,
+		"Rx PHY errors");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets, 0,
+		"Rx duplicate packets");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows, 0,
+		"Rx FIFO overflows");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_bytes", CTLFLAG_RD, &sc->rx_bytes, 0,
+		"Rx bytes");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_long_err", CTLFLAG_RD, &sc->rx_long_err, 0,
+		"Rx too long frame errors");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"rx_short_err", CTLFLAG_RD, &sc->rx_short_err, 0,
+		"Rx too short frame errors");
+
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"tx_bytes", CTLFLAG_RD, &sc->tx_bytes, 0,
+		"Tx bytes");
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"tx_packets", CTLFLAG_RD, &sc->tx_packets, 0,
+		"Tx packets");
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"tx_skip", CTLFLAG_RD, &sc->tx_skip, 0,
+		"Tx skip count for GDMA ports");
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
+		"tx_collision", CTLFLAG_RD, &sc->tx_collision, 0,
+		"Tx collision count for GDMA ports");
+}
+
+#ifdef IF_RT_PHY_SUPPORT
+static int
+rt_miibus_readreg(device_t dev, int phy, int reg)
+{
+	struct rt_softc *sc = device_get_softc(dev);
+
+	/*
+	 * A special pseudo PHY address is used to indicate an attached
+	 * switch; no real PHY responds at that address.
+	 */
+	if (phy == 31) {
+		/* Fake the PHY ID registers so miibus attaches bfeswitch. */
+		switch (reg) {
+		case MII_BMSR:
+			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
+		case MII_PHYIDR1:
+			return 0x40;		/* faked OUI, so the PHY */
+		case MII_PHYIDR2:
+			return 0x6250;		/* probes as a bfeswitch */
+		}
+	}
+
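+	/* XXX: the MDIO busy-waits below spin with no timeout. */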
+	/* Wait for the previous MDIO command, if any, to complete */
+	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
+	RT_WRITE(sc, MDIO_ACCESS,
+	    MDIO_CMD_ONGO |
+	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
+	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
+	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
+
+	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
+}
+
+static int
+rt_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+	struct rt_softc *sc = device_get_softc(dev);
+
+	/* Wait for the previous MDIO command, if any, to complete */
+	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
+	RT_WRITE(sc, MDIO_ACCESS,
+	    MDIO_CMD_ONGO | MDIO_CMD_WR |
+	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
+	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
+	    (val & MDIO_PHY_DATA_MASK));
+	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
+
+	return (0);
+}
+
+static void
+rt_miibus_statchg(device_t dev)
+{
+	struct rt_softc *sc = device_get_softc(dev);
+	struct mii_data *mii;
+
+	mii = device_get_softc(sc->rt_miibus);
+
+	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
+	    (IFM_ACTIVE | IFM_AVALID)) {
+		switch (IFM_SUBTYPE(mii->mii_media_active)) {
+		case IFM_10_T:
+		case IFM_100_TX:
+			/* XXX check link here */
+			sc->flags |= 1;
+			break;
+		default:
+			break;
+		}
+	}
+}
+#endif /* IF_RT_PHY_SUPPORT */
+
+static device_method_t rt_dev_methods[] =
+{
+	DEVMETHOD(device_probe,		rt_probe),
+	DEVMETHOD(device_attach,	rt_attach),
+	DEVMETHOD(device_detach,	rt_detach),
+	DEVMETHOD(device_shutdown,	rt_shutdown),
+	DEVMETHOD(device_suspend,	rt_suspend),
+	DEVMETHOD(device_resume,	rt_resume),
+
+	/* bus interface */
+	DEVMETHOD(bus_print_child,	bus_generic_print_child),
+	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
+
+#ifdef IF_RT_PHY_SUPPORT
+	/* MII interface */
+	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
+	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
+	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
+#endif
+	{ 0, 0 }
+};
+
+static driver_t rt_driver =
+{
+	"rt",
+	rt_dev_methods,
+	sizeof(struct rt_softc)
+};
+
+static devclass_t rt_dev_class;
+
+DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
+MODULE_DEPEND(rt, ether, 1, 1, 1);
+MODULE_DEPEND(rt, miibus, 1, 1, 1);
+

Property changes on: sys/dev/rt/if_rt.c
___________________________________________________________________
Added: svn:mime-type
   + text/plain
Added: svn:keywords
   + FreeBSD=%H
Added: svn:eol-style
   + native

Index: sys/dev/rt/if_rtreg.h
===================================================================
--- sys/dev/rt/if_rtreg.h	(revision 0)
+++ sys/dev/rt/if_rtreg.h	(revision 0)
@@ -0,0 +1,289 @@
+/*-
+ * Copyright (c) 2009, Aleksandr Rybalko
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __IF_RTREG_H__
+#define __IF_RTREG_H__
+
+#define RT_READ(sc, reg)				\
+	bus_space_read_4((sc)->bst, (sc)->bsh, reg)
+
+#define RT_WRITE(sc, reg, val)				\
+	bus_space_write_4((sc)->bst, (sc)->bsh, reg, val)
+
+#define GE_PORT_BASE 0x0000
+
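+/*
+ * MDIO_ACCESS layout (from the masks below): bit 31 go/busy, bit 30
+ * write, bits 28..24 PHY address, bits 20..16 register, bits 15..0 data.
+ */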
+#define MDIO_ACCESS	0x00
+#define 	MDIO_CMD_ONGO		(1<<31)
+#define 	MDIO_CMD_WR		(1<<30)
+#define 	MDIO_PHY_ADDR_MASK	0x1f000000
+#define 	MDIO_PHY_ADDR_SHIFT	24
+#define 	MDIO_PHYREG_ADDR_MASK	0x001f0000
+#define 	MDIO_PHYREG_ADDR_SHIFT	16
+#define 	MDIO_PHY_DATA_MASK	0x0000ffff
+#define 	MDIO_PHY_DATA_SHIFT	0
+
+#define FE_GLO_CFG	0x08 /* Frame Engine Global Configuration */
+#define 	EXT_VLAN_TYPE_MASK	0xffff0000
+#define 	EXT_VLAN_TYPE_SHIFT	16
+#define 	EXT_VLAN_TYPE_DFLT	0x81000000
+#define 	US_CYC_CNT_MASK		0x0000ff00
+#define 	US_CYC_CNT_SHIFT	8
+#define 	US_CYC_CNT_DFLT		(132<<8) /* system clocks per 1 us */
+#define 	L2_SPACE		(8<<4) /* L2 space. Unit is 8 bytes */
+
+#define FE_RST_GLO	0x0C /* Frame Engine Global Reset */
+#define 	FC_DROP_CNT_MASK	0xffff0000 /* Flow control drop count */
+#define 	FC_DROP_CNT_SHIFT	16
+#define 	PSE_RESET		(1<<0)
+
+#define FE_INT_STATUS	0x10
+#define		CNT_PPE_AF		(1<<31)
+#define		CNT_GDM_AF		(1<<29)
+#define		PSE_P2_FC		(1<<26)
+#define		GDM_CRC_DROP		(1<<25)
+#define		PSE_BUF_DROP		(1<<24)
+#define		GDM_OTHER_DROP		(1<<23)
+#define		PSE_P1_FC		(1<<22)
+#define		PSE_P0_FC		(1<<21)
+#define		PSE_FQ_EMPTY		(1<<20)
+#define		INT_TX_COHERENT		(1<<17)
+#define		INT_RX_COHERENT		(1<<16)
+#define		INT_TXQ3_DONE		(1<<11)
+#define		INT_TXQ2_DONE		(1<<10)
+#define		INT_TXQ1_DONE		(1<<9)
+#define		INT_TXQ0_DONE		(1<<8)
+#define		INT_RX_DONE			(1<<2)
+#define		TX_DLY_INT		(1<<1) /* TXQ[0|1]_DONE with delay */
+#define		RX_DLY_INT		(1<<0) /* RX_DONE with delay */
+#define FE_INT_ENABLE	0x14
+#define MDIO_CFG2	0x18
+#define FOE_TS_T	0x1c
+#define		PSE_FQ_PCNT_MASK	0xff000000
+#define		PSE_FQ_PCNT_SHIFT	24
+#define		FOE_TS_TIMESTAMP_MASK	0x0000ffff
+#define		FOE_TS_TIMESTAMP_SHIFT	0
+
+#define GDMA1_BASE 0x0020
+#define GDMA2_BASE 0x0060
+#define CDMA_BASE  0x0080
+
+#define GDMA_FWD_CFG	0x00	/* Only GDMA */
+#define 	GDM_DROP_256B		(1<<23)
+#define 	GDM_ICS_EN		(1<<22)
+#define 	GDM_TCS_EN		(1<<21)
+#define 	GDM_UCS_EN		(1<<20)
+#define 	GDM_DISPAD		(1<<18)
+#define 	GDM_DISCRC		(1<<17)
+#define 	GDM_STRPCRC		(1<<16)
+#define 	GDM_UFRC_P_SHIFT	12
+#define 	GDM_BFRC_P_SHIFT	8
+#define 	GDM_MFRC_P_SHIFT	4
+#define 	GDM_OFRC_P_SHIFT	0
+#define 	GDM_XFRC_P_MASK		0x07
+#define 	GDM_DST_PORT_CPU	0
+#define 	GDM_DST_PORT_GDMA1	1
+#define 	GDM_DST_PORT_GDMA2	2
+#define 	GDM_DST_PORT_PPE	6
+#define 	GDM_DST_PORT_DISCARD	7
+
+#define CDMA_CSG_CFG	0x00	/* Only CDMA */
+#define 	INS_VLAN_TAG		(0x8100<<16)
+#define 	ICS_GEN_EN		(1<<2)
+#define 	TCS_GEN_EN		(1<<1)
+#define 	UCS_GEN_EN		(1<<0)
+
+#define GDMA_SCH_CFG	0x04
+#define 	GDM1_SCH_MOD_MASK	0x03000000
+#define 	GDM1_SCH_MOD_SHIFT	24
+#define 	GDM1_SCH_MOD_WRR	0
+#define 	GDM1_SCH_MOD_STRICT	1
+#define 	GDM1_SCH_MOD_MIXED	2
+#define 	GDM1_WT_1	0
+#define 	GDM1_WT_2	1
+#define 	GDM1_WT_4	2
+#define 	GDM1_WT_8	3
+#define 	GDM1_WT_16	4
+#define 	GDM1_WT_Q3_SHIFT	12
+#define 	GDM1_WT_Q2_SHIFT	8
+#define 	GDM1_WT_Q1_SHIFT	4
+#define 	GDM1_WT_Q0_SHIFT	0
+
+#define GDMA_SHPR_CFG	0x08
+#define 	GDM1_SHPR_EN	(1<<24)
+#define 	GDM1_BK_SIZE_MASK	0x00ff0000 /* Bucket size 1kB units */
+#define 	GDM1_BK_SIZE_SHIFT	16
+#define 	GDM1_TK_RATE_MASK	0x00003fff /* Shaper token rate 8B/ms */
+#define 	GDM1_TK_RATE_SHIFT	0
+
+#define GDMA_MAC_ADRL	 0x0C
+#define GDMA_MAC_ADRH	 0x10
+
+#define PPPOE_SID_0001		0x08 /* 0..15 SID0, 16..31 SID1 */
+#define PPPOE_SID_0203		0x0c
+#define PPPOE_SID_0405		0x10
+#define PPPOE_SID_0607		0x14
+#define PPPOE_SID_0809		0x18
+#define PPPOE_SID_1011		0x1c
+#define PPPOE_SID_1213		0x20
+#define PPPOE_SID_1415		0x24
+#define VLAN_ID_0001		0x28 /* 0..11 VID0, 16..27 VID1 */
+#define VLAN_ID_0203		0x2c
+#define VLAN_ID_0405		0x30
+#define VLAN_ID_0607		0x34
+#define VLAN_ID_0809		0x38
+#define VLAN_ID_1011		0x3c
+#define VLAN_ID_1213		0x40
+#define VLAN_ID_1415		0x44
+
+#define PSE_BASE 0x0040
+#define PSE_FQFC_CFG        0x00
+#define 	FQ_MAX_PCNT_MASK	0xff000000
+#define 	FQ_MAX_PCNT_SHIFT	24
+#define 	FQ_FC_RLS_MASK		0x00ff0000
+#define 	FQ_FC_RLS_SHIFT		16
+#define 	FQ_FC_ASRT_MASK		0x0000ff00
+#define 	FQ_FC_ASRT_SHIFT	8
+#define 	FQ_FC_DROP_MASK		0x000000ff
+#define 	FQ_FC_DROP_SHIFT	0
+
+#define CDMA_FC_CFG         0x04
+#define GDMA1_FC_CFG        0x08
+#define GDMA2_FC_CFG        0x0C
+#define 	P_SHARING		(1<<28)
+#define 	P_HQ_DEF_MASK		0x0f000000
+#define 	P_HQ_DEF_SHIFT		24
+#define 	P_HQ_RESV_MASK		0x00ff0000
+#define 	P_HQ_RESV_SHIFT	16
+#define 	P_LQ_RESV_MASK		0x0000ff00
+#define 	P_LQ_RESV_SHIFT	8
+#define 	P_IQ_ASRT_MASK		0x000000ff
+#define 	P_IQ_ASRT_SHIFT	0
+
+#define CDMA_OQ_STA         0x10
+#define GDMA1_OQ_STA        0x14
+#define GDMA2_OQ_STA        0x18
+#define 	P_OQ3_PCNT_MASK		0xff000000
+#define 	P_OQ3_PCNT_SHIFT	24
+#define 	P_OQ2_PCNT_MASK		0x00ff0000
+#define 	P_OQ2_PCNT_SHIFT	16
+#define 	P_OQ1_PCNT_MASK		0x0000ff00
+#define 	P_OQ1_PCNT_SHIFT	8
+#define 	P_OQ0_PCNT_MASK		0x000000ff
+#define 	P_OQ0_PCNT_SHIFT	0
+
+#define PSE_IQ_STA          0x1C
+#define 	P6_OQ0_PCNT_MASK	0xff000000
+#define 	P6_OQ0_PCNT_SHIFT	24
+#define 	P2_IQ_PCNT_MASK		0x00ff0000
+#define 	P2_IQ_PCNT_SHIFT	16
+#define 	P1_IQ_PCNT_MASK		0x0000ff00
+#define 	P1_IQ_PCNT_SHIFT	8
+#define 	P0_IQ_PCNT_MASK		0x000000ff
+#define 	P0_IQ_PCNT_SHIFT	0
+
+#define PDMA_BASE 0x0100
+#define PDMA_GLO_CFG        0x00
+#define 	FE_TX_WB_DDONE       (1<<6)
+#define 	FE_DMA_BT_SIZE4      (0<<4)
+#define 	FE_DMA_BT_SIZE8      (1<<4)
+#define 	FE_DMA_BT_SIZE16     (2<<4)
+#define 	FE_RX_DMA_BUSY       (1<<3)
+#define 	FE_RX_DMA_EN         (1<<2)
+#define 	FE_TX_DMA_BUSY       (1<<1)
+#define 	FE_TX_DMA_EN         (1<<0)
+#define PDMA_RST_IDX        0x04
+#define 	FE_RST_DRX_IDX0		(1<<16)
+#define 	FE_RST_DTX_IDX3		(1<<3)
+#define 	FE_RST_DTX_IDX2		(1<<2)
+#define 	FE_RST_DTX_IDX1		(1<<1)
+#define 	FE_RST_DTX_IDX0		(1<<0)
+
+#define PDMA_SCH_CFG        0x08
+#define DELAY_INT_CFG       0x0C
+#define 	TXDLY_INT_EN 		(1<<31)
+#define 	TXMAX_PINT_SHIFT	24
+#define 	TXMAX_PTIME_SHIFT	16
+#define 	RXDLY_INT_EN		(1<<15)
+#define 	RXMAX_PINT_SHIFT	8
+#define 	RXMAX_PTIME_SHIFT	0
+
+#define TX_BASE_PTR0        0x10
+#define TX_MAX_CNT0         0x14
+#define TX_CTX_IDX0         0x18
+#define TX_DTX_IDX0         0x1C
+
+#define TX_BASE_PTR1        0x20
+#define TX_MAX_CNT1         0x24
+#define TX_CTX_IDX1         0x28
+#define TX_DTX_IDX1         0x2C
+
+#define RX_BASE_PTR0        0x30
+#define RX_MAX_CNT0         0x34
+#define RX_CALC_IDX0        0x38
+#define RX_DRX_IDX0         0x3C
+
+#define TX_BASE_PTR2        0x40
+#define TX_MAX_CNT2         0x44
+#define TX_CTX_IDX2         0x48
+#define TX_DTX_IDX2         0x4C
+
+#define TX_BASE_PTR3        0x50
+#define TX_MAX_CNT3         0x54
+#define TX_CTX_IDX3         0x58
+#define TX_DTX_IDX3         0x5C
+
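+/*
+ * The Tx queue registers are not contiguous: queues 0 and 1 start at
+ * 0x10 and 0x20, but the Rx block at 0x30 separates them from queues 2
+ * and 3 at 0x40 and 0x50, hence the qid > 1 adjustment below.
+ */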
+#define TX_BASE_PTR(qid)	((((qid) > 1) ? 0x20 : 0x10) + (qid) * 16)
+#define TX_MAX_CNT(qid)		((((qid) > 1) ? 0x24 : 0x14) + (qid) * 16)
+#define TX_CTX_IDX(qid)		((((qid) > 1) ? 0x28 : 0x18) + (qid) * 16)
+#define TX_DTX_IDX(qid)		((((qid) > 1) ? 0x2c : 0x1c) + (qid) * 16)
+
+#define PPE_BASE 0x0200
+
+#define CNTR_BASE 0x0400
+#define PPE_AC_BCNT0		0x000
+#define PPE_AC_PCNT0		0x004
+#define PPE_AC_BCNT63		0x1F8
+#define PPE_AC_PCNT63		0x1FC
+#define PPE_MTR_CNT0		0x200
+#define PPE_MTR_CNT63		0x2FC
+#define GDMA_TX_GBCNT0		0x300
+#define GDMA_TX_GPCNT0		0x304
+#define GDMA_TX_SKIPCNT0	0x308
+#define GDMA_TX_COLCNT0		0x30C
+#define GDMA_RX_GBCNT0		0x320
+#define GDMA_RX_GPCNT0		0x324
+#define GDMA_RX_OERCNT0		0x328
+#define GDMA_RX_FERCNT0		0x32C
+#define GDMA_RX_SHORT_ERCNT0	0x330
+#define GDMA_RX_LONG_ERCNT0	0x334
+#define GDMA_RX_CSUM_ERCNT0	0x338
+
+#define POLICYTABLE_BASE 	0x1000
+
+#endif /* __IF_RTREG_H__ */

Property changes on: sys/dev/rt/if_rtreg.h
___________________________________________________________________
Added: svn:mime-type
   + text/plain
Added: svn:keywords
   + FreeBSD=%H
Added: svn:eol-style
   + native


--Multipart=_Thu__30_Jun_2011_15_22_49_+0300_Nm_S4zP.J.=dEWOr--
