Date: Mon, 23 Apr 2007 04:51:30 GMT
From: Kip Macy <kmacy@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 118631 for review
Message-ID: <200704230451.l3N4pUxs030552@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=118631

Change 118631 by kmacy@kmacy_vt-x:opentoe_init on 2007/04/23 04:51:05

	bring FreeBSD driver somewhat more in line with functionality in the
	Linux driver
	- add most of the missing ioctls
	- allocate interrupts when bringing up first port / de-allocate when
	  taking down last
	- build l2t
	- add missing tunables

Affected files ...

.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_adapter.h#9 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_ioctl.h#3 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_l2t.c#3 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_l2t.h#3 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_main.c#6 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_offload.c#6 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_offload.h#6 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_osdep.h#7 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_sge.c#8 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_toedev.h#4 edit
.. //depot/projects/opentoe/sys/modules/cxgb/Makefile#6 edit

Differences ...

==== //depot/projects/opentoe/sys/dev/cxgb/cxgb_adapter.h#9 (text+ko) ====

@@ -141,14 +141,20 @@
 	uint32_t	holdoff_tmr;
 	uint32_t	next_holdoff;
 	uint32_t	imm_data;
+	struct rsp_desc	*desc;
+	uint32_t	cntxt_id;
+	struct mtx	lock;
+	struct mbuf	*rx_head;	/* offload packet receive queue head */
+	struct mbuf	*rx_tail;	/* offload packet receive queue tail */
+
+	uint32_t	offload_pkts;
+	uint32_t	offload_bundles;
 	uint32_t	pure_rsps;
-	struct rsp_desc	*desc;
+
 	bus_addr_t	phys_addr;
-	uint32_t	cntxt_id;
 	bus_dma_tag_t	desc_tag;
 	bus_dmamap_t	desc_map;
 	struct mbuf	*m;
-	struct mtx	lock;
 };
 
 struct rx_desc;
@@ -242,6 +248,7 @@
 	bus_space_tag_t	bt;
 	bus_size_t	mmio_len;
 	uint32_t	link_width;
+
 	/* DMA resources */
 	bus_dma_tag_t	parent_dmat;
 
@@ -292,6 +299,8 @@
 	uint32_t	open_device_map;
 	uint32_t	registered_device_map;
 	struct mtx	lock;
+	driver_intr_t	*cxgb_intr;
+	int		msi_count;
 };
 
 struct t3_rx_mode {
@@ -391,6 +400,7 @@
 		    int, struct port_info *);
 void t3_free_sge_resources(adapter_t *);
 void t3_sge_start(adapter_t *);
+void t3_sge_stop(adapter_t *);
 void t3b_intr(void *data);
 void t3_intr_msi(void *data);
 void t3_intr_msix(void *data);

==== //depot/projects/opentoe/sys/dev/cxgb/cxgb_ioctl.h#3 (text+ko) ====

@@ -68,7 +68,8 @@
 	CH_IFCONF_GETREGS,
 	CH_GETMIIREGS,
 	CH_SETMIIREGS,
-
+	CH_SET_FILTER,
+	CH_SET_HW_SCHED,
 };
 
 struct ch_reg {
@@ -217,6 +218,7 @@
 
 #define CHELSIO_SETREG _IOW('f', CH_SETREG, struct ch_reg)
 #define CHELSIO_GETREG _IOWR('f', CH_GETREG, struct ch_reg)
+#define CHELSIO_READ_TCAM_WORD _IOR('f', CH_READ_TCAM_WORD, struct ch_tcam)
 #define CHELSIO_GET_MEM _IOWR('f', CH_GET_MEM, struct ch_mem_range)
 #define CHELSIO_GET_SGE_CONTEXT _IOWR('f', CH_GET_SGE_CONTEXT, struct ch_cntxt)
 #define CHELSIO_GET_SGE_DESC _IOWR('f', CH_GET_SGE_DESC, struct ch_desc)
@@ -224,6 +226,8 @@
 #define CHELSIO_SET_QSET_PARAMS _IOW('f', CH_SET_QSET_PARAMS, struct ch_qset_params)
 #define CHELSIO_GET_QSET_NUM _IOWR('f', CH_GET_QSET_NUM, struct ch_reg)
 #define CHELSIO_SET_QSET_NUM _IOW('f', CH_SET_QSET_NUM, struct ch_reg)
+#define CHELSIO_GETMTUTAB _IOR('f', CH_GET_QSET_NUM, struct ch_mtus)
+#define CHELSIO_SETMTUTAB _IOW('f', CH_SET_QSET_NUM, struct ch_mtus)
 
 #define CHELSIO_SET_TRACE_FILTER _IOW('f', CH_SET_TRACE_FILTER, struct ch_trace)
 
@@ -231,4 +235,6 @@
 #define CHELSIO_IFCONF_GETREGS _IOWR('f', CH_IFCONF_GETREGS, struct ifconf_regs)
 #define SIOCGMIIREG _IOWR('f', CH_GETMIIREGS, struct mii_data)
 #define SIOCSMIIREG _IOWR('f', CH_SETMIIREGS, struct mii_data)
+#define CHELSIO_SET_HW_SCHED
_IOWR('f', CH_SET_HW_SCHED, struct ch_hw_sched) +#define CHELSIO_DEVUP _IO('f', CH_DEVUP) #endif ==== //depot/projects/opentoe/sys/dev/cxgb/cxgb_l2t.c#3 (text+ko) ==== @@ -1,14 +1,62 @@ -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/if.h> -#include <linux/if_vlan.h> -#include <linux/jhash.h> -#include <net/neighbour.h> -#include "cxgb3_defs.h" -#include "l2t.h" -#include "t3_cpl.h" -#include "firmware_exports.h" -#include "cxgb3_compat.h" +/************************************************************************** + +Copyright (c) 2007, Chelsio Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Chelsio Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/kernel.h> +#include <sys/module.h> +#include <sys/bus.h> +#include <sys/lock.h> +#include <sys/mutex.h> +#include <sys/rwlock.h> + + + + +#include <dev/cxgb/common/cxgb_common.h> +#include <dev/cxgb/common/cxgb_regs.h> +#include <dev/cxgb/common/cxgb_sge_defs.h> +#include <dev/cxgb/common/cxgb_t3_cpl.h> +#include <dev/cxgb/common/cxgb_firmware_exports.h> +#include <dev/cxgb/common/jhash.h> +#include <dev/cxgb/cxgb_offload.h> + +#include <net/if_vlan_var.h> + +#define neigh_release(...) #define VLAN_NONE 0xfff @@ -26,146 +74,160 @@ * these perform lookups. */ -static inline unsigned int vlan_prio(const struct l2t_entry *e) +static inline unsigned int +vlan_prio(const struct l2t_entry *e) { return e->vlan >> 13; } -static inline unsigned int arp_hash(u32 key, int ifindex, - const struct l2t_data *d) +static inline unsigned int +arp_hash(u32 key, int ifindex, const struct l2t_data *d) { return jhash_2words(key, ifindex, 0) & (d->nentries - 1); } -static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n) +static inline void +neigh_replace(struct l2t_entry *e, struct ifnet *n) { +#if 0 neigh_hold(n); if (e->neigh) neigh_release(e->neigh); e->neigh = n; +#endif } - +#if 0 /* * Set up an L2T entry and send any packets waiting in the arp queue. 
The - * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the + * supplied mbuf is used for the CPL_L2T_WRITE_REQ. Must be called with the * entry locked. */ -static int setup_l2e_send_pending(struct toedev *dev, struct sk_buff *skb, - struct l2t_entry *e) +static int +setup_l2e_send_pending(struct toedev *dev, struct mbuf *m, + struct l2t_entry *e) { struct cpl_l2t_write_req *req; - if (!skb) { - skb = alloc_skb(sizeof(*req), GFP_ATOMIC); - if (!skb) - return -ENOMEM; + if (!m) { + if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL) + return (ENOMEM); } - - req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + /* + * XXX MH_ALIGN + */ + req = mtod(m, struct cpl_l2t_write_req *); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx)); req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) | - V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) | + V_L2T_W_VLAN(e->vlan & EVL_VLID_MASK) | V_L2T_W_PRIO(vlan_prio(e))); +#if 0 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); +#endif memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); - skb->priority = CPL_PRIORITY_CONTROL; - cxgb3_ofld_send(dev, skb); + m->m_priority = CPL_PRIORITY_CONTROL; + cxgb_ofld_send(dev, m); while (e->arpq_head) { - skb = e->arpq_head; - e->arpq_head = skb->next; - skb->next = NULL; - cxgb3_ofld_send(dev, skb); + m = e->arpq_head; + e->arpq_head = m->m_next; + m->m_next = NULL; + cxgb_ofld_send(dev, m); } e->arpq_tail = NULL; e->state = L2T_STATE_VALID; return 0; } - +#endif /* * Add a packet to the an L2T entry's queue of packets awaiting resolution. * Must be called with the entry's lock held. */ -static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) +static inline void +arpq_enqueue(struct l2t_entry *e, struct mbuf *m) { - skb->next = NULL; + m->m_next = NULL; if (e->arpq_head) - e->arpq_tail->next = skb; + e->arpq_tail->m_next = m; else - e->arpq_head = skb; - e->arpq_tail = skb; + e->arpq_head = m; + e->arpq_tail = m; } -int t3_l2t_send_slow(struct toedev *dev, struct sk_buff *skb, +int +t3_l2t_send_slow(struct toedev *dev, struct mbuf *m, struct l2t_entry *e) { again: switch (e->state) { case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ +#if 0 neigh_event_send(e->neigh, NULL); - spin_lock_bh(&e->lock); +#endif + mtx_lock(&e->lock); if (e->state == L2T_STATE_STALE) e->state = L2T_STATE_VALID; - spin_unlock_bh(&e->lock); + mtx_unlock(&e->lock); case L2T_STATE_VALID: /* fast-path, send the packet on */ - return cxgb3_ofld_send(dev, skb); + return cxgb_ofld_send(dev, m); case L2T_STATE_RESOLVING: - spin_lock_bh(&e->lock); + mtx_lock(&e->lock); if (e->state != L2T_STATE_RESOLVING) { // ARP already completed - spin_unlock_bh(&e->lock); + mtx_unlock(&e->lock); goto again; } - arpq_enqueue(e, skb); - spin_unlock_bh(&e->lock); + arpq_enqueue(e, m); + mtx_unlock(&e->lock); /* * Only the first packet added to the arpq should kick off - * resolution. However, because the alloc_skb below can fail, + * resolution. However, because the m_gethdr below can fail, * we allow each packet added to the arpq to retry resolution * as a way of recovering from transient memory exhaustion. * A better way would be to use a work request to retry L2T * entries when there's no memory. 
*/ +#if 0 if (!neigh_event_send(e->neigh, NULL)) { - skb = alloc_skb(sizeof(struct cpl_l2t_write_req), - GFP_ATOMIC); - if (!skb) + if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL) break; - spin_lock_bh(&e->lock); + mtx_lock(&e->lock); if (e->arpq_head) - setup_l2e_send_pending(dev, skb, e); + setup_l2e_send_pending(dev, m, e); else /* we lost the race */ - __kfree_skb(skb); - spin_unlock_bh(&e->lock); + m_free(m); + mtx_unlock(&e->lock); } +#endif } return 0; } -EXPORT_SYMBOL(t3_l2t_send_slow); -void t3_l2t_send_event(struct toedev *dev, struct l2t_entry *e) +void +t3_l2t_send_event(struct toedev *dev, struct l2t_entry *e) { again: switch (e->state) { case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ +#if 0 neigh_event_send(e->neigh, NULL); - spin_lock_bh(&e->lock); +#endif + mtx_lock(&e->lock); if (e->state == L2T_STATE_STALE) { e->state = L2T_STATE_VALID; } - spin_unlock_bh(&e->lock); + mtx_unlock(&e->lock); return; case L2T_STATE_VALID: /* fast-path, send the packet on */ return; case L2T_STATE_RESOLVING: - spin_lock_bh(&e->lock); + mtx_lock(&e->lock); if (e->state != L2T_STATE_RESOLVING) { // ARP already completed - spin_unlock_bh(&e->lock); + mtx_unlock(&e->lock); goto again; } - spin_unlock_bh(&e->lock); + mtx_unlock(&e->lock); /* * Only the first packet added to the arpq should kick off @@ -175,31 +237,33 @@ * A better way would be to use a work request to retry L2T * entries when there's no memory. */ +#if 0 neigh_event_send(e->neigh, NULL); +#endif } return; } -EXPORT_SYMBOL(t3_l2t_send_event); - +#if 0 /* * Allocate a free L2T entry. Must be called with l2t_data.lock held. */ -static struct l2t_entry *alloc_l2e(struct l2t_data *d) +static struct l2t_entry * +alloc_l2e(struct l2t_data *d) { struct l2t_entry *end, *e, **p; - if (!atomic_read(&d->nfree)) + if (!atomic_load_acq_int(&d->nfree)) return NULL; /* there's definitely a free entry */ for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e) - if (atomic_read(&e->refcnt) == 0) + if (atomic_load_acq_int(&e->refcnt) == 0) goto found; - for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ; + for (e = &d->l2tab[1]; atomic_load_acq_int(&e->refcnt); ++e) ; found: d->rover = e + 1; - atomic_dec(&d->nfree); + atomic_add_int(&d->nfree, -1); /* * The entry we found may be an inactive entry that is @@ -217,7 +281,7 @@ } return e; } - +#endif /* * Called when an L2T entry has no more users. The entry is left in the hash * table since it is likely to be reused but we also bump nfree to indicate @@ -229,29 +293,31 @@ * drops to 0 we need to take the entry's lock to avoid races with a new * incarnation. */ -void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e) +void +t3_l2e_free(struct l2t_data *d, struct l2t_entry *e) { - spin_lock_bh(&e->lock); - if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + mtx_lock(&e->lock); + if (atomic_load_acq_int(&e->refcnt) == 0) { /* hasn't been recycled */ if (e->neigh) { neigh_release(e->neigh); e->neigh = NULL; } } - spin_unlock_bh(&e->lock); - atomic_inc(&d->nfree); + mtx_unlock(&e->lock); + atomic_add_int(&d->nfree, 1); } -EXPORT_SYMBOL(t3_l2e_free); /* * Update an L2T entry that was previously used for the same next hop as neigh. * Must be called with softirqs disabled. 
*/ -static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) +static inline void +reuse_entry(struct l2t_entry *e, struct ifnet *neigh) { +#ifdef notyet unsigned int nud_state; - spin_lock(&e->lock); /* avoid race with t3_l2t_free */ + mtx_lock(&e->lock); /* avoid race with t3_l2t_free */ if (neigh != e->neigh) neigh_replace(e, neigh); @@ -263,24 +329,27 @@ e->state = L2T_STATE_VALID; else e->state = L2T_STATE_STALE; - spin_unlock(&e->lock); + mtx_unlock(&e->lock); +#endif } -struct l2t_entry *t3_l2t_get(struct toedev *dev, struct neighbour *neigh, +struct l2t_entry * +t3_l2t_get(struct toedev *dev, struct ifnet *neigh, unsigned int smt_idx) { +#if 0 struct l2t_entry *e; struct l2t_data *d = L2DATA(dev); u32 addr = *(u32 *) neigh->primary_key; - int ifidx = neigh->dev->ifindex; + int ifidx = neigh->if_index; int hash = arp_hash(addr, ifidx, d); - write_lock_bh(&d->lock); + rw_wlock(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) if (e->addr == addr && e->ifindex == ifidx && e->smt_idx == smt_idx) { l2t_hold(d, e); - if (atomic_read(&e->refcnt) == 1) + if (atomic_load_acq_int(&e->refcnt) == 1) reuse_entry(e, neigh); goto done; } @@ -288,26 +357,27 @@ /* Need to allocate a new entry */ e = alloc_l2e(d); if (e) { - spin_lock(&e->lock); /* avoid race with t3_l2t_free */ + mtx_lock(&e->lock); /* avoid race with t3_l2t_free */ e->next = d->l2tab[hash].first; d->l2tab[hash].first = e; e->state = L2T_STATE_RESOLVING; e->addr = addr; e->ifindex = ifidx; e->smt_idx = smt_idx; - atomic_set(&e->refcnt, 1); + atomic_store_rel_int(&e->refcnt, 1); neigh_replace(e, neigh); if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id; else e->vlan = VLAN_NONE; - spin_unlock(&e->lock); + mtx_unlock(&e->lock); } done: - write_unlock_bh(&d->lock); + rw_wunlock(&d->lock); return e; +#endif + return (NULL); } -EXPORT_SYMBOL(t3_l2t_get); /* * Called when address resolution fails for an L2T entry to handle packets @@ -317,46 +387,53 @@ * XXX: maybe we should abandon the latter behavior and just require a failure * handler. */ -static void handle_failed_resolution(struct toedev *dev, struct sk_buff *arpq) +#if 0 +static void +handle_failed_resolution(struct toedev *dev, struct mbuf *arpq) { + while (arpq) { - struct sk_buff *skb = arpq; - struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + struct mbuf *m = arpq; + struct l2t_mbuf_cb *cb = L2T_MBUF_CB(m); - arpq = skb->next; - skb->next = NULL; + arpq = m->m_next; + m->m_next = NULL; if (cb->arp_failure_handler) - cb->arp_failure_handler(dev, skb); + cb->arp_failure_handler(dev, m); else - cxgb3_ofld_send(dev, skb); + cxgb_ofld_send(dev, m); } + } +#endif #if defined(NETEVENT) || !defined(CONFIG_CHELSIO_T3_MODULE) /* * Called when the host's ARP layer makes a change to some entry that is * loaded into the HW L2 table. 
*/ -void t3_l2t_update(struct toedev *dev, struct neighbour *neigh) +void +t3_l2t_update(struct toedev *dev, struct ifnet *neigh) { +#if 0 struct l2t_entry *e; - struct sk_buff *arpq = NULL; + struct mbuf *arpq = NULL; struct l2t_data *d = L2DATA(dev); u32 addr = *(u32 *) neigh->primary_key; int ifidx = neigh->dev->ifindex; int hash = arp_hash(addr, ifidx, d); - read_lock_bh(&d->lock); + rw_rlock(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) if (e->addr == addr && e->ifindex == ifidx) { - spin_lock(&e->lock); + mtx_lock(&e->lock); goto found; } - read_unlock_bh(&d->lock); + rw_runlock(&d->lock); return; found: - read_unlock(&d->lock); - if (atomic_read(&e->refcnt)) { + rw_runlock(&d->lock); + if (atomic_load_acq_int(&e->refcnt)) { if (neigh != e->neigh) neigh_replace(e, neigh); @@ -373,16 +450,18 @@ setup_l2e_send_pending(dev, NULL, e); } } - spin_unlock_bh(&e->lock); + mtx_unlock(&e->lock); if (arpq) handle_failed_resolution(dev, arpq); +#endif } #else /* * Called from a kprobe, interrupts are off. */ -void t3_l2t_update(struct toedev *dev, struct neighbour *neigh) +void +t3_l2t_update(struct toedev *dev, struct ifnet *neigh) { struct l2t_entry *e; struct l2t_data *d = L2DATA(dev); @@ -390,37 +469,38 @@ int ifidx = neigh->dev->ifindex; int hash = arp_hash(addr, ifidx, d); - read_lock(&d->lock); + rw_rlock(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) if (e->addr == addr && e->ifindex == ifidx) { - spin_lock(&e->lock); - if (atomic_read(&e->refcnt)) { + mtx_lock(&e->lock); + if (atomic_load_acq_int(&e->refcnt)) { if (neigh != e->neigh) neigh_replace(e, neigh); e->tdev = dev; mod_timer(&e->update_timer, jiffies + 1); } - spin_unlock(&e->lock); + mtx_unlock(&e->lock); break; } - read_unlock(&d->lock); + rw_runlock(&d->lock); } -static void update_timer_cb(unsigned long data) +static void +update_timer_cb(unsigned long data) { - struct sk_buff *arpq = NULL; + struct mbuf *arpq = NULL; struct l2t_entry *e = (struct l2t_entry *)data; - struct neighbour *neigh = e->neigh; + struct ifnet *neigh = e->neigh; struct toedev *dev = e->tdev; barrier(); - if (!atomic_read(&e->refcnt)) + if (!atomic_load_acq_int(&e->refcnt)) return; - read_lock(&neigh->lock); - spin_lock(&e->lock); + rw_rlock(&neigh->lock); + mtx_lock(&e->lock); - if (atomic_read(&e->refcnt)) { + if (atomic_load_acq_int(&e->refcnt)) { if (e->state == L2T_STATE_RESOLVING) { if (neigh->nud_state & NUD_FAILED) { arpq = e->arpq_head; @@ -434,15 +514,16 @@ setup_l2e_send_pending(dev, NULL, e); } } - spin_unlock(&e->lock); - read_unlock(&neigh->lock); + mtx_unlock(&e->lock); + rw_runlock(&neigh->lock); if (arpq) handle_failed_resolution(dev, arpq); } #endif -struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) +struct l2t_data * +t3_init_l2t(unsigned int l2t_capacity) { struct l2t_data *d; int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry); @@ -453,14 +534,14 @@ d->nentries = l2t_capacity; d->rover = &d->l2tab[1]; /* entry 0 is not used */ - atomic_set(&d->nfree, l2t_capacity - 1); - rwlock_init(&d->lock); + atomic_store_rel_int(&d->nfree, l2t_capacity - 1); + rw_init(&d->lock, "L2T"); for (i = 0; i < l2t_capacity; ++i) { d->l2tab[i].idx = i; d->l2tab[i].state = L2T_STATE_UNUSED; - spin_lock_init(&d->l2tab[i].lock); - atomic_set(&d->l2tab[i].refcnt, 0); + mtx_init(&d->l2tab[i].lock, "L2TAB", NULL, MTX_DEF); + atomic_store_rel_int(&d->l2tab[i].refcnt, 0); #ifndef NETEVENT #ifdef CONFIG_CHELSIO_T3_MODULE setup_timer(&d->l2tab[i].update_timer, update_timer_cb, @@ -471,7 +552,8 @@ return d; } -void 
t3_free_l2t(struct l2t_data *d) +void +t3_free_l2t(struct l2t_data *d) { #ifndef NETEVENT #ifdef CONFIG_CHELSIO_T3_MODULE @@ -490,19 +572,22 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> -static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) +static inline void * +l2t_get_idx(struct seq_file *seq, loff_t pos) { struct l2t_data *d = seq->private; return pos >= d->nentries ? NULL : &d->l2tab[pos]; } -static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) +static void * +l2t_seq_start(struct seq_file *seq, loff_t *pos) { return *pos ? l2t_get_idx(seq, *pos) : SEQ_START_TOKEN; } -static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) +static void * +l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) { v = l2t_get_idx(seq, *pos + 1); if (v) @@ -510,11 +595,13 @@ return v; } -static void l2t_seq_stop(struct seq_file *seq, void *v) +static void +l2t_seq_stop(struct seq_file *seq, void *v) { } -static char l2e_state(const struct l2t_entry *e) +static char +l2e_state(const struct l2t_entry *e) { switch (e->state) { case L2T_STATE_VALID: return 'V'; /* valid, fast-path entry */ @@ -526,7 +613,8 @@ } } -static int l2t_seq_show(struct seq_file *seq, void *v) +static int +l2t_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Index IP address Ethernet address VLAN " @@ -535,70 +623,18 @@ char ip[20]; struct l2t_entry *e = v; - spin_lock_bh(&e->lock); + mtx_lock(&e->lock); sprintf(ip, "%u.%u.%u.%u", NIPQUAD(e->addr)); seq_printf(seq, "%-5u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d" " %3u %c %7u %4u %s\n", e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2], e->dmac[3], e->dmac[4], e->dmac[5], - e->vlan & VLAN_VID_MASK, vlan_prio(e), - l2e_state(e), atomic_read(&e->refcnt), e->smt_idx, + e->vlan & EVL_VLID_MASK, vlan_prio(e), + l2e_state(e), atomic_load_acq_int(&e->refcnt), e->smt_idx, e->neigh ? e->neigh->dev->name : ""); - spin_unlock_bh(&e->lock); + mtx_unlock(&e->lock); } return 0; } -static struct seq_operations l2t_seq_ops = { - .start = l2t_seq_start, - .next = l2t_seq_next, - .stop = l2t_seq_stop, - .show = l2t_seq_show -}; - -static int l2t_seq_open(struct inode *inode, struct file *file) -{ - int rc = seq_open(file, &l2t_seq_ops); - - if (!rc) { - struct proc_dir_entry *dp = PDE(inode); - struct seq_file *seq = file->private_data; - - seq->private = dp->data; - } - return rc; -} - -static struct file_operations l2t_seq_fops = { - .owner = THIS_MODULE, - .open = l2t_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * Create the proc entries for the L2 table under dir. 
- */ -int t3_l2t_proc_setup(struct proc_dir_entry *dir, struct l2t_data *d) -{ - struct proc_dir_entry *p; - - if (!dir) - return -EINVAL; - - p = create_proc_entry("l2t", S_IRUGO, dir); - if (!p) - return -ENOMEM; - - p->proc_fops = &l2t_seq_fops; - p->data = d; - return 0; -} - -void t3_l2t_proc_free(struct proc_dir_entry *dir) -{ - if (dir) - remove_proc_entry("l2t", dir); -} #endif ==== //depot/projects/opentoe/sys/dev/cxgb/cxgb_l2t.h#3 (text+ko) ==== @@ -26,7 +26,7 @@ int ifindex; /* neighbor's net_device's ifindex */ uint16_t smt_idx; /* SMT index */ uint16_t vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */ - struct ifnet *ifp; /* associated neighbour */ + struct ifnet *neigh; /* associated neighbour */ struct l2t_entry *first; /* start of hash chain */ struct l2t_entry *next; /* next l2t_entry on chain */ struct mbuf *arpq_head; /* queue of packets awaiting resolution */ ==== //depot/projects/opentoe/sys/dev/cxgb/cxgb_main.c#6 (text+ko) ==== @@ -104,6 +104,7 @@ static int setup_sge_qsets(adapter_t *); static void cxgb_async_intr(void *); static void cxgb_ext_intr_handler(void *, int); +static void cxgb_down(struct adapter *sc); static void cxgb_tick(void *); static void setup_rss(adapter_t *sc); @@ -118,6 +119,10 @@ unsigned int end); static void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf); static int cxgb_get_regs_len(void); +static int offload_open(struct port_info *pi); +static int offload_close(struct toedev *tdev); + + static device_method_t cxgb_controller_methods[] = { DEVMETHOD(device_probe, cxgb_controller_probe), @@ -180,11 +185,28 @@ */ static int msi_allowed = 2; TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed); - SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters"); SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0, "MSI-X, MSI, INTx selector"); +/* + * The driver enables offload as a default. + * To disable it, use ofld_disable = 1. + */ +static int ofld_disable = 0; +TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable); +SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0, + "disable ULP offload"); + +/* + * The driver uses an auto-queue algorithm by default. + * To disable it and force a single queue-set per port, use singleq = 1. 
+ */ +static int singleq = 0; +TUNABLE_INT("hw.cxgb.singleq", &singleq); +SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0, + "use a single queue-set per port"); + enum { MAX_TXQ_ENTRIES = 16384, MAX_CTRL_TXQ_ENTRIES = 1024, @@ -269,7 +291,7 @@ } static int -cxgb_fw_download(adapter_t *sc, device_t dev) +upgrade_fw(adapter_t *sc) { char buf[32]; #ifdef FIRMWARE_LATEST @@ -279,15 +301,14 @@ #endif int status; - snprintf(&buf[0], sizeof(buf), "t3fw%d%d", FW_VERSION_MAJOR, - FW_VERSION_MINOR); + snprintf(&buf[0], sizeof(buf), "t3fw%d%d%d", FW_VERSION_MAJOR, + FW_VERSION_MINOR, FW_VERSION_MICRO); fw = firmware_get(buf); - if (fw == NULL) { - device_printf(dev, "Could not find firmware image %s\n", buf); - return ENOENT; + device_printf(sc->dev, "Could not find firmware image %s\n", buf); + return (ENOENT); } status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize); @@ -297,7 +318,6 @@ return (status); } - static int cxgb_controller_attach(device_t dev) { @@ -305,13 +325,14 @@ device_t child; const struct adapter_info *ai; struct adapter *sc; - int i, reg, msi_needed, msi_count = 0, error = 0; + int i, reg, msi_needed, error = 0; uint32_t vers; int port_qsets = 1; sc = device_get_softc(dev); sc->dev = dev; - + sc->msi_count = 0; + /* find the PCIe link width and set max read request to 4KB*/ if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { uint16_t lnk, pectl; @@ -367,13 +388,14 @@ (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->msix_regs_rid, RF_ACTIVE)) != NULL) { - msi_needed = msi_count = SGE_MSIX_COUNT; + msi_needed = sc->msi_count = SGE_MSIX_COUNT; - if ((pci_alloc_msix(dev, &msi_count) != 0) || - (msi_count != msi_needed)) { - device_printf(dev, "msix allocation failed" >>> TRUNCATED FOR MAIL (1000 lines) <<<
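
For reference, the "missing tunables" mentioned in the change description (hw.cxgb.msi_allowed, hw.cxgb.ofld_disable, hw.cxgb.singleq) all follow the standard FreeBSD loader-tunable plus sysctl pattern shown in the cxgb_main.c hunks above. Below is a minimal, self-contained sketch of that pattern; the knob name hw.cxgb.example_knob is hypothetical and not part of the driver, and the real driver declares the _hw_cxgb sysctl node only once.

/*
 * Sketch of the TUNABLE_INT/SYSCTL_UINT pattern used by the driver's
 * new knobs.  "example_knob" is for illustration only.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int example_knob = 0;
TUNABLE_INT("hw.cxgb.example_knob", &example_knob);	/* read from loader.conf at boot */

SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_UINT(_hw_cxgb, OID_AUTO, example_knob, CTLFLAG_RDTUN, &example_knob, 0,
    "example knob (illustration only)");

A CTLFLAG_RDTUN knob like this is set at boot via /boot/loader.conf (for instance hw.cxgb.ofld_disable=1) and can be inspected afterwards with sysctl(8).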