From owner-p4-projects@FreeBSD.ORG Thu Apr 19 05:38:41 2007
Date: Thu, 19 Apr 2007 05:38:39 GMT
Message-Id: <200704190538.l3J5cd2g002169@repoman.freebsd.org>
From: Kip Macy <kmacy@freebsd.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 118391 for review
List-Id: p4 projects tree changes

http://perforce.freebsd.org/chv.cgi?CH=118391

Change 118391 by kmacy@kmacy_vt-x:opentoe_init on 2007/04/19 05:38:09

	make cxgb_offload compile

Affected files ...

.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_adapter.h#7 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_l2t.h#2 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_offload.c#3 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_offload.h#3 edit
.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_toedev.h#3 edit
.. //depot/projects/opentoe/sys/modules/cxgb/Makefile#4 edit

Differences ...
==== //depot/projects/opentoe/sys/dev/cxgb/cxgb_adapter.h#7 (text+ko) ==== @@ -59,6 +59,7 @@ #include #include +#include struct adapter; struct sge_qset; extern int cxgb_debug; @@ -240,6 +241,7 @@ struct adapter { device_t dev; int flags; + TAILQ_ENTRY(adapter) adapter_entry; /* PCI register resources */ uint32_t regs_rid; @@ -293,7 +295,7 @@ struct port_info port[MAX_NPORTS]; device_t portdev[MAX_NPORTS]; - struct t3cdev tdev; + struct toedev tdev; char fw_version[64]; uint32_t open_device_map; struct mtx lock; @@ -384,6 +386,7 @@ void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed, int duplex, int fc); void t3_sge_err_intr_handler(adapter_t *adapter); +int t3_offload_tx(struct toedev *, struct mbuf *); void t3_os_ext_intr_handler(adapter_t *adapter); void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]); int t3_mgmt_tx(adapter_t *adap, struct mbuf *m); @@ -437,6 +440,19 @@ return container_of(q, struct sge_qset, txq[qidx]); } +static __inline struct adapter * +tdev2adap(struct toedev *d) +{ + return container_of(d, struct adapter, tdev); +} + #undef container_of +#define OFFLOAD_DEVMAP_BIT 15 +static inline int offload_running(adapter_t *adapter) +{ + return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT); +} + + #endif ==== //depot/projects/opentoe/sys/dev/cxgb/cxgb_l2t.h#2 (text+ko) ==== @@ -1,42 +1,8 @@ -/* - * Copyright (c) 2006 Chelsio, Inc. All rights reserved. - * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ #ifndef _CHELSIO_L2T_H #define _CHELSIO_L2T_H -#include -#include -#include "t3cdev.h" -#include +#include +#include enum { L2T_STATE_VALID, /* entry is up to date */ @@ -45,9 +11,6 @@ L2T_STATE_UNUSED /* entry not in use */ }; -struct neighbour; -struct sk_buff; - /* * Each L2T entry plays multiple roles. First of all, it keeps state for the * corresponding entry of the HW L2 table and maintains a queue of offload @@ -57,24 +20,24 @@ * first element in its chain through its first pointer. 
*/ struct l2t_entry { - u16 state; /* entry state */ - u16 idx; /* entry index */ - u32 addr; /* dest IP address */ + uint16_t state; /* entry state */ + uint16_t idx; /* entry index */ + uint32_t addr; /* dest IP address */ int ifindex; /* neighbor's net_device's ifindex */ - u16 smt_idx; /* SMT index */ - u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */ - struct neighbour *neigh; /* associated neighbour */ + uint16_t smt_idx; /* SMT index */ + uint16_t vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */ + struct ifnet *ifp; /* associated neighbour */ struct l2t_entry *first; /* start of hash chain */ struct l2t_entry *next; /* next l2t_entry on chain */ - struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ - struct sk_buff *arpq_tail; - spinlock_t lock; - atomic_t refcnt; /* entry reference count */ - u8 dmac[6]; /* neighbour's MAC address */ + struct mbuf *arpq_head; /* queue of packets awaiting resolution */ + struct mbuf *arpq_tail; + struct mtx lock; + volatile uint32_t refcnt; /* entry reference count */ + uint8_t dmac[6]; /* neighbour's MAC address */ #ifndef NETEVENT #ifdef CONFIG_CHELSIO_T3_MODULE struct timer_list update_timer; - struct t3cdev *tdev; + struct toedev *tdev; #endif #endif }; @@ -82,41 +45,45 @@ struct l2t_data { unsigned int nentries; /* number of entries */ struct l2t_entry *rover; /* starting point for next allocation */ - atomic_t nfree; /* number of free entries */ - rwlock_t lock; + volatile uint32_t nfree; /* number of free entries */ + struct rwlock lock; struct l2t_entry l2tab[0]; }; -typedef void (*arp_failure_handler_func)(struct t3cdev *dev, - struct sk_buff *skb); +typedef void (*arp_failure_handler_func)(struct toedev *dev, + struct mbuf *m); /* * Callback stored in an skb to handle address resolution failure. */ -struct l2t_skb_cb { +struct l2t_mbuf_cb { arp_failure_handler_func arp_failure_handler; }; -#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) +/* + * XXX + */ +#define L2T_MBUF_CB(skb) ((struct l2t_mbuf_cb *)(skb)->cb) -static inline void set_arp_failure_handler(struct sk_buff *skb, +#ifdef notyet +static inline void set_arp_failure_handler(struct mbuf *m, arp_failure_handler_func hnd) { L2T_SKB_CB(skb)->arp_failure_handler = hnd; } - +#endif /* * Getting to the L2 data from an offload device. 
*/ #define L2DATA(dev) ((dev)->l2opt) void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e); -void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh); -struct l2t_entry *t3_l2t_get(struct t3cdev *dev, struct neighbour *neigh, +void t3_l2t_update(struct toedev *dev, struct ifnet *ifp); +struct l2t_entry *t3_l2t_get(struct toedev *dev, struct ifnet *neigh, unsigned int smt_idx); -int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, +int t3_l2t_send_slow(struct toedev *dev, struct mbuf *m, struct l2t_entry *e); -void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e); +void t3_l2t_send_event(struct toedev *dev, struct l2t_entry *e); struct l2t_data *t3_init_l2t(unsigned int l2t_capacity); void t3_free_l2t(struct l2t_data *d); @@ -128,26 +95,26 @@ #define l2t_proc_free(dir) #endif -int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb); +int cxgb_ofld_send(struct toedev *dev, struct mbuf *m); -static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb, +static inline int l2t_send(struct toedev *dev, struct mbuf *m, struct l2t_entry *e) { - if (likely(e->state == L2T_STATE_VALID)) - return cxgb3_ofld_send(dev, skb); - return t3_l2t_send_slow(dev, skb, e); + if (__predict_true(e->state == L2T_STATE_VALID)) + return cxgb_ofld_send(dev, m); + return t3_l2t_send_slow(dev, m, e); } static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) { - if (atomic_dec_and_test(&e->refcnt)) + if (atomic_fetchadd_int(&e->refcnt, -1) == 1) t3_l2e_free(d, e); } static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) { - if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ - atomic_dec(&d->nfree); + if (atomic_fetchadd_int(&e->refcnt, 1) == 1) /* 0 -> 1 transition */ + atomic_add_int(&d->nfree, 1); } #endif ==== //depot/projects/opentoe/sys/dev/cxgb/cxgb_offload.c#3 (text+ko) ==== @@ -64,12 +64,24 @@ #include #include #include +#include #include +#include +#include -static LIST_HEAD(client_list); -static LIST_HEAD(ofld_dev_list); -static LIST_HEAD(adapter_list); +/* + * XXX + */ +#define LOG_ERR 1 +#define LOG_NOTICE 2 +#define BUG_ON(...) +#define VALIDATE_TID 0 + + +TAILQ_HEAD(, cxgb_client) client_list; +TAILQ_HEAD(, toedev) ofld_dev_list; +TAILQ_HEAD(, adapter) adapter_list; static struct mtx cxgb_db_lock; static struct rwlock adapter_list_lock; @@ -84,7 +96,7 @@ { struct adapter *adapter = tdev2adap(tdev); - return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)); + return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT)); } /** @@ -184,7 +196,7 @@ TAILQ_FOREACH(adapter, &adapter_list, adapter_entry) { for_each_port(adapter, port) { - if (dev == adapter->port[port].dev) { + if (ifp == adapter->port[port].ifp) { rw_runlock(&adapter_list_lock); return 1; } @@ -197,14 +209,15 @@ static struct net_device * get_iff_from_mac(adapter_t *adapter, const uint8_t *mac, unsigned int vlan) { +#ifdef notyet int i; for_each_port(adapter, i) { const struct vlan_group *grp; const struct port_info *p = &adapter->port[i]; - struct net_device *dev = p->dev; + struct ifnet *ifnet = p->ifp; - if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { + if (!memcmp(port->hw_addr, mac, ETH_ALEN)) { if (vlan && vlan != VLAN_VID_MASK) { grp = p->vlan_grp; dev = grp ? 
grp->vlan_devices[vlan] : NULL; @@ -214,6 +227,7 @@ return dev; } } +#endif return NULL; } @@ -221,9 +235,9 @@ failover_fixup(adapter_t *adapter, int port) { if (adapter->params.rev == 0) { - struct net_device *dev = adapter->port[port].dev; + struct ifnet *ifp = adapter->port[port].ifp; struct cmac *mac = &adapter->port[port].mac; - if (!(dev->flags & IFF_UP)) { + if (!(ifp->if_flags & IFF_UP)) { /* Failover triggered by the interface ifdown */ t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset, F_TXEN); @@ -246,7 +260,6 @@ switch (req) { case ULP_ISCSI_GET_PARAMS: - uiip->pdev = adapter->pdev; uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT); uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT); uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK); @@ -258,11 +271,11 @@ t3_read_reg(adapter, A_PM1_TX_CFG) >> 17); /* on rx, the iscsi pdu has to be < rx page size and the whole pdu + cpl headers has to fit into one sge buffer */ - uiip->max_rxsz = min_t(unsigned int, - adapter->params.tp.rx_pg_size, - (adapter->sge.qs[0].fl[1].buf_size - - sizeof(struct cpl_rx_data) * 2 - - sizeof(struct cpl_rx_data_ddp)) ); + uiip->max_rxsz = + (unsigned int)min(adapter->params.tp.rx_pg_size, + (adapter->sge.qs[0].fl[1].buf_size - + sizeof(struct cpl_rx_data) * 2 - + sizeof(struct cpl_rx_data_ddp)) ); break; case ULP_ISCSI_SET_PARAMS: t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); @@ -283,30 +296,32 @@ switch (req) { case RDMA_GET_PARAMS: { + struct rdma_info *req = data; - struct pci_dev *pdev = adapter->pdev; +#ifdef notyet req->udbell_physbase = pci_resource_start(pdev, 2); req->udbell_len = pci_resource_len(pdev, 2); +#endif req->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT); req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); req->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT); req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); +#ifdef notyet req->kdb_addr = adapter->regs + A_SG_KDOORBELL; - req->pdev = pdev; +#endif break; } case RDMA_CQ_OP: { - unsigned long flags; struct rdma_cq_op *req = data; /* may be called in any context */ - spin_lock_irqsave(&adapter->sge.reg_lock, flags); + mtx_lock(&adapter->sge.reg_lock); ret = t3_sge_cqcntxt_op(adapter, req->id, req->op, req->credits); - spin_unlock_irqrestore(&adapter->sge.reg_lock, flags); + mtx_unlock(&adapter->sge.reg_lock); break; } case RDMA_GET_MEM: { @@ -332,28 +347,28 @@ case RDMA_CQ_SETUP: { struct rdma_cq_setup *req = data; - spin_lock_irq(&adapter->sge.reg_lock); + mtx_lock(&adapter->sge.reg_lock); ret = t3_sge_init_cqcntxt(adapter, req->id, req->base_addr, req->size, ASYNC_NOTIF_RSPQ, req->ovfl_mode, req->credits, req->credit_thres); - spin_unlock_irq(&adapter->sge.reg_lock); + mtx_unlock(&adapter->sge.reg_lock); break; } case RDMA_CQ_DISABLE: - spin_lock_irq(&adapter->sge.reg_lock); + mtx_lock(&adapter->sge.reg_lock); ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data); - spin_unlock_irq(&adapter->sge.reg_lock); + mtx_unlock(&adapter->sge.reg_lock); break; case RDMA_CTRL_QP_SETUP: { struct rdma_ctrlqp_setup *req = data; - spin_lock_irq(&adapter->sge.reg_lock); + mtx_lock(&adapter->sge.reg_lock); ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0, SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ, req->base_addr, req->size, FW_RI_TID_START, 1, 0); - spin_unlock_irq(&adapter->sge.reg_lock); + mtx_unlock(&adapter->sge.reg_lock); break; } default: @@ -409,20 +424,19 @@ case 
GET_IFF_FROM_MAC: iffmacp = data; iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr, - iffmacp->vlan_tag & VLAN_VID_MASK); + iffmacp->vlan_tag & EVL_VLID_MASK); break; case GET_DDP_PARAMS: ddpp = data; ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT); ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT); ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK); - ddpp->pdev = adapter->pdev; break; case GET_PORTS: ports = data; ports->nports = adapter->params.nports; for_each_port(adapter, port) - ports->lldevs[port] = adapter->port[port].dev; + ports->lldevs[port] = adapter->port[port].ifp; break; case FAILOVER: port = *(int *)data; @@ -465,14 +479,14 @@ rx_offload_blackhole(struct toedev *dev, struct mbuf **m, int n) { CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n", - n, ntohl(*(u32 *)m[0]->data)); + n, ntohl(*mtod(m[0], uint32_t *))); while (n--) m_freem(m[n]); return 0; } static void -dummy_neigh_update(struct toedev *dev, struct neighbour *neigh) +dummy_neigh_update(struct toedev *dev, struct ifnet *neigh) { } @@ -489,9 +503,9 @@ void * cxgb_free_atid(struct toedev *tdev, int atid) { - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + struct tid_info *t = &(TOE_DATA(tdev))->tid_maps; union active_open_entry *p = atid2entry(t, atid); - void *ctx = p->t3c_tid.ctx; + void *ctx = p->toe_tid.ctx; mtx_lock(&t->atid_lock); p->next = t->afree; @@ -508,7 +522,7 @@ void cxgb_free_stid(struct toedev *tdev, int stid) { - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + struct tid_info *t = &(TOE_DATA(tdev))->tid_maps; union listen_entry *p = stid2entry(t, stid); mtx_lock(&t->stid_lock); @@ -522,45 +536,44 @@ cxgb_insert_tid(struct toedev *tdev, struct cxgb_client *client, void *ctx, unsigned int tid) { - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + struct tid_info *t = &(TOE_DATA(tdev))->tid_maps; t->tid_tab[tid].client = client; t->tid_tab[tid].ctx = ctx; - atomic_inc(&t->tids_in_use); + atomic_add_int(&t->tids_in_use, 1); } /* - * Populate a TID_RELEASE WR. The skb must be already propely sized. + * Populate a TID_RELEASE WR. The mbuf must be already propely sized. 
*/ static inline void mk_tid_release(struct mbuf *m, unsigned int tid) { struct cpl_tid_release *req; - +#if 0 skb->priority = CPL_PRIORITY_SETUP; - req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); +#endif + req = mtod(m, struct cpl_tid_release *); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); } static void -t3_process_tid_release_list(void *data) +t3_process_tid_release_list(void *data, int pending) { struct mbuf *m; struct toedev *tdev = data; - struct t3c_data *td = T3C_DATA(tdev); + struct toe_data *td = TOE_DATA(tdev); mtx_lock(&td->tid_release_lock); while (td->tid_release_list) { - struct t3c_tid_entry *p = td->tid_release_list; + struct toe_tid_entry *p = td->tid_release_list; - td->tid_release_list = (struct t3c_tid_entry *)p->ctx; + td->tid_release_list = (struct toe_tid_entry *)p->ctx; mtx_unlock(&td->tid_release_lock); - - skb = alloc_skb(sizeof(struct cpl_tid_release), - GFP_KERNEL | __GFP_NOFAIL); - mk_tid_release(skb, p - td->tid_maps.tid_tab); - cxgb_ofld_send(tdev, skb); + m = m_get(M_WAIT, MT_DATA); + mk_tid_release(m, p - td->tid_maps.tid_tab); + cxgb_ofld_send(tdev, m); p->ctx = NULL; mtx_lock(&td->tid_release_lock); } @@ -571,14 +584,16 @@ void cxgb_queue_tid_release(struct toedev *tdev, unsigned int tid) { - struct t3c_data *td = T3C_DATA(tdev); - struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid]; + struct toe_data *td = TOE_DATA(tdev); + struct toe_tid_entry *p = &td->tid_maps.tid_tab[tid]; mtx_lock(&td->tid_release_lock); p->ctx = (void *)td->tid_release_list; td->tid_release_list = p; +#if 0 if (!p->ctx) schedule_work(&td->tid_release_task); +#endif mtx_unlock(&td->tid_release_lock); } @@ -592,23 +607,23 @@ void cxgb_remove_tid(struct toedev *tdev, void *ctx, unsigned int tid) { - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + struct tid_info *t = &(TOE_DATA(tdev))->tid_maps; BUG_ON(tid >= t->ntids); if (tdev->type == T3A) - (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL); + (void)atomic_cmpset_ptr(&t->tid_tab[tid].ctx, ctx, NULL); else { struct mbuf *m; - skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); - if (likely(skb)) { - mk_tid_release(skb, tid); - cxgb_ofld_send(tdev, skb); + m = m_get(M_NOWAIT, MT_DATA); + if (__predict_true(m != NULL)) { + mk_tid_release(m, tid); + cxgb_ofld_send(tdev, m); t->tid_tab[tid].ctx = NULL; } else cxgb_queue_tid_release(tdev, tid); } - atomic_dec(&t->tids_in_use); + atomic_add_int(&t->tids_in_use, -1); } int @@ -616,7 +631,7 @@ void *ctx) { int atid = -1; - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + struct tid_info *t = &(TOE_DATA(tdev))->tid_maps; mtx_lock(&t->atid_lock); if (t->afree) { @@ -624,8 +639,8 @@ atid = (p - t->atid_tab) + t->atid_base; t->afree = p->next; - p->t3c_tid.ctx = ctx; - p->t3c_tid.client = client; + p->toe_tid.ctx = ctx; + p->toe_tid.client = client; t->atids_in_use++; } mtx_unlock(&t->atid_lock); @@ -637,7 +652,7 @@ void *ctx) { int stid = -1; - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + struct tid_info *t = &(TOE_DATA(tdev))->tid_maps; mtx_lock(&t->stid_lock); if (t->sfree) { @@ -645,8 +660,8 @@ stid = (p - t->stid_tab) + t->stid_base; t->sfree = p->next; - p->t3c_tid.ctx = ctx; - p->t3c_tid.client = client; + p->toe_tid.ctx = ctx; + p->toe_tid.client = client; t->stids_in_use++; } mtx_unlock(&t->stid_lock); @@ -656,7 +671,7 @@ static int do_smt_write_rpl(struct toedev *dev, struct mbuf *m) { - struct cpl_smt_write_rpl *rpl = cplhdr(skb); + struct cpl_smt_write_rpl *rpl = cplhdr(m); if 
(rpl->status != CPL_ERR_NONE) log(LOG_ERR, @@ -669,7 +684,7 @@ static int do_l2t_write_rpl(struct toedev *dev, struct mbuf *m) { - struct cpl_l2t_write_rpl *rpl = cplhdr(skb); + struct cpl_l2t_write_rpl *rpl = cplhdr(m); if (rpl->status != CPL_ERR_NONE) log(LOG_ERR, @@ -682,15 +697,15 @@ static int do_act_open_rpl(struct toedev *dev, struct mbuf *m) { - struct cpl_act_open_rpl *rpl = cplhdr(skb); + struct cpl_act_open_rpl *rpl = cplhdr(m); unsigned int atid = G_TID(ntohl(rpl->atid)); - struct t3c_tid_entry *t3c_tid; + struct toe_tid_entry *toe_tid; - t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); - if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers && - t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) { - return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb, - t3c_tid->ctx); + toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid); + if (toe_tid->ctx && toe_tid->client && toe_tid->client->handlers && + toe_tid->client->handlers[CPL_ACT_OPEN_RPL]) { + return toe_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, m, + toe_tid->ctx); } else { log(LOG_ERR, "%s: received clientless CPL command 0x%x\n", dev->name, CPL_ACT_OPEN_RPL); @@ -701,14 +716,14 @@ static int do_stid_rpl(struct toedev *dev, struct mbuf *m) { - union opcode_tid *p = cplhdr(skb); + union opcode_tid *p = cplhdr(m); unsigned int stid = G_TID(ntohl(p->opcode_tid)); - struct t3c_tid_entry *t3c_tid; + struct toe_tid_entry *toe_tid; - t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); - if (t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[p->opcode]) { - return t3c_tid->client->handlers[p->opcode] (dev, skb, t3c_tid->ctx); + toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid); + if (toe_tid->ctx && toe_tid->client->handlers && + toe_tid->client->handlers[p->opcode]) { + return toe_tid->client->handlers[p->opcode] (dev, m, toe_tid->ctx); } else { log(LOG_ERR, "%s: received clientless CPL command 0x%x\n", dev->name, p->opcode); @@ -719,15 +734,15 @@ static int do_hwtid_rpl(struct toedev *dev, struct mbuf *m) { - union opcode_tid *p = cplhdr(skb); + union opcode_tid *p = cplhdr(m); unsigned int hwtid = G_TID(ntohl(p->opcode_tid)); - struct t3c_tid_entry *t3c_tid; + struct toe_tid_entry *toe_tid; - t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); - if (t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[p->opcode]) { - return t3c_tid->client->handlers[p->opcode] - (dev, skb, t3c_tid->ctx); + toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid); + if (toe_tid->ctx && toe_tid->client->handlers && + toe_tid->client->handlers[p->opcode]) { + return toe_tid->client->handlers[p->opcode] + (dev, m, toe_tid->ctx); } else { log(LOG_ERR, "%s: received clientless CPL command 0x%x\n", dev->name, p->opcode); @@ -738,15 +753,15 @@ static int do_cr(struct toedev *dev, struct mbuf *m) { - struct cpl_pass_accept_req *req = cplhdr(skb); + struct cpl_pass_accept_req *req = cplhdr(m); unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); - struct t3c_tid_entry *t3c_tid; + struct toe_tid_entry *toe_tid; - t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); - if (t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) { - return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ] - (dev, skb, t3c_tid->ctx); + toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid); + if (toe_tid->ctx && toe_tid->client->handlers && + toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) { + return toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ] + 
(dev, m, toe_tid->ctx); } else { log(LOG_ERR, "%s: received clientless CPL command 0x%x\n", dev->name, CPL_PASS_ACCEPT_REQ); @@ -757,35 +772,36 @@ static int do_abort_req_rss(struct toedev *dev, struct mbuf *m) { - union opcode_tid *p = cplhdr(skb); + union opcode_tid *p = cplhdr(m); unsigned int hwtid = G_TID(ntohl(p->opcode_tid)); - struct t3c_tid_entry *t3c_tid; + struct toe_tid_entry *toe_tid; - t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); - if (t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[p->opcode]) { - return t3c_tid->client->handlers[p->opcode] - (dev, skb, t3c_tid->ctx); + toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid); + if (toe_tid->ctx && toe_tid->client->handlers && + toe_tid->client->handlers[p->opcode]) { + return toe_tid->client->handlers[p->opcode] + (dev, m, toe_tid->ctx); } else { - struct cpl_abort_req_rss *req = cplhdr(skb); + struct cpl_abort_req_rss *req = cplhdr(m); struct cpl_abort_rpl *rpl; - struct mbuf *m = - alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC); - if (!skb) { - log(LOG_NOTICE, "do_abort_req_rss: couldn't get skb!\n"); + struct mbuf *m = m_get(M_NOWAIT, MT_DATA); + if (!m) { + log(LOG_NOTICE, "do_abort_req_rss: couldn't get mbuf!\n"); goto out; } +#if 0 skb->priority = CPL_PRIORITY_DATA; __skb_put(skb, sizeof(struct cpl_abort_rpl)); - rpl = cplhdr(skb); +#endif + rpl = cplhdr(m); rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req))); OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req))); rpl->cmd = req->status; - cxgb_ofld_send(dev, skb); + cxgb_ofld_send(dev, m); out: return CPL_RET_BUF_DONE; } @@ -794,15 +810,15 @@ static int do_act_establish(struct toedev *dev, struct mbuf *m) { - struct cpl_act_establish *req = cplhdr(skb); + struct cpl_act_establish *req = cplhdr(m); unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); - struct t3c_tid_entry *t3c_tid; + struct toe_tid_entry *toe_tid; - t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); - if (t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { - return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] - (dev, skb, t3c_tid->ctx); + toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid); + if (toe_tid->ctx && toe_tid->client->handlers && + toe_tid->client->handlers[CPL_ACT_ESTABLISH]) { + return toe_tid->client->handlers[CPL_ACT_ESTABLISH] + (dev, m, toe_tid->ctx); } else { log(LOG_ERR, "%s: received clientless CPL command 0x%x\n", dev->name, CPL_PASS_ACCEPT_REQ); @@ -813,7 +829,7 @@ static int do_set_tcb_rpl(struct toedev *dev, struct mbuf *m) { - struct cpl_set_tcb_rpl *rpl = cplhdr(skb); + struct cpl_set_tcb_rpl *rpl = cplhdr(m); if (rpl->status != CPL_ERR_NONE) log(LOG_ERR, @@ -825,44 +841,50 @@ static int do_trace(struct toedev *dev, struct mbuf *m) { - struct cpl_trace_pkt *p = cplhdr(skb); +#if 0 + struct cpl_trace_pkt *p = cplhdr(m); + skb->protocol = 0xffff; skb->dev = dev->lldev; skb_pull(skb, sizeof(*p)); - skb->mac.raw = skb->data; + skb->mac.raw = mtod(m, (char *)); netif_receive_skb(skb); +#endif return 0; } static int do_term(struct toedev *dev, struct mbuf *m) { +#if 0 unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff; - unsigned int opcode = G_OPCODE(ntohl(skb->csum)); - struct t3c_tid_entry *t3c_tid; + unsigned int opcode = G_OPCODE(ntohl(m->m_pkthdr.csum_data)); + struct toe_tid_entry *toe_tid; - t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); - if (t3c_tid->ctx && t3c_tid->client->handlers 
&& - t3c_tid->client->handlers[opcode]) { - return t3c_tid->client->handlers[opcode](dev,skb,t3c_tid->ctx); + toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid); + if (toe_tid->ctx && toe_tid->client->handlers && + toe_tid->client->handlers[opcode]) { + return toe_tid->client->handlers[opcode](dev, m, toe_tid->ctx); } else { log(LOG_ERR, "%s: received clientless CPL command 0x%x\n", dev->name, opcode); return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; } +#endif + return (0); } -#if defined (CONFIG_CHELSIO_T3_MODULE) -#include -#include -#include -#include +#if defined(FOO) +#include +#include +#include +#include -static int (*orig_arp_constructor)(struct neighbour *); +static int (*orig_arp_constructor)(struct ifnet *); static void -neigh_suspect(struct neighbour *neigh) +neigh_suspect(struct ifnet *neigh) { struct hh_cache *hh; @@ -943,16 +965,19 @@ } else { neigh->nud_state = NUD_PROBE; neigh->updated = jiffies; - atomic_set(&neigh->probes, 0); + atomic_set_int(&neigh->probes, 0); next = now + neigh->parms->retrans_time; } } else { /* NUD_PROBE|NUD_INCOMPLETE */ next = now + neigh->parms->retrans_time; } - + /* + * Needed for read of probes + */ + mb(); if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) && - atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) { + neigh->probes >= neigh_max_probes(neigh)) { struct mbuf *m; neigh->nud_state = NUD_FAILED; @@ -983,14 +1008,12 @@ } if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { struct mbuf *m = skb_peek(&neigh->arp_queue); - /* keep skb alive even if arp_queue overflows */ - if (skb) - skb_get(skb); + write_unlock(&neigh->lock); neigh->ops->solicit(neigh, skb); - atomic_inc(&neigh->probes); - if (skb) - kfree_skb(skb); + atomic_add_int(&neigh->probes, 1); + if (m) + m_free(m); } else { out: write_unlock(&neigh->lock); @@ -1006,7 +1029,7 @@ static int arp_constructor_offload(struct neighbour *neigh) { - if (neigh->dev && is_offloading(neigh->dev)) + if (neigh->ifp && is_offloading(neigh->ifp)) neigh->timer.function = neigh_timer_handler_offload; return orig_arp_constructor(neigh); } @@ -1068,7 +1091,7 @@ static inline void restore_arp_sans_t3core(void) {} - +#endif #endif /* * Process a received packet with an unknown/unexpected CPL opcode. @@ -1077,7 +1100,7 @@ do_bad_cpl(struct toedev *dev, struct mbuf *m) { log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name, - *m->m_data); + *mtod(m, uint32_t *)); return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG); } @@ -1108,7 +1131,7 @@ { while (n--) { struct mbuf *m0 = *m++; - unsigned int opcode = G_OPCODE(ntohl(skb->csum)); + unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data)); int ret = cpl_handlers[opcode] (dev, m0); #if VALIDATE_TID @@ -1142,18 +1165,18 @@ void -cxgb_neigh_update(struct neighbour *neigh) +cxgb_neigh_update(struct ifnet *ifp) { - struct net_device *dev = neigh->dev; - if (dev && (is_offloading(dev))) { - struct toedev *tdev = TOEDEV(dev); + if (is_offloading(ifp)) { >>> TRUNCATED FOR MAIL (1000 lines) <<<
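The bulk of this change is a mechanical translation of the Linux offload glue onto FreeBSD primitives. For the global bookkeeping, the Linux LIST_HEAD() globals become queue(3) TAILQ lists, with a TAILQ_ENTRY(adapter) adapter_entry linkage added to struct adapter and the adapter list walked with TAILQ_FOREACH under adapter_list_lock. A minimal, self-contained sketch of that idiom follows; the unit field and the insert/remove pattern are illustrative only, not the driver's code.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the driver's struct adapter. */
struct adapter {
	int unit;
	TAILQ_ENTRY(adapter) adapter_entry;	/* linkage field named as in the patch */
};

static TAILQ_HEAD(, adapter) adapter_list =
    TAILQ_HEAD_INITIALIZER(adapter_list);

int
main(void)
{
	struct adapter *sc, *tmp;
	int i;

	for (i = 0; i < 3; i++) {
		if ((sc = calloc(1, sizeof(*sc))) == NULL)
			return (1);
		sc->unit = i;
		TAILQ_INSERT_TAIL(&adapter_list, sc, adapter_entry);
	}
	TAILQ_FOREACH(sc, &adapter_list, adapter_entry)
		printf("adapter %d\n", sc->unit);
	/* Safe-removal form, since entries are freed while walking. */
	TAILQ_FOREACH_SAFE(sc, &adapter_list, adapter_entry, tmp) {
		TAILQ_REMOVE(&adapter_list, sc, adapter_entry);
		free(sc);
	}
	return (0);
}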
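For reference counting, the patch maps Linux atomic_dec_and_test()/atomic_add_return() onto atomic_fetchadd_int(9), which returns the counter value held *before* the addition. The last-reference test on release therefore compares the old value against 1, while a 0 -> 1 transition on hold is signalled by a return of 0 (and, if the intent follows the Linux original, should decrement rather than increment the free-entry count). A sketch of the idiom under that assumption; struct refobj and the function names are hypothetical, not part of the driver.

#include <sys/types.h>
#include <machine/atomic.h>
#include <stdio.h>

/* Hypothetical refcounted object; mirrors the volatile counter in l2t_entry. */
struct refobj {
	volatile u_int refcnt;
};

/*
 * atomic_fetchadd_int() returns the value the counter held *before* the add,
 * so a return of 0 marks the 0 -> 1 transition (the object just went into use).
 */
static __inline int
refobj_hold(struct refobj *o)
{

	return (atomic_fetchadd_int(&o->refcnt, 1) == 0);
}

/*
 * A pre-decrement value of 1 means this call dropped the last reference and
 * the caller is responsible for freeing the object.
 */
static __inline int
refobj_release(struct refobj *o)
{

	return (atomic_fetchadd_int(&o->refcnt, -1) == 1);
}

int
main(void)
{
	struct refobj o = { 0 };

	printf("first hold:  %d\n", refobj_hold(&o));		/* 1: 0 -> 1 transition */
	printf("second hold: %d\n", refobj_hold(&o));		/* 0 */
	printf("release:     %d\n", refobj_release(&o));	/* 0 */
	printf("release:     %d\n", refobj_release(&o));	/* 1: last reference */
	return (0);
}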
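Finally, the CPL message constructors move from sk_buffs to mbufs. On the Linux side mk_tid_release() sized the buffer with __skb_put() before writing the request; the mbuf version obtains the buffer with m_get() and writes through mtod(), so the data length still has to be set explicitly somewhere. A kernel-style sketch of that pattern, with a made-up struct hw_req standing in for struct cpl_tid_release and none of the driver's opcode macros:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <netinet/in.h>

/* Hypothetical fixed-size control message; stands in for a CPL request. */
struct hw_req {
	uint32_t wr_hi;
	uint32_t opcode_tid;
};

/*
 * Allocate an mbuf for a fixed-size request and set m_len explicitly --
 * the mbuf-world equivalent of skb_put() -- before writing through mtod().
 */
static struct mbuf *
mk_hw_req(unsigned int tid, int how)
{
	struct mbuf *m;
	struct hw_req *req;

	m = m_gethdr(how, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.len = m->m_len = sizeof(*req);
	req = mtod(m, struct hw_req *);
	req->wr_hi = htonl(0);			/* work-request opcode/flags */
	req->opcode_tid = htonl(tid);		/* opcode + TID, per the CPL layout */
	return (m);
}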