Date:      Mon, 7 May 2007 06:35:46 GMT
From:      Kip Macy <kmacy@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 119402 for review
Message-ID:  <200705070635.l476ZkOX029690@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=119402

Change 119402 by kmacy@kmacy_vt-x:opentoe_init on 2007/05/07 06:34:54

	more incremental BSD-ification of TOE code
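
	The renaming follows a consistent mapping from Linux networking
	idioms to FreeBSD (or opentoe-branch) counterparts; m_priority and
	m_pkthdr.priv are branch-local mbuf extensions, not stock fields:

	    struct sk_buff *skb          ->  struct mbuf *m
	    struct sock *sk              ->  struct socket *so
	    BUG_ON(cond)                 ->  PANIC_IF(cond)
	    skb->priority                ->  m->m_priority
	    skb->sk                      ->  m->m_pkthdr.priv
	    __skb_put(skb, len)          ->  MH_ALIGN(m, len) + mtod(m, ...)
	    __skb_queue_tail(&sk->sk_receive_queue, skb)
	                                 ->  sbappend(&so->so_rcv, m)
	    sk->sk_state (TCP_*)         ->  sototcpcb(so)->t_state (TCPS_*)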

Affected files ...

.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_cpl_io.c#5 edit
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_ddp.c#4 edit
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_tom.c#4 edit
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_tom.h#4 edit
.. //depot/projects/opentoe/sys/modules/cxgb/t3_tom/Makefile#2 edit

Differences ...

==== //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_cpl_io.c#5 (text+ko) ====

@@ -144,7 +144,7 @@
 {
 #ifdef notyet
 	if (__predict_true(!skb_cloned(skb))) {
-		BUG_ON(skb->len < len);
+		PANIC_IF(skb->len < len);
 		__skb_trim(skb, len);
 		skb_get(skb);
 	} else {
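
PANIC_IF() above is the BSD-side replacement for Linux's BUG_ON(); its
definition is not part of this change, but a minimal compatibility macro
along these lines would match the usage (a sketch, not the branch's actual
header):

	/* Panic with the failed expression when exp is true. */
	#define PANIC_IF(exp) do {			\
		if (exp)				\
			panic("BUG: %s", #exp);		\
	} while (0)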
@@ -299,7 +299,7 @@
 {
 #if 0
 	log("connection_done: TID: %u, state: %d, dead %d, refs %d\n",
-	       TID(sototcpcb(so)), sk->sk_state, sock_flag(sk, SOCK_DEAD),
+	    TID(sototcpcb(so)), sototcpcb(so)->t_state, sock_flag(sk, SOCK_DEAD),
 	       atomic_read(&sk->sk_refcnt));
 //	dump_stack();
 #endif
@@ -562,21 +562,21 @@
 			break;
 
 		__skb_unlink(skb, &sk->sk_write_queue);
-		skb->priority = mkprio(CPL_PRIORITY_DATA, sk);
-		skb->csum = wrs_needed;    /* remember this until the WR_ACK */
+		m->m_priority = mkprio(CPL_PRIORITY_DATA, so);
+		m->m_pkthdr.csum_data = wrs_needed;    /* remember this until the WR_ACK */
 		WR_AVAIL(so) -= wrs_needed;
 		WR_UNACKED(so) += wrs_needed;
-		enqueue_wr(tp, skb);
+		enqueue_wr(tp, m);
 
 		if (__predict_true(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_NEED_HDR)) {
 			len += ulp_extra_len(m);
-			make_tx_data_wr(sk, skb, len);
+			make_tx_data_wr(so, m, len);
 			tp->snd_nxt += len;
 			tp->lsndtime = tcp_time_stamp;
 #if defined(CONFIG_T3_ZCOPY_SENDMSG) || defined(CONFIG_T3_ZCOPY_SENDMSG_MODULE)
 			atomic_add(skb->len - sizeof (struct tx_data_wr),
 				   &d->tx_dma_pending);
-			skb->sk = sk;
+			m->m_pkthdr.priv = so;
 #endif
 			if ((req_completion && WR_UNACKED(so) == wrs_needed) ||
 			    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_COMPL) ||
@@ -595,7 +595,7 @@
 			sock_set_flag(sk, TX_WAIT_IDLE);
 		set_arp_failure_handler(skb, arp_failure_discard);
 
-		l2t_send(cdev, skb, L2T_ENTRY(so));
+		l2t_send(cdev, m, L2T_ENTRY(so));
 	}
 	sk->sk_wmem_queued -= total_size;
 	return total_size;
@@ -662,7 +662,7 @@
 	t3_set_ca_ops(so, &tcp_init_congestion_ops);
 	TOE_DEV(so) = NULL;
 #if 0
-	log(KERN_INFO "closing TID %u, state %u\n", tid, sk->sk_state);
+	log(KERN_INFO "closing TID %u, state %u\n", tid, sototcpcb(so)->t_state);
 #endif
 }
 
@@ -753,7 +753,6 @@
 	if (ext & (1 << INET_DIAG_MAX)) {
 		struct rtattr *rta;
 		struct t3_inet_diag_info *info;
-		const struct tcpcb *tp = sototcpcb(so);
 
 		rta = __RTA_PUT(skb, INET_DIAG_MAX + 1, sizeof(*info));
 		info = RTA_DATA(rta);
@@ -770,19 +769,22 @@
 #define T3_CONG_OPS(s) \
 	{ .name = s, .owner = THIS_MODULE, .get_info = t3_idiag_get_info }
 
+#if 0
 static struct tcp_congestion_ops t3_cong_ops[] = {
 	T3_CONG_OPS("reno"),        T3_CONG_OPS("tahoe"),
 	T3_CONG_OPS("newreno"),     T3_CONG_OPS("highspeed")
 };
+#endif
 
 static void
-mk_act_open_req(struct socket *so, struct mbuf *skb,
+mk_act_open_req(struct socket *so, struct mbuf *m,
 			    unsigned int atid, const struct l2t_entry *e)
 {
 	struct cpl_act_open_req *req;
 
-	skb->priority = mkprio(CPL_PRIORITY_SETUP, sk);
-	req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
+	m->m_priority = mkprio(CPL_PRIORITY_SETUP, so);
+	MH_ALIGN(m, sizeof(*req));
+	req = mtod(m, struct cpl_act_open_req *);
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
 	req->local_port = sotoinpcb(so)->sport;
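
The __skb_put() conversion above assumes the CPL message fits in a single
packet-header mbuf allocated with room for it. The usual stock-mbuf pattern,
sketched here with a hypothetical ENOBUFS error path:

	struct mbuf *m;
	struct cpl_act_open_req *req;

	MGETHDR(m, M_DONTWAIT, MT_DATA);	/* CPL fits in a header mbuf */
	if (m == NULL)
		return (ENOBUFS);
	/* Position the payload at an aligned offset, then expose it. */
	MH_ALIGN(m, sizeof(*req));
	m->m_len = m->m_pkthdr.len = sizeof(*req);
	req = mtod(m, struct cpl_act_open_req *);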
@@ -836,7 +838,6 @@
 {
 	struct mbuf *m;
 	struct socket *so = (struct socket *)data;
-	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	SOCK_LOCK(so);
 	if (sock_owned_by_user(sk))         /* try in a bit */
@@ -867,7 +868,6 @@
 active_open_failed(struct socket *so, struct mbuf *m)
 {
 	struct cpl_act_open_rpl *rpl = cplhdr(m);
-	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	if (rpl->status == CPL_ERR_CONN_EXIST &&
 	    icsk->icsk_retransmit_timer.function != act_open_retry_timer) {
@@ -903,7 +903,7 @@
 	if (cdev->type != T3A && act_open_has_tid(rpl->status))
 		cxgb_queue_tid_release(cdev, GET_TID(rpl));
 
-	process_cpl_msg_ref(active_open_failed, sk, m);
+	process_cpl_msg_ref(active_open_failed, so, m);
 	return 0;
 }
 
@@ -922,7 +922,7 @@
 
 	SOCK_LOCK(so);
 	soref(so);
-	if (tp->t_state == TCPS_SYN_SENT || sk->sk_state == TCP_SYN_RECV) {
+	if (tp->t_state == TCPS_SYN_SENT || tp->t_state == TCPS_SYN_RECEIVED) {
 		if (!sock_owned_by_user(sk)) {
 			fail_act_open(sk, EHOSTUNREACH);
 			m_freem(m);
@@ -958,7 +958,7 @@
 static unsigned int
 select_rcv_wnd(struct socket *so)
 {
-	unsigned int wnd = tcp_full_space(sk);
+	unsigned int wnd = tcp_full_space(so);
 
 	/*
 	 * For receive coalescing to work effectively we need a receive window
@@ -970,19 +970,6 @@
 	return min(wnd, MAX_RCV_WND);
 }
 
-#if defined(TCP_CONGESTION_CONTROL)
-static void
-pivot_ca_ops(struct socket *so, int cong)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-
-	if (icsk->icsk_ca_ops->release)
-		icsk->icsk_ca_ops->release(sk);
-	module_put(icsk->icsk_ca_ops->owner);
-	icsk->icsk_ca_ops = &t3_cong_ops[cong < 0 ? 2 : cong];
-}
-#endif
-
 #define CTRL_SKB_LEN 120
 
 /*
@@ -996,17 +983,17 @@
 {
 	struct tcpcb *tp = sototcpcb(so);
 
-	TOE_DEV(sk) = dev;
+	TOE_DEV(so) = dev;
 	TID(so) = tid;
-	L2T_ENTRY(sk) = e;
+	L2T_ENTRY(so) = e;
 	WR_MAX(so) = WR_AVAIL(so) = TOM_TUNABLE(dev, max_wrs);
 	WR_UNACKED(so) = 0;
-	DELACK_MODE(sk) = 0;
+	DELACK_MODE(so) = 0;
 	MTU_IDX(so) = select_mss(sk, dst_mtu(dst));
-	tp->rcv_wnd = select_rcv_wnd(sk);
+	tp->rcv_wnd = select_rcv_wnd(so);
 	ULP_MODE(so) = TOM_TUNABLE(dev, ddp) && !sock_flag(sk, NO_DDP) &&
 		       tp->rcv_wnd >= MIN_DDP_RCV_WIN ? ULP_MODE_TCPDDP : 0;
-	QSET_IDX(sk) = 0;
+	QSET_IDX(so) = 0;
 #ifdef CTRL_SKB_CACHE
 	CTRL_SKB_CACHE(so) = alloc_skb(CTRL_SKB_LEN, gfp_any());
 #endif
@@ -1030,9 +1017,9 @@
 	struct l2t_entry *e;
 	struct tom_data *d = TOM_DATA(tdev);
 	struct tcpcb *tp = sototcpcb(so);
-	struct dst_entry *dst = __sk_dst_get(sk);
+	struct dst_entry *dst = __sk_dst_get(so);
 
-	int atid = cxgb_alloc_atid(d->cdev, d->client, sk);
+	int atid = cxgb_alloc_atid(d->cdev, d->client, so);
 	if (atid < 0)
 		goto out_err;
 
@@ -1049,7 +1036,7 @@
 	install_offload_ops(so);
 
 	init_offload_sk(sk, tdev, atid, e, dst);
-	tp->rcv_scale) = select_rcv_wscale(tcp_full_space(sk),
+	tp->rcv_scale = select_rcv_wscale(tcp_full_space(so),
 					   sysctl_tcp_window_scaling,
 					   tp->window_clamp);
 	sock_reset_flag(sk, SOCK_DONE);
@@ -1106,9 +1093,9 @@
 #endif	
 
 	/* Purge the send queue so we don't send anything after an abort. */
-	t3_purge_write_queue(sk);
+	t3_purge_write_queue(so);
 
-	if (sock_flag(sk, CLOSE_CON_REQUESTED) && is_t3a(TOE_DEV(sk)))
+	if (sock_flag(sk, CLOSE_CON_REQUESTED) && is_t3a(TOE_DEV(so)))
 		mode |= CPL_ABORT_POST_CLOSE_REQ;
 
 	if (!m)
@@ -1150,11 +1137,12 @@
 
 	t3_send_reset(child, CPL_ABORT_SEND_RST, m);
 	sock_orphan(child);
+#ifdef notyet	
 	INC_ORPHAN_COUNT(child);
-	if (child->sk_state == TCP_CLOSE)
+	if (sototcpcb(child)->t_state == TCPS_CLOSED)
 		inet_csk_destroy_sock(child);
-	sorele_lock(child);
-
+#endif	
+	sorele(child);
 	SOCK_UNLOCK(child);
 }
 
@@ -1231,7 +1219,7 @@
 	req->mask = cpu_to_be64(mask);
 	req->val = cpu_to_be64(val);
 
-	m->m_priority = mkprio(CPL_PRIORITY_CONTROL, sk);
+	m->m_priority = mkprio(CPL_PRIORITY_CONTROL, so);
 	send_or_defer(so, tp, m, 0);
 }
 
@@ -1377,7 +1365,7 @@
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_GET_TCB, TID(so)));
 	req->cpuno = htons(qset(sototcpcb(so)));
-	if (sk->sk_state == TCP_SYN_SENT)
+	if (sototcpcb(so)->t_state == TCPS_SYN_SENT)
 		__skb_queue_tail(&tp->out_of_order_queue, m);	// defer
 	else
 		cxgb_ofld_send(TOE_DEV(so), m);
@@ -1411,7 +1399,7 @@
 				TID(sototcpcb(so))));
 	req->credit_dack = htonl(dack | V_RX_CREDITS(credits) |
 				 V_RX_FORCE_ACK(nofail));
-	skb->priority = mkprio(CPL_PRIORITY_ACK, sk);
+	m->m_priority = mkprio(CPL_PRIORITY_ACK, so);
 	cxgb_ofld_send(TOE_DEV(so), m);
 	return credits;
 }
@@ -1448,20 +1436,21 @@
 handle_urg_ptr(struct socket *so, uint32_t urg_seq)
 {
 	struct tcpcb *tp = sototcpcb(so);
-
+	struct tcptw *tw = intotw(sotoinpcb(so));
+	
 	urg_seq--;   /* initially points past the urgent data, per BSD */
 #ifdef notyet
 	if (tp->urg_data && !after(urg_seq, tp->urg_seq))
 		return;                                 /* duplicate pointer */
 
-	sk_send_sigurg(sk);
+	sk_send_sigurg(so);
 	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
 	    !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
 		struct mbuf *m = mbufq_peek(&sk->sk_receive_queue); /* XXX ??? */
 
 		tp->copied_seq++;		
 		if (m && tp->copied_seq - TCP_SKB_CB(skb)->seq >= skb->len)
-			tom_eat_skb(sk, skb, 0);
+			tom_eat_skb(sk, m, 0);
 	}
 	tp->urg_data = TCP_URG_NOTYET;
 	tp->urg_seq = urg_seq;
@@ -1485,7 +1474,7 @@
 {
 	struct cpl_rx_urg_notify *hdr = cplhdr(m);
 
-	if (!sk_no_receive(sk))
+	if (!sk_no_receive(so))
 		handle_urg_ptr(so, ntohl(hdr->seq));
 
 	m_freem(m);
@@ -1677,7 +1666,7 @@
 		if (!m->m_len) {
 			if (!ddp_offset)
 				q->kbuf_posted--;
-			BUG_ON(m->m_len);
+			PANIC_IF(m->m_len);
 			m_freem(m);
 			return;
 		}
@@ -1694,17 +1683,19 @@
 	TCP_SKB_CB(skb)->seq = tp->rcv_nxt;
 #endif	
 	tp->rcv_nxt += skb->len;
-
-	inet_csk(sk)->icsk_ack.lrcvtime = tcp_time_stamp;
+	/*
+	 * XXX ?
+	 */
+	sototcpcb(so)->ts_recent = tcp_time_stamp;
 	skb->h.th = tcphdr_skb->h.th;
 
 #ifdef T3_TRACE
 	T3_TRACE3(TB(q),
 		  "tcb_rpl_as_ddp_complete: seq 0x%x hwbuf %u lskb->len %u",
-		  TCP_SKB_CB(skb)->seq, q->cur_buf, skb->len);
+		  TCP_SKB_CB(skb)->seq, q->cur_buf, m->m_len);
 #endif
-
-	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	
+	sbappend(&so->so_rcv, m);
 
 	if (!sock_flag(so, SOCK_DEAD))
 		sk->sk_data_ready(sk, 0);
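
sbappend() above takes over from __skb_queue_tail(); the sk_data_ready()
call is the remaining Linuxism on this path. A fully converted receive path
would read roughly as follows (a sketch using the stock sockbuf API):

	/* Queue the data and wake any threads sleeping in soreceive(). */
	sbappend(&so->so_rcv, m);
	sorwakeup(so);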
@@ -1761,8 +1752,9 @@
 	bsp->cur_offset += skb->len;
 	if (!(bsp->flags & DDP_BF_NOFLIP))
 		q->cur_buf ^= 1;
-	inet_csk(sk)->icsk_ack.lrcvtime = tcp_time_stamp;
-	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	
+	sototcpcb(so)->ts_recent = tcp_time_stamp;
+	sbappend(&so->so_rcv, m);
 
 	/* For now, don't re-enable DDP after a connection fell out of  DDP
 	 * mode.
@@ -1779,12 +1771,12 @@
 	struct tcpcb *tp = sototcpcb(so);
 
 	if (__predict_false(sk_no_receive(so))) {
-		handle_excess_rx(sk, skb);
+		handle_excess_rx(so, m);
 		return;
 	}
 
 	if (ULP_MODE(so) == ULP_MODE_TCPDDP)
-		handle_ddp_data(sk, skb);
+		handle_ddp_data(so, m);
 
 	TCP_SKB_CB(skb)->seq = ntohl(hdr->seq);
 	TCP_SKB_CB(skb)->flags = 0;
@@ -1821,11 +1813,10 @@
 #ifdef T3_TRACE
 	T3_TRACE2(TIDTB(so),
 		  "new_rx_data: seq 0x%x len %u",
-		  TCP_SKB_CB(skb)->seq, skb->len);
+		  TCP_SKB_CB(skb)->seq, m->m_len);
 #endif
-
-	inet_csk(so)->icsk_ack.lrcvtime = tcp_time_stamp;
-	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	sototcpcb(so)->ts_recent = tcp_time_stamp;
+	sbappend(&so->so_rcv, m);
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_data_ready(sk, 0);
 }
@@ -1843,7 +1834,7 @@
 	skb->h.th = tcphdr_skb->h.th;
 	skb->mac.raw = NULL;           /* indicates packet is RX_DATA */
 
-	process_cpl_msg(new_rx_data, sk, skb);
+	process_cpl_msg(new_rx_data, so, m);
 	return 0;
 }
 
@@ -1857,7 +1848,7 @@
 	unsigned int ddp_len, rcv_nxt, ddp_report, end_offset, buf_idx;
 
 	if (__predict_false(sk_no_receive(so))) {
-		handle_excess_rx(sk, skb);
+		handle_excess_rx(so, m);
 		return;
 	}
 
@@ -1913,7 +1904,7 @@
 	 */
 	if ((bsp->flags & DDP_BF_NOINVAL) && end_offset != bsp->gl->length) {
 		TCP_SKB_CB(skb)->flags = 0;  /* potential spurious completion */
-		BUG_ON(1);
+		PANIC_IF(1);
 	} else {
 		TCP_SKB_CB(skb)->flags = !!(ddp_report & F_DDP_BUF_COMPLETE);
 		if (TCP_SKB_CB(skb)->flags && !(bsp->flags & DDP_BF_NOFLIP))
@@ -1928,8 +1919,8 @@
 	if (ddp_report & F_DDP_PSH)
 		TCP_SKB_CB(skb)->flags |= DDP_BF_PSH;
 
-	inet_csk(so)->icsk_ack.lrcvtime = tcp_time_stamp;
-	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	sototcpcb(so)->ts_recent = tcp_time_stamp;
+	sbappend(&so->so_rcv, m);
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_data_ready(sk, 0);
 }
@@ -1957,7 +1948,7 @@
 	}
 
 	skb->h.th = tcphdr_skb->h.th;
-	process_cpl_msg(new_rx_data_ddp, sk, skb);
+	process_cpl_msg(new_rx_data_ddp, so, m);
 	return 0;
 }
 
@@ -2013,8 +2004,9 @@
 	TCP_SKB_CB(skb)->seq = tp->rcv_nxt;
 	tp->rcv_nxt += skb->len;
 
-	inet_csk(so)->icsk_ack.lrcvtime = tcp_time_stamp;
-	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	sototcpcb(so)->ts_recent = tcp_time_stamp;
+	sbappend(&so->so_rcv, m);
+
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_data_ready(sk, 0);
 }
@@ -2030,7 +2022,7 @@
 	VALIDATE_SOCK(so);
 
 	skb->h.th = tcphdr_skb->h.th;
-	process_cpl_msg(process_ddp_complete, sk, skb);
+	process_cpl_msg(process_ddp_complete, so, m);
 	return 0;
 }
 
@@ -2054,7 +2046,7 @@
 
 	TS_RECENT_STAMP(tp) = 0;	     /* defeat recycling */
 	tp->srtt = 0;                        /* defeat tcp_update_metrics */
-	tcp_time_wait(sk, TCP_TIME_WAIT, 0); /* calls tcp_done */
+	tcp_time_wait(so, TCP_TIME_WAIT, 0); /* calls tcp_done */
 }
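
tcp_time_wait() is likewise a Linuxism; FreeBSD's counterpart is
tcp_twstart(), which compresses the tcpcb into a struct tcptw and subsumes
the tcp_done() step. A sketch of the eventual BSD form, assuming the stock
timewait API:

	tp->ts_recent_age = 0;		/* defeat recycling */
	tp->t_srtt = 0;			/* defeat metrics update */
	tcp_twstart(tp);		/* moves the connection to TIME_WAIT */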
 
 /*
@@ -2078,7 +2070,7 @@
 		return 0;
 
 	if (__predict_false(sk_no_receive(so))) {
-		handle_excess_rx(sk, skb);
+		handle_excess_rx(so, m);
 
 		/*
 		 * Although we discard the data we want to process the FIN so
@@ -2102,10 +2094,10 @@
 	bsp->cur_offset += skb->len;
 	if (!(bsp->flags & DDP_BF_NOFLIP))
 		q->cur_buf ^= 1;
-	inet_csk(so)->icsk_ack.lrcvtime = tcp_time_stamp;
-	__skb_queue_tail(&sk->sk_receive_queue, skb);
-	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, 0);
+	sototcpcb(so)->ts_recent = tcp_time_stamp;
+	sbappend(&so->so_rcv, m);
+	if (!sock_flag(so, SOCK_DEAD))
+		sk->sk_data_ready(so, 0);
 	return 1;
 }
 
@@ -2116,32 +2108,32 @@
 do_peer_fin(struct socket *so, struct mbuf *skb)
 {
 	struct tcpcb *tp = sototcpcb(so);
-	int keep = 0, dead = sock_flag(sk, SOCK_DEAD);
+	int keep = 0, dead = sock_flag(so, SOCK_DEAD);
 
 #ifdef T3_TRACE
 	T3_TRACE0(TIDTB(so),"do_peer_fin:");
 #endif
 
-	if (!is_t3a(TOE_DEV(so)) && sock_flag(sk, ABORT_RPL_PENDING))
+	if (!is_t3a(TOE_DEV(so)) && sock_flag(so, ABORT_RPL_PENDING))
 		goto out;
 
 	if (ULP_MODE(tp) == ULP_MODE_TCPDDP) {
-		keep = handle_peer_close_data(sk, skb);
+		keep = handle_peer_close_data(so, m);
 		if (keep < 0)
 			return;
 	}
 
 	sk->sk_shutdown |= RCV_SHUTDOWN;
-	sock_set_flag(sk, SOCK_DONE);
-	switch (sk->sk_state) {
-	case TCP_SYN_RECV:
-	case TCP_ESTABLISHED:
+	sock_set_flag(so, SOCK_DONE);
+	switch (sototcpcb(so)->t_state) {
+	case TCPS_SYN_RECEIVED:
+	case TCPS_ESTABLISHED:
 		tcp_set_state(sk, TCP_CLOSE_WAIT);
 		break;
-	case TCP_FIN_WAIT1:
+	case TCPS_FIN_WAIT_1:
 		tcp_set_state(sk, TCP_CLOSING);
 		break;
-	case TCP_FIN_WAIT2:
+	case TCPS_FIN_WAIT_2:
 		/*
 		 * If we've sent an abort_req we must have sent it too late,
 		 * HW will send us a reply telling us so, and this peer_close
@@ -2160,7 +2152,7 @@
 	default:
 		log(LOG_ERR,
 		       "%s: TID %u received PEER_CLOSE in bad state %d\n",
-		       TOE_DEV(so)->name, TID(so), sk->sk_state);
+		    TOE_DEV(so)->name, TID(so), sototcpcb(so)->t_state);
 	}
 
 	if (!dead) {
@@ -2168,7 +2160,7 @@
 
 		/* Do not send POLL_HUP for half duplex close. */
 		if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
-		    sk->sk_state == TCP_CLOSE)
+		    sototcpcb(so)->t_state == TCPS_CLOSED)
 			sk_wake_async(sk, 1, POLL_HUP);
 		else
 			sk_wake_async(sk, 1, POLL_IN);
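
The TCPS_* values used in the converted switch come from FreeBSD's
<netinet/tcp_fsm.h>; note the underscores in the FIN_WAIT states, unlike
their Linux TCP_* counterparts:

	TCPS_CLOSED		/* Linux TCP_CLOSE */
	TCPS_LISTEN		/* TCP_LISTEN */
	TCPS_SYN_SENT		/* TCP_SYN_SENT */
	TCPS_SYN_RECEIVED	/* TCP_SYN_RECV */
	TCPS_ESTABLISHED	/* TCP_ESTABLISHED */
	TCPS_CLOSE_WAIT		/* TCP_CLOSE_WAIT */
	TCPS_FIN_WAIT_1		/* TCP_FIN_WAIT1 */
	TCPS_CLOSING		/* TCP_CLOSING */
	TCPS_LAST_ACK		/* TCP_LAST_ACK */
	TCPS_FIN_WAIT_2		/* TCP_FIN_WAIT2 */
	TCPS_TIME_WAIT		/* TCP_TIME_WAIT */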
@@ -2187,7 +2179,7 @@
 
 	VALIDATE_SOCK(so);
 
-	process_cpl_msg_ref(do_peer_fin, sk, skb);
+	process_cpl_msg_ref(do_peer_fin, so, m);
 	return 0;
 }
 
@@ -2200,15 +2192,15 @@
 	if (!is_t3a(TOE_DEV(so)) && sock_flag(sk, ABORT_RPL_PENDING))
 		goto out;
 
-	switch (sk->sk_state) {
-	case TCP_CLOSING:              /* see FIN_WAIT2 case in do_peer_fin */
+	switch (sototcpcb(so)->t_state) {
+	case TCPS_CLOSING:              /* see FIN_WAIT2 case in do_peer_fin */
 		t3_release_offload_resources(so);
 		if (sock_flag(sk, ABORT_RPL_PENDING))
 			connection_done(so);
 		else
 			enter_timewait(so);
 		break;
-	case TCP_LAST_ACK:
+	case TCPS_LAST_ACK:
 		/*
 		 * In this state we don't care about pending abort_rpl.
 		 * If we've sent abort_req it was post-close and was sent too
@@ -2217,7 +2209,7 @@
 		t3_release_offload_resources(so);
 		connection_done(so);
 		break;
-	case TCP_FIN_WAIT1:
+	case TCPS_FIN_WAIT_1:
 		tcp_set_state(sk, TCP_FIN_WAIT2);
 		sk->sk_shutdown |= SEND_SHUTDOWN;
 		dst_confirm(sk->sk_dst_cache);
@@ -2226,7 +2218,7 @@
 			sk->sk_state_change(so); // Wake up lingering close()
 		else if (sototcpcb(so)->linger2 < 0 &&
 			 !sock_flag(sk, ABORT_SHUTDOWN))
-			abort_conn(sk, skb, LINUX_MIB_TCPABORTONLINGER);
+			abort_conn(so, m, LINUX_MIB_TCPABORTONLINGER);
 		break;
 	default:
 		log(LOG_ERR,
@@ -2245,7 +2237,7 @@
 {
 	struct socket *so = (struct socket *)ctx;
 
-	VALIDATE_SOCK(sk);
+	VALIDATE_SOCK(so);
 
 	process_cpl_msg_ref(process_close_con_rpl, so, m);
 	return 0;
@@ -2261,22 +2253,22 @@
 process_abort_rpl(struct socket *so, struct mbuf *m)
 {
 #ifdef T3_TRACE
-	T3_TRACE1(TIDTB(sk),
+	T3_TRACE1(TIDTB(so),
 		  "process_abort_rpl: GTS rpl pending %d",
 		  sock_flag(sk, ABORT_RPL_PENDING));
 #endif
 
 	if (sock_flag(sk, ABORT_RPL_PENDING)) {
-		if (!sock_flag(sk, ABORT_RPL_RCVD) && !is_t3a(TOE_DEV(sk)))
+		if (!sock_flag(sk, ABORT_RPL_RCVD) && !is_t3a(TOE_DEV(so)))
 			sock_set_flag(sk, ABORT_RPL_RCVD);
 		else {
 			sock_reset_flag(sk, ABORT_RPL_RCVD);
 			sock_reset_flag(sk, ABORT_RPL_PENDING);
 			if (!sock_flag(sk, ABORT_REQ_RCVD) ||
-			    !is_t3a(TOE_DEV(sk))) {
-				BUG_ON(sock_flag(sk, ABORT_REQ_RCVD));
-				t3_release_offload_resources(sk);
-				connection_done(sk);
+			    !is_t3a(TOE_DEV(so))) {
+				PANIC_IF(sock_flag(sk, ABORT_REQ_RCVD));
+				t3_release_offload_resources(so);
+				connection_done(so);
 			}
 		}
 	}
@@ -2304,7 +2296,7 @@
 		return 0;
 	}
 
-	sk = (struct socket *)ctx;
+	so = (struct socket *)ctx;
 
 	/*
 	 * Sometimes we've already closed the socket, e.g., a post-close
@@ -2313,10 +2305,10 @@
 	 * but FW turns the ABORT_REQ into a regular one and so we get
 	 * ABORT_RPL_RSS with status 0 and no socket.  Only on T3A.
 	 */
-	if (!sk)
+	if (!so)
 		goto discard;
 
-	process_cpl_msg_ref(process_abort_rpl, sk, skb);
+	process_cpl_msg_ref(process_abort_rpl, so, m);
 	return 0;
 }
 
@@ -2333,7 +2325,7 @@
 		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);	// fall through
 	case CPL_ERR_CONN_RESET:
 		// XXX need to handle SYN_RECV due to crossed SYNs
-		return sk->sk_state == TCP_CLOSE_WAIT ? EPIPE : ECONNRESET;
+		return sototcpcb(so)->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET;
 	case CPL_ERR_XMIT_TIMEDOUT:
 	case CPL_ERR_PERSIST_TIMEDOUT:
 	case CPL_ERR_FINWAIT2_TIMEDOUT:
@@ -2363,7 +2355,7 @@
 	struct cpl_abort_req_rss *req = cplhdr(m);
 
 	reply_skb = alloc_skb_nofail(sizeof(struct cpl_abort_rpl));
-	reply_skb->priority = CPL_PRIORITY_DATA;
+	reply_skb->m_priority = CPL_PRIORITY_DATA;
 	__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
 	set_abort_rpl_wr(reply_skb, GET_TID(req), req->status);
 	cxgb_ofld_send(TOM_DATA(tdev)->cdev, reply_skb);
@@ -2395,7 +2387,7 @@
 		return;
 	}
 
-	reply_skb->priority = CPL_PRIORITY_DATA;
+	reply_skb->m_priority = CPL_PRIORITY_DATA;
 	set_abort_rpl_wr(reply_skb, GET_TID(req), rst_status);
 	m_freem(m);
 	/*
@@ -2428,7 +2420,7 @@
 	 * otherwise the server already did the clean up as it was purging
 	 * its SYN queue and the skb was just sitting in its backlog.
 	 */
-	if (__predict_true(parent->sk_state == TCP_LISTEN)) {
+	if (__predict_true(sototcpcb(parent)->t_state == TCPS_LISTEN)) {
 		cleanup_syn_rcv_conn(child, parent);
 		t3_release_offload_resources(child);
 		connection_done(child);
@@ -2442,9 +2434,9 @@
 static void
 bl_abort_syn_rcv(struct socket *lso, struct mbuf *m)
 {
-	struct socket *child = skb->sk;
+	struct socket *child = m->m_pkthdr.priv;
 
-	skb->sk = NULL;
+	m->m_pkthdr.priv = NULL;
 	do_abort_syn_rcv(child, lso);
 	send_abort_rpl(skb, BLOG_SKB_CB(skb)->dev, CPL_ABORT_NO_RST);
 }
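
m_pkthdr.priv, used above to carry the child socket through the listener's
backlog, is an opentoe-branch mbuf extension. On a stock kernel the same
pointer could ride in an mbuf tag instead; a sketch, with a made-up tag
type:

	#define MTAG_TOE_SOCKET	1	/* hypothetical tag type */

	struct m_tag *t;

	/* Attach the child socket pointer to the mbuf. */
	t = m_tag_get(MTAG_TOE_SOCKET, sizeof(struct socket *), M_NOWAIT);
	if (t != NULL) {
		*(struct socket **)(t + 1) = child;
		m_tag_prepend(m, t);
	}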
@@ -2457,7 +2449,7 @@
 abort_syn_rcv(struct socket *so, struct mbuf *m)
 {
 	struct socket *parent;
-	struct toedev *tdev = TOE_DEV(sk);
+	struct toedev *tdev = TOE_DEV(so);
 	struct toedev *cdev = TOM_DATA(tdev)->cdev;
 	const struct request_sock *oreq = sk->sk_user_data;
 	struct toe_tid_entry *toe_stid;
@@ -2475,7 +2467,7 @@
 		do_abort_syn_rcv(sk, parent);
 		send_abort_rpl(skb, tdev, CPL_ABORT_NO_RST);
 	} else {
-		skb->sk = sk;
+		m->m_pkthdr.priv = so;
 		SET_BLOG_CPL_HANDLER(skb, bl_abort_syn_rcv);
 		sk_add_backlog(parent, skb);
 	}
@@ -2511,20 +2503,20 @@
 	 *    That will generate an abort_rpl with status 0, wait for it.
 	 */
 	if (!sock_flag(sk, ABORT_RPL_PENDING) ||
-	    (is_t3a(TOE_DEV(sk)) && sock_flag(sk, CLOSE_CON_REQUESTED))) {
+	    (is_t3a(TOE_DEV(so)) && sock_flag(so, CLOSE_CON_REQUESTED))) {
 		sk->sk_err = abort_status_to_errno(sk, req->status,
 						   &rst_status);
 		if (!sock_flag(sk, SOCK_DEAD))
-			sk->sk_error_report(sk);
+			sk->sk_error_report(so);
 		/*
 		 * SYN_RECV needs special processing.  If abort_syn_rcv()
 		 * returns 0 is has taken care of the abort.
 		 */
-		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
+		if (sototcpcb(so)->t_state == TCPS_SYN_RECEIVED && !abort_syn_rcv(so, m))
 			return;
 
-		t3_release_offload_resources(sk);
-		connection_done(sk);
+		t3_release_offload_resources(so);
+		connection_done(so);
 	}
 
 	send_abort_rpl(skb, BLOG_SKB_CB(skb)->dev, rst_status);
@@ -2544,13 +2536,13 @@
 		return 0;
 	}
 
-	VALIDATE_SOCK(sk);
+	VALIDATE_SOCK(so);
 
 	/*
 	 * Save the offload device in the skb, we may process this message
 	 * after the socket has closed.
 	 */
-	BLOG_SKB_CB(skb)->dev = TOE_DEV(sk);
+	BLOG_SKB_CB(skb)->dev = TOE_DEV(so);
 
 	process_cpl_msg_ref(process_abort_req, sk, skb);
 	return 0;
@@ -2601,7 +2593,7 @@
 	}
 
 	oreq = sk->sk_user_data;
-	cdev = TOE_DEV(sk);
+	cdev = TOE_DEV(so);
 	t = &(TOE_DATA(cdev))->tid_maps;
 	toe_stid = lookup_stid(t, oreq->ts_recent);
 	parent = ((struct listen_ctx *)toe_stid->ctx)->lso;
@@ -2638,7 +2630,7 @@
 			    .nl_u = { .ip4_u =
 				      { .daddr = req->af.v4_req.rmt_addr,
 					.saddr = req->af.v4_req.loc_addr,
-					.tos = RT_CONN_FLAGS(sk)}},
+					.tos = RT_CONN_FLAGS(so)}},
 			    .proto = IPPROTO_TCP,
 			    .uli_u = { .ports =
 				       { .sport = sotoinpcb(so)->sport,
@@ -2740,7 +2732,7 @@
 	struct cpl_pass_accept_rpl *rpl = cplhdr(reply_skb);
 	unsigned int tid = GET_TID(req);
 
-	reply_skb->priority = CPL_PRIORITY_SETUP;
+	reply_mbuf->m_priority = CPL_PRIORITY_SETUP;
 	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, tid));
 	rpl->peer_ip = req->peer_ip;   // req->peer_ip not overwritten yet
@@ -2796,11 +2788,11 @@
 		goto out;
 	}
 
-	if (sk->sk_state != TCP_LISTEN)
+	if (sototcpcb(so)->t_state != TCPS_LISTEN)
 		goto reject;
-	if (inet_csk_reqsk_queue_is_full(sk))
+	if (inet_csk_reqsk_queue_is_full(so))
 		goto reject;
-	if (sk_acceptq_is_full(sk) && d->conf.soft_backlog_limit)
+	if (sk_acceptq_is_full(so) && d->conf.soft_backlog_limit)
 		goto reject;
 
 	tim.mac_addr = req->dst_mac;
@@ -2834,8 +2826,8 @@
 			ULP_MODE(sototcpcb(newso)) = 0;
 	}
 
-	reply_skb->sk = newso;
-	set_arp_failure_handler(reply_skb, pass_accept_rpl_arp_failure);
+	reply_mbuf->m_pkthdr.priv = newso;
+	set_arp_failure_handler(reply_mbuf, pass_accept_rpl_arp_failure);
 
 	e = L2T_ENTRY(newso);
 
@@ -2850,7 +2842,7 @@
 	rpl->opt2 = htonl(calc_opt2(newso));
 
 	rpl->rsvd = rpl->opt2;                /* workaround for HW bug */
-	reply_skb->priority = mkprio(CPL_PRIORITY_SETUP, newso);
+	reply_mbuf->m_priority = mkprio(CPL_PRIORITY_SETUP, newso);
 	l2t_send(cdev, reply_skb, e);
 	m_freem(m);
 	if (ULP_MODE(sototcpcb(newso)))
@@ -2936,7 +2928,7 @@
 	 * If the server is closed it has already killed its embryonic
 	 * children.  There is nothing further to do about child.
 	 */
-	if (lsk->sk_state != TCP_LISTEN)
+	if (sototcpcb(lso)->t_state != TCPS_LISTEN)
 		return;
 
 	oreq = child->sk_user_data;
@@ -2966,10 +2958,10 @@
 static void
 bl_add_pass_open_to_parent(struct socket *lso, struct mbuf *m)
 {
-	struct socket *child = skb->sk;
+	struct socket *child = m->m_pkthdr.priv;
 
-	skb->sk = NULL;
-	add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->dev);
+	m->m_pkthdr.priv = NULL;
+	add_pass_open_to_parent(child, lso, BLOG_SKB_CB(skb)->dev);
 	m_freem(m);
 }
 
@@ -2980,7 +2972,7 @@
 static void
 assign_rxopt(struct socket *so, unsigned int opt)
 {
-	const struct toe_data *td = TOE_DATA(TOE_DEV(sk));
+	const struct toe_data *td = TOE_DATA(TOE_DEV(so));
 	struct tcpcb *tp = sototcpcb(so);
 
 	MSS_CLAMP(tp)	      = td->mtus[G_TCPOPT_MSS(opt)] - 40;
@@ -3039,12 +3031,12 @@
 {
 	struct cpl_pass_establish *req = cplhdr(m);
 	struct socket *lso, *so = (struct socket *)ctx;
-	struct toedev *tdev = TOE_DEV(sk);
+	struct toedev *tdev = TOE_DEV(so);
 
-	VALIDATE_SOCK(sk);
+	VALIDATE_SOCK(so);
 
 	SOCK_LOCK(so);
-	if (__predict_false(sock_owned_by_user(sk))) {
+	if (__predict_false(sock_owned_by_user(so))) {
 		// This can only happen in simultaneous opens.  XXX TBD
 		m_freem(m);
 	} else {
@@ -3060,7 +3052,7 @@
 		make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
 
 		if (__predict_false(sk->sk_socket)) {   // simultaneous opens only
-			sk->sk_state_change(sk);
+			sk->sk_state_change(so);
 			sk_wake_async(sk, 0, POLL_OUT);
 		}
 
@@ -3092,7 +3084,7 @@
 			m_freem(m);
 			add_pass_open_to_parent(sk, lso, tdev);
 		} else {
-			skb->sk = sk;
+			m->m_pkthdr.priv = so;
 			BLOG_SKB_CB(skb)->dev = tdev;
 			SET_BLOG_CPL_HANDLER(skb, bl_add_pass_open_to_parent);
 			sk_add_backlog(lso, skb);
@@ -3112,7 +3104,7 @@
 fixup_and_send_ofo(struct socket *so)
 {
 	struct mbuf *skb;
-	struct toedev *tdev = TOE_DEV(sk);
+	struct toedev *tdev = TOE_DEV(so);
 	struct tcpcb *tp = sototcpcb(so);
 	unsigned int tid = TID(so);
 
@@ -3179,13 +3171,12 @@
 	 * defer for lack of a TID.
 	 */
 	if (skb_queue_len(&tp->out_of_order_queue))
-		fixup_and_send_ofo(sk);
+		fixup_and_send_ofo(so);
 
-	if (__predict_true(!sock_flag(sk, SOCK_DEAD))) {
-		sk->sk_state_change(sk);
+	if (__predict_true(!sock_flag(so, SOCK_DEAD))) {
+		sk->sk_state_change(so);
 		sk_wake_async(sk, 0, POLL_OUT);
 	}
-
 	m_freem(m);
 
 	/*
@@ -3197,9 +3188,9 @@
 	 * buffers according to the just learned write_seq, and then we send
 	 * them on their way.
 	 */
-	fixup_pending_writeq_buffers(sk);
-	if (t3_push_frames(sk, 1))
-		sk->sk_write_space(sk);
+	fixup_pending_writeq_buffers(so);
+	if (t3_push_frames(so, 1))
+		sk->sk_write_space(so);
 }
 
 /*
@@ -3212,7 +3203,7 @@
 	unsigned int tid = GET_TID(req);
 	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
 	struct socket *so = (struct socket *)ctx;
-	struct toedev *tdev = TOE_DEV(sk);
+	struct toedev *tdev = TOE_DEV(so);
 	struct tom_data *d = TOM_DATA(tdev);
 
 	/*
@@ -3251,7 +3242,7 @@
 		if (__predict_false(!p)) {
 			printf(LOG_ERR, "%u WR_ACK credits for TID %u with "
 			       "nothing pending, state %u\n",
-			       credits, TID(so), sk->sk_state);
+			    credits, TID(so), sototcpcb(so)->t_state);
 			break;
 		}
 		if (__predict_false(credits < p->csum)) {
@@ -3282,7 +3273,7 @@
 
 	if (__predict_false(before(snd_una, tp->snd_una))) {
 #if VALIDATE_SEQ
-		struct tom_data *d = TOM_DATA(TOE_DEV(sk));
+		struct tom_data *d = TOM_DATA(TOE_DEV(so));
 
 		log(LOG_ERR, "%s: unexpected sequence # %u in WR_ACK "
 		       "for TID %u, snd_una %u\n", (&d->tdev)->name, snd_una,
@@ -3293,10 +3284,10 @@
 
 	if (tp->snd_una != snd_una) {
 		tp->snd_una = snd_una;
-		dst_confirm(sk->sk_dst_cache);
+		dst_confirm(so->sk_dst_cache);
 		tp->rcv_tstamp = tcp_time_stamp;
 		if (tp->snd_una == tp->snd_nxt)
-			sock_reset_flag(sk, TX_WAIT_IDLE);
+			sock_reset_flag(so, TX_WAIT_IDLE);
 	}
 
 	if (skb_queue_len(&sk->sk_write_queue) && t3_push_frames(sk, 0))
@@ -3394,10 +3385,12 @@
 	unsigned int ppod_addr = tag * PPOD_SIZE + td->ddp_llimit;
 
 	for (i = 0; i < nppods; ++i) {
-		skb = alloc_ctrl_skb(tp, sizeof(*req) + PPOD_SIZE);
-		skb->priority = mkprio(CPL_PRIORITY_CONTROL, sk);
-		req = (struct ulp_mem_io *)__skb_put(skb,
-						     sizeof(*req) + PPOD_SIZE);
+		m = alloc_ctrl_skb(tp, sizeof(*req) + PPOD_SIZE);
+
+		MH_ALIGN(m, sizeof(*req) + PPOD_SIZE);
+		req = mtod(m, struct ulp_mem_io *);
+		m->m_priority = mkprio(CPL_PRIORITY_CONTROL, so);
+
 		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
 		req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(ppod_addr >> 5) |
 					   V_ULPTX_CMD(ULP_MEM_WRITE));
@@ -3500,8 +3493,8 @@
 
 	wrlen = sizeof(*wr) + sizeof(*req) + 2 * sizeof(*lock) +
 		sizeof(*getreq);
-	skb = alloc_ctrl_skb(tp, wrlen);
-	skb->priority = mkprio(CPL_PRIORITY_CONTROL, sk);
+	m = alloc_ctrl_skb(tp, wrlen);
+	m->m_priority = mkprio(CPL_PRIORITY_CONTROL, so);
 
 	wr = (struct work_request_hdr *)__skb_put(skb, wrlen);
 	wr->wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
@@ -3536,10 +3529,10 @@
 	p->get_tcb_count++;
 
 #ifdef T3_TRACE
-	T3_TRACE1(TIDTB(sk),

>>> TRUNCATED FOR MAIL (1000 lines) <<<


