/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include "iw_cxgb4.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
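
/*
 * QP reference helpers: the endpoint takes a single reference on its QP
 * for the lifetime of the connection, with the QP_REFERENCED flag
 * ensuring the reference is taken and dropped exactly once.
 */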
static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
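
/*
 * Transmit wrappers for the cxgb4 LLD: both drop the skb and return
 * -EIO immediately if the device is in a fatal error state, and
 * normalize the LLD return value so callers see 0 on success.
 */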
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}
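
/*
 * Derive the effective MSS for the connection from the negotiated TCP
 * options: the adapter MTU-table entry minus 40 bytes of TCP/IP header,
 * less 12 more bytes when timestamps were negotiated.
 */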
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}
static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}
static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	struct net_device *phys_dev = egress_dev;
	if (egress_dev->priv_flags & IFF_802_1Q_VLAN)
		phys_dev = vlan_dev_real_dev(egress_dev);
	return phys_dev;
}
static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}
static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}
static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n)
		return NULL;
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}
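
/*
 * Post a FW_FLOWC_WR to firmware to establish the per-flow context
 * (channel, port, ingress queue, sequence numbers, send buffer and MSS)
 * before any MPA data is sent on the offloaded connection.
 */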
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
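
/*
 * Graceful half-close: queue a CPL_CLOSE_CON_REQ so the hardware sends
 * a FIN on the offloaded TCP connection.
 */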
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
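
/*
 * Build and transmit the active open request. T4 and T5 adapters use
 * different CPL formats, and IPv4 and IPv6 each have their own variant,
 * so four request layouts are handled below.
 */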
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct cpl_t5_act_open_req *t5_req;
	struct cpl_act_open_req6 *req6;
	struct cpl_t5_act_open_req6 *t5_req6;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen;
	int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
			sizeof(struct cpl_act_open_req) :
			sizeof(struct cpl_t5_act_open_req);
	int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
			sizeof(struct cpl_act_open_req6) :
			sizeof(struct cpl_t5_act_open_req6);
	struct sockaddr_in *la = (struct sockaddr_in *)&ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)&ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       CCTRL_ECN(enable_ecn) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		opt2 |= T5_OPT_2_VALID;
		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
	}
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
		if (ep->com.remote_addr.ss_family == AF_INET) {
			req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			OPCODE_TID(req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			req->local_port = la->sin_port;
			req->peer_port = ra->sin_port;
			req->local_ip = la->sin_addr.s_addr;
			req->peer_ip = ra->sin_addr.s_addr;
			req->opt0 = cpu_to_be64(opt0);
			req->params = cpu_to_be32(cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);

			INIT_TP_WR(req6, 0);
			OPCODE_TID(req6) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
			req6->local_port = la6->sin6_port;
			req6->peer_port = ra6->sin6_port;
			req6->local_ip_hi = *((__be64 *)
					      (la6->sin6_addr.s6_addr));
			req6->local_ip_lo = *((__be64 *)
					      (la6->sin6_addr.s6_addr + 8));
			req6->peer_ip_hi = *((__be64 *)
					     (ra6->sin6_addr.s6_addr));
			req6->peer_ip_lo = *((__be64 *)
					     (ra6->sin6_addr.s6_addr + 8));
			req6->opt0 = cpu_to_be64(opt0);
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		}
	} else {
		if (ep->com.remote_addr.ss_family == AF_INET) {
			t5_req = (struct cpl_t5_act_open_req *)
				 skb_put(skb, wrlen);
			INIT_TP_WR(t5_req, 0);
			OPCODE_TID(t5_req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			t5_req->local_port = la->sin_port;
			t5_req->peer_port = ra->sin_port;
			t5_req->local_ip = la->sin_addr.s_addr;
			t5_req->peer_ip = ra->sin_addr.s_addr;
			t5_req->opt0 = cpu_to_be64(opt0);
			t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
					cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t)));
			t5_req->opt2 = cpu_to_be32(opt2);
		} else {
			t5_req6 = (struct cpl_t5_act_open_req6 *)
				  skb_put(skb, wrlen);
			INIT_TP_WR(t5_req6, 0);
			OPCODE_TID(t5_req6) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
			t5_req6->local_port = la6->sin6_port;
			t5_req6->peer_port = ra6->sin6_port;
			t5_req6->local_ip_hi = *((__be64 *)
						 (la6->sin6_addr.s6_addr));
			t5_req6->local_ip_lo = *((__be64 *)
						 (la6->sin6_addr.s6_addr + 8));
			t5_req6->peer_ip_hi = *((__be64 *)
						(ra6->sin6_addr.s6_addr));
			t5_req6->peer_ip_lo = *((__be64 *)
						(ra6->sin6_addr.s6_addr + 8));
			t5_req6->opt0 = cpu_to_be64(opt0);
			t5_req6->params = (__force __be64)cpu_to_be32(
					cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			t5_req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
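
/*
 * Send the MPA start request as immediate data in a FW_OFLD_TX_DATA_WR.
 * For MPA revision 2, an mpa_v2_conn_params block carrying IRD/ORD and
 * the peer2peer RTR mode is prepended to the consumer's private data.
 */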
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return;
}
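
/*
 * Reject an inbound MPA start request: reply with the MPA_REJECT flag
 * set at the negotiated revision, passing back any private data.
 */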
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
				  FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
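
/*
 * The TCP connection is established: move the connection from the atid
 * to the hwtid table, record the initial sequence numbers, and kick off
 * MPA negotiation with a flowc followed by the MPA start request.
 */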
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);
	mutex_unlock(&ep->com.mutex);
	return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	__state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}
static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
	}
}
static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}
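
/*
 * Return RX credits to hardware with a CPL_RX_DATA_ACK so the peer's
 * receive window reopens as streaming-mode MPA data is consumed.
 */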
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
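
/*
 * Accumulate and validate the peer's MPA start reply, then move the QP
 * to RTS. Returns non-zero when the caller should disconnect, e.g.
 * after an RTR mismatch or insufficient IRD forced a TERM.
 */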
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
				insuff_ird = 1;
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err:
	__state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}
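
/*
 * Passive-side counterpart of process_mpa_reply(): accumulate and
 * validate the peer's MPA start request, then deliver the connect
 * request upcall through the listening endpoint.
 */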
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	/*
	 * If the endpoint timer already expired, then we ignore
	 * the start request.  process_timeout() will abort
	 * the connection.
	 */
	if (!stop_ep_timer(ep)) {
		__state_set(&ep->com, MPA_REQ_RCVD);

		/* drive upcall */
		mutex_lock(&ep->parent_ep->com.mutex);
		if (ep->parent_ep->com.state != DEAD) {
			if (connect_request_upcall(ep))
				abort_connection(ep, skb, GFP_KERNEL);
		} else {
			abort_connection(ep, skb, GFP_KERNEL);
		}
		mutex_unlock(&ep->parent_ep->com.mutex);
	}
	return;
}
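
/*
 * Streaming-mode receive path. Data is only expected while MPA
 * negotiation is in progress; anything arriving in FPDU_MODE is a
 * protocol violation and moves the QP to TERMINATE.
 */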
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = lookup_tid(t, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;
		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}
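
/*
 * Retry a failed active open through the firmware filter path: the
 * FW_OFLD_CONNECTION_WR carries the 4-tuple and a pre-built TCB so
 * firmware can drive the connect when the TCAM is full.
 */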
static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;
	struct sockaddr_in *sin;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
			      V_FW_OFLD_CONNECTION_WR_ASTID(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
		(nocong ? NO_CONG(1) : 0) |
		KEEP_ALIVE(1) |
		DELACK(1) |
		WND_SCALE(wscale) |
		MSS_IDX(mtu_idx) |
		L2T_IDX(ep->l2t->idx) |
		TX_CHAN(ep->tx_chan) |
		SMAC_SEL(ep->smac_idx) |
		DSCP(ep->tos) |
		ULP_MODE(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ(rcv_win >> 10));
	req->tcb.opt2 = (__force __be32) (PACE(1) |
		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL(0) |
		CCTRL_ECN(enable_ecn) |
		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32) SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
	req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

/*
 * Returns whether a CPL status conveys negative advice.
 */
static int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

#define ACT_OPEN_RETRY_COUNT 2
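
/*
 * Resolve the neighbour for the destination and fill in the endpoint's
 * L2T entry, MTU, channel and queue indices, with special handling for
 * loopback destinations.
 */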
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(n->dev);
		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(n->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(n->dev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(n->dev) * step];
	}
	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}
	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->remote_addr;
	__u8 *ra;
	int iptype;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
		ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, 0);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
	if (err) {
		pr_err("%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
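
/*
 * Handle the active open reply. Negative advice is ignored, TCAM-full
 * failures may be retried through the firmware ofld-connection path,
 * and "connection exists" failures are retried a bounded number of
 * times before the failure is reported upward.
 */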
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
	struct sockaddr_in *la;
	struct sockaddr_in *ra;
	struct sockaddr_in6 *la6;
	struct sockaddr_in6 *ra6;

	ep = lookup_atid(t, atid);
	la = (struct sockaddr_in *)&ep->com.local_addr;
	ra = (struct sockaddr_in *)&ep->com.remote_addr;
	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (is_neg_adv(status)) {
		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
		       atid);
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		if (ep->com.local_addr.ss_family == AF_INET &&
		    dev->rdev.lldi.enable_fw_ofld_conn) {
			send_fw_act_open_req(ep,
					     GET_TID_TID(GET_AOPEN_ATID(
					     ntohl(rpl->atid_status))));
			return 0;
		}
		break;
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
				      atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		if (ep->com.local_addr.ss_family == AF_INET) {
			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
				atid, status, status2errno(status),
				&la->sin_addr.s_addr, ntohs(la->sin_port),
				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
		} else {
			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
				atid, status, status2errno(status),
				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
		}
		break;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

out:
	return 0;
}
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}
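
/*
 * Accept an inbound connection by sending a CPL_PASS_ACCEPT_RPL with
 * opt0/opt2 derived from the module parameters and the TCP options the
 * peer advertised in its SYN.
 */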
static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos >> 2) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
		       G_IP_HDR_LEN(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN(1);
	}
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		opt2 |= T5_OPT_2_VALID;
		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
	}

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
}
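
/*
 * get_4tuple() walks the Ethernet/IP/TCP headers that follow the CPL
 * to recover the connection 4-tuple; ip->version selects the IPv4 or
 * IPv6 parse.
 */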
static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
		       __u8 *local_ip, __u8 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
		     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
		     ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}
	*peer_port = tcp->source;
	*local_port = tcp->dest;
}
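
/*
 * Passive open: a SYN arrived on one of our server TIDs.  Validate the
 * listening endpoint, resolve a route back to the peer, build a child
 * endpoint and answer with accept_cr(); any failure falls through to
 * reject_cr(), which releases the hardware TID.
 */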
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int err;
	int iptype;
	u16 peer_mss = ntohs(req->tcpopt.mss);

	parent_ep = lookup_stid(t, stid);
	if (!parent_ep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n",
		     __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
				 local_port, peer_port,
				 GET_POPEN_TOS(ntohl(req->tos_stid)));
	} else {
		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n",
		     __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
				  PASS_OPEN_TOS(ntohl(req->tos_stid)),
				  ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	if (peer_mss && child_ep->mtu > (peer_mss + 40))
		child_ep->mtu = peer_mss + 40;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
					  &child_ep->com.local_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;
		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
	accept_cr(child_ep, skb, req);
	set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	goto out;
reject:
	reject_cr(dev, hwtid, skb);
out:
	return 0;
}
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
	     ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);
	set_bit(PASS_ESTAB, &ep->com.history);

	return 0;
}
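
/*
 * The peer sent a FIN.  What happens next depends on where the
 * connection is in its life cycle, so peer_close() is essentially the
 * CLOSING/MORIBUND half of the endpoint state machine.
 */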
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	set_bit(PEER_CLOSE, &ep->com.history);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}
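
/*
 * The peer reset the connection.  Apart from moving the QP to ERROR
 * and notifying the ULP, this is where an MPAv2 endpoint decides to
 * fall back and retry the connection with MPAv1.
 */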
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(PEER_ABORT, &ep->com.history);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * we just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * do some housekeeping so as to re-initiate the
			 * connection
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
					     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
					     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		return 0;
	default:
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

	return 0;
}
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int release = 0;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	struct c4iw_ep *ep;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	return 0;
}
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);
	struct c4iw_qp_attributes attrs;

	ep = lookup_tid(t, tid);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);

	return 0;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}
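
/*
 * iw_cm --> driver downcall: the ULP rejected the connection request,
 * so send an MPA reject (or abort for MPA rev 0) and tear the
 * endpoint down.
 */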
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err = 0;
	int disconnect = 0;
	struct c4iw_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		disconnect = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}
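
/*
 * iw_cm --> driver downcall: the ULP accepted the connection request.
 * Negotiate IRD/ORD against what the peer advertised, bind the QP to
 * the endpoint, move it to RTS and send the MPA reply.
 */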
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(ep->com.state != MPA_REQ_RCVD);

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			ep->ird = conn_param->ird;
			ep->ord = conn_param->ord;
			send_mpa_reject(ep, conn_param->private_data,
					conn_param->private_data_len);
			abort_connection(ep, NULL, GFP_KERNEL);
			err = -ENOMEM;
			goto err;
		}
		if (conn_param->ird > ep->ord) {
			if (!ep->ord)
				conn_param->ird = 1;
			else {
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version != 2)
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	cm_id->rem_ref(cm_id);
err:
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return err;
}
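
/*
 * Loopback connects to INADDR_ANY have no obvious source address, so
 * the helpers below pick one from the first port's primary interface
 * addresses (IPv4) or its non-tentative link-local address (IPv6).
 */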
static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in_device *ind;
	int found = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;

	ind = in_dev_get(dev->rdev.lldi.ports[0]);
	if (!ind)
		return -EADDRNOTAVAIL;
	for_primary_ifa(ind) {
		laddr->sin_addr.s_addr = ifa->ifa_address;
		raddr->sin_addr.s_addr = ifa->ifa_address;
		found = 1;
		break;
	}
	endfor_ifa(ind);
	in_dev_put(ind);
	return found ? 0 : -EADDRNOTAVAIL;
}
static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
			if (ifp->scope == IFA_LINK &&
			    !(ifp->flags & banned_flags)) {
				memcpy(addr, &ifp->addr, 16);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return err;
}
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in6_addr uninitialized_var(addr);
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;

	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
		return 0;
	}
	return -EADDRNOTAVAIL;
}
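
/*
 * Active open: allocate an endpoint and an atid, resolve a route and
 * an L2T entry for the destination, then fire off the connect request
 * to the adapter.  Failures unwind in reverse order via the fail
 * labels.
 */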
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &cm_id->remote_addr;
	__u8 *ra;
	int iptype;

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {
		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
		err = -EINVAL;
		goto fail2;
	}
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->atid_idr, ep, ep->atid);

	if (cm_id->remote_addr.ss_family == AF_INET) {
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
			err = pick_local_ipaddrs(dev, cm_id);
			if (err)
				goto fail2;
		}

		/* find a route */
		PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
		     __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
		     ra, ntohs(raddr->sin_port));
		ep->dst = find_route(dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, 0);
	} else {
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
			err = pick_local_ip6addrs(dev, cm_id);
			if (err)
				goto fail2;
		}

		/* find a route */
		PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
		     __func__, laddr6->sin6_addr.s6_addr,
		     ntohs(laddr6->sin6_port),
		     raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
		ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
	}
	if (!ep->dst) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}

	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	memcpy(&ep->com.local_addr, &cm_id->local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
	       sizeof(ep->com.remote_addr));

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;

	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
				   ep->stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (!err)
		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
					  &ep->com.wr_wait,
					  0, 0, __func__);
	if (err)
		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
		       err, ep->stid,
		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
	return err;
}
static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ep->com.local_addr;

	if (dev->rdev.lldi.enable_fw_ofld_conn) {
		do {
			err = cxgb4_create_server_filter(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				sin->sin_addr.s_addr, sin->sin_port, 0,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
			if (err == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(usecs_to_jiffies(100));
			}
		} while (err == -EBUSY);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
		if (!err)
			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
						  &ep->com.wr_wait,
						  0, 0, __func__);
	}
	if (err)
		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n",
		       err, ep->stid,
		       &sin->sin_addr, ntohs(sin->sin_port));
	return err;
}
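
/*
 * Listen setup: allocate a server TID (a filter-capable sftid when the
 * firmware offload-connection path is enabled for IPv4) and program
 * the adapter via create_server4()/create_server6().
 */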
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	memcpy(&ep->com.local_addr, &cm_id->local_addr,
	       sizeof(ep->com.local_addr));

	/*
	 * Allocate a server TID.
	 */
	if (dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET)
		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
					     cm_id->local_addr.ss_family, ep);
	else
		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
					    cm_id->local_addr.ss_family, ep);

	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->stid_idr, ep, ep->stid);
	state_set(&ep->com, LISTEN);
	if (ep->com.local_addr.ss_family == AF_INET)
		err = create_server4(dev, ep);
	else
		err = create_server6(dev, ep);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	state_set(&ep->com, DEAD);
	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET) {
		err = cxgb4_remove_server_filter(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_remove_server(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
		if (err)
			goto done;
		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
					  0, 0, __func__);
	}
	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}
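
/*
 * Common disconnect path: depending on 'abrupt' this either aborts the
 * connection (RST) or starts a graceful half-close (FIN), driving the
 * endpoint state machine accordingly.  On fatal adapter errors the
 * endpoint resources are released immediately.
 */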
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep, -EIO);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else
			ep->com.state = CLOSING;
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				(void)stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	}

	if (close) {
		if (abrupt) {
			set_bit(EP_DISC_ABORT, &ep->com.history);
			close_complete_upcall(ep, -ECONNRESET);
			ret = send_abort(ep, NULL, gfp);
		} else {
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			ret = send_halfclose(ep, gfp);
		}
		if (ret)
			fatal = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct c4iw_ep *ep;
	int atid = be32_to_cpu(req->tid);

	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
					   (__force u32) req->tid);
	if (!ep)
		return;

	switch (req->retval) {
	case FW_ENOMEM:
		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	case FW_EADDRINUSE:
		set_bit(ACT_RETRY_INUSE, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	default:
		pr_info("%s unexpected ofld conn wr retval %d\n",
			__func__, req->retval);
		break;
	}
	pr_err("active ofld_connect_wr failure %d atid %d\n",
	       req->retval, atid);
	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.act_ofld_conn_fails++;
	mutex_unlock(&dev->rdev.stats.lock);
	connect_reply_upcall(ep, status2errno(req->retval));
	state_set(&ep->com, DEAD);
	remove_handle(dev, &dev->atid_idr, atid);
	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}
static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct sk_buff *rpl_skb;
	struct cpl_pass_accept_req *cpl;
	int ret;

	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
	if (req->retval) {
		PDBG("%s passive open failure %d\n", __func__, req->retval);
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.pas_ofld_conn_fails++;
		mutex_unlock(&dev->rdev.stats.lock);
		kfree_skb(rpl_skb);
	} else {
		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
					(__force u32) htonl(
					(__force u32) req->tid)));
		ret = pass_accept_req(dev, rpl_skb);
		if (!ret)
			kfree_skb(rpl_skb);
	}
}
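
/*
 * FW6 messages that need process context (ofld_connection_wr replies
 * for the filter-based passive/active open paths) are re-dispatched
 * here from the work queue.
 */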
static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

	switch (rpl->type) {
	case FW6_TYPE_CQE:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
		switch (req->t_state) {
		case TCP_SYN_SENT:
			active_ofld_conn_reply(dev, skb, req);
			break;
		case TCP_SYN_RECV:
			passive_ofld_conn_reply(dev, skb, req);
			break;
		default:
			pr_err("%s unexpected ofld conn wr state %d\n",
			       __func__, req->t_state);
			break;
		}
		break;
	}
	return 0;
}
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	u32 l2info;
	u16 vlantag, len, hdr_len, eth_hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;
	struct c4iw_dev *dev;

	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = (__force u16) cpl->vlan;
	len = (__force u16) cpl->len;
	l2info = (__force u32) cpl->l2info;
	hdr_len = (__force u16) cpl->hdr_len;
	intf = cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
			 V_SYN_MAC_IDX(G_RX_MACIDX(
			 (__force int) htonl(l2info))) |
			 F_SYN_XACT_MATCH);
	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
			    G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
			    G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
					(__force int) htonl(l2info))) |
				   V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
					(__force int) htons(hdr_len))) |
				   V_IP_HDR_LEN(G_RX_IPHDR_LEN(
					(__force int) htons(hdr_len))) |
				   V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
	req->vlan = (__force __be16) vlantag;
	req->len = (__force __be16) len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
				    PASS_OPEN_TOS(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
}
static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
		      V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
		      V_FW_OFLD_CONNECTION_WR_ASTID(
			      GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2 which will be used by the firmware
	 * to send us the wr response.
	 */
	req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));

	/*
	 * We initialize the MSS index in TCB to 0xF.
	 * So that when driver sends cpl_pass_accept_rpl
	 * TCB picks up the correct value. If this was 0
	 * TP will ignore any value > 0 for MSS index.
	 */
	req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
	req->cookie = (unsigned long)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0)
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
}
/*
 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt
 * messages when a filter is being used instead of server to
 * redirect a syn packet. When packets hit filter they are redirected
 * to the offload queue and driver tries to establish the connection
 * using firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
			    G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
			    G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			 iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);

	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n",
		       __func__);
		goto free_dst;
	}

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate filter portion for LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	return 0;
}
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt
};
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		break;
	}
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
}
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}
static DECLARE_WORK(skb_work, process_work);
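
/*
 * Endpoint timers fire in softirq context, so ep_timeout() only queues
 * the endpoint on timeout_list and kicks the work queue;
 * process_timedout_eps() does the real work under the ep mutex.
 */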
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}
*dev
, struct sk_buff
*skb
)
3635 struct cpl_abort_req_rss
*req
= cplhdr(skb
);
3637 struct tid_info
*t
= dev
->rdev
.lldi
.tids
;
3638 unsigned int tid
= GET_TID(req
);
3640 ep
= lookup_tid(t
, tid
);
3642 printk(KERN_WARNING MOD
3643 "Abort on non-existent endpoint, tid %d\n", tid
);
3647 if (is_neg_adv(req
->status
)) {
3648 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__
, ep
,
3653 PDBG("%s ep %p tid %u state %u\n", __func__
, ep
, ep
->hwtid
,
3657 * Wake up any threads in rdma_init() or rdma_fini().
3658 * However, if we are on MPAv2 and want to retry with MPAv1
3659 * then, don't wake up yet.
3661 if (mpa_rev
== 2 && !ep
->tried_with_mpa_v1
) {
3662 if (ep
->com
.state
!= MPA_REQ_SENT
)
3663 c4iw_wake_up(&ep
->com
.wr_wait
, -ECONNRESET
);
3665 c4iw_wake_up(&ep
->com
.wr_wait
, -ECONNRESET
);
/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};
c4iw_cm_init(void)
3696 spin_lock_init(&timeout_lock
);
3697 skb_queue_head_init(&rxq
);
3699 workq
= create_singlethread_workqueue("iw_cxgb4");
3706 void __exit
c4iw_cm_term(void)
3708 WARN_ON(!list_empty(&timeout_list
));
3709 flush_workqueue(workq
);
3710 destroy_workqueue(workq
);