cxgb4i: additional types of negative advice
[deliverable/linux.git] drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
7b36b6e0 1/*
2 * cxgb4i.c: Chelsio T4 iSCSI driver.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 * Rakesh Ranjan (rranjan@chelsio.com)
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
15
7b36b6e0 16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <scsi/scsi_host.h>
19#include <net/tcp.h>
20#include <net/dst.h>
21#include <linux/netdevice.h>
759a0cc5 22#include <net/addrconf.h>
7b36b6e0 23
3bd3e8bf 24#include "t4_regs.h"
7b36b6e0 25#include "t4_msg.h"
26#include "cxgb4.h"
27#include "cxgb4_uld.h"
28#include "t4fw_api.h"
29#include "l2t.h"
30#include "cxgb4i.h"
31
32static unsigned int dbg_level;
33
34#include "../libcxgbi.h"
35
36#define DRV_MODULE_NAME "cxgb4i"
37#define DRV_MODULE_DESC "Chelsio T4/T5 iSCSI Driver"
38#define DRV_MODULE_VERSION "0.9.4"
7b36b6e0 39
40static char version[] =
41 DRV_MODULE_DESC " " DRV_MODULE_NAME
3bd3e8bf 42 " v" DRV_MODULE_VERSION "\n";
7b36b6e0 43
44MODULE_AUTHOR("Chelsio Communications, Inc.");
45MODULE_DESCRIPTION(DRV_MODULE_DESC);
46MODULE_VERSION(DRV_MODULE_VERSION);
47MODULE_LICENSE("GPL");
48
49module_param(dbg_level, uint, 0644);
50MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
51
52static int cxgb4i_rcv_win = 256 * 1024;
53module_param(cxgb4i_rcv_win, int, 0644);
 54 MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
55
56static int cxgb4i_snd_win = 128 * 1024;
57module_param(cxgb4i_snd_win, int, 0644);
58MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
59
60static int cxgb4i_rx_credit_thres = 10 * 1024;
61module_param(cxgb4i_rx_credit_thres, int, 0644);
62MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
63 "RX credits return threshold in bytes (default=10KB)");
64
65static unsigned int cxgb4i_max_connect = (8 * 1024);
66module_param(cxgb4i_max_connect, uint, 0644);
67MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");
68
69static unsigned short cxgb4i_sport_base = 20000;
70module_param(cxgb4i_sport_base, ushort, 0644);
71MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
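/*
 * Usage sketch (values are illustrative, not recommendations): the knobs
 * above can be set at module load time, and since they are mode 0644 they
 * can also be inspected or tuned later through sysfs:
 *
 *   modprobe cxgb4i cxgb4i_rcv_win=524288 cxgb4i_snd_win=262144
 *   echo 16384 > /sys/module/cxgb4i/parameters/cxgb4i_rx_credit_thres
 */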
72
73typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);
74
75static void *t4_uld_add(const struct cxgb4_lld_info *);
76static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
77static int t4_uld_state_change(void *, enum cxgb4_state state);
64bfead8 78static inline int send_tx_flowc_wr(struct cxgbi_sock *);
7b36b6e0 79
80static const struct cxgb4_uld_info cxgb4i_uld_info = {
81 .name = DRV_MODULE_NAME,
82 .add = t4_uld_add,
83 .rx_handler = t4_uld_rx_handler,
84 .state_change = t4_uld_state_change,
85};
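/*
 * This table is handed to the lower-level cxgb4 driver via
 * cxgb4_register_uld() in cxgb4i_init_module() below; cxgb4 then invokes
 * t4_uld_add(), t4_uld_rx_handler() and t4_uld_state_change() as adapters
 * attach, CPL messages arrive and link state changes.
 */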
86
87static struct scsi_host_template cxgb4i_host_template = {
88 .module = THIS_MODULE,
89 .name = DRV_MODULE_NAME,
90 .proc_name = DRV_MODULE_NAME,
91 .can_queue = CXGB4I_SCSI_HOST_QDEPTH,
92 .queuecommand = iscsi_queuecommand,
db5ed4df 93 .change_queue_depth = scsi_change_queue_depth,
7b36b6e0 94 .sg_tablesize = SG_ALL,
95 .max_sectors = 0xFFFF,
96 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
97 .eh_abort_handler = iscsi_eh_abort,
98 .eh_device_reset_handler = iscsi_eh_device_reset,
99 .eh_target_reset_handler = iscsi_eh_recover_target,
100 .target_alloc = iscsi_target_alloc,
101 .use_clustering = DISABLE_CLUSTERING,
102 .this_id = -1,
c40ecc12 103 .track_queue_depth = 1,
7b36b6e0 104};
105
106static struct iscsi_transport cxgb4i_iscsi_transport = {
107 .owner = THIS_MODULE,
108 .name = DRV_MODULE_NAME,
109 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
110 CAP_DATADGST | CAP_DIGEST_OFFLOAD |
fdafd4df 111 CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
3128c6c7 112 .attr_is_visible = cxgbi_attr_is_visible,
7b36b6e0 113 .get_host_param = cxgbi_get_host_param,
114 .set_host_param = cxgbi_set_host_param,
115 /* session management */
116 .create_session = cxgbi_create_session,
117 .destroy_session = cxgbi_destroy_session,
118 .get_session_param = iscsi_session_get_param,
119 /* connection management */
120 .create_conn = cxgbi_create_conn,
121 .bind_conn = cxgbi_bind_conn,
122 .destroy_conn = iscsi_tcp_conn_teardown,
123 .start_conn = iscsi_conn_start,
124 .stop_conn = iscsi_conn_stop,
c71b9b66 125 .get_conn_param = iscsi_conn_get_param,
7b36b6e0 126 .set_param = cxgbi_set_conn_param,
127 .get_stats = cxgbi_get_conn_stats,
128 /* pdu xmit req from user space */
129 .send_pdu = iscsi_conn_send_pdu,
130 /* task */
131 .init_task = iscsi_tcp_task_init,
132 .xmit_task = iscsi_tcp_task_xmit,
133 .cleanup_task = cxgbi_cleanup_task,
134 /* pdu */
135 .alloc_pdu = cxgbi_conn_alloc_pdu,
136 .init_pdu = cxgbi_conn_init_pdu,
137 .xmit_pdu = cxgbi_conn_xmit_pdu,
138 .parse_pdu_itt = cxgbi_parse_pdu_itt,
139 /* TCP connect/disconnect */
c71b9b66 140 .get_ep_param = cxgbi_get_ep_param,
7b36b6e0 141 .ep_connect = cxgbi_ep_connect,
142 .ep_poll = cxgbi_ep_poll,
143 .ep_disconnect = cxgbi_ep_disconnect,
144 /* Error recovery timeout call */
145 .session_recovery_timedout = iscsi_session_recovery_timedout,
146};
147
148static struct scsi_transport_template *cxgb4i_stt;
149
150/*
 151 * CPL (Chelsio Protocol Language) defines a message-passing interface between
 152 * the host driver and the Chelsio ASIC.
 153 * The section below implements the CPLs related to iSCSI TCP connection
 154 * open/close/abort and data send/receive.
155 */
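/*
 * Rough shape of the active-open exchange implemented below (host on the
 * left, adapter on the right):
 *
 *   CPL_ACT_OPEN_REQ(6)    ---->
 *                          <----   CPL_ACT_ESTABLISH on success, or
 *                                  CPL_ACT_OPEN_RPL with an error status
 *   FW_FLOWC_WR, tx data   ---->
 *                          <----   CPL_FW4_ACK (returns tx credits)
 *                          <----   CPL_ISCSI_HDR / CPL_RX_DATA_DDP (rx pdus)
 */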
759a0cc5 156
7b36b6e0 157#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
158#define RCV_BUFSIZ_MASK 0x3FFU
159#define MAX_IMM_TX_PKT_LEN 128
160
161static inline void set_queue(struct sk_buff *skb, unsigned int queue,
162 const struct cxgbi_sock *csk)
163{
164 skb->queue_mapping = queue;
165}
166
167static int push_tx_frames(struct cxgbi_sock *, int);
168
169/*
170 * is_ofld_imm - check whether a packet can be sent as immediate data
171 * @skb: the packet
172 *
173 * Returns true if a packet can be sent as an offload WR with immediate
174 * data. We currently use the same limit as for Ethernet packets.
175 */
84944d8c 176static inline bool is_ofld_imm(const struct sk_buff *skb)
7b36b6e0 177{
178 int len = skb->len;
179
180 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
181 len += sizeof(struct fw_ofld_tx_data_wr);
182
183 return len <= MAX_IMM_TX_PKT_LEN;
7b36b6e0 184}
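/*
 * Worked example, with the WR header size assumed for illustration: if
 * sizeof(struct fw_ofld_tx_data_wr) were 16 bytes, a PDU that still needs
 * its header (SKCBF_TX_NEED_HDR set) would qualify for immediate data only
 * when skb->len <= 112, keeping the total within MAX_IMM_TX_PKT_LEN (128).
 */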
185
186static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
187 struct l2t_entry *e)
188{
3bd3e8bf 189 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
759a0cc5 190 int t4 = is_t4(lldi->adapter_type);
7b36b6e0 191 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
192 unsigned long long opt0;
193 unsigned int opt2;
194 unsigned int qid_atid = ((unsigned int)csk->atid) |
195 (((unsigned int)csk->rss_qid) << 14);
196
197 opt0 = KEEP_ALIVE_F |
198 WND_SCALE_V(wscale) |
199 MSS_IDX_V(csk->mss_idx) |
200 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
201 TX_CHAN_V(csk->tx_chan) |
202 SMAC_SEL_V(csk->smac_idx) |
203 ULP_MODE_V(ULP_MODE_ISCSI) |
204 RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
205 opt2 = RX_CHANNEL_V(0) |
206 RSS_QUEUE_VALID_F |
207 (RX_FC_DISABLE_F) |
208 RSS_QUEUE_V(csk->rss_qid);
7b36b6e0 209
210 if (is_t4(lldi->adapter_type)) {
211 struct cpl_act_open_req *req =
212 (struct cpl_act_open_req *)skb->head;
213
214 INIT_TP_WR(req, 0);
215 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
7b36b6e0 216 qid_atid));
217 req->local_port = csk->saddr.sin_port;
218 req->peer_port = csk->daddr.sin_port;
219 req->local_ip = csk->saddr.sin_addr.s_addr;
220 req->peer_ip = csk->daddr.sin_addr.s_addr;
221 req->opt0 = cpu_to_be64(opt0);
222 req->params = cpu_to_be32(cxgb4_select_ntuple(
223 csk->cdev->ports[csk->port_id],
224 csk->l2t));
d7990b0c 225 opt2 |= RX_FC_VALID_F;
3bd3e8bf 226 req->opt2 = cpu_to_be32(opt2);
7b36b6e0 227
228 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
229 "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
230 csk, &req->local_ip, ntohs(req->local_port),
231 &req->peer_ip, ntohs(req->peer_port),
232 csk->atid, csk->rss_qid);
233 } else {
234 struct cpl_t5_act_open_req *req =
235 (struct cpl_t5_act_open_req *)skb->head;
236
237 INIT_TP_WR(req, 0);
238 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
239 qid_atid));
240 req->local_port = csk->saddr.sin_port;
241 req->peer_port = csk->daddr.sin_port;
242 req->local_ip = csk->saddr.sin_addr.s_addr;
243 req->peer_ip = csk->daddr.sin_addr.s_addr;
244 req->opt0 = cpu_to_be64(opt0);
d7990b0c 245 req->params = cpu_to_be64(FILTER_TUPLE_V(
246 cxgb4_select_ntuple(
247 csk->cdev->ports[csk->port_id],
248 csk->l2t)));
249 opt2 |= 1 << 31;
250 req->opt2 = cpu_to_be32(opt2);
251
252 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
253 "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
254 csk, &req->local_ip, ntohs(req->local_port),
255 &req->peer_ip, ntohs(req->peer_port),
256 csk->atid, csk->rss_qid);
257 }
258
259 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
260
261 pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
262 (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
263 csk->state, csk->flags, csk->atid, csk->rss_qid);
264
265 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
266}
267
f42bb57c 268#if IS_ENABLED(CONFIG_IPV6)
269static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
270 struct l2t_entry *e)
271{
272 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
273 int t4 = is_t4(lldi->adapter_type);
274 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
275 unsigned long long opt0;
276 unsigned int opt2;
277 unsigned int qid_atid = ((unsigned int)csk->atid) |
278 (((unsigned int)csk->rss_qid) << 14);
279
280 opt0 = KEEP_ALIVE_F |
281 WND_SCALE_V(wscale) |
282 MSS_IDX_V(csk->mss_idx) |
283 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
284 TX_CHAN_V(csk->tx_chan) |
285 SMAC_SEL_V(csk->smac_idx) |
286 ULP_MODE_V(ULP_MODE_ISCSI) |
287 RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
759a0cc5 288
289 opt2 = RX_CHANNEL_V(0) |
290 RSS_QUEUE_VALID_F |
291 RX_FC_DISABLE_F |
292 RSS_QUEUE_V(csk->rss_qid);
293
294 if (t4) {
295 struct cpl_act_open_req6 *req =
296 (struct cpl_act_open_req6 *)skb->head;
297
298 INIT_TP_WR(req, 0);
299 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
300 qid_atid));
301 req->local_port = csk->saddr6.sin6_port;
302 req->peer_port = csk->daddr6.sin6_port;
303
304 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
305 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
306 8);
307 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
308 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
309 8);
310
311 req->opt0 = cpu_to_be64(opt0);
312
d7990b0c 313 opt2 |= RX_FC_VALID_F;
314 req->opt2 = cpu_to_be32(opt2);
315
316 req->params = cpu_to_be32(cxgb4_select_ntuple(
317 csk->cdev->ports[csk->port_id],
318 csk->l2t));
319 } else {
320 struct cpl_t5_act_open_req6 *req =
321 (struct cpl_t5_act_open_req6 *)skb->head;
322
323 INIT_TP_WR(req, 0);
324 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
325 qid_atid));
326 req->local_port = csk->saddr6.sin6_port;
327 req->peer_port = csk->daddr6.sin6_port;
328 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
329 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
330 8);
331 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
332 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
333 8);
334 req->opt0 = cpu_to_be64(opt0);
335
d7990b0c 336 opt2 |= T5_OPT_2_VALID_F;
337 req->opt2 = cpu_to_be32(opt2);
338
d7990b0c 339 req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
340 csk->cdev->ports[csk->port_id],
341 csk->l2t)));
342 }
343
344 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
345
346 pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
347 t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
348 &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
349 &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
350 csk->rss_qid);
351
7b36b6e0 352 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
353}
f42bb57c 354#endif
7b36b6e0 355
356static void send_close_req(struct cxgbi_sock *csk)
357{
358 struct sk_buff *skb = csk->cpl_close;
359 struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
360 unsigned int tid = csk->tid;
361
362 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
363 "csk 0x%p,%u,0x%lx, tid %u.\n",
364 csk, csk->state, csk->flags, csk->tid);
365 csk->cpl_close = NULL;
366 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
367 INIT_TP_WR(req, tid);
368 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
369 req->rsvd = 0;
370
371 cxgbi_sock_skb_entail(csk, skb);
372 if (csk->state >= CTP_ESTABLISHED)
373 push_tx_frames(csk, 1);
374}
375
376static void abort_arp_failure(void *handle, struct sk_buff *skb)
377{
378 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
379 struct cpl_abort_req *req;
380
381 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
382 "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
383 csk, csk->state, csk->flags, csk->tid);
384 req = (struct cpl_abort_req *)skb->data;
385 req->cmd = CPL_ABORT_NO_RST;
386 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
387}
388
389static void send_abort_req(struct cxgbi_sock *csk)
390{
391 struct cpl_abort_req *req;
392 struct sk_buff *skb = csk->cpl_abort_req;
393
394 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
395 return;
396
397 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
398 send_tx_flowc_wr(csk);
399 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
400 }
401
7b36b6e0 402 cxgbi_sock_set_state(csk, CTP_ABORTING);
403 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
404 cxgbi_sock_purge_write_queue(csk);
405
406 csk->cpl_abort_req = NULL;
407 req = (struct cpl_abort_req *)skb->head;
408 set_queue(skb, CPL_PRIORITY_DATA, csk);
409 req->cmd = CPL_ABORT_SEND_RST;
410 t4_set_arp_err_handler(skb, csk, abort_arp_failure);
411 INIT_TP_WR(req, csk->tid);
412 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
413 req->rsvd0 = htonl(csk->snd_nxt);
414 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
415
416 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
417 "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
418 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
419 req->rsvd1);
420
421 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
422}
423
424static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
425{
426 struct sk_buff *skb = csk->cpl_abort_rpl;
427 struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
428
429 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
430 "csk 0x%p,%u,0x%lx,%u, status %d.\n",
431 csk, csk->state, csk->flags, csk->tid, rst_status);
432
433 csk->cpl_abort_rpl = NULL;
434 set_queue(skb, CPL_PRIORITY_DATA, csk);
435 INIT_TP_WR(rpl, csk->tid);
436 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
437 rpl->cmd = rst_status;
438 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
439}
440
441/*
442 * CPL connection rx data ack: host ->
443 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
444 * credits sent.
445 */
446static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
447{
448 struct sk_buff *skb;
449 struct cpl_rx_data_ack *req;
450
451 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
452 "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
453 csk, csk->state, csk->flags, csk->tid, credits);
454
24d3f95a 455 skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
7b36b6e0 456 if (!skb) {
457 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
458 return 0;
459 }
460 req = (struct cpl_rx_data_ack *)skb->head;
461
462 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
463 INIT_TP_WR(req, csk->tid);
464 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
465 csk->tid));
466 req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
467 | RX_FORCE_ACK_F);
7b36b6e0 468 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
469 return credits;
470}
471
472/*
473 * sgl_len - calculates the size of an SGL of the given capacity
474 * @n: the number of SGL entries
475 * Calculates the number of flits needed for a scatter/gather list that
476 * can hold the given number of entries.
477 */
478static inline unsigned int sgl_len(unsigned int n)
479{
480 n--;
481 return (3 * n) / 2 + (n & 1) + 2;
482}
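/*
 * Example: sgl_len(8) -> n = 7, (3 * 7) / 2 + (7 & 1) + 2 = 13 flits.
 * The formula mirrors the ULP_TX SGL layout, where the address/length
 * fields of entries after the first are packed in pairs.
 */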
483
484/*
485 * calc_tx_flits_ofld - calculate # of flits for an offload packet
486 * @skb: the packet
487 *
488 * Returns the number of flits needed for the given offload packet.
489 * These packets are already fully constructed and no additional headers
490 * will be added.
491 */
492static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
493{
494 unsigned int flits, cnt;
495
496 if (is_ofld_imm(skb))
497 return DIV_ROUND_UP(skb->len, 8);
498 flits = skb_transport_offset(skb) / 8;
499 cnt = skb_shinfo(skb)->nr_frags;
499e2e6f 500 if (skb_tail_pointer(skb) != skb_transport_header(skb))
7b36b6e0 501 cnt++;
502 return flits + sgl_len(cnt);
503}
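/*
 * E.g. a 100-byte immediate WR costs DIV_ROUND_UP(100, 8) = 13 flits,
 * while a non-immediate skb with 16 bytes of headers, 3 page frags and a
 * non-empty linear tail costs 16 / 8 + sgl_len(4) = 2 + 7 = 9 flits
 * (counts are illustrative only).
 */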
504
505#define FLOWC_WR_NPARAMS_MIN 9
506static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
507{
508 int nparams, flowclen16, flowclen;
509
510 nparams = FLOWC_WR_NPARAMS_MIN;
511 flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
512 flowclen16 = DIV_ROUND_UP(flowclen, 16);
513 flowclen = flowclen16 * 16;
514 /*
515 * Return the number of 16-byte credits used by the FlowC request.
516 * Pass back the nparams and actual FlowC length if requested.
517 */
518 if (nparamsp)
519 *nparamsp = nparams;
520 if (flowclenp)
521 *flowclenp = flowclen;
522
523 return flowclen16;
524}
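/*
 * Worked numbers, assuming the usual 8-byte fw_flowc_wr header and 8-byte
 * mnemval entries: offsetof(struct fw_flowc_wr, mnemval[9]) = 8 + 9 * 8 =
 * 80 bytes, so flowclen16 = DIV_ROUND_UP(80, 16) = 5 credits and the
 * rounded FlowC length stays 80 bytes.
 */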
525
526static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
7b36b6e0 527{
528 struct sk_buff *skb;
529 struct fw_flowc_wr *flowc;
64bfead8 530 int nparams, flowclen16, flowclen;
7b36b6e0 531
64bfead8 532 flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
24d3f95a 533 skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
7b36b6e0 534 flowc = (struct fw_flowc_wr *)skb->head;
535 flowc->op_to_nparams =
64bfead8 536 htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
7b36b6e0 537 flowc->flowid_len16 =
64bfead8 538 htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
7b36b6e0 539 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
e27d6169 540 flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
7b36b6e0 541 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
542 flowc->mnemval[1].val = htonl(csk->tx_chan);
543 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
544 flowc->mnemval[2].val = htonl(csk->tx_chan);
545 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
546 flowc->mnemval[3].val = htonl(csk->rss_qid);
547 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
548 flowc->mnemval[4].val = htonl(csk->snd_nxt);
549 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
550 flowc->mnemval[5].val = htonl(csk->rcv_nxt);
551 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
552 flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
553 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
554 flowc->mnemval[7].val = htonl(csk->advmss);
555 flowc->mnemval[8].mnemonic = 0;
556 flowc->mnemval[8].val = 0;
557 flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
558 flowc->mnemval[8].val = 16384;
559
7b36b6e0 560 set_queue(skb, CPL_PRIORITY_DATA, csk);
561
562 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
563 "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
564 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
565 csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
566 csk->advmss);
567
568 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
569
570 return flowclen16;
7b36b6e0 571}
572
573static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
574 int dlen, int len, u32 credits, int compl)
575{
576 struct fw_ofld_tx_data_wr *req;
577 unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
e2ac9628 578 unsigned int wr_ulp_mode = 0, val;
7857c62a 579 bool imm = is_ofld_imm(skb);
7b36b6e0 580
581 req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
582
7857c62a 583 if (imm) {
584 req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
585 FW_WR_COMPL_F |
586 FW_WR_IMMDLEN_V(dlen));
587 req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
588 FW_WR_LEN16_V(credits));
7b36b6e0 589 } else {
590 req->op_to_immdlen =
591 cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
592 FW_WR_COMPL_F |
593 FW_WR_IMMDLEN_V(0));
7b36b6e0 594 req->flowid_len16 =
595 cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
596 FW_WR_LEN16_V(credits));
7b36b6e0 597 }
598 if (submode)
599 wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
600 FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
601 val = skb_peek(&csk->write_queue) ? 0 : 1;
6aca4112 602 req->tunnel_to_proxy = htonl(wr_ulp_mode |
e2ac9628 603 FW_OFLD_TX_DATA_WR_SHOVE_V(val));
7b36b6e0 604 req->plen = htonl(len);
605 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
606 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
607}
608
609static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
610{
611 kfree_skb(skb);
612}
613
614static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
615{
616 int total_size = 0;
617 struct sk_buff *skb;
618
619 if (unlikely(csk->state < CTP_ESTABLISHED ||
620 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
621 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
622 1 << CXGBI_DBG_PDU_TX,
623 "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
624 csk, csk->state, csk->flags, csk->tid);
625 return 0;
626 }
627
628 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
629 int dlen = skb->len;
630 int len = skb->len;
631 unsigned int credits_needed;
64bfead8 632 int flowclen16 = 0;
7b36b6e0 633
634 skb_reset_transport_header(skb);
635 if (is_ofld_imm(skb))
84944d8c 636 credits_needed = DIV_ROUND_UP(dlen, 16);
7b36b6e0 637 else
638 credits_needed = DIV_ROUND_UP(
639 8 * calc_tx_flits_ofld(skb),
640 16);
641
642 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
643 credits_needed += DIV_ROUND_UP(
644 sizeof(struct fw_ofld_tx_data_wr),
7b36b6e0 645 16);
646
647 /*
 648 * Assumes the initial credit allocation is large enough to cover
 649 * the fw_flowc_wr plus the largest possible first payload.
650 */
651 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
652 flowclen16 = send_tx_flowc_wr(csk);
653 csk->wr_cred -= flowclen16;
654 csk->wr_una_cred += flowclen16;
655 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
656 }
657
7b36b6e0 658 if (csk->wr_cred < credits_needed) {
659 log_debug(1 << CXGBI_DBG_PDU_TX,
660 "csk 0x%p, skb %u/%u, wr %d < %u.\n",
661 csk, skb->len, skb->data_len,
662 credits_needed, csk->wr_cred);
663 break;
664 }
665 __skb_unlink(skb, &csk->write_queue);
666 set_queue(skb, CPL_PRIORITY_DATA, csk);
64bfead8 667 skb->csum = credits_needed + flowclen16;
7b36b6e0 668 csk->wr_cred -= credits_needed;
669 csk->wr_una_cred += credits_needed;
670 cxgbi_sock_enqueue_wr(csk, skb);
671
672 log_debug(1 << CXGBI_DBG_PDU_TX,
673 "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
674 csk, skb->len, skb->data_len, credits_needed,
675 csk->wr_cred, csk->wr_una_cred);
676
677 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
7b36b6e0 678 len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
679 make_tx_data_wr(csk, skb, dlen, len, credits_needed,
680 req_completion);
681 csk->snd_nxt += len;
682 cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
683 }
684 total_size += skb->truesize;
685 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
686
687 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
688 "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
689 csk, csk->state, csk->flags, csk->tid, skb, len);
690
691 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
692 }
693 return total_size;
694}
695
696static inline void free_atid(struct cxgbi_sock *csk)
697{
698 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
699
700 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
701 cxgb4_free_atid(lldi->tids, csk->atid);
702 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
703 cxgbi_sock_put(csk);
704 }
705}
706
707static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
708{
709 struct cxgbi_sock *csk;
710 struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
711 unsigned short tcp_opt = ntohs(req->tcp_opt);
712 unsigned int tid = GET_TID(req);
713 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
714 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
715 struct tid_info *t = lldi->tids;
716 u32 rcv_isn = be32_to_cpu(req->rcv_isn);
717
718 csk = lookup_atid(t, atid);
719 if (unlikely(!csk)) {
720 pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
721 goto rel_skb;
722 }
723
e27d6169 724 if (csk->atid != atid) {
725 pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
726 atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
727 goto rel_skb;
728 }
729
730 pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
731 (&csk->saddr), (&csk->daddr),
732 atid, tid, csk, csk->state, csk->flags, rcv_isn);
733
734 module_put(THIS_MODULE);
7b36b6e0 735
736 cxgbi_sock_get(csk);
737 csk->tid = tid;
738 cxgb4_insert_tid(lldi->tids, csk, tid);
739 cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
740
741 free_atid(csk);
742
743 spin_lock_bh(&csk->lock);
744 if (unlikely(csk->state != CTP_ACTIVE_OPEN))
745 pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
746 csk, csk->state, csk->flags, csk->tid);
747
748 if (csk->retry_timer.function) {
749 del_timer(&csk->retry_timer);
750 csk->retry_timer.function = NULL;
751 }
752
753 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
754 /*
755 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
756 * pass through opt0.
757 */
758 if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
759 csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
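	/*
	 * Illustration: the opt0 RCV_BUFSIZ field holds at most
	 * RCV_BUFSIZ_MASK (0x3FF) units of 1KB, i.e. ~1023KB. The default
	 * 256KB cxgb4i_rcv_win needs no adjustment here; a hypothetical 2MB
	 * window would pull rcv_wup back by 2MB - 1023KB.
	 */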
760
761 csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
762 if (GET_TCPOPT_TSTAMP(tcp_opt))
763 csk->advmss -= 12;
764 if (csk->advmss < 128)
765 csk->advmss = 128;
766
767 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
768 "csk 0x%p, mss_idx %u, advmss %u.\n",
769 csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);
770
771 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
772
773 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
774 send_abort_req(csk);
775 else {
776 if (skb_queue_len(&csk->write_queue))
777 push_tx_frames(csk, 0);
778 cxgbi_conn_tx_open(csk);
779 }
780 spin_unlock_bh(&csk->lock);
781
782rel_skb:
783 __kfree_skb(skb);
784}
785
786static int act_open_rpl_status_to_errno(int status)
787{
788 switch (status) {
789 case CPL_ERR_CONN_RESET:
790 return -ECONNREFUSED;
791 case CPL_ERR_ARP_MISS:
792 return -EHOSTUNREACH;
793 case CPL_ERR_CONN_TIMEDOUT:
794 return -ETIMEDOUT;
795 case CPL_ERR_TCAM_FULL:
796 return -ENOMEM;
797 case CPL_ERR_CONN_EXIST:
798 return -EADDRINUSE;
799 default:
800 return -EIO;
801 }
802}
803
804static void csk_act_open_retry_timer(unsigned long data)
805{
001586a7 806 struct sk_buff *skb = NULL;
7b36b6e0 807 struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
3bd3e8bf 808 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
809 void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
810 struct l2t_entry *);
811 int t4 = is_t4(lldi->adapter_type), size, size6;
7b36b6e0 812
813 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
814 "csk 0x%p,%u,0x%lx,%u.\n",
815 csk, csk->state, csk->flags, csk->tid);
816
817 cxgbi_sock_get(csk);
818 spin_lock_bh(&csk->lock);
819
820 if (t4) {
821 size = sizeof(struct cpl_act_open_req);
822 size6 = sizeof(struct cpl_act_open_req6);
823 } else {
824 size = sizeof(struct cpl_t5_act_open_req);
825 size6 = sizeof(struct cpl_t5_act_open_req6);
826 }
827
828 if (csk->csk_family == AF_INET) {
829 send_act_open_func = send_act_open_req;
830 skb = alloc_wr(size, 0, GFP_ATOMIC);
f42bb57c 831#if IS_ENABLED(CONFIG_IPV6)
832 } else {
833 send_act_open_func = send_act_open_req6;
834 skb = alloc_wr(size6, 0, GFP_ATOMIC);
f42bb57c 835#endif
836 }
837
7b36b6e0 838 if (!skb)
839 cxgbi_sock_fail_act_open(csk, -ENOMEM);
840 else {
841 skb->sk = (struct sock *)csk;
842 t4_set_arp_err_handler(skb, csk,
843 cxgbi_sock_act_open_req_arp_failure);
844 send_act_open_func(csk, skb, csk->l2t);
7b36b6e0 845 }
759a0cc5 846
7b36b6e0 847 spin_unlock_bh(&csk->lock);
848 cxgbi_sock_put(csk);
759a0cc5 849
7b36b6e0 850}
851
852static inline bool is_neg_adv(unsigned int status)
853{
854 return status == CPL_ERR_RTX_NEG_ADVICE ||
855 status == CPL_ERR_KEEPALV_NEG_ADVICE ||
856 status == CPL_ERR_PERSIST_NEG_ADVICE;
857}
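/*
 * These "negative advice" statuses report transient path trouble (an
 * excessive retransmit, persist or keepalive episode, typically while a
 * route is changing) rather than a dead connection, so the handlers below
 * drop the notification instead of tearing the connection down.
 */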
858
7b36b6e0 859static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
860{
861 struct cxgbi_sock *csk;
862 struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
863 unsigned int tid = GET_TID(rpl);
864 unsigned int atid =
865 GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
866 unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
867 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
868 struct tid_info *t = lldi->tids;
869
870 csk = lookup_atid(t, atid);
871 if (unlikely(!csk)) {
872 pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
873 goto rel_skb;
874 }
875
876 pr_info_ipaddr("tid %u/%u, status %u.\n"
877 "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
878 atid, tid, status, csk, csk->state, csk->flags);
7b36b6e0 879
928567ad 880 if (is_neg_adv(status))
881 goto rel_skb;
882
883 module_put(THIS_MODULE);
884
7b36b6e0 885 if (status && status != CPL_ERR_TCAM_FULL &&
886 status != CPL_ERR_CONN_EXIST &&
887 status != CPL_ERR_ARP_MISS)
888 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));
889
890 cxgbi_sock_get(csk);
891 spin_lock_bh(&csk->lock);
892
893 if (status == CPL_ERR_CONN_EXIST &&
894 csk->retry_timer.function != csk_act_open_retry_timer) {
895 csk->retry_timer.function = csk_act_open_retry_timer;
896 mod_timer(&csk->retry_timer, jiffies + HZ / 2);
897 } else
898 cxgbi_sock_fail_act_open(csk,
899 act_open_rpl_status_to_errno(status));
900
901 spin_unlock_bh(&csk->lock);
902 cxgbi_sock_put(csk);
903rel_skb:
904 __kfree_skb(skb);
905}
906
907static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
908{
909 struct cxgbi_sock *csk;
910 struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
911 unsigned int tid = GET_TID(req);
912 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
913 struct tid_info *t = lldi->tids;
914
915 csk = lookup_tid(t, tid);
916 if (unlikely(!csk)) {
917 pr_err("can't find connection for tid %u.\n", tid);
918 goto rel_skb;
919 }
920 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
921 (&csk->saddr), (&csk->daddr),
922 csk, csk->state, csk->flags, csk->tid);
7b36b6e0 923 cxgbi_sock_rcv_peer_close(csk);
924rel_skb:
925 __kfree_skb(skb);
926}
927
928static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
929{
930 struct cxgbi_sock *csk;
931 struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
932 unsigned int tid = GET_TID(rpl);
933 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
934 struct tid_info *t = lldi->tids;
935
936 csk = lookup_tid(t, tid);
937 if (unlikely(!csk)) {
938 pr_err("can't find connection for tid %u.\n", tid);
939 goto rel_skb;
940 }
941 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
942 (&csk->saddr), (&csk->daddr),
943 csk, csk->state, csk->flags, csk->tid);
7b36b6e0 944 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
945rel_skb:
946 __kfree_skb(skb);
947}
948
949static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
950 int *need_rst)
951{
952 switch (abort_reason) {
953 case CPL_ERR_BAD_SYN: /* fall through */
954 case CPL_ERR_CONN_RESET:
955 return csk->state > CTP_ESTABLISHED ?
956 -EPIPE : -ECONNRESET;
957 case CPL_ERR_XMIT_TIMEDOUT:
958 case CPL_ERR_PERSIST_TIMEDOUT:
959 case CPL_ERR_FINWAIT2_TIMEDOUT:
960 case CPL_ERR_KEEPALIVE_TIMEDOUT:
961 return -ETIMEDOUT;
962 default:
963 return -EIO;
964 }
965}
966
967static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
968{
969 struct cxgbi_sock *csk;
970 struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
971 unsigned int tid = GET_TID(req);
972 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
973 struct tid_info *t = lldi->tids;
974 int rst_status = CPL_ABORT_NO_RST;
975
976 csk = lookup_tid(t, tid);
977 if (unlikely(!csk)) {
978 pr_err("can't find connection for tid %u.\n", tid);
979 goto rel_skb;
980 }
981
982 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
983 (&csk->saddr), (&csk->daddr),
984 csk, csk->state, csk->flags, csk->tid, req->status);
7b36b6e0 985
928567ad 986 if (is_neg_adv(req->status))
7b36b6e0 987 goto rel_skb;
988
989 cxgbi_sock_get(csk);
990 spin_lock_bh(&csk->lock);
991
992 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
993
994 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
995 send_tx_flowc_wr(csk);
996 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
7b36b6e0 997 }
998
999 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
1000 cxgbi_sock_set_state(csk, CTP_ABORTING);
1001
7b36b6e0 1002 send_abort_rpl(csk, rst_status);
1003
1004 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
1005 csk->err = abort_status_to_errno(csk, req->status, &rst_status);
1006 cxgbi_sock_closed(csk);
1007 }
7b07bf24 1008
7b36b6e0 1009 spin_unlock_bh(&csk->lock);
1010 cxgbi_sock_put(csk);
1011rel_skb:
1012 __kfree_skb(skb);
1013}
1014
1015static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
1016{
1017 struct cxgbi_sock *csk;
1018 struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
1019 unsigned int tid = GET_TID(rpl);
1020 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1021 struct tid_info *t = lldi->tids;
1022
1023 csk = lookup_tid(t, tid);
1024 if (!csk)
1025 goto rel_skb;
1026
 1027 /* csk is guaranteed non-NULL here; the lookup above bailed out on NULL */
 1028 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
 1029 (&csk->saddr), (&csk->daddr), csk,
 1030 csk->state, csk->flags, csk->tid, rpl->status);
7b36b6e0 1031
1032 if (rpl->status == CPL_ERR_ABORT_FAILED)
1033 goto rel_skb;
1034
1035 cxgbi_sock_rcv_abort_rpl(csk);
1036rel_skb:
1037 __kfree_skb(skb);
1038}
1039
1040static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
1041{
1042 struct cxgbi_sock *csk;
1043 struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
1044 unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
1045 unsigned int tid = GET_TID(cpl);
1046 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1047 struct tid_info *t = lldi->tids;
7b36b6e0 1048
1049 csk = lookup_tid(t, tid);
1050 if (unlikely(!csk)) {
1051 pr_err("can't find conn. for tid %u.\n", tid);
1052 goto rel_skb;
1053 }
1054
1055 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1056 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1057 csk, csk->state, csk->flags, csk->tid, skb, skb->len,
1058 pdu_len_ddp);
1059
1060 spin_lock_bh(&csk->lock);
1061
1062 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1063 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1064 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1065 csk, csk->state, csk->flags, csk->tid);
1066 if (csk->state != CTP_ABORTING)
1067 goto abort_conn;
1068 else
1069 goto discard;
1070 }
1071
1072 cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
e27d6169 1073 cxgbi_skcb_flags(skb) = 0;
1074
7b36b6e0 1075 skb_reset_transport_header(skb);
1076 __skb_pull(skb, sizeof(*cpl));
1077 __pskb_trim(skb, ntohs(cpl->len));
1078
1079 if (!csk->skb_ulp_lhdr) {
1080 unsigned char *bhs;
3bd3e8bf 1081 unsigned int hlen, dlen, plen;
7b36b6e0 1082
1083 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1084 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
1085 csk, csk->state, csk->flags, csk->tid, skb);
1086 csk->skb_ulp_lhdr = skb;
e27d6169 1087 cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
7b36b6e0 1088
e27d6169 1089 if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
7b36b6e0 1090 pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
e27d6169 1091 csk->tid, cxgbi_skcb_tcp_seq(skb),
7b36b6e0 1092 csk->rcv_nxt);
1093 goto abort_conn;
1094 }
1095
e27d6169 1096 bhs = skb->data;
7b36b6e0 1097 hlen = ntohs(cpl->len);
1098 dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
1099
1100 plen = ISCSI_PDU_LEN(pdu_len_ddp);
1101 if (is_t4(lldi->adapter_type))
1102 plen -= 40;
1103
1104 if ((hlen + dlen) != plen) {
7b36b6e0 1105 pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
1106 "mismatch %u != %u + %u, seq 0x%x.\n",
1107 csk->tid, plen, hlen, dlen,
1108 cxgbi_skcb_tcp_seq(skb));
7b36b6e0 1109 goto abort_conn;
1110 }
1111
1112 cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
1113 if (dlen)
1114 cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
1115 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
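	/*
	 * Example: hlen = 48 (BHS + AHS) and dlen = 101 give a padded
	 * rx_pdulen of (48 + 101 + 3) & ~0x3 = 152 bytes, plus 4 more for
	 * the data digest when enabled; rcv_nxt advances by that total.
	 */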
1116
1117 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1118 "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
1119 csk, skb, *bhs, hlen, dlen,
1120 ntohl(*((unsigned int *)(bhs + 16))),
1121 ntohl(*((unsigned int *)(bhs + 24))));
1122
1123 } else {
e27d6169 1124 struct sk_buff *lskb = csk->skb_ulp_lhdr;
7b36b6e0 1125
e27d6169 1126 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
7b36b6e0 1127 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1128 "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1129 csk, csk->state, csk->flags, skb, lskb);
1130 }
1131
1132 __skb_queue_tail(&csk->receive_queue, skb);
1133 spin_unlock_bh(&csk->lock);
1134 return;
1135
1136abort_conn:
1137 send_abort_req(csk);
1138discard:
1139 spin_unlock_bh(&csk->lock);
1140rel_skb:
1141 __kfree_skb(skb);
1142}
1143
1144static void do_rx_data_ddp(struct cxgbi_device *cdev,
1145 struct sk_buff *skb)
1146{
1147 struct cxgbi_sock *csk;
1148 struct sk_buff *lskb;
1149 struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
1150 unsigned int tid = GET_TID(rpl);
1151 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1152 struct tid_info *t = lldi->tids;
1153 unsigned int status = ntohl(rpl->ddpvld);
1154
1155 csk = lookup_tid(t, tid);
1156 if (unlikely(!csk)) {
1157 pr_err("can't find connection for tid %u.\n", tid);
1158 goto rel_skb;
1159 }
1160
1161 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1162 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
1163 csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
1164
1165 spin_lock_bh(&csk->lock);
1166
1167 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1168 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1169 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1170 csk, csk->state, csk->flags, csk->tid);
1171 if (csk->state != CTP_ABORTING)
1172 goto abort_conn;
1173 else
1174 goto discard;
1175 }
1176
1177 if (!csk->skb_ulp_lhdr) {
1178 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
1179 goto abort_conn;
1180 }
1181
1182 lskb = csk->skb_ulp_lhdr;
1183 csk->skb_ulp_lhdr = NULL;
1184
7b36b6e0 1185 cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);
1186
1187 if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
1188 pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
1189 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
1190
1191 if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
e27d6169 1192 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
1193 csk, lskb, status, cxgbi_skcb_flags(lskb));
7b36b6e0 1194 cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
1195 }
1196 if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
e27d6169 1197 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1198 csk, lskb, status, cxgbi_skcb_flags(lskb));
7b36b6e0 1199 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
1200 }
1201 if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
1202 log_debug(1 << CXGBI_DBG_PDU_RX,
1203 "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1204 csk, lskb, status);
1205 cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
1206 }
1207 if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
1208 !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
1209 log_debug(1 << CXGBI_DBG_PDU_RX,
1210 "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1211 csk, lskb, status);
1212 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
1213 }
1214 log_debug(1 << CXGBI_DBG_PDU_RX,
1215 "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
1216 csk, lskb, cxgbi_skcb_flags(lskb));
1217
e27d6169 1218 cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
7b36b6e0 1219 cxgbi_conn_pdu_ready(csk);
1220 spin_unlock_bh(&csk->lock);
1221 goto rel_skb;
1222
1223abort_conn:
1224 send_abort_req(csk);
1225discard:
1226 spin_unlock_bh(&csk->lock);
1227rel_skb:
1228 __kfree_skb(skb);
1229}
1230
1231static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1232{
1233 struct cxgbi_sock *csk;
1234 struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
1235 unsigned int tid = GET_TID(rpl);
1236 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1237 struct tid_info *t = lldi->tids;
1238
1239 csk = lookup_tid(t, tid);
1240 if (unlikely(!csk))
1241 pr_err("can't find connection for tid %u.\n", tid);
1242 else {
1243 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1244 "csk 0x%p,%u,0x%lx,%u.\n",
1245 csk, csk->state, csk->flags, csk->tid);
1246 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
1247 rpl->seq_vld);
1248 }
1249 __kfree_skb(skb);
1250}
1251
1252static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1253{
1254 struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1255 unsigned int tid = GET_TID(rpl);
1256 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1257 struct tid_info *t = lldi->tids;
1258 struct cxgbi_sock *csk;
1259
1260 csk = lookup_tid(t, tid);
 1261 if (!csk) {
 1262 pr_err("can't find conn. for tid %u.\n", tid);
      __kfree_skb(skb);
      return; /* csk is dereferenced below; don't continue with a NULL csk */
      }
 1263
1264 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1265 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1266 csk, csk->state, csk->flags, csk->tid, rpl->status);
1267
1268 if (rpl->status != CPL_ERR_NONE)
1269 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1270 csk, tid, rpl->status);
1271
1272 __kfree_skb(skb);
1273}
1274
1275static int alloc_cpls(struct cxgbi_sock *csk)
1276{
24d3f95a 1277 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
1278 0, GFP_KERNEL);
7b36b6e0 1279 if (!csk->cpl_close)
1280 return -ENOMEM;
1281
24d3f95a 1282 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
1283 0, GFP_KERNEL);
7b36b6e0 1284 if (!csk->cpl_abort_req)
1285 goto free_cpls;
1286
24d3f95a 1287 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
1288 0, GFP_KERNEL);
7b36b6e0 1289 if (!csk->cpl_abort_rpl)
1290 goto free_cpls;
1291 return 0;
1292
1293free_cpls:
1294 cxgbi_sock_free_cpl_skbs(csk);
1295 return -ENOMEM;
1296}
1297
1298static inline void l2t_put(struct cxgbi_sock *csk)
1299{
1300 if (csk->l2t) {
1301 cxgb4_l2t_release(csk->l2t);
1302 csk->l2t = NULL;
1303 cxgbi_sock_put(csk);
1304 }
1305}
1306
1307static void release_offload_resources(struct cxgbi_sock *csk)
1308{
1309 struct cxgb4_lld_info *lldi;
1310
1311 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1312 "csk 0x%p,%u,0x%lx,%u.\n",
1313 csk, csk->state, csk->flags, csk->tid);
1314
1315 cxgbi_sock_free_cpl_skbs(csk);
1316 if (csk->wr_cred != csk->wr_max_cred) {
1317 cxgbi_sock_purge_wr_queue(csk);
1318 cxgbi_sock_reset_wr_list(csk);
1319 }
1320
1321 l2t_put(csk);
1322 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
1323 free_atid(csk);
1324 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
1325 lldi = cxgbi_cdev_priv(csk->cdev);
1326 cxgb4_remove_tid(lldi->tids, 0, csk->tid);
1327 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
1328 cxgbi_sock_put(csk);
1329 }
1330 csk->dst = NULL;
1331 csk->cdev = NULL;
1332}
1333
1334static int init_act_open(struct cxgbi_sock *csk)
1335{
1336 struct cxgbi_device *cdev = csk->cdev;
1337 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1338 struct net_device *ndev = cdev->ports[csk->port_id];
7b36b6e0 1339 struct sk_buff *skb = NULL;
1340 struct neighbour *n = NULL;
1341 void *daddr;
7b36b6e0 1342 unsigned int step;
1343 unsigned int size, size6;
1344 int t4 = is_t4(lldi->adapter_type);
7b36b6e0 1345
1346 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1347 "csk 0x%p,%u,0x%lx,%u.\n",
1348 csk, csk->state, csk->flags, csk->tid);
1349
1350 if (csk->csk_family == AF_INET)
1351 daddr = &csk->daddr.sin_addr.s_addr;
1352#if IS_ENABLED(CONFIG_IPV6)
1353 else if (csk->csk_family == AF_INET6)
759a0cc5 1354 daddr = &csk->daddr6.sin6_addr;
1355#endif
1356 else {
1357 pr_err("address family 0x%x not supported\n", csk->csk_family);
1358 goto rel_resource;
1359 }
1360
1361 n = dst_neigh_lookup(csk->dst, daddr);
1362
1363 if (!n) {
1364 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1365 goto rel_resource;
1366 }
1367
7b36b6e0 1368 csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
1369 if (csk->atid < 0) {
1370 pr_err("%s, NO atid available.\n", ndev->name);
1371 return -EINVAL;
1372 }
1373 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1374 cxgbi_sock_get(csk);
1375
51e059bd 1376 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
7b36b6e0 1377 if (!csk->l2t) {
1378 pr_err("%s, cannot alloc l2t.\n", ndev->name);
1379 goto rel_resource;
1380 }
1381 cxgbi_sock_get(csk);
1382
1383 if (t4) {
1384 size = sizeof(struct cpl_act_open_req);
1385 size6 = sizeof(struct cpl_act_open_req6);
1386 } else {
1387 size = sizeof(struct cpl_t5_act_open_req);
1388 size6 = sizeof(struct cpl_t5_act_open_req6);
1389 }
1390
1391 if (csk->csk_family == AF_INET)
1392 skb = alloc_wr(size, 0, GFP_NOIO);
f42bb57c 1393#if IS_ENABLED(CONFIG_IPV6)
1394 else
1395 skb = alloc_wr(size6, 0, GFP_NOIO);
f42bb57c 1396#endif
759a0cc5 1397
7b36b6e0 1398 if (!skb)
1399 goto rel_resource;
1400 skb->sk = (struct sock *)csk;
1401 t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);
1402
1403 if (!csk->mtu)
1404 csk->mtu = dst_mtu(csk->dst);
1405 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
1406 csk->tx_chan = cxgb4_port_chan(ndev);
1407 /* SMT two entries per row */
1408 csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
1409 step = lldi->ntxq / lldi->nchan;
1410 csk->txq_idx = cxgb4_port_idx(ndev) * step;
1411 step = lldi->nrxq / lldi->nchan;
1412 csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
1413 csk->wr_cred = lldi->wr_cred -
1414 DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
1415 csk->wr_max_cred = csk->wr_cred;
7b36b6e0 1416 csk->wr_una_cred = 0;
1417 cxgbi_sock_reset_wr_list(csk);
1418 csk->err = 0;
7b36b6e0 1419
1420 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
1421 (&csk->saddr), (&csk->daddr), csk, csk->state,
1422 csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
1423 csk->mtu, csk->mss_idx, csk->smac_idx);
1424
 1425 /* must wait for either an act_open_rpl or an act_open_establish */
1426 try_module_get(THIS_MODULE);
7b36b6e0 1427 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1428 if (csk->csk_family == AF_INET)
1429 send_act_open_req(csk, skb, csk->l2t);
f42bb57c 1430#if IS_ENABLED(CONFIG_IPV6)
1431 else
1432 send_act_open_req6(csk, skb, csk->l2t);
f42bb57c 1433#endif
c4737377 1434 neigh_release(n);
759a0cc5 1435
7b36b6e0 1436 return 0;
1437
1438rel_resource:
1439 if (n)
1440 neigh_release(n);
7b36b6e0 1441 if (skb)
1442 __kfree_skb(skb);
1443 return -EINVAL;
1444}
1445
1446cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
1447 [CPL_ACT_ESTABLISH] = do_act_establish,
1448 [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
1449 [CPL_PEER_CLOSE] = do_peer_close,
1450 [CPL_ABORT_REQ_RSS] = do_abort_req_rss,
1451 [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
1452 [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
1453 [CPL_FW4_ACK] = do_fw4_ack,
1454 [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
3bd3e8bf 1455 [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
7b36b6e0 1456 [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
1457 [CPL_RX_DATA_DDP] = do_rx_data_ddp,
3bd3e8bf 1458 [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
7b36b6e0 1459};
1460
1461int cxgb4i_ofld_init(struct cxgbi_device *cdev)
1462{
1463 int rc;
1464
1465 if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
1466 cxgb4i_max_connect = CXGB4I_MAX_CONN;
1467
1468 rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
1469 cxgb4i_max_connect);
1470 if (rc < 0)
1471 return rc;
1472
1473 cdev->csk_release_offload_resources = release_offload_resources;
1474 cdev->csk_push_tx_frames = push_tx_frames;
1475 cdev->csk_send_abort_req = send_abort_req;
1476 cdev->csk_send_close_req = send_close_req;
1477 cdev->csk_send_rx_credits = send_rx_credits;
1478 cdev->csk_alloc_cpls = alloc_cpls;
1479 cdev->csk_init_act_open = init_act_open;
1480
1481 pr_info("cdev 0x%p, offload up, added.\n", cdev);
1482 return 0;
1483}
1484
1485/*
 1486 * functions to program the pagepods in h/w
1487 */
e27d6169 1488#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */
1489static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
1490 struct ulp_mem_io *req,
e27d6169 1491 unsigned int wr_len, unsigned int dlen,
1492 unsigned int pm_addr)
7b36b6e0 1493{
e27d6169 1494 struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
7b36b6e0 1495
1496 INIT_ULPTX_WR(req, wr_len, 0, 0);
3bd3e8bf 1497 if (is_t4(lldi->adapter_type))
1498 req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
1499 (ULP_MEMIO_ORDER_F));
3bd3e8bf 1500 else
1501 req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
1502 (T5_ULP_MEMIO_IMM_F));
1503 req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
1504 req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
7b36b6e0 1505 req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
e27d6169 1506
d7990b0c 1507 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
e27d6169 1508 idata->len = htonl(dlen);
7b36b6e0 1509}
1510
e27d6169 1511static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
7b36b6e0 1512 struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
1513 unsigned int npods,
1514 struct cxgbi_gather_list *gl,
1515 unsigned int gl_pidx)
1516{
1517 struct cxgbi_ddp_info *ddp = cdev->ddp;
3bd3e8bf 1518 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
7b36b6e0 1519 struct sk_buff *skb;
1520 struct ulp_mem_io *req;
e27d6169 1521 struct ulptx_idata *idata;
7b36b6e0 1522 struct cxgbi_pagepod *ppod;
e27d6169 1523 unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
1524 unsigned int dlen = PPOD_SIZE * npods;
1525 unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
1526 sizeof(struct ulptx_idata) + dlen, 16);
7b36b6e0 1527 unsigned int i;
1528
e27d6169 1529 skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
7b36b6e0 1530 if (!skb) {
1531 pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
1532 cdev, idx, npods);
1533 return -ENOMEM;
1534 }
1535 req = (struct ulp_mem_io *)skb->head;
1536 set_queue(skb, CPL_PRIORITY_CONTROL, NULL);
1537
3bd3e8bf 1538 ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
e27d6169 1539 idata = (struct ulptx_idata *)(req + 1);
1540 ppod = (struct cxgbi_pagepod *)(idata + 1);
7b36b6e0 1541
1542 for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
1543 if (!hdr && !gl)
1544 cxgbi_ddp_ppod_clear(ppod);
1545 else
1546 cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
1547 }
1548
1549 cxgb4_ofld_send(cdev->ports[port_id], skb);
1550 return 0;
1551}
1552
1553static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
1554 unsigned int idx, unsigned int npods,
1555 struct cxgbi_gather_list *gl)
1556{
1557 unsigned int i, cnt;
1558 int err = 0;
1559
1560 for (i = 0; i < npods; i += cnt, idx += cnt) {
1561 cnt = npods - i;
e27d6169 1562 if (cnt > ULPMEM_IDATA_MAX_NPPODS)
1563 cnt = ULPMEM_IDATA_MAX_NPPODS;
1564 err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
7b36b6e0 1565 idx, cnt, gl, 4 * i);
1566 if (err < 0)
1567 break;
1568 }
1569 return err;
1570}
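/*
 * E.g. writing npods = 10 pagepods issues ULP_TX_MEM_WRITE chunks of
 * 4 + 4 + 2, since at most ULPMEM_IDATA_MAX_NPPODS (4) pods fit into one
 * immediate-data work request.
 */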
1571
1572static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
1573 unsigned int idx, unsigned int npods)
1574{
1575 unsigned int i, cnt;
1576 int err;
1577
1578 for (i = 0; i < npods; i += cnt, idx += cnt) {
1579 cnt = npods - i;
e27d6169 1580 if (cnt > ULPMEM_IDATA_MAX_NPPODS)
1581 cnt = ULPMEM_IDATA_MAX_NPPODS;
1582 err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
7b36b6e0 1583 idx, cnt, NULL, 0);
1584 if (err < 0)
1585 break;
1586 }
1587}
1588
1589static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
1590 int pg_idx, bool reply)
1591{
1592 struct sk_buff *skb;
1593 struct cpl_set_tcb_field *req;
7b36b6e0 1594
e27d6169 1595 if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
7b36b6e0 1596 return 0;
1597
24d3f95a 1598 skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
7b36b6e0 1599 if (!skb)
1600 return -ENOMEM;
1601
e27d6169 1602 /* set up ulp page size */
7b36b6e0 1603 req = (struct cpl_set_tcb_field *)skb->head;
1604 INIT_TP_WR(req, csk->tid);
1605 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1606 req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
e27d6169 1607 req->word_cookie = htons(0);
1608 req->mask = cpu_to_be64(0x3 << 8);
1609 req->val = cpu_to_be64(pg_idx << 8);
7b36b6e0 1610 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
1611
1612 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1613 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
1614
1615 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
1616 return 0;
1617}
1618
1619static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1620 int hcrc, int dcrc, int reply)
1621{
1622 struct sk_buff *skb;
1623 struct cpl_set_tcb_field *req;
7b36b6e0 1624
e27d6169 1625 if (!hcrc && !dcrc)
1626 return 0;
7b36b6e0 1627
24d3f95a 1628 skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
7b36b6e0 1629 if (!skb)
1630 return -ENOMEM;
1631
1632 csk->hcrc_len = (hcrc ? 4 : 0);
1633 csk->dcrc_len = (dcrc ? 4 : 0);
e27d6169 1634 /* set up ulp submode */
7b36b6e0 1635 req = (struct cpl_set_tcb_field *)skb->head;
1636 INIT_TP_WR(req, tid);
1637 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1638 req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
e27d6169 1639 req->word_cookie = htons(0);
1640 req->mask = cpu_to_be64(0x3 << 4);
1641 req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1642 (dcrc ? ULP_CRC_DATA : 0)) << 4);
7b36b6e0 1643 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
1644
1645 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1646 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
1647
1648 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
1649 return 0;
1650}
1651
1652static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
1653{
1654 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1655 struct cxgbi_ddp_info *ddp = cdev->ddp;
1656 unsigned int tagmask, pgsz_factor[4];
1657 int err;
1658
1659 if (ddp) {
1660 kref_get(&ddp->refcnt);
1661 pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
1662 cdev, cdev->ddp);
1663 return -EALREADY;
1664 }
1665
1666 err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
1667 lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
1668 lldi->iscsi_iolen, lldi->iscsi_iolen);
1669 if (err < 0)
1670 return err;
1671
1672 ddp = cdev->ddp;
1673
1674 tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
1675 cxgbi_ddp_page_size_factor(pgsz_factor);
1676 cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
1677
7b36b6e0 1678 cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
1679 cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
1680 cdev->csk_ddp_set = ddp_set_map;
1681 cdev->csk_ddp_clear = ddp_clear_map;
1682
1683 pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
1684 cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
1685 cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
1686 pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
1687 " %u/%u.\n",
1688 cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
1689 ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
1690 ddp->max_rxsz, lldi->iscsi_iolen);
1691 pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
1692 cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
1693 ddp->max_rxsz);
1694 return 0;
1695}
1696
1697static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
1698{
1699 struct cxgbi_device *cdev;
1700 struct port_info *pi;
1701 int i, rc;
1702
1703 cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
1704 if (!cdev) {
1705 pr_info("t4 device 0x%p, register failed.\n", lldi);
1706 return NULL;
1707 }
1708 pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
1709 cdev, lldi->adapter_type, lldi->nports,
1710 lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
1711 lldi->nrxq, lldi->wr_cred);
1712 for (i = 0; i < lldi->nrxq; i++)
1713 log_debug(1 << CXGBI_DBG_DEV,
1714 "t4 0x%p, rxq id #%d: %u.\n",
1715 cdev, i, lldi->rxq_ids[i]);
1716
1717 memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
1718 cdev->flags = CXGBI_FLAG_DEV_T4;
1719 cdev->pdev = lldi->pdev;
1720 cdev->ports = lldi->ports;
1721 cdev->nports = lldi->nports;
1722 cdev->mtus = lldi->mtus;
1723 cdev->nmtus = NMTUS;
1724 cdev->snd_win = cxgb4i_snd_win;
1725 cdev->rcv_win = cxgb4i_rcv_win;
1726 cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
1727 cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
1728 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
1729 cdev->itp = &cxgb4i_iscsi_transport;
1730
1731 cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
1732 << FW_VIID_PFN_S;
e27d6169 1733 pr_info("cdev 0x%p,%s, pfvf %u.\n",
1734 cdev, lldi->ports[0]->name, cdev->pfvf);
1735
7b36b6e0 1736 rc = cxgb4i_ddp_init(cdev);
1737 if (rc) {
1738 pr_info("t4 0x%p ddp init failed.\n", cdev);
1739 goto err_out;
1740 }
1741 rc = cxgb4i_ofld_init(cdev);
1742 if (rc) {
1743 pr_info("t4 0x%p ofld init failed.\n", cdev);
1744 goto err_out;
1745 }
1746
1747 rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
1748 &cxgb4i_host_template, cxgb4i_stt);
1749 if (rc)
1750 goto err_out;
1751
1752 for (i = 0; i < cdev->nports; i++) {
1753 pi = netdev_priv(lldi->ports[i]);
1754 cdev->hbas[i]->port_id = pi->port_id;
1755 }
1756 return cdev;
1757
1758err_out:
1759 cxgbi_device_unregister(cdev);
1760 return ERR_PTR(-ENOMEM);
1761}
1762
1763#define RX_PULL_LEN 128
1764static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
1765 const struct pkt_gl *pgl)
1766{
1767 const struct cpl_act_establish *rpl;
1768 struct sk_buff *skb;
1769 unsigned int opc;
1770 struct cxgbi_device *cdev = handle;
1771
1772 if (pgl == NULL) {
1773 unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
1774
24d3f95a 1775 skb = alloc_wr(len, 0, GFP_ATOMIC);
7b36b6e0 1776 if (!skb)
1777 goto nomem;
1778 skb_copy_to_linear_data(skb, &rsp[1], len);
1779 } else {
1780 if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
1781 pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
1782 pgl->va, be64_to_cpu(*rsp),
1783 be64_to_cpu(*(u64 *)pgl->va),
1784 pgl->tot_len);
1785 return 0;
1786 }
1787 skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
1788 if (unlikely(!skb))
1789 goto nomem;
1790 }
1791
1792 rpl = (struct cpl_act_establish *)skb->data;
1793 opc = rpl->ot.opcode;
1794 log_debug(1 << CXGBI_DBG_TOE,
1795 "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
1796 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
1797 if (cxgb4i_cplhandlers[opc])
1798 cxgb4i_cplhandlers[opc](cdev, skb);
1799 else {
1800 pr_err("No handler for opcode 0x%x.\n", opc);
1801 __kfree_skb(skb);
1802 }
1803 return 0;
1804nomem:
1805 log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
1806 return 1;
1807}
1808
1809static int t4_uld_state_change(void *handle, enum cxgb4_state state)
1810{
1811 struct cxgbi_device *cdev = handle;
1812
1813 switch (state) {
1814 case CXGB4_STATE_UP:
1815 pr_info("cdev 0x%p, UP.\n", cdev);
7b36b6e0 1816 break;
1817 case CXGB4_STATE_START_RECOVERY:
1818 pr_info("cdev 0x%p, RECOVERY.\n", cdev);
1819 /* close all connections */
1820 break;
1821 case CXGB4_STATE_DOWN:
1822 pr_info("cdev 0x%p, DOWN.\n", cdev);
1823 break;
1824 case CXGB4_STATE_DETACH:
1825 pr_info("cdev 0x%p, DETACH.\n", cdev);
c3b331a3 1826 cxgbi_device_unregister(cdev);
7b36b6e0 1827 break;
1828 default:
1829 pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
1830 break;
1831 }
1832 return 0;
1833}
1834
1835static int __init cxgb4i_init_module(void)
1836{
1837 int rc;
1838
1839 printk(KERN_INFO "%s", version);
1840
1841 rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
1842 if (rc < 0)
1843 return rc;
1844 cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
759a0cc5 1845
7b36b6e0 1846 return 0;
1847}
1848
1849static void __exit cxgb4i_exit_module(void)
1850{
1851 cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
1852 cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
1853 cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
1854}
1855
1856module_init(cxgb4i_init_module);
1857module_exit(cxgb4i_exit_module);