cxgb4i: fix tx immediate data credit check
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#define	DRV_MODULE_NAME		"cxgb4i"
#define	DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
#define	DRV_MODULE_VERSION	"0.9.4"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

static int cxgb4i_rcv_win = 256 * 1024;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

static int cxgb4i_snd_win = 128 * 1024;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
	.track_queue_depth = 1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	128

static inline void set_queue(struct sk_buff *skb, unsigned int queue,
			     const struct cxgbi_sock *csk)
{
	skb->queue_mapping = queue;
}

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	return len <= MAX_IMM_TX_PKT_LEN;
}
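
/*
 * For example (assuming the 16-byte fw_ofld_tx_data_wr header used by
 * this driver): a 48-byte login PDU still flagged SKCBF_TX_NEED_HDR
 * counts as 48 + 16 = 64 bytes, within MAX_IMM_TX_PKT_LEN (128), so it
 * goes out as immediate data; a 4KB data PDU exceeds the limit and is
 * sent via an SGL instead.
 */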

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		(RX_FC_DISABLE_F) |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		opt2 |= 1 << 31;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RX_FC_DISABLE_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (t4) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
								    8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
								    8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else {
		struct cpl_t5_act_open_req6 *req =
				(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif

static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		  csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
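
/*
 * Example: the first entry takes 2 flits (the 16-byte ulptx_sgl header
 * holding cmd_nsge, len0 and addr0); each following entry adds an
 * 8-byte address plus a 4-byte length, i.e. 1.5 flits, rounded up when
 * the count of extra entries is odd. Hence sgl_len(1) = 2,
 * sgl_len(2) = 4, sgl_len(3) = 5 and sgl_len(4) = 7.
 */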

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
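
/*
 * A flit is 8 bytes. Immediate packets just need their total length in
 * flits; SGL packets need the header flits up to the transport header
 * plus the SGL itself, counting one extra SGL entry when linear data
 * extends past the transport header.
 */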

static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int flowclen, i;

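	/*
	 * 80 bytes: the 8-byte FLOWC WR header plus 9 8-byte
	 * mnemonic/value slots - 8 parameters (PFNVFN, CH, PORT, IQID,
	 * SNDNXT, RCVNXT, SNDBUF, MSS) and, presumably for 16-byte WR
	 * alignment, one zeroed pad slot.
	 */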
	flowclen = 80;
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(8));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(DIV_ROUND_UP(72, 16)) |
		      FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}
	set_queue(skb, CPL_PRIORITY_DATA, csk);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		  csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		  csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
		  csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0, val;

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

	if (is_ofld_imm(skb)) {
		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					   FW_WR_COMPL_F |
					   FW_WR_IMMDLEN_V(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
					  FW_WR_LEN16_V(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
				    FW_WR_COMPL_F |
				    FW_WR_IMMDLEN_V(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
				    FW_WR_LEN16_V(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
			      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
	val = skb_peek(&csk->write_queue) ? 0 : 1;
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		     csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen, 16);
		else
			credits_needed = DIV_ROUND_UP(
						8 * calc_tx_flits_ofld(skb),
						16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed += DIV_ROUND_UP(
					sizeof(struct fw_ofld_tx_data_wr),
					16);
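
		/*
		 * Tx credits are in 16-byte units: immediate data is
		 * charged by payload length, SGL sends by the flits they
		 * occupy, and the fw_ofld_tx_data_wr header - not yet
		 * pushed onto the skb, so not part of skb->len - is
		 * charged separately for PDUs still needing it.
		 */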

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_queue(skb, CPL_PRIORITY_DATA, csk);
		skb->csum = credits_needed;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
				send_tx_flowc_wr(csk);
				skb->csum += 5;
				csk->wr_cred -= 5;
				csk->wr_una_cred += 5;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
		       atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(THIS_MODULE);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

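	/*
	 * Advertised MSS: the negotiated MTU table entry minus 40 bytes
	 * of IPv4 + TCP headers, minus 12 more if the TCP timestamp
	 * option was negotiated, with a floor of 128 bytes.
	 */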
	csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
	if (GET_TCPOPT_TSTAMP(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

static void csk_act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);

}

static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (status == CPL_ERR_RTX_NEG_ADVICE)
		goto rel_skb;

	module_put(THIS_MODULE);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	if (csk)
		pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
			       (&csk->saddr), (&csk->daddr), csk,
			       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

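		/*
		 * (hlen + dlen + 3) & ~3 rounds the PDU up to the 4-byte
		 * iSCSI pad boundary; the data digest, if configured and
		 * a data segment is present, follows the padded data and
		 * is counted separately. rcv_nxt then advances past the
		 * whole PDU.
		 */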
		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	unsigned int status = ntohl(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
	}
	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
	}
	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}

static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk)
		pr_err("can't find conn. for tid %u.\n", tid);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);

	__kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
				  0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}

static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int size, size6;
	int t4 = is_t4(lldi->adapter_type);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);

	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		return -EINVAL;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource;
	}
	cxgbi_sock_get(csk);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	/* SMT two entries per row */
	csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
AB
1373 csk->wr_cred = lldi->wr_cred -
1374 DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
1375 csk->wr_max_cred = csk->wr_cred;
7b36b6e0 1376 csk->wr_una_cred = 0;
1377 cxgbi_sock_reset_wr_list(csk);
1378 csk->err = 0;
7b36b6e0 1379
759a0cc5
AB
1380 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
1381 (&csk->saddr), (&csk->daddr), csk, csk->state,
1382 csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
1383 csk->mtu, csk->mss_idx, csk->smac_idx);
1384
1385 /* must wait for either a act_open_rpl or act_open_establish */
1386 try_module_get(THIS_MODULE);
7b36b6e0 1387 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
759a0cc5
AB
1388 if (csk->csk_family == AF_INET)
1389 send_act_open_req(csk, skb, csk->l2t);
f42bb57c 1390#if IS_ENABLED(CONFIG_IPV6)
759a0cc5
AB
1391 else
1392 send_act_open_req6(csk, skb, csk->l2t);
f42bb57c 1393#endif
c4737377 1394 neigh_release(n);
759a0cc5 1395
7b36b6e0 1396 return 0;
1397
1398rel_resource:
c4737377
DM
1399 if (n)
1400 neigh_release(n);
7b36b6e0 1401 if (skb)
1402 __kfree_skb(skb);
1403 return -EINVAL;
1404}

cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
};

int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
				struct ulp_mem_io *req,
				unsigned int wr_len, unsigned int dlen,
				unsigned int pm_addr)
{
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	if (is_t4(lldi->adapter_type))
		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
					(ULP_MEMIO_ORDER_F));
	else
		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
					(T5_ULP_MEMIO_IMM_F));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}

static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
				struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
				unsigned int npods,
				struct cxgbi_gather_list *gl,
				unsigned int gl_pidx)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
	unsigned int dlen = PPOD_SIZE * npods;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	unsigned int i;

	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
	if (!skb) {
		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
		       cdev, idx, npods);
		return -ENOMEM;
	}
	req = (struct ulp_mem_io *)skb->head;
	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);

	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
		if (!hdr && !gl)
			cxgbi_ddp_ppod_clear(ppod);
		else
			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
	}

	cxgb4_ofld_send(cdev->ports[port_id], skb);
	return 0;
}

static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
		       unsigned int idx, unsigned int npods,
		       struct cxgbi_gather_list *gl)
{
	unsigned int i, cnt;
	int err = 0;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
					   idx, cnt, gl, 4 * i);
		if (err < 0)
			break;
	}
	return err;
}

static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	unsigned int i, cnt;
	int err;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
					   idx, cnt, NULL, 0);
		if (err < 0)
			break;
	}
}

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int tagmask, pgsz_factor[4];
	int err;

	if (ddp) {
		kref_get(&ddp->refcnt);
		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
			cdev, cdev->ddp);
		return -EALREADY;
	}

	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
			     lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
			     lldi->iscsi_iolen, lldi->iscsi_iolen);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
		" %u/%u.\n",
		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
		ddp->max_rxsz, lldi->iscsi_iolen);
	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
		ddp->max_rxsz);
	return 0;
}

static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->snd_win = cxgb4i_snd_win;
	cdev->rcv_win = cxgb4i_rcv_win;
	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;

	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}

#define RX_PULL_LEN	128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);