latent_entropy: Mark functions with __latent_entropy
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd,
			u8 protocol,
			struct qed_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	if (!pp_ent)
		return -ENOMEM;

	rc = qed_spq_get_entry(p_hwfn, pp_ent);
	if (rc)
		return rc;

	p_ent = *pp_ent;

	p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
	p_ent->elem.hdr.cmd_id = cmd;
	p_ent->elem.hdr.protocol_id = protocol;

	p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = p_data->comp_mode;
	p_ent->comp_done.done = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case QED_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			return -EINVAL;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case QED_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
		   opaque_cid, cmd, protocol,
		   (unsigned long)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return 0;
}
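
/* Editor's note (not in the original file): every ramrod in this file
 * follows the same call pattern around qed_sp_init_request(): zero a
 * qed_sp_init_data, fill in cid/opaque_fid/completion mode, acquire and
 * initialize an SPQ entry, then post it. A minimal, hypothetical caller
 * sketch, kept compiled-out:
 */
#if 0
static int example_send_empty_ramrod(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;	/* block on completion */

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	/* Fill the command-specific p_ent->ramrod.* fields here, then post. */
	return qed_spq_post(p_hwfn, p_ent, NULL);
}
#endif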

static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
{
	switch (type) {
	case QED_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case QED_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case QED_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case QED_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}
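
/* Editor's note (not in the original file): unrecognized classification
 * values silently fall back to TUNNEL_CLSS_MAC_VLAN above, so callers get
 * outer MAC/VLAN classification rather than an error for bad input.
 */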

static void
qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
			      struct qed_tunn_update_params *p_src,
			      struct pf_update_tunnel_config *p_tunn_cfg)
{
	unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
	unsigned long update_mask = p_src->tunn_mode_update_mask;
	unsigned long tunn_mode = p_src->tunn_mode;
	unsigned long new_tunn_mode = 0;

	if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
		if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
			__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
	} else {
		if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
			__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
	}

	if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
		if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
			__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
	} else {
		if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
			__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
	}

	if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
		if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
			__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
	} else {
		if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
			__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
	}

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
				cpu_to_le16(p_src->geneve_udp_port);
	}

	if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
		if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
			__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
	} else {
		if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
			__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
	}

	if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
		if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
			__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
	} else {
		if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
			__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
	}

	p_src->tunn_mode = new_tunn_mode;
}
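
/* Editor's note (not in the original file): the repeated per-tunnel blocks
 * above all apply one merge rule -- if a tunnel's bit is set in
 * update_mask, take its new state from the requested tunn_mode, otherwise
 * carry over the cached state. A hypothetical table-driven equivalent,
 * kept compiled-out for illustration:
 */
#if 0
static unsigned long example_merge_tunn_mode(unsigned long cached,
					     unsigned long requested,
					     unsigned long update_mask)
{
	static const unsigned int tunn_bits[] = {
		QED_MODE_L2GRE_TUNN, QED_MODE_IPGRE_TUNN,
		QED_MODE_VXLAN_TUNN, QED_MODE_L2GENEVE_TUNN,
		QED_MODE_IPGENEVE_TUNN,
	};
	unsigned long merged = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(tunn_bits); i++) {
		/* Pick the source of truth for this tunnel type. */
		unsigned long src = test_bit(tunn_bits[i], &update_mask) ?
				    requested : cached;

		if (test_bit(tunn_bits[i], &src))
			__set_bit(tunn_bits[i], &merged);
	}

	return merged;
}
#endif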

static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
			      struct qed_tunn_update_params *p_src,
			      struct pf_update_tunnel_config *p_tunn_cfg)
{
	unsigned long tunn_mode = p_src->tunn_mode;
	enum tunnel_clss type;

	qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
	p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
	p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
	p_tunn_cfg->tunnel_clss_vxlan = type;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
	p_tunn_cfg->tunnel_clss_l2gre = type;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
	p_tunn_cfg->tunnel_clss_ipgre = type;

	if (p_src->update_vxlan_udp_port) {
		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
		p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
	}

	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2gre = 1;

	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgre = 1;

	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_vxlan = 1;

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
				cpu_to_le16(p_src->geneve_udp_port);
	}

	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2geneve = 1;

	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgeneve = 1;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
	p_tunn_cfg->tunnel_clss_l2geneve = type;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
	p_tunn_cfg->tunnel_clss_ipgeneve = type;
}

static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 unsigned long tunn_mode)
{
	u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
	u8 l2geneve_enable = 0, ipgeneve_enable = 0;

	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
		l2gre_enable = 1;

	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
		ipgre_enable = 1;

	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
		vxlan_enable = 1;

	qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
	qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);

	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
		l2geneve_enable = 1;

	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
		ipgeneve_enable = 1;

	qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
			      ipgeneve_enable);
}
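
/* Editor's note (not in the original file): the ramrods configure the
 * firmware's view of tunnelling, while qed_set_hw_tunn_mode() is the
 * companion step that mirrors the same tunn_mode bits into the
 * per-protocol hardware enables (qed_set_gre_enable(),
 * qed_set_vxlan_enable(), qed_set_geneve_enable()).
 */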

static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
			     struct qed_tunn_start_params *p_src,
			     struct pf_start_tunnel_config *p_tunn_cfg)
{
	unsigned long tunn_mode;
	enum tunnel_clss type;

	if (!p_src)
		return;

	tunn_mode = p_src->tunn_mode;
	type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
	p_tunn_cfg->tunnel_clss_vxlan = type;
	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
	p_tunn_cfg->tunnel_clss_l2gre = type;
	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
	p_tunn_cfg->tunnel_clss_ipgre = type;

	if (p_src->update_vxlan_udp_port) {
		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
		p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
	}

	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2gre = 1;

	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgre = 1;

	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_vxlan = 1;

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
				cpu_to_le16(p_src->geneve_udp_port);
	}

	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2geneve = 1;

	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgeneve = 1;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
	p_tunn_cfg->tunnel_clss_l2geneve = type;
	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
	p_tunn_cfg->tunnel_clss_ipgeneve = type;
}

int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_tunn_start_params *p_tunn,
		    enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 page_cnt;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = cpu_to_le16(0xf);

	switch (mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case QED_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}
	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl.p_phys_table);
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl.p_phys_table);

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
				     &p_ramrod->tunnel_config);

	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ISCSI:
		p_ramrod->personality = PERSONALITY_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
376 DP_NOTICE(p_hwfn, "Unkown personality %d\n",
377 p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8)p_iov->total_vfs;
	}
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn) {
		qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
				     p_tunn->tunn_mode);
		p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
	}

	return rc;
}
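
/* Editor's note (not in the original file): QED_SPQ_MODE_EBLOCK makes
 * qed_spq_post() wait for the event-queue completion of the PF_START
 * ramrod, which is why the HW tunnel enables and the cached tunn_mode can
 * be applied right after the post returns.
 */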

int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
				      &p_ent->ramrod.pf_update);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
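
/* Editor's note (not in the original file): unlike the PF_START path, this
 * DCBX-driven update uses QED_SPQ_MODE_CB, so qed_spq_post() returns
 * without blocking; since no p_comp_data is supplied here, the completion
 * callback is left NULL and nothing is invoked on completion.
 */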

/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_tunn_update_params *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
				      &p_ent->ramrod.pf_update.tunnel_config);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	if (p_tunn->update_vxlan_udp_port)
		qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
					p_tunn->vxlan_udp_port);
	if (p_tunn->update_geneve_udp_port)
		qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
					 p_tunn->geneve_udp_port);

	qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
	p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;

	return rc;
}
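
/* Editor's note (not in the original file): a hypothetical caller sketch
 * showing how a VXLAN UDP destination port change might be issued through
 * this entry point; field names follow struct qed_tunn_update_params as
 * used above. With a zeroed update mask, the fix-up helper carries the
 * cached tunnel modes over unchanged. Kept compiled-out:
 */
#if 0
static int example_update_vxlan_port(struct qed_hwfn *p_hwfn, u16 port)
{
	struct qed_tunn_update_params tunn_params;

	memset(&tunn_params, 0, sizeof(tunn_params));
	tunn_params.update_vxlan_udp_port = 1;
	tunn_params.vxlan_udp_port = port;

	return qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn_params,
					 QED_SPQ_MODE_EBLOCK, NULL);
}
#endif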

int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
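
/* Editor's note (not in the original file): the heartbeat ramrod carries no
 * payload (COMMON_RAMROD_EMPTY); posting it in blocking mode simply checks
 * that the firmware's slow path is still processing events.
 */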