1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
9 #include <linux/types.h>
11 #include <linux/delay.h>
12 #include <linux/errno.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/string.h>
19 #include "qed_init_ops.h"
20 #include "qed_reg_addr.h"
21 #include "qed_sriov.h"
23 #define QED_INIT_MAX_POLL_COUNT 100
24 #define QED_INIT_POLL_PERIOD_US 500
26 static u32 pxp_global_win
[] = {
29 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
30 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
31 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
32 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
33 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
34 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
35 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
36 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
37 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
38 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
48 void qed_init_iro_array(struct qed_dev
*cdev
)
50 cdev
->iro_arr
= iro_arr
;
53 /* Runtime configuration helpers */
54 void qed_init_clear_rt_data(struct qed_hwfn
*p_hwfn
)
58 for (i
= 0; i
< RUNTIME_ARRAY_SIZE
; i
++)
59 p_hwfn
->rt_data
.b_valid
[i
] = false;
62 void qed_init_store_rt_reg(struct qed_hwfn
*p_hwfn
, u32 rt_offset
, u32 val
)
64 p_hwfn
->rt_data
.init_val
[rt_offset
] = val
;
65 p_hwfn
->rt_data
.b_valid
[rt_offset
] = true;
68 void qed_init_store_rt_agg(struct qed_hwfn
*p_hwfn
,
69 u32 rt_offset
, u32
*p_val
, size_t size
)
73 for (i
= 0; i
< size
/ sizeof(u32
); i
++) {
74 p_hwfn
->rt_data
.init_val
[rt_offset
+ i
] = p_val
[i
];
75 p_hwfn
->rt_data
.b_valid
[rt_offset
+ i
] = true;
79 static int qed_init_rt(struct qed_hwfn
*p_hwfn
,
80 struct qed_ptt
*p_ptt
,
81 u32 addr
, u16 rt_offset
, u16 size
, bool b_must_dmae
)
83 u32
*p_init_val
= &p_hwfn
->rt_data
.init_val
[rt_offset
];
84 bool *p_valid
= &p_hwfn
->rt_data
.b_valid
[rt_offset
];
88 /* Since not all RT entries are initialized, go over the RT and
89 * for each segment of initialized values use DMA.
91 for (i
= 0; i
< size
; i
++) {
95 /* In case there isn't any wide-bus configuration here,
96 * simply write the data instead of using dmae.
99 qed_wr(p_hwfn
, p_ptt
, addr
+ (i
<< 2), p_init_val
[i
]);
103 /* Start of a new segment */
104 for (segment
= 1; i
+ segment
< size
; segment
++)
105 if (!p_valid
[i
+ segment
])
108 rc
= qed_dmae_host2grc(p_hwfn
, p_ptt
,
109 (uintptr_t)(p_init_val
+ i
),
110 addr
+ (i
<< 2), segment
, 0);
114 /* Jump over the entire segment, including invalid entry */
121 int qed_init_alloc(struct qed_hwfn
*p_hwfn
)
123 struct qed_rt_data
*rt_data
= &p_hwfn
->rt_data
;
125 if (IS_VF(p_hwfn
->cdev
))
128 rt_data
->b_valid
= kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE
,
130 if (!rt_data
->b_valid
)
133 rt_data
->init_val
= kzalloc(sizeof(u32
) * RUNTIME_ARRAY_SIZE
,
135 if (!rt_data
->init_val
) {
136 kfree(rt_data
->b_valid
);
143 void qed_init_free(struct qed_hwfn
*p_hwfn
)
145 kfree(p_hwfn
->rt_data
.init_val
);
146 kfree(p_hwfn
->rt_data
.b_valid
);
149 static int qed_init_array_dmae(struct qed_hwfn
*p_hwfn
,
150 struct qed_ptt
*p_ptt
,
152 u32 dmae_data_offset
,
160 /* Perform DMAE only for lengthy enough sections or for wide-bus */
161 if (!b_can_dmae
|| (!b_must_dmae
&& (size
< 16))) {
162 const u32
*data
= buf
+ dmae_data_offset
;
165 for (i
= 0; i
< size
; i
++)
166 qed_wr(p_hwfn
, p_ptt
, addr
+ (i
<< 2), data
[i
]);
168 rc
= qed_dmae_host2grc(p_hwfn
, p_ptt
,
169 (uintptr_t)(buf
+ dmae_data_offset
),
176 static int qed_init_fill_dmae(struct qed_hwfn
*p_hwfn
,
177 struct qed_ptt
*p_ptt
,
178 u32 addr
, u32 fill
, u32 fill_count
)
180 static u32 zero_buffer
[DMAE_MAX_RW_SIZE
];
182 memset(zero_buffer
, 0, sizeof(u32
) * DMAE_MAX_RW_SIZE
);
184 /* invoke the DMAE virtual/physical buffer API with
185 * 1. DMAE init channel
187 * 3. p_hwfb->temp_data,
191 return qed_dmae_host2grc(p_hwfn
, p_ptt
,
192 (uintptr_t)(&zero_buffer
[0]),
193 addr
, fill_count
, QED_DMAE_FLAG_RW_REPL_SRC
);
196 static void qed_init_fill(struct qed_hwfn
*p_hwfn
,
197 struct qed_ptt
*p_ptt
,
198 u32 addr
, u32 fill
, u32 fill_count
)
202 for (i
= 0; i
< fill_count
; i
++, addr
+= sizeof(u32
))
203 qed_wr(p_hwfn
, p_ptt
, addr
, fill
);
206 static int qed_init_cmd_array(struct qed_hwfn
*p_hwfn
,
207 struct qed_ptt
*p_ptt
,
208 struct init_write_op
*cmd
,
209 bool b_must_dmae
, bool b_can_dmae
)
211 u32 dmae_array_offset
= le32_to_cpu(cmd
->args
.array_offset
);
212 u32 data
= le32_to_cpu(cmd
->data
);
213 u32 addr
= GET_FIELD(data
, INIT_WRITE_OP_ADDRESS
) << 2;
215 u32 offset
, output_len
, input_len
, max_size
;
216 struct qed_dev
*cdev
= p_hwfn
->cdev
;
217 union init_array_hdr
*hdr
;
218 const u32
*array_data
;
222 array_data
= cdev
->fw_data
->arr_data
;
224 hdr
= (union init_array_hdr
*)(array_data
+ dmae_array_offset
);
225 data
= le32_to_cpu(hdr
->raw
.data
);
226 switch (GET_FIELD(data
, INIT_ARRAY_RAW_HDR_TYPE
)) {
227 case INIT_ARR_ZIPPED
:
228 offset
= dmae_array_offset
+ 1;
229 input_len
= GET_FIELD(data
,
230 INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE
);
231 max_size
= MAX_ZIPPED_SIZE
* 4;
232 memset(p_hwfn
->unzip_buf
, 0, max_size
);
234 output_len
= qed_unzip_data(p_hwfn
, input_len
,
235 (u8
*)&array_data
[offset
],
236 max_size
, (u8
*)p_hwfn
->unzip_buf
);
238 rc
= qed_init_array_dmae(p_hwfn
, p_ptt
, addr
, 0,
241 b_must_dmae
, b_can_dmae
);
243 DP_NOTICE(p_hwfn
, "Failed to unzip dmae data\n");
247 case INIT_ARR_PATTERN
:
249 u32 repeats
= GET_FIELD(data
,
250 INIT_ARRAY_PATTERN_HDR_REPETITIONS
);
253 size
= GET_FIELD(data
, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE
);
255 for (i
= 0; i
< repeats
; i
++, addr
+= size
<< 2) {
256 rc
= qed_init_array_dmae(p_hwfn
, p_ptt
, addr
,
257 dmae_array_offset
+ 1,
259 b_must_dmae
, b_can_dmae
);
265 case INIT_ARR_STANDARD
:
266 size
= GET_FIELD(data
, INIT_ARRAY_STANDARD_HDR_SIZE
);
267 rc
= qed_init_array_dmae(p_hwfn
, p_ptt
, addr
,
268 dmae_array_offset
+ 1,
270 b_must_dmae
, b_can_dmae
);
277 /* init_ops write command */
278 static int qed_init_cmd_wr(struct qed_hwfn
*p_hwfn
,
279 struct qed_ptt
*p_ptt
,
280 struct init_write_op
*p_cmd
, bool b_can_dmae
)
282 u32 data
= le32_to_cpu(p_cmd
->data
);
283 bool b_must_dmae
= GET_FIELD(data
, INIT_WRITE_OP_WIDE_BUS
);
284 u32 addr
= GET_FIELD(data
, INIT_WRITE_OP_ADDRESS
) << 2;
285 union init_write_args
*arg
= &p_cmd
->args
;
289 if (b_must_dmae
&& !b_can_dmae
) {
291 "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
296 switch (GET_FIELD(data
, INIT_WRITE_OP_SOURCE
)) {
297 case INIT_SRC_INLINE
:
298 data
= le32_to_cpu(p_cmd
->args
.inline_val
);
299 qed_wr(p_hwfn
, p_ptt
, addr
, data
);
302 data
= le32_to_cpu(p_cmd
->args
.zeros_count
);
303 if (b_must_dmae
|| (b_can_dmae
&& (data
>= 64)))
304 rc
= qed_init_fill_dmae(p_hwfn
, p_ptt
, addr
, 0, data
);
306 qed_init_fill(p_hwfn
, p_ptt
, addr
, 0, data
);
309 rc
= qed_init_cmd_array(p_hwfn
, p_ptt
, p_cmd
,
310 b_must_dmae
, b_can_dmae
);
312 case INIT_SRC_RUNTIME
:
313 qed_init_rt(p_hwfn
, p_ptt
, addr
,
314 le16_to_cpu(arg
->runtime
.offset
),
315 le16_to_cpu(arg
->runtime
.size
),
323 static inline bool comp_eq(u32 val
, u32 expected_val
)
325 return val
== expected_val
;
328 static inline bool comp_and(u32 val
, u32 expected_val
)
330 return (val
& expected_val
) == expected_val
;
333 static inline bool comp_or(u32 val
, u32 expected_val
)
335 return (val
| expected_val
) > 0;
338 /* init_ops read/poll commands */
339 static void qed_init_cmd_rd(struct qed_hwfn
*p_hwfn
,
340 struct qed_ptt
*p_ptt
, struct init_read_op
*cmd
)
342 bool (*comp_check
)(u32 val
, u32 expected_val
);
343 u32 delay
= QED_INIT_POLL_PERIOD_US
, val
;
344 u32 data
, addr
, poll
;
347 data
= le32_to_cpu(cmd
->op_data
);
348 addr
= GET_FIELD(data
, INIT_READ_OP_ADDRESS
) << 2;
349 poll
= GET_FIELD(data
, INIT_READ_OP_POLL_TYPE
);
352 val
= qed_rd(p_hwfn
, p_ptt
, addr
);
354 if (poll
== INIT_POLL_NONE
)
359 comp_check
= comp_eq
;
362 comp_check
= comp_or
;
365 comp_check
= comp_and
;
368 DP_ERR(p_hwfn
, "Invalid poll comparison type %08x\n",
373 data
= le32_to_cpu(cmd
->expected_val
);
375 i
< QED_INIT_MAX_POLL_COUNT
&& !comp_check(val
, data
);
378 val
= qed_rd(p_hwfn
, p_ptt
, addr
);
381 if (i
== QED_INIT_MAX_POLL_COUNT
) {
383 "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
384 addr
, le32_to_cpu(cmd
->expected_val
),
385 val
, le32_to_cpu(cmd
->op_data
));
/* init_ops callbacks entry point - no init callbacks are defined at the
 * moment, so reaching one is merely reported.
 */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_callback_op *p_cmd)
{
	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
}
397 static u8
qed_init_cmd_mode_match(struct qed_hwfn
*p_hwfn
,
398 u16
*p_offset
, int modes
)
400 struct qed_dev
*cdev
= p_hwfn
->cdev
;
401 const u8
*modes_tree_buf
;
402 u8 arg1
, arg2
, tree_val
;
404 modes_tree_buf
= cdev
->fw_data
->modes_tree_buf
;
405 tree_val
= modes_tree_buf
[(*p_offset
)++];
407 case INIT_MODE_OP_NOT
:
408 return qed_init_cmd_mode_match(p_hwfn
, p_offset
, modes
) ^ 1;
409 case INIT_MODE_OP_OR
:
410 arg1
= qed_init_cmd_mode_match(p_hwfn
, p_offset
, modes
);
411 arg2
= qed_init_cmd_mode_match(p_hwfn
, p_offset
, modes
);
413 case INIT_MODE_OP_AND
:
414 arg1
= qed_init_cmd_mode_match(p_hwfn
, p_offset
, modes
);
415 arg2
= qed_init_cmd_mode_match(p_hwfn
, p_offset
, modes
);
418 tree_val
-= MAX_INIT_MODE_OPS
;
419 return (modes
& BIT(tree_val
)) ? 1 : 0;
423 static u32
qed_init_cmd_mode(struct qed_hwfn
*p_hwfn
,
424 struct init_if_mode_op
*p_cmd
, int modes
)
426 u16 offset
= le16_to_cpu(p_cmd
->modes_buf_offset
);
428 if (qed_init_cmd_mode_match(p_hwfn
, &offset
, modes
))
431 return GET_FIELD(le32_to_cpu(p_cmd
->op_data
),
432 INIT_IF_MODE_OP_CMD_OFFSET
);
435 static u32
qed_init_cmd_phase(struct qed_hwfn
*p_hwfn
,
436 struct init_if_phase_op
*p_cmd
,
437 u32 phase
, u32 phase_id
)
439 u32 data
= le32_to_cpu(p_cmd
->phase_data
);
440 u32 op_data
= le32_to_cpu(p_cmd
->op_data
);
442 if (!(GET_FIELD(data
, INIT_IF_PHASE_OP_PHASE
) == phase
&&
443 (GET_FIELD(data
, INIT_IF_PHASE_OP_PHASE_ID
) == ANY_PHASE_ID
||
444 GET_FIELD(data
, INIT_IF_PHASE_OP_PHASE_ID
) == phase_id
)))
445 return GET_FIELD(op_data
, INIT_IF_PHASE_OP_CMD_OFFSET
);
450 int qed_init_run(struct qed_hwfn
*p_hwfn
,
451 struct qed_ptt
*p_ptt
, int phase
, int phase_id
, int modes
)
453 struct qed_dev
*cdev
= p_hwfn
->cdev
;
454 u32 cmd_num
, num_init_ops
;
455 union init_op
*init_ops
;
459 num_init_ops
= cdev
->fw_data
->init_ops_size
;
460 init_ops
= cdev
->fw_data
->init_ops
;
462 p_hwfn
->unzip_buf
= kzalloc(MAX_ZIPPED_SIZE
* 4, GFP_ATOMIC
);
463 if (!p_hwfn
->unzip_buf
)
466 for (cmd_num
= 0; cmd_num
< num_init_ops
; cmd_num
++) {
467 union init_op
*cmd
= &init_ops
[cmd_num
];
468 u32 data
= le32_to_cpu(cmd
->raw
.op_data
);
470 switch (GET_FIELD(data
, INIT_CALLBACK_OP_OP
)) {
472 rc
= qed_init_cmd_wr(p_hwfn
, p_ptt
, &cmd
->write
,
476 qed_init_cmd_rd(p_hwfn
, p_ptt
, &cmd
->read
);
478 case INIT_OP_IF_MODE
:
479 cmd_num
+= qed_init_cmd_mode(p_hwfn
, &cmd
->if_mode
,
482 case INIT_OP_IF_PHASE
:
483 cmd_num
+= qed_init_cmd_phase(p_hwfn
, &cmd
->if_phase
,
485 b_dmae
= GET_FIELD(data
, INIT_IF_PHASE_OP_DMAE_ENABLE
);
488 /* qed_init_run is always invoked from
491 udelay(le32_to_cpu(cmd
->delay
.delay
));
494 case INIT_OP_CALLBACK
:
495 qed_init_cmd_cb(p_hwfn
, p_ptt
, &cmd
->callback
);
503 kfree(p_hwfn
->unzip_buf
);
507 void qed_gtt_init(struct qed_hwfn
*p_hwfn
)
512 /* Set the global windows */
513 gtt_base
= PXP_PF_WINDOW_ADMIN_START
+ PXP_PF_WINDOW_ADMIN_GLOBAL_START
;
515 for (i
= 0; i
< ARRAY_SIZE(pxp_global_win
); i
++)
516 if (pxp_global_win
[i
])
517 REG_WR(p_hwfn
, gtt_base
+ i
* PXP_GLOBAL_ENTRY_SIZE
,
521 int qed_init_fw_data(struct qed_dev
*cdev
, const u8
*data
)
523 struct qed_fw_data
*fw
= cdev
->fw_data
;
524 struct bin_buffer_hdr
*buf_hdr
;
528 DP_NOTICE(cdev
, "Invalid fw data\n");
532 /* First Dword contains metadata and should be skipped */
533 buf_hdr
= (struct bin_buffer_hdr
*)(data
+ sizeof(u32
));
535 offset
= buf_hdr
[BIN_BUF_INIT_FW_VER_INFO
].offset
;
536 fw
->fw_ver_info
= (struct fw_ver_info
*)(data
+ offset
);
538 offset
= buf_hdr
[BIN_BUF_INIT_CMD
].offset
;
539 fw
->init_ops
= (union init_op
*)(data
+ offset
);
541 offset
= buf_hdr
[BIN_BUF_INIT_VAL
].offset
;
542 fw
->arr_data
= (u32
*)(data
+ offset
);
544 offset
= buf_hdr
[BIN_BUF_INIT_MODE_TREE
].offset
;
545 fw
->modes_tree_buf
= (u8
*)(data
+ offset
);
546 len
= buf_hdr
[BIN_BUF_INIT_CMD
].length
;
547 fw
->init_ops_size
= len
/ sizeof(struct init_raw_op
);