cxgb4/cxgb4vf/csiostor: Cleanup PL, XGMAC, SF and MC related register defines

drivers/scsi/csiostor/csio_hw.c
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;

/* FCoE Adapter types & their descriptions */
static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
	{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
	{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
	{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
	{"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
	{"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
	{"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
	{"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
	{"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
	{"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
	{"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
	{"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
	{"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
	{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
	{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
	{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
	{"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
	{"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
	{"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
	{"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
	{"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
	{"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
	{"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
};

static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
};

static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);

int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}


/*
 * csio_hw_wait_op_done_val - wait until an operation is completed
 * @hw: the HW module
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

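/*
 * Example (illustrative sketch, not part of the driver): polling a
 * hypothetical status bit with csio_hw_wait_op_done_val(). The register
 * and field names below are placeholders, not real hardware defines.
 */
#if 0
static int example_wait_ready(struct csio_hw *hw)
{
	uint32_t val;

	/*
	 * Poll EXAMPLE_STATUS_A up to 10 times, 100us apart, until the
	 * EXAMPLE_READY_F bit reads as 1; @val gets the final value.
	 */
	if (csio_hw_wait_op_done_val(hw, EXAMPLE_STATUS_A, EXAMPLE_READY_F,
				     1, 10, 100, &val))
		return -EAGAIN;		/* timed out */

	return 0;
}
#endif
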
/*
 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @hw: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
	val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}

void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);
}

static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3

/*
 * csio_hw_seeprom_read - read a serial EEPROM location
 * @hw: hw to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);

	return 0;
}

/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				 the VPD
 * @v: Pointer to buffered vpd data structure
 * @kw: The keyword to search for
 *
 * Returns the offset of the information field keyword's value within the
 * VPD buffer, or -EINVAL if the keyword is not found.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -EINVAL;
}

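/*
 * Example (illustrative sketch): each VPD-R information field starts with
 * a 3-byte header -- two keyword bytes and a length byte -- followed by
 * the field data. csio_hw_get_vpd_keyword_val() returns the offset just
 * past that header, which is how csio_hw_get_vpd_params() below recovers
 * a field's length:
 */
#if 0
	sn = csio_hw_get_vpd_keyword_val(v, "SN");
	if (sn >= 0)
		/* The length byte sits 2 bytes into the 3-byte header. */
		len = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
#endif
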
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);
	if (*pos)
		return 0;

	return -1;
}

/*
 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 * @hw: HW module
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	if (ret) {
		kfree(vpd);
		return ret;
	}
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return ret;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}

/*
 * csio_hw_sf1_read - read data from the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
		      BYTECNT_V(byte_cnt - 1), SF_OP_A);
	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA_A);
	return ret;
}

/*
 * csio_hw_sf1_write - write data to the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, val, SF_DATA_A);
	csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
		      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

	return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
					10, NULL);
}

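/*
 * Example (illustrative sketch): a chained serial-flash transaction in the
 * style of csio_hw_read_flash() below -- the read command and address go
 * out with @cont set, and @cont stays set on every data word except the
 * last, which ends the transaction:
 */
#if 0
	csio_hw_sf1_write(hw, 4, 1, 0, swab32(addr) | SF_RD_DATA_FAST);
	csio_hw_sf1_read(hw, 1, 1, 0, &dummy);	/* cont = 1, keep going */
	csio_hw_sf1_read(hw, 4, 0, 0, &word);	/* cont = 0 ends the chain */
#endif
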
/*
 * csio_hw_flash_wait_op - wait for a flash operation to complete
 * @hw: the HW module
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/*
 * csio_hw_read_flash - read words from serial flash
 * @hw: the HW module
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/*
 * csio_hw_write_flash - write up to a page of data to the serial flash
 * @hw: the hw
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}

/*
 * csio_hw_flash_erase_sectors - erase a range of flash sectors
 * @hw: the HW module
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}

static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}

/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FW_IMG_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}

/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
			offsetof(struct fw_hdr, tp_microcode_ver), 1,
			vers, 0);
}

/*
 * csio_hw_check_fw_version - check if the FW is compatible with
 *			      this driver
 * @hw: HW module
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major/minor version mismatch.
 */
static int
csio_hw_check_fw_version(struct csio_hw *hw)
{
	int ret, major, minor, micro;

	ret = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (!ret)
		ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev);
	minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev);
	micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev);

	if (major != FW_VERSION_MAJOR(hw)) {	/* major mismatch - fail */
		csio_err(hw, "card FW has major version %u, driver wants %u\n",
			 major, FW_VERSION_MAJOR(hw));
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
		return 0;	/* perfect match */

	/* Minor/micro version mismatch */
	return -EINVAL;
}

/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module
 * @fw_data: firmware image to write.
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FW_START_SEC, FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
					  FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FW_IMG_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}

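/*
 * Example (illustrative sketch): the image checksum rule enforced above.
 * Summing every big-endian 32-bit word of a valid firmware image,
 * including the checksum field in the header itself, must come out to
 * 0xffffffff:
 */
#if 0
static bool example_fw_image_ok(const __be32 *p, uint32_t size)
{
	uint32_t csum = 0, i;

	for (i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	return csum == 0xffffffff;
}
#endif
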
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)	/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;			/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;

	return 0;
}

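/*
 * Example (illustrative sketch): decoding the 3-byte RDID response read
 * above. The low byte is the manufacturer id (0x20 == Numonix) and bits
 * 23:16 hold log2 of the part size, so info == 0x17 would decode as:
 */
#if 0
	sf_size = 1 << 0x17;		/* 8 MB part */
	sf_nsec = 1 << (0x17 - 16);	/* 128 sectors of 64 KB each */
#endif
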
/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
			   (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = SOURCEPF_G(reg);

	return 0;
}

/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly. (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_M so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT_F)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}

/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues FW reset mbox cmd otherwise
 * does PIO reset.
 * Performs reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE_F | PIORST_F, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state. The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * PCIE_FW_MASTER_M).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing. The only way out of this state is to RESTART the firmware
 * ...
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb *mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}

/*
 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 * @hw: the HW module
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by csio_hw_fw_halt(). On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET. This will cause the
 *    firmware to start up from its start vector. And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image. Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state. On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart. So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate. We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}


/*
 * csio_hw_fw_config_file - setup an adapter via a Configuration File
 * @hw: the HW module
 * @mtype: the memory type where the Configuration File is located
 * @maddr: the memory address where the Configuration File is located
 * @finiver: return value for CF [fini] version
 * @finicsum: return value for CF [fini] checksum
 * @cfcsum: return value for CF computed checksum
 *
 * Issue a command to get the firmware to process the Configuration
 * File located at the specified mtype/maddr. If the Configuration
 * File is processed successfully and return value pointers are
 * provided, the Configuration File "[fini]" section version and
 * checksum values will be returned along with the computed checksum.
 * It's up to the caller to decide how it wants to respond to the
 * checksums not matching, but it's recommended that a prominent warning
 * be emitted in order to help people rapidly identify changed or
 * corrupted Configuration Files.
 *
 * Also note that it's possible to modify things like "niccaps",
 * "toecaps", etc. between processing the Configuration File and telling
 * the firmware to use the new configuration. Callers which want to
 * do this will need to "hand-roll" their own CAPS_CONFIG commands for
 * Configuration Files if they want to do this.
 */
static int
csio_hw_fw_config_file(struct csio_hw *hw,
		       unsigned int mtype, unsigned int maddr,
		       uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
{
	struct csio_mb *mbp;
	struct fw_caps_config_cmd *caps_cmd;
	int rv = -EINVAL;
	enum fw_retval ret;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}
	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
		goto out;
	}

	ret = csio_mb_fw_retval(mbp);
	if (ret != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret);
		goto out;
	}

	if (finiver)
		*finiver = ntohl(caps_cmd->finiver);
	if (finicsum)
		*finicsum = ntohl(caps_cmd->finicsum);
	if (cfcsum)
		*cfcsum = ntohl(caps_cmd->cfcsum);

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp)) {
		rv = -ENOENT;
		goto out;
	}

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
		goto out;
	}

	ret = csio_mb_fw_retval(mbp);
	if (ret != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}

/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

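/*
 * Example (illustrative sketch): the FW_PARAM_DEV()/FW_PARAM_PFVF()
 * helpers used above compose a 32-bit parameter id out of mnemonic and
 * parameter fields; FW_PARAM_DEV(PORTVEC) expands to roughly the same
 * pattern csio_hw_check_fwconfig() spells out by hand:
 */
#if 0
	param[0] = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
#endif
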

/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp))
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}

/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}

static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;

	ret = request_firmware(&cf, CSIO_CF_FNAME(hw), dev);
	if (ret < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 CSIO_CF_FNAME(hw), ret);
		return -ENOENT;
	}

	if (cf->size % 4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n",
			  CSIO_CF_FNAME(hw));
		snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}

1808/*
1809 * HW initialization: contact FW, obtain config, perform basic init.
1810 *
1811 * If the firmware we're dealing with has Configuration File support, then
1812 * we use that to perform all configuration -- either using the configuration
1813 * file stored in flash on the adapter or using a filesystem-local file
1814 * if available.
1815 *
1816 * If we don't have configuration file support in the firmware, then we'll
1817 * have to set things up the old fashioned way with hard-coded register
1818 * writes and firmware commands ...
1819 */
1820
1821/*
1822 * Attempt to initialize the HW via a Firmware Configuration File.
1823 */
1824static int
1825csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
1826{
1827 unsigned int mtype, maddr;
1828 int rv;
7cc16380 1829 uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
a3667aae
NKI
1830 int using_flash;
1831 char path[64];
1832
1833 /*
1834 * Reset device if necessary
1835 */
1836 if (reset) {
1837 rv = csio_do_reset(hw, true);
1838 if (rv != 0)
1839 goto bye;
1840 }
1841
1842 /*
1843 * If we have a configuration file in host ,
1844 * then use that. Otherwise, use the configuration file stored
1845 * in the HW flash ...
1846 */
1847 spin_unlock_irq(&hw->lock);
1848 rv = csio_hw_flash_config(hw, fw_cfg_param, path);
1849 spin_lock_irq(&hw->lock);
1850 if (rv != 0) {
1851 if (rv == -ENOENT) {
1852 /*
1853 * config file was not found. Use default
1854 * config file from flash.
1855 */
1856 mtype = FW_MEMTYPE_CF_FLASH;
7cc16380 1857 maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
a3667aae
NKI
1858 using_flash = 1;
1859 } else {
1860 /*
1861 * we revert back to the hardwired config if
1862 * flashing failed.
1863 */
1864 goto bye;
1865 }
1866 } else {
1867 mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
1868 maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
1869 using_flash = 0;
1870 }
1871
1872 hw->cfg_store = (uint8_t)mtype;
1873
1874 /*
1875 * Issue a Capability Configuration command to the firmware to get it
1876 * to parse the Configuration File.
1877 */
1878 rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
1879 &finicsum, &cfcsum);
1880 if (rv != 0)
1881 goto bye;
1882
1883 hw->cfg_finiver = finiver;
1884 hw->cfg_finicsum = finicsum;
1885 hw->cfg_cfcsum = cfcsum;
1886 hw->cfg_csum_status = true;
1887
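 /*
 * finicsum is the checksum recorded in the Configuration File's
 * FINI section; cfcsum is the checksum the firmware computed while
 * parsing the file. A mismatch indicates a corrupt or stale file.
 */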
1888 if (finicsum != cfcsum) {
1889 csio_warn(hw,
1890 "Config File checksum mismatch: csum=%#x, computed=%#x\n",
1891 finicsum, cfcsum);
1892
1893 hw->cfg_csum_status = false;
1894 }
1895
1896 /*
1897 * Note that we're now operating with parameters
1898 * supplied by the Configuration File, rather than hard-wired
1899 * initialization constants buried in the driver.
1900 */
1901 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
1902
1903 /* device parameters */
1904 rv = csio_get_device_params(hw);
1905 if (rv != 0)
1906 goto bye;
1907
1908 /* Configure SGE */
1909 csio_wr_sge_init(hw);
1910
1911 /*
1912 * And finally tell the firmware to initialize itself using the
1913 * parameters from the Configuration File.
1914 */
1915 /* Post event to notify completion of configuration */
1916 csio_post_event(&hw->sm, CSIO_HWE_INIT);
1917
1918 csio_info(hw,
1919 "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
1920 (using_flash ? "in device FLASH" : path), finiver, cfcsum);
1921
1922 return 0;
1923
1924 /*
1925 * Something bad happened. Return the error ...
1926 */
1927bye:
1928 hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
1929 csio_dbg(hw, "Configuration file error %d\n", rv);
1930 return rv;
1931}
1932
1933/*
1934 * Attempt to initialize the adapter via hard-coded, driver supplied
1935 * parameters ...
1936 */
1937static int
1938csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
1939{
1940 int rv;
1941 /*
1942 * Reset device if necessary
1943 */
1944 if (reset) {
1945 rv = csio_do_reset(hw, true);
1946 if (rv != 0)
1947 goto out;
1948 }
1949
1950 /* Get and set device capabilities */
1951 rv = csio_config_device_caps(hw);
1952 if (rv != 0)
1953 goto out;
1954
1955 /* device parameters */
1956 rv = csio_get_device_params(hw);
1957 if (rv != 0)
1958 goto out;
1959
1960 /* Configure SGE */
1961 csio_wr_sge_init(hw);
1962
1963 /* Post event to notify completion of configuration */
1964 csio_post_event(&hw->sm, CSIO_HWE_INIT);
1965
1966out:
1967 return rv;
1968}
1969
1970/*
1971 * Returns -EINVAL if the attempt to flash the firmware failed,
1972 * 0 if flashing succeeded, and -ECANCELED if flashing was not
1973 * attempted because the card already had the latest firmware.
1974 */
1976static int
1977csio_hw_flash_fw(struct csio_hw *hw)
1978{
1979 int ret = -ECANCELED;
1980 const struct firmware *fw;
1981 const struct fw_hdr *hdr;
1982 u32 fw_ver;
1983 struct pci_dev *pci_dev = hw->pdev;
1984 struct device *dev = &pci_dev->dev;
1985
1986 if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
1987 csio_err(hw, "could not find firmware image %s\n",
1988 CSIO_FW_FNAME(hw));
1989 return -EINVAL;
1990 }
1991
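 /*
 * Only consider the image if its major version matches the major
 * version the driver expects for this chip.
 */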
1992 hdr = (const struct fw_hdr *)fw->data;
1993 fw_ver = ntohl(hdr->fw_ver);
1994 if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw)) {
 /* wrong major version, won't do */
 ret = -EINVAL;
 goto out;
 }
1996
1997 /*
1998 * If the flash FW is unusable or we found something newer, load it.
1999 */
2000 if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
2001 fw_ver > hw->fwrev) {
2002 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
2003 /*force=*/false);
2004 if (!ret)
2005 csio_info(hw,
2006 "firmware upgraded to version %pI4 from %s\n",
2007 &hdr->fw_ver, CSIO_FW_FNAME(hw));
2008 else
2009 csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
2010 } else
2011 ret = -EINVAL;
2012
out:
2013 release_firmware(fw);
2014
2015 return ret;
2016}
2017
2018
2019/*
2020 * csio_hw_configure - Configure HW
2021 * @hw - HW module
2022 *
2023 */
2024static void
2025csio_hw_configure(struct csio_hw *hw)
2026{
2027 int reset = 1;
2028 int rv;
2029 u32 param[1];
2030
2031 rv = csio_hw_dev_ready(hw);
2032 if (rv != 0) {
2033 CSIO_INC_STATS(hw, n_err_fatal);
2034 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2035 goto out;
2036 }
2037
2038 /* HW version */
2039 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);
2040
2041 /* Needed for FW download */
2042 rv = csio_hw_get_flash_params(hw);
2043 if (rv != 0) {
2044 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
2045 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2046 goto out;
2047 }
2048
2049 /* Set PCIe completion timeout to 4 seconds (0xd selects the 4s-13s range) */
2050 if (pci_is_pcie(hw->pdev))
2051 pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
2052 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
2053
2054 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
2055
2056 rv = csio_hw_get_fw_version(hw, &hw->fwrev);
2057 if (rv != 0)
2058 goto out;
2059
2060 csio_hw_print_fw_version(hw, "Firmware revision");
2061
2062 rv = csio_do_hello(hw, &hw->fw_state);
2063 if (rv != 0) {
2064 CSIO_INC_STATS(hw, n_err_fatal);
2065 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2066 goto out;
2067 }
2068
2069 /* Read vpd */
2070 rv = csio_hw_get_vpd_params(hw, &hw->vpd);
2071 if (rv != 0)
2072 goto out;
2073
2074 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2075 rv = csio_hw_check_fw_version(hw);
2076 if (rv == -EINVAL) {
2077
2078 /* Do firmware update */
2079 spin_unlock_irq(&hw->lock);
2080 rv = csio_hw_flash_fw(hw);
2081 spin_lock_irq(&hw->lock);
2082
2083 if (rv == 0) {
2084 reset = 0;
2085 /*
2086 * Note that the chip was reset as part of the
2087 * firmware upgrade, so we don't reset it again
2088 * below; just grab the new firmware version.
2089 */
2090 rv = csio_hw_check_fw_version(hw);
2091 }
2092 }
2093 /*
2094 * If the firmware doesn't support Configuration
2095 * Files, use the old Driver-based, hard-wired
2096 * initialization. Otherwise, try using the
2097 * Configuration File support and fall back to the
2098 * Driver-based initialization if there's no
2099 * Configuration File found.
2100 */
2101 if (csio_hw_check_fwconfig(hw, param) == 0) {
2102 rv = csio_hw_use_fwconfig(hw, reset, param);
2103 if (rv == -ENOENT)
2104 goto out;
2105 if (rv != 0) {
2106 csio_info(hw,
2107 "No Configuration File present "
2108 "on adapter. Using hard-wired "
2109 "configuration parameters.\n");
2110 rv = csio_hw_no_fwconfig(hw, reset);
2111 }
2112 } else {
2113 rv = csio_hw_no_fwconfig(hw, reset);
2114 }
2115
2116 if (rv != 0)
2117 goto out;
2118
2119 } else {
2120 if (hw->fw_state == CSIO_DEV_STATE_INIT) {
2121
2122 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
2123
2124 /* device parameters */
2125 rv = csio_get_device_params(hw);
2126 if (rv != 0)
2127 goto out;
2128
2129 /* Get device capabilities */
2130 rv = csio_config_device_caps(hw);
2131 if (rv != 0)
2132 goto out;
2133
2134 /* Configure SGE */
2135 csio_wr_sge_init(hw);
2136
2137 /* Post event to notify completion of configuration */
2138 csio_post_event(&hw->sm, CSIO_HWE_INIT);
2139 goto out;
2140 }
2141 } /* if not master */
2142
2143out:
2144 return;
2145}
2146
2147/*
2148 * csio_hw_initialize - Initialize HW
2149 * @hw - HW module
2150 *
2151 */
2152static void
2153csio_hw_initialize(struct csio_hw *hw)
2154{
2155 struct csio_mb *mbp;
2156 enum fw_retval retval;
2157 int rv;
2158 int i;
2159
2160 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2161 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
2162 if (!mbp)
2163 goto out;
2164
2165 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
2166
2167 if (csio_mb_issue(hw, mbp)) {
2168 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
2169 goto free_and_out;
2170 }
2171
2172 retval = csio_mb_fw_retval(mbp);
2173 if (retval != FW_SUCCESS) {
2174 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
2175 retval);
2176 goto free_and_out;
2177 }
2178
2179 mempool_free(mbp, hw->mb_mempool);
2180 }
2181
2182 rv = csio_get_fcoe_resinfo(hw);
2183 if (rv != 0) {
2184 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
2185 goto out;
2186 }
2187
2188 spin_unlock_irq(&hw->lock);
2189 rv = csio_config_queues(hw);
2190 spin_lock_irq(&hw->lock);
2191
2192 if (rv != 0) {
2193 csio_err(hw, "Config of queues failed!: %d\n", rv);
2194 goto out;
2195 }
2196
2197 for (i = 0; i < hw->num_pports; i++)
2198 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;
2199
2200 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2201 rv = csio_enable_ports(hw);
2202 if (rv != 0) {
2203 csio_err(hw, "Failed to enable ports: %d\n", rv);
2204 goto out;
2205 }
2206 }
2207
2208 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
2209 return;
2210
2211free_and_out:
2212 mempool_free(mbp, hw->mb_mempool);
2213out:
2214 return;
2215}
2216
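/*
 * PF-level interrupt sources serviced here: PF software interrupts
 * (PFSW) and CIM/mailbox interrupts (PFCIM).
 */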
2217#define PF_INTR_MASK (PFSW_F | PFCIM_F)
2218
2219/*
2220 * csio_hw_intr_enable - Enable HW interrupts
2221 * @hw: Pointer to HW module.
2222 *
2223 * Enable interrupts in HW registers.
2224 */
2225static void
2226csio_hw_intr_enable(struct csio_hw *hw)
2227{
2228 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
2229 uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2230 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
2231
2232 /*
2233 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
2234 * by FW, so do nothing for INTX.
2235 */
2236 if (hw->intr_mode == CSIO_IM_MSIX)
2237 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
2238 AIVEC_V(AIVEC_M), vec);
2239 else if (hw->intr_mode == CSIO_IM_MSI)
2240 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
2241 AIVEC_V(AIVEC_M), 0);
2242
2243 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));
2244
2245 /* Turn on MB interrupts - this will internally flush PIO as well */
2246 csio_mb_intr_enable(hw);
2247
2248 /* These are common registers - only a master can modify them */
2249 if (csio_is_hw_master(hw)) {
2250 /*
2251 * Disable the Serial FLASH interrupt, if enabled!
2252 */
2253 pl &= (~SF_F);
2254 csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);
2255
2256 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
2257 EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
2258 ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
2259 ERR_DATA_CPL_ON_HIGH_QID1_F |
2260 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
2261 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
2262 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
2263 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
2264 SGE_INT_ENABLE3_A);
2265 csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
2266 }
2267
2268 hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
2269
2270}
2271
2272/*
2273 * csio_hw_intr_disable - Disable HW interrupts
2274 * @hw: Pointer to HW module.
2275 *
2276 * Turn off Mailbox and PCI_PF_CFG interrupts.
2277 */
2278void
2279csio_hw_intr_disable(struct csio_hw *hw)
2280{
2281 uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2282
2283 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
2284 return;
2285
2286 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
2287
2288 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
2289 if (csio_is_hw_master(hw))
2290 csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);
2291
2292 /* Turn off MB interrupts */
2293 csio_mb_intr_disable(hw);
2294
2295}
2296
2297void
2298csio_hw_fatal_err(struct csio_hw *hw)
2299{
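 /* Quiesce the SGE by clearing its global enable bit. */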
2300 csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
2301 csio_hw_intr_disable(hw);
2302
2303 /* Do not reset HW, we may need FW state for debugging */
2304 csio_fatal(hw, "HW Fatal error encountered!\n");
2305}
2306
2307/*****************************************************************************/
2308/* START: HW SM */
2309/*****************************************************************************/
2310/*
2311 * csio_hws_uninit - Uninit state
2312 * @hw - HW module
2313 * @evt - Event
2314 *
2315 */
2316static void
2317csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
2318{
2319 hw->prev_evt = hw->cur_evt;
2320 hw->cur_evt = evt;
2321 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2322
2323 switch (evt) {
2324 case CSIO_HWE_CFG:
2325 csio_set_state(&hw->sm, csio_hws_configuring);
2326 csio_hw_configure(hw);
2327 break;
2328
2329 default:
2330 CSIO_INC_STATS(hw, n_evt_unexp);
2331 break;
2332 }
2333}
2334
2335/*
2336 * csio_hws_configuring - Configuring state
2337 * @hw - HW module
2338 * @evt - Event
2339 *
2340 */
2341static void
2342csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
2343{
2344 hw->prev_evt = hw->cur_evt;
2345 hw->cur_evt = evt;
2346 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2347
2348 switch (evt) {
2349 case CSIO_HWE_INIT:
2350 csio_set_state(&hw->sm, csio_hws_initializing);
2351 csio_hw_initialize(hw);
2352 break;
2353
2354 case CSIO_HWE_INIT_DONE:
2355 csio_set_state(&hw->sm, csio_hws_ready);
2356 /* Fan out event to all lnode SMs */
2357 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
2358 break;
2359
2360 case CSIO_HWE_FATAL:
2361 csio_set_state(&hw->sm, csio_hws_uninit);
2362 break;
2363
2364 case CSIO_HWE_PCI_REMOVE:
2365 csio_do_bye(hw);
2366 break;
2367 default:
2368 CSIO_INC_STATS(hw, n_evt_unexp);
2369 break;
2370 }
2371}
2372
2373/*
2374 * csio_hws_initializing - Initializing state
2375 * @hw - HW module
2376 * @evt - Event
2377 *
2378 */
2379static void
2380csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
2381{
2382 hw->prev_evt = hw->cur_evt;
2383 hw->cur_evt = evt;
2384 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2385
2386 switch (evt) {
2387 case CSIO_HWE_INIT_DONE:
2388 csio_set_state(&hw->sm, csio_hws_ready);
2389
2390 /* Fan out event to all lnode SMs */
2391 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
2392
2393 /* Enable interrupts */
2394 csio_hw_intr_enable(hw);
2395 break;
2396
2397 case CSIO_HWE_FATAL:
2398 csio_set_state(&hw->sm, csio_hws_uninit);
2399 break;
2400
2401 case CSIO_HWE_PCI_REMOVE:
2402 csio_do_bye(hw);
2403 break;
2404
2405 default:
2406 CSIO_INC_STATS(hw, n_evt_unexp);
2407 break;
2408 }
2409}
2410
2411/*
2412 * csio_hws_ready - Ready state
2413 * @hw - HW module
2414 * @evt - Event
2415 *
2416 */
2417static void
2418csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
2419{
2420 /* Remember the event */
2421 hw->evtflag = evt;
2422
2423 hw->prev_evt = hw->cur_evt;
2424 hw->cur_evt = evt;
2425 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2426
2427 switch (evt) {
2428 case CSIO_HWE_HBA_RESET:
2429 case CSIO_HWE_FW_DLOAD:
2430 case CSIO_HWE_SUSPEND:
2431 case CSIO_HWE_PCI_REMOVE:
2432 case CSIO_HWE_PCIERR_DETECTED:
2433 csio_set_state(&hw->sm, csio_hws_quiescing);
2434 /* cleanup all outstanding cmds */
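 /* The flag picks whether outstanding I/Os are aborted through
 * the firmware (true) or completed immediately (false); the
 * latter is used when the HW is resetting or saw a PCI error.
 */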
2435 if (evt == CSIO_HWE_HBA_RESET ||
2436 evt == CSIO_HWE_PCIERR_DETECTED)
2437 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
2438 else
2439 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);
2440
2441 csio_hw_intr_disable(hw);
2442 csio_hw_mbm_cleanup(hw);
2443 csio_evtq_stop(hw);
2444 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
2445 csio_evtq_flush(hw);
2446 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
2447 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
2448 break;
2449
2450 case CSIO_HWE_FATAL:
2451 csio_set_state(&hw->sm, csio_hws_uninit);
2452 break;
2453
2454 default:
2455 CSIO_INC_STATS(hw, n_evt_unexp);
2456 break;
2457 }
2458}
2459
2460/*
2461 * csio_hws_quiescing - Quiescing state
2462 * @hw - HW module
2463 * @evt - Event
2464 *
2465 */
2466static void
2467csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
2468{
2469 hw->prev_evt = hw->cur_evt;
2470 hw->cur_evt = evt;
2471 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2472
2473 switch (evt) {
2474 case CSIO_HWE_QUIESCED:
2475 switch (hw->evtflag) {
2476 case CSIO_HWE_FW_DLOAD:
2477 csio_set_state(&hw->sm, csio_hws_resetting);
2478 /* Download firmware */
2479 /* Fall through */
2480
2481 case CSIO_HWE_HBA_RESET:
2482 csio_set_state(&hw->sm, csio_hws_resetting);
2483 /* Start reset of the HBA */
2484 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
2485 csio_wr_destroy_queues(hw, false);
2486 csio_do_reset(hw, false);
2487 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
2488 break;
2489
2490 case CSIO_HWE_PCI_REMOVE:
2491 csio_set_state(&hw->sm, csio_hws_removing);
2492 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
2493 csio_wr_destroy_queues(hw, true);
2494 /* Now send the bye command */
2495 csio_do_bye(hw);
2496 break;
2497
2498 case CSIO_HWE_SUSPEND:
2499 csio_set_state(&hw->sm, csio_hws_quiesced);
2500 break;
2501
2502 case CSIO_HWE_PCIERR_DETECTED:
2503 csio_set_state(&hw->sm, csio_hws_pcierr);
2504 csio_wr_destroy_queues(hw, false);
2505 break;
2506
2507 default:
2508 CSIO_INC_STATS(hw, n_evt_unexp);
2509 break;
2510
2511 }
2512 break;
2513
2514 default:
2515 CSIO_INC_STATS(hw, n_evt_unexp);
2516 break;
2517 }
2518}
2519
2520/*
2521 * csio_hws_quiesced - Quiesced state
2522 * @hw - HW module
2523 * @evt - Event
2524 *
2525 */
2526static void
2527csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
2528{
2529 hw->prev_evt = hw->cur_evt;
2530 hw->cur_evt = evt;
2531 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2532
2533 switch (evt) {
2534 case CSIO_HWE_RESUME:
2535 csio_set_state(&hw->sm, csio_hws_configuring);
2536 csio_hw_configure(hw);
2537 break;
2538
2539 default:
2540 CSIO_INC_STATS(hw, n_evt_unexp);
2541 break;
2542 }
2543}
2544
2545/*
2546 * csio_hws_resetting - HW Resetting state
2547 * @hw - HW module
2548 * @evt - Event
2549 *
2550 */
2551static void
2552csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
2553{
2554 hw->prev_evt = hw->cur_evt;
2555 hw->cur_evt = evt;
2556 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2557
2558 switch (evt) {
2559 case CSIO_HWE_HBA_RESET_DONE:
2560 csio_evtq_start(hw);
2561 csio_set_state(&hw->sm, csio_hws_configuring);
2562 csio_hw_configure(hw);
2563 break;
2564
2565 default:
2566 CSIO_INC_STATS(hw, n_evt_unexp);
2567 break;
2568 }
2569}
2570
2571/*
2572 * csio_hws_removing - PCI Hotplug removing state
2573 * @hw - HW module
2574 * @evt - Event
2575 *
2576 */
2577static void
2578csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
2579{
2580 hw->prev_evt = hw->cur_evt;
2581 hw->cur_evt = evt;
2582 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2583
2584 switch (evt) {
2585 case CSIO_HWE_HBA_RESET:
2586 if (!csio_is_hw_master(hw))
2587 break;
2588 /*
2589 * The BYE should have already been issued, so we can't
2590 * use the mailbox interface. Hence we use the PL_RST
2591 * register directly.
2592 */
2593 csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
2594 csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
2595 mdelay(2000);
2596 break;
2597
2598 /* Should never receive any new events */
2599 default:
2600 CSIO_INC_STATS(hw, n_evt_unexp);
2601 break;
2602
2603 }
2604}
2605
2606/*
2607 * csio_hws_pcierr - PCI Error state
2608 * @hw - HW module
2609 * @evt - Event
2610 *
2611 */
2612static void
2613csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
2614{
2615 hw->prev_evt = hw->cur_evt;
2616 hw->cur_evt = evt;
2617 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2618
2619 switch (evt) {
2620 case CSIO_HWE_PCIERR_SLOT_RESET:
2621 csio_evtq_start(hw);
2622 csio_set_state(&hw->sm, csio_hws_configuring);
2623 csio_hw_configure(hw);
2624 break;
2625
2626 default:
2627 CSIO_INC_STATS(hw, n_evt_unexp);
2628 break;
2629 }
2630}
2631
2632/*****************************************************************************/
2633/* END: HW SM */
2634/*****************************************************************************/
2635
2636/*
2637 * csio_handle_intr_status - table driven interrupt handler
2638 * @hw: HW instance
2639 * @reg: the interrupt status register to process
2640 * @acts: table of interrupt actions
2641 *
2642 * A table driven interrupt handler that applies a set of masks to an
2643 * interrupt status word and performs the corresponding actions if the
2644 * interrupts described by the mask have occurred. The actions include
2645 * optionally emitting a warning or alert message. The table is terminated
2646 * by an entry specifying mask 0. Returns the number of fatal interrupt
2647 * conditions.
2648 */
2649int
2650csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
2651 const struct intr_info *acts)
2652{
2653 int fatal = 0;
2654 unsigned int mask = 0;
2655 unsigned int status = csio_rd_reg32(hw, reg);
2656
2657 for ( ; acts->mask; ++acts) {
2658 if (!(status & acts->mask))
2659 continue;
2660 if (acts->fatal) {
2661 fatal++;
2662 csio_fatal(hw, "Fatal %s (0x%x)\n",
2663 acts->msg, status & acts->mask);
2664 } else if (acts->msg)
2665 csio_info(hw, "%s (0x%x)\n",
2666 acts->msg, status & acts->mask);
2667 mask |= acts->mask;
2668 }
2669 status &= mask;
2670 if (status) /* clear processed interrupts */
2671 csio_wr_reg32(hw, status, reg);
2672 return fatal;
2673}
2674
2675/*
2676 * TP interrupt handler.
2677 */
2678static void csio_tp_intr_handler(struct csio_hw *hw)
2679{
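 /* intr_info entries are { mask, message, stat_idx (-1 = unused), fatal } */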
2680 static struct intr_info tp_intr_info[] = {
2681 { 0x3fffffff, "TP parity error", -1, 1 },
2682 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
2683 { 0, NULL, 0, 0 }
2684 };
2685
2686 if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
2687 csio_hw_fatal_err(hw);
2688}
2689
2690/*
2691 * SGE interrupt handler.
2692 */
2693static void csio_sge_intr_handler(struct csio_hw *hw)
2694{
2695 uint64_t v;
2696
2697 static struct intr_info sge_intr_info[] = {
2698 { ERR_CPL_EXCEED_IQE_SIZE_F,
2699 "SGE received CPL exceeding IQE size", -1, 1 },
2700 { ERR_INVALID_CIDX_INC_F,
2701 "SGE GTS CIDX increment too large", -1, 0 },
2702 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
2703 { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
2704 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
2705 "SGE IQID > 1023 received CPL for FL", -1, 0 },
2706 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
2707 0 },
2708 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
2709 0 },
2710 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
2711 0 },
2712 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
2713 0 },
2714 { ERR_ING_CTXT_PRIO_F,
2715 "SGE too many priority ingress contexts", -1, 0 },
2716 { ERR_EGR_CTXT_PRIO_F,
2717 "SGE too many priority egress contexts", -1, 0 },
2718 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
2719 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
2720 { 0, NULL, 0, 0 }
2721 };
2722
2723 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
2724 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
2725 if (v) {
2726 csio_fatal(hw, "SGE parity error (%#llx)\n",
2727 (unsigned long long)v);
2728 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
2729 SGE_INT_CAUSE1_A);
2730 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
2731 }
2732
2733 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
2734
2735 if (v != 0)
2736 csio_hw_fatal_err(hw);
2738}
2739
2740#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
2741 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
2742#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
2743 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
2744
2745/*
2746 * CIM interrupt handler.
2747 */
2748static void csio_cim_intr_handler(struct csio_hw *hw)
2749{
2750 static struct intr_info cim_intr_info[] = {
2751 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
2752 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2753 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2754 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
2755 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
2756 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
2757 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
2758 { 0, NULL, 0, 0 }
2759 };
2760 static struct intr_info cim_upintr_info[] = {
2761 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
2762 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
2763 { ILLWRINT_F, "CIM illegal write", -1, 1 },
2764 { ILLRDINT_F, "CIM illegal read", -1, 1 },
2765 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
2766 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
2767 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
2768 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
2769 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
2770 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
2771 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
2772 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
2773 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
2774 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
2775 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
2776 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
2777 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
2778 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
2779 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
2780 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
2781 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
2782 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
2783 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
2784 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
2785 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
2786 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
2787 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
2788 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
2789 { 0, NULL, 0, 0 }
2790 };
2791
2792 int fat;
2793
2794 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
2795 cim_intr_info) +
2796 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
2797 cim_upintr_info);
2798 if (fat)
2799 csio_hw_fatal_err(hw);
2800}
2801
2802/*
2803 * ULP RX interrupt handler.
2804 */
2805static void csio_ulprx_intr_handler(struct csio_hw *hw)
2806{
2807 static struct intr_info ulprx_intr_info[] = {
2808 { 0x1800000, "ULPRX context error", -1, 1 },
2809 { 0x7fffff, "ULPRX parity error", -1, 1 },
2810 { 0, NULL, 0, 0 }
2811 };
2812
2813 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
2814 csio_hw_fatal_err(hw);
2815}
2816
2817/*
2818 * ULP TX interrupt handler.
2819 */
2820static void csio_ulptx_intr_handler(struct csio_hw *hw)
2821{
2822 static struct intr_info ulptx_intr_info[] = {
2823 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
2824 0 },
2825 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
2826 0 },
2827 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
2828 0 },
2829 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
2830 0 },
2831 { 0xfffffff, "ULPTX parity error", -1, 1 },
2832 { 0, NULL, 0, 0 }
2833 };
2834
2835 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
2836 csio_hw_fatal_err(hw);
2837}
2838
2839/*
2840 * PM TX interrupt handler.
2841 */
2842static void csio_pmtx_intr_handler(struct csio_hw *hw)
2843{
2844 static struct intr_info pmtx_intr_info[] = {
2845 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
2846 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
2847 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
2848 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
2849 { 0xffffff0, "PMTX framing error", -1, 1 },
2850 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
2851 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
2852 1 },
2853 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
2854 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1 },
2855 { 0, NULL, 0, 0 }
2856 };
2857
2858 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
2859 csio_hw_fatal_err(hw);
2860}
2861
2862/*
2863 * PM RX interrupt handler.
2864 */
2865static void csio_pmrx_intr_handler(struct csio_hw *hw)
2866{
2867 static struct intr_info pmrx_intr_info[] = {
2868 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
2869 { 0x3ffff0, "PMRX framing error", -1, 1 },
2870 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
2871 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
2872 1 },
2873 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
2874 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1 },
2875 { 0, NULL, 0, 0 }
2876 };
2877
2878 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
2879 csio_hw_fatal_err(hw);
2880}
2881
2882/*
2883 * CPL switch interrupt handler.
2884 */
2885static void csio_cplsw_intr_handler(struct csio_hw *hw)
2886{
2887 static struct intr_info cplsw_intr_info[] = {
2888 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
2889 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
2890 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
2891 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
2892 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
2893 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
2894 { 0, NULL, 0, 0 }
2895 };
2896
2897 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
2898 csio_hw_fatal_err(hw);
2899}
2900
2901/*
2902 * LE interrupt handler.
2903 */
2904static void csio_le_intr_handler(struct csio_hw *hw)
2905{
2906 static struct intr_info le_intr_info[] = {
2907 { LIPMISS_F, "LE LIP miss", -1, 0 },
2908 { LIP0_F, "LE 0 LIP error", -1, 0 },
2909 { PARITYERR_F, "LE parity error", -1, 1 },
2910 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
2911 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
2912 { 0, NULL, 0, 0 }
2913 };
2914
2915 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
2916 csio_hw_fatal_err(hw);
2917}
2918
2919/*
2920 * MPS interrupt handler.
2921 */
2922static void csio_mps_intr_handler(struct csio_hw *hw)
2923{
2924 static struct intr_info mps_rx_intr_info[] = {
2925 { 0xffffff, "MPS Rx parity error", -1, 1 },
2926 { 0, NULL, 0, 0 }
2927 };
2928 static struct intr_info mps_tx_intr_info[] = {
2929 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
2930 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2931 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
2932 -1, 1 },
2933 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
2934 -1, 1 },
2935 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
2936 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
2937 { FRMERR_F, "MPS Tx framing error", -1, 1 },
2938 { 0, NULL, 0, 0 }
2939 };
2940 static struct intr_info mps_trc_intr_info[] = {
2941 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
2942 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
2943 -1, 1 },
2944 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
2945 { 0, NULL, 0, 0 }
2946 };
2947 static struct intr_info mps_stat_sram_intr_info[] = {
2948 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2949 { 0, NULL, 0, 0 }
2950 };
2951 static struct intr_info mps_stat_tx_intr_info[] = {
2952 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2953 { 0, NULL, 0, 0 }
2954 };
2955 static struct intr_info mps_stat_rx_intr_info[] = {
2956 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2957 { 0, NULL, 0, 0 }
2958 };
2959 static struct intr_info mps_cls_intr_info[] = {
2960 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
2961 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
2962 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
2963 { 0, NULL, 0, 0 }
2964 };
2965
2966 int fat;
2967
2968 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
2969 mps_rx_intr_info) +
2970 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
2971 mps_tx_intr_info) +
2972 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
2973 mps_trc_intr_info) +
2974 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
2975 mps_stat_sram_intr_info) +
2976 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
2977 mps_stat_tx_intr_info) +
2978 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
2979 mps_stat_rx_intr_info) +
2980 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
2981 mps_cls_intr_info);
2982
2983 csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
2984 csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */
2985 if (fat)
2986 csio_hw_fatal_err(hw);
2987}
2988
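/*
 * Memory (EDC/MC) interrupt sources: FIFO parity errors plus
 * correctable and uncorrectable ECC data errors.
 */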
2989#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
2990 ECC_UE_INT_CAUSE_F)
2991
2992/*
2993 * EDC/MC interrupt handler.
2994 */
2995static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
2996{
2997 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2998
2999 unsigned int addr, cnt_addr, v;
3000
3001 if (idx <= MEM_EDC1) {
3002 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
3003 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
3004 } else {
3005 addr = MC_INT_CAUSE_A;
3006 cnt_addr = MC_ECC_STATUS_A;
3007 }
3008
3009 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
3010 if (v & PERR_INT_CAUSE_F)
3011 csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
3012 if (v & ECC_CE_INT_CAUSE_F) {
3013 uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));
3014
3015 csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
3016 csio_warn(hw, "%u %s correctable ECC data error%s\n",
3017 cnt, name[idx], cnt > 1 ? "s" : "");
3018 }
3019 if (v & ECC_UE_INT_CAUSE_F)
3020 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
3021
3022 csio_wr_reg32(hw, v, addr);
3023 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
3024 csio_hw_fatal_err(hw);
3025}
3026
3027/*
3028 * MA interrupt handler.
3029 */
3030static void csio_ma_intr_handler(struct csio_hw *hw)
3031{
3032 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);
3033
3034 if (status & MEM_PERR_INT_CAUSE_F)
3035 csio_fatal(hw, "MA parity error, parity status %#x\n",
3036 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
3037 if (status & MEM_WRAP_INT_CAUSE_F) {
3038 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
a3667aae
NKI
3039 csio_fatal(hw,
3040 "MA address wrap-around error by client %u to address %#x\n",
3041 MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
3042 }
3043 csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
3044 csio_hw_fatal_err(hw);
3045}
3046
3047/*
3048 * SMB interrupt handler.
3049 */
3050static void csio_smb_intr_handler(struct csio_hw *hw)
3051{
3052 static struct intr_info smb_intr_info[] = {
3053 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
3054 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
3055 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
3056 { 0, NULL, 0, 0 }
3057 };
3058
3059 if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
3060 csio_hw_fatal_err(hw);
3061}
3062
3063/*
3064 * NC-SI interrupt handler.
3065 */
3066static void csio_ncsi_intr_handler(struct csio_hw *hw)
3067{
3068 static struct intr_info ncsi_intr_info[] = {
3069 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
3070 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
3071 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
3072 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
3073 { 0, NULL, 0, 0 }
3074 };
3075
3076 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
3077 csio_hw_fatal_err(hw);
3078}
3079
3080/*
3081 * XGMAC interrupt handler.
3082 */
3083static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
3084{
3085 uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
3086
3087 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
3088 if (!v)
3089 return;
3090
3091 if (v & TXFIFO_PRTY_ERR_F)
3092 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
3093 if (v & RXFIFO_PRTY_ERR_F)
3094 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
3095 csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
3096 csio_hw_fatal_err(hw);
3097}
3098
3099/*
3100 * PL interrupt handler.
3101 */
3102static void csio_pl_intr_handler(struct csio_hw *hw)
3103{
3104 static struct intr_info pl_intr_info[] = {
3105 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
3106 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
3107 { 0, NULL, 0, 0 }
3108 };
3109
3110 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
3111 csio_hw_fatal_err(hw);
3112}
3113
3114/*
3115 * csio_hw_slow_intr_handler - control path interrupt handler
3116 * @hw: HW module
3117 *
3118 * Interrupt handler for non-data global interrupt events, e.g., errors.
3119 * The designation 'slow' is because it involves register reads, while
3120 * data interrupts typically don't involve any MMIOs.
3121 */
3122int
3123csio_hw_slow_intr_handler(struct csio_hw *hw)
3124{
3125 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);
3126
3127 if (!(cause & CSIO_GLBL_INTR_MASK)) {
3128 CSIO_INC_STATS(hw, n_plint_unexp);
3129 return 0;
3130 }
3131
3132 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);
3133
3134 CSIO_INC_STATS(hw, n_plint_cnt);
3135
3136 if (cause & CIM_F)
3137 csio_cim_intr_handler(hw);
3138
3139 if (cause & MPS_F)
3140 csio_mps_intr_handler(hw);
3141
3142 if (cause & NCSI_F)
3143 csio_ncsi_intr_handler(hw);
3144
3145 if (cause & PL_F)
3146 csio_pl_intr_handler(hw);
3147
3148 if (cause & SMB_F)
3149 csio_smb_intr_handler(hw);
3150
3151 if (cause & XGMAC0_F)
3152 csio_xgmac_intr_handler(hw, 0);
3153
3154 if (cause & XGMAC1_F)
3155 csio_xgmac_intr_handler(hw, 1);
3156
3157 if (cause & XGMAC_KR0_F)
3158 csio_xgmac_intr_handler(hw, 2);
3159
3160 if (cause & XGMAC_KR1_F)
3161 csio_xgmac_intr_handler(hw, 3);
3162
3163 if (cause & PCIE_F)
3164 hw->chip_ops->chip_pcie_intr_handler(hw);
3165
3166 if (cause & MC_F)
3167 csio_mem_intr_handler(hw, MEM_MC);
3168
3169 if (cause & EDC0_F)
3170 csio_mem_intr_handler(hw, MEM_EDC0);
3171
3172 if (cause & EDC1_F)
3173 csio_mem_intr_handler(hw, MEM_EDC1);
3174
3175 if (cause & LE_F)
3176 csio_le_intr_handler(hw);
3177
3178 if (cause & TP_F)
3179 csio_tp_intr_handler(hw);
3180
3181 if (cause & MA_F)
3182 csio_ma_intr_handler(hw);
3183
3184 if (cause & PM_TX_F)
3185 csio_pmtx_intr_handler(hw);
3186
3187 if (cause & PM_RX_F)
3188 csio_pmrx_intr_handler(hw);
3189
3190 if (cause & ULP_RX_F)
3191 csio_ulprx_intr_handler(hw);
3192
3193 if (cause & CPL_SWITCH_F)
3194 csio_cplsw_intr_handler(hw);
3195
3196 if (cause & SGE_F)
3197 csio_sge_intr_handler(hw);
3198
3199 if (cause & ULP_TX_F)
3200 csio_ulptx_intr_handler(hw);
3201
3202 /* Clear the interrupts just processed for which we are the master. */
3203 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
3204 csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */
3205
3206 return 1;
3207}
3208
3209/*****************************************************************************
3210 * HW <--> mailbox interfacing routines.
3211 ****************************************************************************/
3212/*
3213 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
3214 *
3215 * @data: Private data pointer.
3216 *
3217 * Called from worker thread context.
3218 */
3219static void
3220csio_mberr_worker(void *data)
3221{
3222 struct csio_hw *hw = (struct csio_hw *)data;
3223 struct csio_mbm *mbm = &hw->mbm;
3224 LIST_HEAD(cbfn_q);
3225 struct csio_mb *mbp_next;
3226 int rv;
3227
3228 del_timer_sync(&mbm->timer);
3229
3230 spin_lock_irq(&hw->lock);
3231 if (list_empty(&mbm->cbfn_q)) {
3232 spin_unlock_irq(&hw->lock);
3233 return;
3234 }
3235
3236 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
3237 mbm->stats.n_cbfnq = 0;
3238
3239 /* Try to start waiting mailboxes */
3240 if (!list_empty(&mbm->req_q)) {
3241 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
3242 list_del_init(&mbp_next->list);
3243
3244 rv = csio_mb_issue(hw, mbp_next);
3245 if (rv != 0)
3246 list_add_tail(&mbp_next->list, &mbm->req_q);
3247 else
3248 CSIO_DEC_STATS(mbm, n_activeq);
3249 }
3250 spin_unlock_irq(&hw->lock);
3251
3252 /* Now callback completions */
3253 csio_mb_completions(hw, &cbfn_q);
3254}
3255
3256/*
3257 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
3258 *
3259 * @data: private data pointer
3260 *
3261 **/
3262static void
3263csio_hw_mb_timer(uintptr_t data)
3264{
3265 struct csio_hw *hw = (struct csio_hw *)data;
3266 struct csio_mb *mbp = NULL;
3267
3268 spin_lock_irq(&hw->lock);
3269 mbp = csio_mb_tmo_handler(hw);
3270 spin_unlock_irq(&hw->lock);
3271
3272 /* Call back the function for the timed-out Mailbox */
3273 if (mbp)
3274 mbp->mb_cbfn(hw, mbp);
3275
3276}
3277
3278/*
3279 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
3280 * @hw: HW module
3281 *
3282 * Called with lock held, should exit with lock held.
3283 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
3284 * into a local queue. Drops lock and calls the completions. Holds
3285 * lock and returns.
3286 */
3287static void
3288csio_hw_mbm_cleanup(struct csio_hw *hw)
3289{
3290 LIST_HEAD(cbfn_q);
3291
3292 csio_mb_cancel_all(hw, &cbfn_q);
3293
3294 spin_unlock_irq(&hw->lock);
3295 csio_mb_completions(hw, &cbfn_q);
3296 spin_lock_irq(&hw->lock);
3297}
3298
3299/*****************************************************************************
3300 * Event handling
3301 ****************************************************************************/
3302int
3303csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3304 uint16_t len)
3305{
3306 struct csio_evt_msg *evt_entry = NULL;
3307
3308 if (type >= CSIO_EVT_MAX)
3309 return -EINVAL;
3310
3311 if (len > CSIO_EVT_MSG_SIZE)
3312 return -EINVAL;
3313
3314 if (hw->flags & CSIO_HWF_FWEVT_STOP)
3315 return -EINVAL;
3316
3317 if (list_empty(&hw->evt_free_q)) {
3318 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3319 type, len);
3320 return -ENOMEM;
3321 }
3322
3323 evt_entry = list_first_entry(&hw->evt_free_q,
3324 struct csio_evt_msg, list);
3325 list_del_init(&evt_entry->list);
3326
3327 /* copy event msg and queue the event */
3328 evt_entry->type = type;
3329 memcpy((void *)evt_entry->data, evt_msg, len);
3330 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3331
3332 CSIO_DEC_STATS(hw, n_evt_freeq);
3333 CSIO_INC_STATS(hw, n_evt_activeq);
3334
3335 return 0;
3336}
3337
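/*
 * Like csio_enqueue_evt(), but takes hw->lock itself and can also copy
 * a payload that arrives scattered across freelist buffers (msg_sg).
 */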
3338static int
3339csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3340 uint16_t len, bool msg_sg)
3341{
3342 struct csio_evt_msg *evt_entry = NULL;
3343 struct csio_fl_dma_buf *fl_sg;
3344 uint32_t off = 0;
3345 unsigned long flags;
3346 int n, ret = 0;
3347
3348 if (type >= CSIO_EVT_MAX)
3349 return -EINVAL;
3350
3351 if (len > CSIO_EVT_MSG_SIZE)
3352 return -EINVAL;
3353
3354 spin_lock_irqsave(&hw->lock, flags);
3355 if (hw->flags & CSIO_HWF_FWEVT_STOP) {
3356 ret = -EINVAL;
3357 goto out;
3358 }
3359
3360 if (list_empty(&hw->evt_free_q)) {
3361 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3362 type, len);
3363 ret = -ENOMEM;
3364 goto out;
3365 }
3366
3367 evt_entry = list_first_entry(&hw->evt_free_q,
3368 struct csio_evt_msg, list);
3369 list_del_init(&evt_entry->list);
3370
3371 /* copy event msg and queue the event */
3372 evt_entry->type = type;
3373
3374 /* If the payload is in an SG list */
3375 if (msg_sg) {
3376 fl_sg = (struct csio_fl_dma_buf *) evt_msg;
3377 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
3378 memcpy((void *)((uintptr_t)evt_entry->data + off),
3379 fl_sg->flbufs[n].vaddr,
3380 fl_sg->flbufs[n].len);
3381 off += fl_sg->flbufs[n].len;
3382 }
3383 } else
3384 memcpy((void *)evt_entry->data, evt_msg, len);
3385
3386 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3387 CSIO_DEC_STATS(hw, n_evt_freeq);
3388 CSIO_INC_STATS(hw, n_evt_activeq);
3389out:
3390 spin_unlock_irqrestore(&hw->lock, flags);
3391 return ret;
3392}
3393
3394static void
3395csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
3396{
3397 if (evt_entry) {
3398 spin_lock_irq(&hw->lock);
3399 list_del_init(&evt_entry->list);
3400 list_add_tail(&evt_entry->list, &hw->evt_free_q);
3401 CSIO_DEC_STATS(hw, n_evt_activeq);
3402 CSIO_INC_STATS(hw, n_evt_freeq);
3403 spin_unlock_irq(&hw->lock);
3404 }
3405}
3406
3407void
3408csio_evtq_flush(struct csio_hw *hw)
3409{
3410 uint32_t count;
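 /* Poll for up to ~60s (30 x 2s) for the worker to drain pending events. */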
3411 count = 30;
3412 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
3413 spin_unlock_irq(&hw->lock);
3414 msleep(2000);
3415 spin_lock_irq(&hw->lock);
3416 }
3417
3418 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
3419}
3420
3421static void
3422csio_evtq_stop(struct csio_hw *hw)
3423{
3424 hw->flags |= CSIO_HWF_FWEVT_STOP;
3425}
3426
3427static void
3428csio_evtq_start(struct csio_hw *hw)
3429{
3430 hw->flags &= ~CSIO_HWF_FWEVT_STOP;
3431}
3432
3433static void
3434csio_evtq_cleanup(struct csio_hw *hw)
3435{
3436 struct list_head *evt_entry, *next_entry;
3437
3438 /* Release outstanding events from activeq to freeq */
3439 if (!list_empty(&hw->evt_active_q))
3440 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);
3441
3442 hw->stats.n_evt_activeq = 0;
3443 hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
3444
3445 /* Free up event entries */
3446 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
3447 kfree(evt_entry);
3448 CSIO_DEC_STATS(hw, n_evt_freeq);
3449 }
3450
3451 hw->stats.n_evt_freeq = 0;
3452}
3453
3454
3455static void
3456csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
3457 struct csio_fl_dma_buf *flb, void *priv)
3458{
3459 __u8 op;
3460 void *msg = NULL;
3461 uint32_t msg_len = 0;
3462 bool msg_sg = false;
3463
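 /*
 * CPL_FW6_PLD delivers its payload in freelist buffers and is passed
 * on as an SG list; CPL_FW6_MSG/CPL_FW4_MSG carry the message inline,
 * just past the RSS header.
 */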
3464 op = ((struct rss_header *) wr)->opcode;
3465 if (op == CPL_FW6_PLD) {
3466 CSIO_INC_STATS(hw, n_cpl_fw6_pld);
3467 if (!flb || !flb->totlen) {
3468 CSIO_INC_STATS(hw, n_cpl_unexp);
3469 return;
3470 }
3471
3472 msg = (void *) flb;
3473 msg_len = flb->totlen;
3474 msg_sg = true;
3475 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
3476
3477 CSIO_INC_STATS(hw, n_cpl_fw6_msg);
3478 /* skip RSS header */
3479 msg = (void *)((uintptr_t)wr + sizeof(__be64));
3480 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
3481 sizeof(struct cpl_fw4_msg);
3482 } else {
3483 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
3484 CSIO_INC_STATS(hw, n_cpl_unexp);
3485 return;
3486 }
3487
3488 /*
3489 * Enqueue event to EventQ. Events processing happens
3490 * in Event worker thread context
3491 */
3492 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
3493 (uint16_t)msg_len, msg_sg))
3494 CSIO_INC_STATS(hw, n_evt_drop);
3495}
3496
3497void
3498csio_evtq_worker(struct work_struct *work)
3499{
3500 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
3501 struct list_head *evt_entry, *next_entry;
3502 LIST_HEAD(evt_q);
3503 struct csio_evt_msg *evt_msg;
3504 struct cpl_fw6_msg *msg;
3505 struct csio_rnode *rn;
3506 int rv = 0;
3507 uint8_t evtq_stop = 0;
3508
3509 csio_dbg(hw, "event worker thread active evts#%d\n",
3510 hw->stats.n_evt_activeq);
3511
3512 spin_lock_irq(&hw->lock);
3513 while (!list_empty(&hw->evt_active_q)) {
3514 list_splice_tail_init(&hw->evt_active_q, &evt_q);
3515 spin_unlock_irq(&hw->lock);
3516
3517 list_for_each_safe(evt_entry, next_entry, &evt_q) {
3518 evt_msg = (struct csio_evt_msg *) evt_entry;
3519
3520 /* Drop events if queue is STOPPED */
3521 spin_lock_irq(&hw->lock);
3522 if (hw->flags & CSIO_HWF_FWEVT_STOP)
3523 evtq_stop = 1;
3524 spin_unlock_irq(&hw->lock);
3525 if (evtq_stop) {
3526 CSIO_INC_STATS(hw, n_evt_drop);
3527 goto free_evt;
3528 }
3529
3530 switch (evt_msg->type) {
3531 case CSIO_EVT_FW:
3532 msg = (struct cpl_fw6_msg *)(evt_msg->data);
3533
3534 if ((msg->opcode == CPL_FW6_MSG ||
3535 msg->opcode == CPL_FW4_MSG) &&
3536 !msg->type) {
3537 rv = csio_mb_fwevt_handler(hw,
3538 msg->data);
3539 if (!rv)
3540 break;
3541 /* Handle any remaining fw events */
3542 csio_fcoe_fwevt_handler(hw,
3543 msg->opcode, msg->data);
3544 } else if (msg->opcode == CPL_FW6_PLD) {
3545
3546 csio_fcoe_fwevt_handler(hw,
3547 msg->opcode, msg->data);
3548 } else {
3549 csio_warn(hw,
3550 "Unhandled FW msg op %x type %x\n",
3551 msg->opcode, msg->type);
3552 CSIO_INC_STATS(hw, n_evt_drop);
3553 }
3554 break;
3555
3556 case CSIO_EVT_MBX:
3557 csio_mberr_worker(hw);
3558 break;
3559
3560 case CSIO_EVT_DEV_LOSS:
3561 memcpy(&rn, evt_msg->data, sizeof(rn));
3562 csio_rnode_devloss_handler(rn);
3563 break;
3564
3565 default:
3566 csio_warn(hw, "Unhandled event %x on evtq\n",
3567 evt_msg->type);
3568 CSIO_INC_STATS(hw, n_evt_unexp);
3569 break;
3570 }
3571free_evt:
3572 csio_free_evt(hw, evt_msg);
3573 }
3574
3575 spin_lock_irq(&hw->lock);
3576 }
3577 hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
3578 spin_unlock_irq(&hw->lock);
3579}
3580
3581int
3582csio_fwevtq_handler(struct csio_hw *hw)
3583{
3584 int rv;
3585
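 /* An IQ id of CSIO_MAX_QID means the FW event queue was never set up. */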
3586 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
3587 CSIO_INC_STATS(hw, n_int_stray);
3588 return -EINVAL;
3589 }
3590
3591 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
3592 csio_process_fwevtq_entry, NULL);
3593 return rv;
3594}
3595
3596/****************************************************************************
3597 * Entry points
3598 ****************************************************************************/
3599
3600/* Management module */
3601/*
3602 * csio_mgmt_req_lookup - Look up whether the given IO req exists in the active Q.
3603 * @mgmtm - mgmt module
3604 * @io_req - io request
3605 *
3606 * Return - 0: if the given IO req exists in the active Q.
3607 * -EINVAL: if the lookup fails.
3608 */
3609int
3610csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
3611{
3612 struct list_head *tmp;
3613
3614 /* Lookup ioreq in the ACTIVEQ */
3615 list_for_each(tmp, &mgmtm->active_q) {
3616 if (io_req == (struct csio_ioreq *)tmp)
3617 return 0;
3618 }
3619 return -EINVAL;
3620}
3621
3622#define ECM_MIN_TMO 1000 /* Minimum timeout value for req */
3623
3624/*
3625 * csio_mgmt_tmo_handler - MGMT IO timeout handler.
3626 * @data - Event data.
3627 *
3628 * Return - none.
3629 */
3630static void
3631csio_mgmt_tmo_handler(uintptr_t data)
3632{
3633 struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
3634 struct list_head *tmp;
3635 struct csio_ioreq *io_req;
3636
3637 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");
3638
3639 spin_lock_irq(&mgmtm->hw->lock);
3640
3641 list_for_each(tmp, &mgmtm->active_q) {
3642 io_req = (struct csio_ioreq *) tmp;
3643 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);
3644
3645 if (!io_req->tmo) {
3646 /* Dequeue the request from the active Q. */
3647 tmp = csio_list_prev(tmp);
3648 list_del_init(&io_req->sm.sm_list);
3649 if (io_req->io_cbfn) {
3650 /* io_req will be freed by completion handler */
3651 io_req->wr_status = -ETIMEDOUT;
3652 io_req->io_cbfn(mgmtm->hw, io_req);
3653 } else {
3654 CSIO_DB_ASSERT(0);
3655 }
3656 }
3657 }
3658
3659 /* If the active queue is not empty, re-arm the timer */
3660 if (!list_empty(&mgmtm->active_q))
3661 mod_timer(&mgmtm->mgmt_timer,
3662 jiffies + msecs_to_jiffies(ECM_MIN_TMO));
3663 spin_unlock_irq(&mgmtm->hw->lock);
3664}
3665
3666static void
3667csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
3668{
3669 struct csio_hw *hw = mgmtm->hw;
3670 struct csio_ioreq *io_req;
3671 struct list_head *tmp;
3672 uint32_t count;
3673
3674 count = 30;
3675 /* Wait for all outstanding req to complete gracefully */
3676 while ((!list_empty(&mgmtm->active_q)) && count--) {
3677 spin_unlock_irq(&hw->lock);
3678 msleep(2000);
3679 spin_lock_irq(&hw->lock);
3680 }
3681
3682 /* release outstanding req from ACTIVEQ */
3683 list_for_each(tmp, &mgmtm->active_q) {
3684 io_req = (struct csio_ioreq *) tmp;
3685 tmp = csio_list_prev(tmp);
3686 list_del_init(&io_req->sm.sm_list);
3687 mgmtm->stats.n_active--;
3688 if (io_req->io_cbfn) {
3689 /* io_req will be freed by completion handler */
3690 io_req->wr_status = -ETIMEDOUT;
3691 io_req->io_cbfn(mgmtm->hw, io_req);
3692 }
3693 }
3694}
3695
3696/*
3697 * csio_mgmt_init - Mgmt module init entry point
3698 * @mgmtsm - mgmt module
3699 * @hw - HW module
3700 *
3701 * Initialize mgmt timer, resource wait queue, active queue,
3702 * completion q. Allocate Egress and Ingress
3703 * WR queues and save off the queue index returned by the WR
3704 * module for future use. Allocate and save off mgmt reqs in the
3705 * mgmt_req_freelist for future use. Make sure their SM is initialized
3706 * to uninit state.
3707 * Returns: 0 - on success
3708 * -ENOMEM - on error.
3709 */
3710static int
3711csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
3712{
3713 struct timer_list *timer = &mgmtm->mgmt_timer;
3714
3715 init_timer(timer);
3716 timer->function = csio_mgmt_tmo_handler;
3717 timer->data = (unsigned long)mgmtm;
3718
3719 INIT_LIST_HEAD(&mgmtm->active_q);
3720 INIT_LIST_HEAD(&mgmtm->cbfn_q);
3721
3722 mgmtm->hw = hw;
3723 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/
3724
3725 return 0;
3726}
3727
3728/*
3729 * csio_mgmtm_exit - MGMT module exit entry point
3730 * @mgmtsm - mgmt module
3731 *
3732 * This function is called during MGMT module uninit.
3733 * Stop timers, free ioreqs allocated.
3734 * Returns: None
3735 *
3736 */
3737static void
3738csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
3739{
3740 del_timer_sync(&mgmtm->mgmt_timer);
3741}
3742
3743
3744/**
3745 * csio_hw_start - Kicks off the HW State machine
3746 * @hw: Pointer to HW module.
3747 *
3748 * It is assumed that the initialization is a synchronous operation.
3749 * So when we return after posting the event, the HW SM should be in
3750 * the ready state, if there were no errors during init.
3751 */
3752int
3753csio_hw_start(struct csio_hw *hw)
3754{
3755 spin_lock_irq(&hw->lock);
3756 csio_post_event(&hw->sm, CSIO_HWE_CFG);
3757 spin_unlock_irq(&hw->lock);
3758
3759 if (csio_is_hw_ready(hw))
3760 return 0;
3761 else
3762 return -EINVAL;
3763}
3764
3765int
3766csio_hw_stop(struct csio_hw *hw)
3767{
3768 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
3769
3770 if (csio_is_hw_removing(hw))
3771 return 0;
3772 else
3773 return -EINVAL;
3774}

/* Max reset retries */
#define CSIO_MAX_RESET_RETRIES 3

/**
 * csio_hw_reset - Reset the hardware
 * @hw: HW module.
 *
 * Caller should hold lock across this function.
 */
int
csio_hw_reset(struct csio_hw *hw)
{
	if (!csio_is_hw_master(hw))
		return -EPERM;

	if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max hw reset attempts reached..");
		return -EINVAL;
	}

	hw->rst_retries++;
	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);

	if (csio_is_hw_ready(hw)) {
		hw->rst_retries = 0;
		hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
		return 0;
	} else
		return -EINVAL;
}
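
/*
 * Since csio_hw_reset() expects hw->lock to be held and manages
 * rst_retries itself, a caller only needs to check the return value.
 * A hedged sketch of such a call site (hypothetical, not the driver's
 * actual error path):
 *
 *	spin_lock_irq(&hw->lock);
 *	if (csio_hw_reset(hw))
 *		csio_err(hw, "HW reset failed\n");
 *	spin_unlock_irq(&hw->lock);
 */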

/*
 * csio_hw_get_device_id - Caches the adapter's vendor & device id.
 * @hw: HW module.
 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already? */
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */
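
/*
 * The chip generation is derived from the PCI device ID alone:
 * CSIO_HW_CHIP_MASK keeps the bits that distinguish T4 from T5 parts,
 * and csio_is_t4() (used below in csio_hw_init()) tests the masked
 * value. (By Chelsio's usual device-ID convention the generation sits
 * in the top nibble, e.g. 0x4xxx for T4 and 0x5xxx for T5; that detail
 * is an assumption here, not taken from this file.)
 */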

/*
 * csio_hw_set_description - Set the model, description of the hw.
 * @hw: HW module.
 * @ven_id: PCI Vendor ID
 * @dev_id: PCI Device ID
 */
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T4_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t4_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t4_fcoe_adapters[adap_type].description,
			       32);
		} else if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			char tempName[32] = "Chelsio FCoE Controller";
			memcpy(hw->model_desc, tempName, 32);
		}
	}
} /* csio_hw_set_description */
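
/*
 * Note that adap_type (the CSIO_ASIC_DEVID_TYPE_MASK bits of the
 * device ID) is used directly as an index into the per-generation
 * adapter description tables, so table order must match the device-ID
 * type encoding; the code has no bounds check, so an unknown type
 * value would index past the end of the table.
 */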

/**
 * csio_hw_init - Initialize HW module.
 * @hw: Pointer to HW module.
 *
 * Initialize the members of the HW module.
 */
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Initialize the HW chip ops with T4/T5 specific ops */
	hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;

	/* Set the model & its description */
	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest and Transport */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;

	/* Pre-allocate evtq entries and initialize them */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {
		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			csio_err(hw, "Failed to initialize eventq");
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}
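
/*
 * A hedged usage sketch of the init/start pairing (the driver's actual
 * probe path lives elsewhere and may differ in detail): csio_hw_init()
 * sets up the sub-modules and the event queue, csio_hw_start() then
 * drives the state machine to ready, and csio_hw_exit() unwinds init
 * if start fails. The error label is hypothetical:
 *
 *	if (csio_hw_init(hw))
 *		goto err_free;
 *	if (csio_hw_start(hw)) {
 *		csio_hw_exit(hw);
 *		goto err_free;
 *	}
 */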

/**
 * csio_hw_exit - Un-initialize HW module.
 * @hw: Pointer to HW module.
 */
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}
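
/*
 * Note that csio_hw_exit() tears the modules down in exact reverse
 * order of csio_hw_init(), mirroring that function's error unwind
 * labels (err_evtq_cleanup through err_mbm_exit), so partial and full
 * initializations unwind through the same sequence.
 */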