staging/rdma/hfi1: HFI reports wrong offline disabled reason when cable removed
drivers/staging/rdma/hfi1/chip.c
1 /*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51 /*
52 * This file contains all of the code that is specific to the HFI chip
53 */
54
55 #include <linux/pci.h>
56 #include <linux/delay.h>
57 #include <linux/interrupt.h>
58 #include <linux/module.h>
59
60 #include "hfi.h"
61 #include "trace.h"
62 #include "mad.h"
63 #include "pio.h"
64 #include "sdma.h"
65 #include "eprom.h"
66 #include "efivar.h"
67
68 #define NUM_IB_PORTS 1
69
70 uint kdeth_qp;
71 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75 module_param(num_vls, uint, S_IRUGO);
76 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78 /*
79 * Default time to aggregate two 10K packets from the idle state
80 * (timer not running). The timer starts at the end of the first packet,
81 * so only the time for one 10K packet and header plus a bit extra is needed.
82 * 10 * 1024 + 64 header bytes = 10304 bytes
83 * 10304 bytes / 12.5 GB/s = 824.32 ns
84 */
85 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86 module_param(rcv_intr_timeout, uint, S_IRUGO);
87 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
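/*
 * For illustration: with the numbers in the comment above, the default
 * evaluates to 10304 bytes / 12.5 GB/s ~= 824.32 ns, rounded down to
 * 824 ns, plus 16 ns of interrupt-coalescing margin = 840 ns.
 */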
88
89 uint rcv_intr_count = 16; /* same as qib */
90 module_param(rcv_intr_count, uint, S_IRUGO);
91 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93 ushort link_crc_mask = SUPPORTED_CRCS;
94 module_param(link_crc_mask, ushort, S_IRUGO);
95 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97 uint loopback;
98 module_param_named(loopback, loopback, uint, S_IRUGO);
99 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100
101 /* Other driver tunables */
102 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
103 static ushort crc_14b_sideband = 1;
104 static uint use_flr = 1;
105 uint quick_linkup; /* skip LNI */
106
107 struct flag_table {
108 u64 flag; /* the flag */
109 char *str; /* description string */
110 u16 extra; /* extra information */
111 u16 unused0;
112 u32 unused1;
113 };
114
115 /* str must be a string constant */
116 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
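/*
 * For illustration, the arguments are reordered into the struct layout:
 *   FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * expands to
 *   { CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 * i.e. the { .flag, .str, .extra } members of struct flag_table.
 */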
118
119 /* Send Error Consequences */
120 #define SEC_WRITE_DROPPED 0x1
121 #define SEC_PACKET_DROPPED 0x2
122 #define SEC_SC_HALTED 0x4 /* per-context only */
123 #define SEC_SPC_FREEZE 0x8 /* per-HFI only */
124
125 #define MIN_KERNEL_KCTXTS 2
126 #define FIRST_KERNEL_KCTXT 1
127 #define NUM_MAP_REGS 32
128
129 /* Bit offset into the GUID which carries HFI id information */
130 #define GUID_HFI_INDEX_SHIFT 39
131
132 /* extract the emulation revision */
133 #define emulator_rev(dd) ((dd)->irev >> 8)
134 /* parallel and serial emulation versions are 3 and 4 respectively */
135 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
136 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
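/*
 * For illustration: if dd->irev were 0x1234, emulator_rev(dd) would be
 * 0x12, and the low nibble 0x4 would make is_emulator_s(dd) true (serial
 * emulation); a low nibble of 0x3 would indicate parallel emulation.
 */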
137
138 /* RSM fields */
139
140 /* packet type */
141 #define IB_PACKET_TYPE 2ull
142 #define QW_SHIFT 6ull
143 /* QPN[7..1] */
144 #define QPN_WIDTH 7ull
145
146 /* LRH.BTH: QW 0, OFFSET 48 - for match */
147 #define LRH_BTH_QW 0ull
148 #define LRH_BTH_BIT_OFFSET 48ull
149 #define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
150 #define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
151 #define LRH_BTH_SELECT
152 #define LRH_BTH_MASK 3ull
153 #define LRH_BTH_VALUE 2ull
154
155 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
156 #define LRH_SC_QW 0ull
157 #define LRH_SC_BIT_OFFSET 56ull
158 #define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
159 #define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
160 #define LRH_SC_MASK 128ull
161 #define LRH_SC_VALUE 0ull
162
163 /* SC[n..0] QW 0, OFFSET 60 - for select */
164 #define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
165
166 /* QPN[m+n:1] QW 1, OFFSET 1 */
167 #define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
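/*
 * For illustration, the encodings above work out to:
 *   LRH_BTH_MATCH_OFFSET = (0ull << 6) | 48 = 48   (QW 0, bit 48)
 *   LRH_SC_MATCH_OFFSET  = (0ull << 6) | 56 = 56   (QW 0, bit 56)
 *   LRH_SC_SELECT_OFFSET = (0ull << 6) | 60 = 60   (QW 0, bit 60)
 *   QPN_SELECT_OFFSET    = (1ull << 6) |  1 = 65   (QW 1, bit 1)
 * Bits [5:0] carry the bit offset within the quad word; the bits at and
 * above QW_SHIFT select which 64-bit quad word of the header is used.
 */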
168
169 /* defines to build power on SC2VL table */
170 #define SC2VL_VAL( \
171 num, \
172 sc0, sc0val, \
173 sc1, sc1val, \
174 sc2, sc2val, \
175 sc3, sc3val, \
176 sc4, sc4val, \
177 sc5, sc5val, \
178 sc6, sc6val, \
179 sc7, sc7val) \
180 ( \
181 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
182 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
183 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
184 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
185 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
186 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
187 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
188 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
189 )
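/*
 * A hypothetical use of SC2VL_VAL (the SC-to-VL assignments shown are
 * illustrative only): mapping SC0-SC7 all to VL0 for the SendSC2VLt0
 * register could be written as
 *   SC2VL_VAL(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0)
 * where each (sc, val) pair is shifted into that SC's field and the
 * results are OR'd together into a single 64-bit register value.
 */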
190
191 #define DC_SC_VL_VAL( \
192 range, \
193 e0, e0val, \
194 e1, e1val, \
195 e2, e2val, \
196 e3, e3val, \
197 e4, e4val, \
198 e5, e5val, \
199 e6, e6val, \
200 e7, e7val, \
201 e8, e8val, \
202 e9, e9val, \
203 e10, e10val, \
204 e11, e11val, \
205 e12, e12val, \
206 e13, e13val, \
207 e14, e14val, \
208 e15, e15val) \
209 ( \
210 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
211 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
212 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
213 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
214 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
215 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
216 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
217 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
218 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
219 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
220 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
221 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
222 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
223 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
224 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
225 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
226 )
227
228 /* all CceStatus sub-block freeze bits */
229 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
230 | CCE_STATUS_RXE_FROZE_SMASK \
231 | CCE_STATUS_TXE_FROZE_SMASK \
232 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
233 /* all CceStatus sub-block TXE pause bits */
234 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
235 | CCE_STATUS_TXE_PAUSED_SMASK \
236 | CCE_STATUS_SDMA_PAUSED_SMASK)
237 /* all CceStatus sub-block RXE pause bits */
238 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
239
240 /*
241 * CCE Error flags.
242 */
243 static struct flag_table cce_err_status_flags[] = {
244 /* 0*/ FLAG_ENTRY0("CceCsrParityErr",
245 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
246 /* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
247 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
248 /* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
249 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
250 /* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
251 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
252 /* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
253 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
254 /* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
255 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
256 /* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
257 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
258 /* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
259 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
260 /* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
261 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
262 /* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
263 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
264 /*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
265 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
266 /*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
267 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
268 /*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
269 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
270 /*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
271 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
272 /*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
273 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
274 /*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
275 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
276 /*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
277 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
278 /*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
279 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
280 /*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
281 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
282 /*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
283 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
284 /*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
285 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
286 /*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
287 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
288 /*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
289 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
290 /*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
291 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
292 /*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
293 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
294 /*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
295 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
296 /*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
297 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
298 /*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
299 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
300 /*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
301 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
302 /*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
303 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
304 /*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
305 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
306 /*31*/ FLAG_ENTRY0("LATriggered",
307 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
308 /*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
309 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
310 /*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
311 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
312 /*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
313 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
314 /*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
315 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
316 /*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
317 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
318 /*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
319 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
320 /*38*/ FLAG_ENTRY0("CceIntMapCorErr",
321 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
322 /*39*/ FLAG_ENTRY0("CceIntMapUncErr",
323 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
324 /*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
325 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
326 /*41-63 reserved*/
327 };
328
329 /*
330 * Misc Error flags
331 */
332 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
333 static struct flag_table misc_err_status_flags[] = {
334 /* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
335 /* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
336 /* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
337 /* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
338 /* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
339 /* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
340 /* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
341 /* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
342 /* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
343 /* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
344 /*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
345 /*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
346 /*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
347 };
348
349 /*
350 * TXE PIO Error flags and consequences
351 */
352 static struct flag_table pio_err_status_flags[] = {
353 /* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
354 SEC_WRITE_DROPPED,
355 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
356 /* 1*/ FLAG_ENTRY("PioWriteAddrParity",
357 SEC_SPC_FREEZE,
358 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
359 /* 2*/ FLAG_ENTRY("PioCsrParity",
360 SEC_SPC_FREEZE,
361 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
362 /* 3*/ FLAG_ENTRY("PioSbMemFifo0",
363 SEC_SPC_FREEZE,
364 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
365 /* 4*/ FLAG_ENTRY("PioSbMemFifo1",
366 SEC_SPC_FREEZE,
367 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
368 /* 5*/ FLAG_ENTRY("PioPccFifoParity",
369 SEC_SPC_FREEZE,
370 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
371 /* 6*/ FLAG_ENTRY("PioPecFifoParity",
372 SEC_SPC_FREEZE,
373 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
374 /* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
375 SEC_SPC_FREEZE,
376 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
377 /* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
378 SEC_SPC_FREEZE,
379 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
380 /* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
381 SEC_SPC_FREEZE,
382 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
383 /*10*/ FLAG_ENTRY("PioSmPktResetParity",
384 SEC_SPC_FREEZE,
385 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
386 /*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
387 SEC_SPC_FREEZE,
388 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
389 /*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
390 SEC_SPC_FREEZE,
391 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
392 /*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
393 0,
394 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
395 /*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
396 0,
397 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
398 /*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
399 SEC_SPC_FREEZE,
400 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
401 /*16*/ FLAG_ENTRY("PioPpmcPblFifo",
402 SEC_SPC_FREEZE,
403 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
404 /*17*/ FLAG_ENTRY("PioInitSmIn",
405 0,
406 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
407 /*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
408 SEC_SPC_FREEZE,
409 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
410 /*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
411 SEC_SPC_FREEZE,
412 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
413 /*20*/ FLAG_ENTRY("PioHostAddrMemCor",
414 0,
415 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
416 /*21*/ FLAG_ENTRY("PioWriteDataParity",
417 SEC_SPC_FREEZE,
418 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
419 /*22*/ FLAG_ENTRY("PioStateMachine",
420 SEC_SPC_FREEZE,
421 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
422 /*23*/ FLAG_ENTRY("PioWriteQwValidParity",
423 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
424 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
425 /*24*/ FLAG_ENTRY("PioBlockQwCountParity",
426 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
427 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
428 /*25*/ FLAG_ENTRY("PioVlfVlLenParity",
429 SEC_SPC_FREEZE,
430 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
431 /*26*/ FLAG_ENTRY("PioVlfSopParity",
432 SEC_SPC_FREEZE,
433 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
434 /*27*/ FLAG_ENTRY("PioVlFifoParity",
435 SEC_SPC_FREEZE,
436 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
437 /*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
438 SEC_SPC_FREEZE,
439 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
440 /*29*/ FLAG_ENTRY("PioPpmcSopLen",
441 SEC_SPC_FREEZE,
442 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
443 /*30-31 reserved*/
444 /*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
445 SEC_SPC_FREEZE,
446 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
447 /*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
448 SEC_SPC_FREEZE,
449 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
450 /*34*/ FLAG_ENTRY("PioPccSopHeadParity",
451 SEC_SPC_FREEZE,
452 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
453 /*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
454 SEC_SPC_FREEZE,
455 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
456 /*36-63 reserved*/
457 };
458
459 /* TXE PIO errors that cause an SPC freeze */
460 #define ALL_PIO_FREEZE_ERR \
461 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
462 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
463 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
490
491 /*
492 * TXE SDMA Error flags
493 */
494 static struct flag_table sdma_err_status_flags[] = {
495 /* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
496 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
497 /* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
498 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
499 /* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
500 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
501 /* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
502 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
503 /*04-63 reserved*/
504 };
505
506 /* TXE SDMA errors that cause an SPC freeze */
507 #define ALL_SDMA_FREEZE_ERR \
508 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
509 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
510 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
511
512 /*
513 * TXE Egress Error flags
514 */
515 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
516 static struct flag_table egress_err_status_flags[] = {
517 /* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
518 /* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
519 /* 2 reserved */
520 /* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
521 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
522 /* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
523 /* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
524 /* 6 reserved */
525 /* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
526 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
527 /* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
528 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
529 /* 9-10 reserved */
530 /*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
531 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
532 /*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
533 /*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
534 /*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
535 /*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
536 /*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
537 SEES(TX_SDMA0_DISALLOWED_PACKET)),
538 /*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
539 SEES(TX_SDMA1_DISALLOWED_PACKET)),
540 /*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
541 SEES(TX_SDMA2_DISALLOWED_PACKET)),
542 /*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
543 SEES(TX_SDMA3_DISALLOWED_PACKET)),
544 /*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
545 SEES(TX_SDMA4_DISALLOWED_PACKET)),
546 /*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
547 SEES(TX_SDMA5_DISALLOWED_PACKET)),
548 /*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
549 SEES(TX_SDMA6_DISALLOWED_PACKET)),
550 /*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
551 SEES(TX_SDMA7_DISALLOWED_PACKET)),
552 /*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
553 SEES(TX_SDMA8_DISALLOWED_PACKET)),
554 /*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
555 SEES(TX_SDMA9_DISALLOWED_PACKET)),
556 /*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
557 SEES(TX_SDMA10_DISALLOWED_PACKET)),
558 /*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
559 SEES(TX_SDMA11_DISALLOWED_PACKET)),
560 /*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
561 SEES(TX_SDMA12_DISALLOWED_PACKET)),
562 /*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
563 SEES(TX_SDMA13_DISALLOWED_PACKET)),
564 /*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
565 SEES(TX_SDMA14_DISALLOWED_PACKET)),
566 /*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
567 SEES(TX_SDMA15_DISALLOWED_PACKET)),
568 /*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
569 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
570 /*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
571 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
572 /*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
573 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
574 /*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
575 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
576 /*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
577 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
578 /*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
579 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
580 /*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
581 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
582 /*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
583 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
584 /*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
585 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
586 /*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
587 /*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
588 /*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
589 /*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
590 /*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
591 /*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
592 /*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
593 /*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
594 /*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
595 /*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
596 /*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
597 /*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
598 /*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
599 /*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
600 /*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
601 /*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
602 /*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
603 /*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
604 /*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
605 /*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
606 /*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
607 /*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
608 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
609 /*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
610 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
611 };
612
613 /*
614 * TXE Egress Error Info flags
615 */
616 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
617 static struct flag_table egress_err_info_flags[] = {
618 /* 0*/ FLAG_ENTRY0("Reserved", 0ull),
619 /* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
620 /* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
621 /* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
622 /* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
623 /* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
624 /* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
625 /* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
626 /* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
627 /* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
628 /*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
629 /*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
630 /*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
631 /*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
632 /*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
633 /*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
634 /*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
635 /*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
636 /*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
637 /*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
638 /*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
639 /*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
640 };
641
642 /* TXE Egress errors that cause an SPC freeze */
643 #define ALL_TXE_EGRESS_FREEZE_ERR \
644 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
645 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
646 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
647 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
648 | SEES(TX_LAUNCH_CSR_PARITY) \
649 | SEES(TX_SBRD_CTL_CSR_PARITY) \
650 | SEES(TX_CONFIG_PARITY) \
651 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
652 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
653 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
654 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
655 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
656 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
657 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
658 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
659 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
660 | SEES(TX_CREDIT_RETURN_PARITY))
661
662 /*
663 * TXE Send error flags
664 */
665 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
666 static struct flag_table send_err_status_flags[] = {
667 /* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
668 /* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
669 /* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
670 };
671
672 /*
673 * TXE Send Context Error flags and consequences
674 */
675 static struct flag_table sc_err_status_flags[] = {
676 /* 0*/ FLAG_ENTRY("InconsistentSop",
677 SEC_PACKET_DROPPED | SEC_SC_HALTED,
678 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
679 /* 1*/ FLAG_ENTRY("DisallowedPacket",
680 SEC_PACKET_DROPPED | SEC_SC_HALTED,
681 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
682 /* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
683 SEC_WRITE_DROPPED | SEC_SC_HALTED,
684 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
685 /* 3*/ FLAG_ENTRY("WriteOverflow",
686 SEC_WRITE_DROPPED | SEC_SC_HALTED,
687 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
688 /* 4*/ FLAG_ENTRY("WriteOutOfBounds",
689 SEC_WRITE_DROPPED | SEC_SC_HALTED,
690 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
691 /* 5-63 reserved*/
692 };
693
694 /*
695 * RXE Receive Error flags
696 */
697 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
698 static struct flag_table rxe_err_status_flags[] = {
699 /* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
700 /* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
701 /* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
702 /* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
703 /* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
704 /* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
705 /* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
706 /* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
707 /* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
708 /* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
709 /*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
710 /*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
711 /*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
712 /*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
713 /*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
714 /*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
715 /*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
716 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
717 /*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
718 /*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
719 /*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
720 RXES(RBUF_BLOCK_LIST_READ_UNC)),
721 /*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
722 RXES(RBUF_BLOCK_LIST_READ_COR)),
723 /*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
724 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
725 /*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
726 RXES(RBUF_CSR_QENT_CNT_PARITY)),
727 /*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
728 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
729 /*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
730 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
731 /*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
732 /*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
733 /*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
734 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
735 /*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
736 /*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
737 /*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
738 /*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
739 /*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
740 /*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
741 /*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
742 /*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
743 RXES(RBUF_FL_INITDONE_PARITY)),
744 /*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
745 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
746 /*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
747 /*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
748 /*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
749 /*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
750 RXES(LOOKUP_DES_PART1_UNC_COR)),
751 /*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
752 RXES(LOOKUP_DES_PART2_PARITY)),
753 /*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
754 /*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
755 /*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
756 /*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
757 /*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
758 /*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
759 /*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
760 /*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
761 /*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
762 /*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
763 /*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
764 /*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
765 /*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
766 /*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
767 /*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
768 /*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
769 /*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
770 /*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
771 /*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
772 /*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
773 /*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
774 /*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
775 };
776
777 /* RXE errors that will trigger an SPC freeze */
778 #define ALL_RXE_FREEZE_ERR \
779 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
780 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
781 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
782 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
783 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
784 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
785 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
786 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
787 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
823
824 #define RXE_FREEZE_ABORT_MASK \
825 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
826 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
827 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
828
829 /*
830 * DCC Error Flags
831 */
832 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
833 static struct flag_table dcc_err_flags[] = {
834 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
835 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
836 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
837 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
838 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
839 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
840 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
841 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
842 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
843 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
844 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
845 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
846 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
847 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
848 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
849 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
850 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
851 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
852 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
853 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
854 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
855 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
856 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
857 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
858 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
859 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
860 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
861 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
862 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
863 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
864 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
865 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
866 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
867 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
868 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
869 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
870 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
871 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
872 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
873 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
874 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
875 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
876 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
877 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
878 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
879 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
880 };
881
882 /*
883 * LCB error flags
884 */
885 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
886 static struct flag_table lcb_err_flags[] = {
887 /* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
888 /* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
889 /* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
890 /* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
891 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
892 /* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
893 /* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
894 /* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
895 /* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
896 /* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
897 /* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
898 /*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
899 /*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
900 /*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
901 /*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
902 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
903 /*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
904 /*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
905 /*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
906 /*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
907 /*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
908 /*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
909 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
910 /*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
911 /*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
912 /*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
913 /*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
914 /*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
915 /*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
916 /*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
917 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
918 /*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
919 /*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
920 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
921 /*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
922 LCBE(REDUNDANT_FLIT_PARITY_ERR))
923 };
924
925 /*
926 * DC8051 Error Flags
927 */
928 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
929 static struct flag_table dc8051_err_flags[] = {
930 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
931 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
932 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
933 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
934 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
935 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
936 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
937 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
938 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
939 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
940 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
941 };
942
943 /*
944 * DC8051 Information Error flags
945 *
946 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
947 */
948 static struct flag_table dc8051_info_err_flags[] = {
949 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
950 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
951 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
952 FLAG_ENTRY0("Serdes internal loopback failure",
953 FAILED_SERDES_INTERNAL_LOOPBACK),
954 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
955 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
956 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
957 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
958 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
959 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
960 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
961 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
962 };
963
964 /*
965 * DC8051 Information Host Message flags
966 *
967 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
968 */
969 static struct flag_table dc8051_info_host_msg_flags[] = {
970 FLAG_ENTRY0("Host request done", 0x0001),
971 FLAG_ENTRY0("BC SMA message", 0x0002),
972 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
973 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
974 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
975 FLAG_ENTRY0("External device config request", 0x0020),
976 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
977 FLAG_ENTRY0("LinkUp achieved", 0x0080),
978 FLAG_ENTRY0("Link going down", 0x0100),
979 };
980
981
982 static u32 encoded_size(u32 size);
983 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
984 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
985 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
986 u8 *continuous);
987 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
988 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
989 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
990 u8 *remote_tx_rate, u16 *link_widths);
991 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
992 u8 *flag_bits, u16 *link_widths);
993 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
994 u8 *device_rev);
995 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
996 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
997 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
998 u8 *tx_polarity_inversion,
999 u8 *rx_polarity_inversion, u8 *max_rate);
1000 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1001 unsigned int context, u64 err_status);
1002 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1003 static void handle_dcc_err(struct hfi1_devdata *dd,
1004 unsigned int context, u64 err_status);
1005 static void handle_lcb_err(struct hfi1_devdata *dd,
1006 unsigned int context, u64 err_status);
1007 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1008 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1009 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1010 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1011 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1012 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1013 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1014 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015 static void set_partition_keys(struct hfi1_pportdata *);
1016 static const char *link_state_name(u32 state);
1017 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1018 u32 state);
1019 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1020 u64 *out_data);
1021 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1022 static int thermal_init(struct hfi1_devdata *dd);
1023
1024 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1025 int msecs);
1026 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1027 static void handle_temp_err(struct hfi1_devdata *);
1028 static void dc_shutdown(struct hfi1_devdata *);
1029 static void dc_start(struct hfi1_devdata *);
1030
1031 /*
1032 * Error interrupt table entry. This is used as input to the interrupt
1033 * "clear down" routine used for all second tier error interrupt registers.
1034 * Second tier interrupt registers have a single bit representing them
1035 * in the top-level CceIntStatus.
1036 */
1037 struct err_reg_info {
1038 u32 status; /* status CSR offset */
1039 u32 clear; /* clear CSR offset */
1040 u32 mask; /* mask CSR offset */
1041 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1042 const char *desc;
1043 };
1044
1045 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1046 #define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1047 #define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1048
1049 /*
1050 * Helpers for building HFI and DC error interrupt table entries. Different
1051 * helpers are needed because of inconsistent register names.
1052 */
1053 #define EE(reg, handler, desc) \
1054 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1055 handler, desc }
1056 #define DC_EE1(reg, handler, desc) \
1057 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1058 #define DC_EE2(reg, handler, desc) \
1059 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
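/*
 * For illustration, the table entries below expand as:
 *   EE(CCE_ERR, handle_cce_err, "CceErr")
 *     -> { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *          handle_cce_err, "CceErr" }
 * while DC_EE1/DC_EE2 cover the DC blocks whose CSRs use the _FLG,
 * _FLG_CLR/_CLR and _FLG_EN/_EN name suffixes instead.
 */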
1060
1061 /*
1062 * Table of the "misc" grouping of error interrupts. Each entry refers to
1063 * another register containing more information.
1064 */
1065 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1066 /* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1067 /* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1068 /* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1069 /* 3*/ { 0, 0, 0, NULL }, /* reserved */
1070 /* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1071 /* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1072 /* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1073 /* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1074 /* the rest are reserved */
1075 };
1076
1077 /*
1078 * Index into the Various section of the interrupt sources
1079 * corresponding to the Critical Temperature interrupt.
1080 */
1081 #define TCRIT_INT_SOURCE 4
1082
1083 /*
1084 * SDMA error interrupt entry - refers to another register containing more
1085 * information.
1086 */
1087 static const struct err_reg_info sdma_eng_err =
1088 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1089
1090 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1091 /* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1092 /* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1093 /* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1094 /* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1095 /* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1096 /* rest are reserved */
1097 };
1098
1099 /*
1100 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1101 * register can not be derived from the MTU value because 10K is not
1102 * a power of 2. Therefore, we need a constant. Everything else can
1103 * be calculated.
1104 */
1105 #define DCC_CFG_PORT_MTU_CAP_10240 7
1106
1107 /*
1108 * Table of the DC grouping of error interrupts. Each entry refers to
1109 * another register containing more information.
1110 */
1111 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1112 /* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1113 /* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1114 /* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1115 /* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1116 /* the rest are reserved */
1117 };
1118
1119 struct cntr_entry {
1120 /*
1121 * counter name
1122 */
1123 char *name;
1124
1125 /*
1126 * csr to read for name (if applicable)
1127 */
1128 u64 csr;
1129
1130 /*
1131 * offset into dd or ppd to store the counter's value
1132 */
1133 int offset;
1134
1135 /*
1136 * flags
1137 */
1138 u8 flags;
1139
1140 /*
1141 * accessor for stat element, context either dd or ppd
1142 */
1143 u64 (*rw_cntr)(const struct cntr_entry *,
1144 void *context,
1145 int vl,
1146 int mode,
1147 u64 data);
1148 };
1149
1150 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1151 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1152
1153 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1154 { \
1155 name, \
1156 csr, \
1157 offset, \
1158 flags, \
1159 accessor \
1160 }
1161
1162 /* 32bit RXE */
1163 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1164 CNTR_ELEM(#name, \
1165 (counter * 8 + RCV_COUNTER_ARRAY32), \
1166 0, flags | CNTR_32BIT, \
1167 port_access_u32_csr)
1168
1169 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1170 CNTR_ELEM(#name, \
1171 (counter * 8 + RCV_COUNTER_ARRAY32), \
1172 0, flags | CNTR_32BIT, \
1173 dev_access_u32_csr)
1174
1175 /* 64bit RXE */
1176 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1177 CNTR_ELEM(#name, \
1178 (counter * 8 + RCV_COUNTER_ARRAY64), \
1179 0, flags, \
1180 port_access_u64_csr)
1181
1182 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1183 CNTR_ELEM(#name, \
1184 (counter * 8 + RCV_COUNTER_ARRAY64), \
1185 0, flags, \
1186 dev_access_u64_csr)
1187
1188 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1189 #define OVR_ELM(ctx) \
1190 CNTR_ELEM("RcvHdrOvr" #ctx, \
1191 (RCV_HDR_OVFL_CNT + ctx*0x100), \
1192 0, CNTR_NORMAL, port_access_u64_csr)
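/*
 * For illustration, OVR_ELM(3) expands to
 *   CNTR_ELEM("RcvHdrOvr3", RCV_HDR_OVFL_CNT + 3 * 0x100, 0,
 *             CNTR_NORMAL, port_access_u64_csr)
 * i.e. one overflow counter per receive context, with contexts spaced
 * 0x100 bytes apart in CSR space.
 */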
1193
1194 /* 32bit TXE */
1195 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1196 CNTR_ELEM(#name, \
1197 (counter * 8 + SEND_COUNTER_ARRAY32), \
1198 0, flags | CNTR_32BIT, \
1199 port_access_u32_csr)
1200
1201 /* 64bit TXE */
1202 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1203 CNTR_ELEM(#name, \
1204 (counter * 8 + SEND_COUNTER_ARRAY64), \
1205 0, flags, \
1206 port_access_u64_csr)
1207
1208 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1209 CNTR_ELEM(#name,\
1210 counter * 8 + SEND_COUNTER_ARRAY64, \
1211 0, \
1212 flags, \
1213 dev_access_u64_csr)
1214
1215 /* CCE */
1216 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1217 CNTR_ELEM(#name, \
1218 (counter * 8 + CCE_COUNTER_ARRAY32), \
1219 0, flags | CNTR_32BIT, \
1220 dev_access_u32_csr)
1221
1222 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1223 CNTR_ELEM(#name, \
1224 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1225 0, flags | CNTR_32BIT, \
1226 dev_access_u32_csr)
1227
1228 /* DC */
1229 #define DC_PERF_CNTR(name, counter, flags) \
1230 CNTR_ELEM(#name, \
1231 counter, \
1232 0, \
1233 flags, \
1234 dev_access_u64_csr)
1235
1236 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1237 CNTR_ELEM(#name, \
1238 counter, \
1239 0, \
1240 flags, \
1241 dc_access_lcb_cntr)
1242
1243 /* ibp counters */
1244 #define SW_IBP_CNTR(name, cntr) \
1245 CNTR_ELEM(#name, \
1246 0, \
1247 0, \
1248 CNTR_SYNTH, \
1249 access_ibp_##cntr)
1250
1251 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1252 {
1253 u64 val;
1254
1255 if (dd->flags & HFI1_PRESENT) {
1256 val = readq((void __iomem *)dd->kregbase + offset);
1257 return val;
1258 }
1259 return -1;
1260 }
1261
1262 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1263 {
1264 if (dd->flags & HFI1_PRESENT)
1265 writeq(value, (void __iomem *)dd->kregbase + offset);
1266 }
1267
1268 void __iomem *get_csr_addr(
1269 struct hfi1_devdata *dd,
1270 u32 offset)
1271 {
1272 return (void __iomem *)dd->kregbase + offset;
1273 }
1274
1275 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1276 int mode, u64 value)
1277 {
1278 u64 ret;
1279
1280
1281 if (mode == CNTR_MODE_R) {
1282 ret = read_csr(dd, csr);
1283 } else if (mode == CNTR_MODE_W) {
1284 write_csr(dd, csr, value);
1285 ret = value;
1286 } else {
1287 dd_dev_err(dd, "Invalid cntr register access mode");
1288 return 0;
1289 }
1290
1291 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1292 return ret;
1293 }
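/*
 * For illustration (hypothetical call sites): an accessor samples a CSR
 * with read_write_csr(dd, entry->csr, CNTR_MODE_R, 0) -- the data
 * argument is ignored on reads -- and writes one (typically to clear it)
 * with read_write_csr(dd, entry->csr, CNTR_MODE_W, 0).
 */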
1294
1295 /* Dev Access */
1296 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1297 void *context, int vl, int mode, u64 data)
1298 {
1299 struct hfi1_devdata *dd = context;
1300 u64 csr = entry->csr;
1301
1302 if (entry->flags & CNTR_SDMA) {
1303 if (vl == CNTR_INVALID_VL)
1304 return 0;
1305 csr += 0x100 * vl;
1306 } else {
1307 if (vl != CNTR_INVALID_VL)
1308 return 0;
1309 }
1310 return read_write_csr(dd, csr, mode, data);
1311 }
1312
1313 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1314 void *context, int idx, int mode, u64 data)
1315 {
1316 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1317
1318 if (dd->per_sdma && idx < dd->num_sdma)
1319 return dd->per_sdma[idx].err_cnt;
1320 return 0;
1321 }
1322
1323 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1324 void *context, int idx, int mode, u64 data)
1325 {
1326 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1327
1328 if (dd->per_sdma && idx < dd->num_sdma)
1329 return dd->per_sdma[idx].sdma_int_cnt;
1330 return 0;
1331 }
1332
1333 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1334 void *context, int idx, int mode, u64 data)
1335 {
1336 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1337
1338 if (dd->per_sdma && idx < dd->num_sdma)
1339 return dd->per_sdma[idx].idle_int_cnt;
1340 return 0;
1341 }
1342
1343 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1344 void *context, int idx, int mode,
1345 u64 data)
1346 {
1347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1348
1349 if (dd->per_sdma && idx < dd->num_sdma)
1350 return dd->per_sdma[idx].progress_int_cnt;
1351 return 0;
1352 }
1353
1354 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1355 int vl, int mode, u64 data)
1356 {
1357 struct hfi1_devdata *dd = context;
1358
1359 u64 val = 0;
1360 u64 csr = entry->csr;
1361
1362 if (entry->flags & CNTR_VL) {
1363 if (vl == CNTR_INVALID_VL)
1364 return 0;
1365 csr += 8 * vl;
1366 } else {
1367 if (vl != CNTR_INVALID_VL)
1368 return 0;
1369 }
1370
1371 val = read_write_csr(dd, csr, mode, data);
1372 return val;
1373 }
1374
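/*
 * LCB counters go through read_lcb_csr()/write_lcb_csr(), which can fail
 * if the LCB is not currently accessible; on failure the error is logged
 * and 0 is returned.
 */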
1375 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1376 int vl, int mode, u64 data)
1377 {
1378 struct hfi1_devdata *dd = context;
1379 u32 csr = entry->csr;
1380 int ret = 0;
1381
1382 if (vl != CNTR_INVALID_VL)
1383 return 0;
1384 if (mode == CNTR_MODE_R)
1385 ret = read_lcb_csr(dd, csr, &data);
1386 else if (mode == CNTR_MODE_W)
1387 ret = write_lcb_csr(dd, csr, data);
1388
1389 if (ret) {
1390 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1391 return 0;
1392 }
1393
1394 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1395 return data;
1396 }
1397
1398 /* Port Access */
1399 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1400 int vl, int mode, u64 data)
1401 {
1402 struct hfi1_pportdata *ppd = context;
1403
1404 if (vl != CNTR_INVALID_VL)
1405 return 0;
1406 return read_write_csr(ppd->dd, entry->csr, mode, data);
1407 }
1408
1409 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1410 void *context, int vl, int mode, u64 data)
1411 {
1412 struct hfi1_pportdata *ppd = context;
1413 u64 val;
1414 u64 csr = entry->csr;
1415
1416 if (entry->flags & CNTR_VL) {
1417 if (vl == CNTR_INVALID_VL)
1418 return 0;
1419 csr += 8 * vl;
1420 } else {
1421 if (vl != CNTR_INVALID_VL)
1422 return 0;
1423 }
1424 val = read_write_csr(ppd->dd, csr, mode, data);
1425 return val;
1426 }
1427
1428 /* Software defined */
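/*
 * read_write_sw() applies the same CNTR_MODE_R/CNTR_MODE_W convention to
 * a plain in-memory u64 counter instead of a CSR.
 */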
1429 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1430 u64 data)
1431 {
1432 u64 ret;
1433
1434 if (mode == CNTR_MODE_R) {
1435 ret = *cntr;
1436 } else if (mode == CNTR_MODE_W) {
1437 *cntr = data;
1438 ret = data;
1439 } else {
1440 dd_dev_err(dd, "Invalid cntr sw access mode");
1441 return 0;
1442 }
1443
1444 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1445
1446 return ret;
1447 }
1448
1449 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1450 int vl, int mode, u64 data)
1451 {
1452 struct hfi1_pportdata *ppd = context;
1453
1454 if (vl != CNTR_INVALID_VL)
1455 return 0;
1456 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1457 }
1458
1459 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1460 int vl, int mode, u64 data)
1461 {
1462 struct hfi1_pportdata *ppd = context;
1463
1464 if (vl != CNTR_INVALID_VL)
1465 return 0;
1466 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1467 }
1468
1469 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1470 void *context, int vl, int mode,
1471 u64 data)
1472 {
1473 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1474
1475 if (vl != CNTR_INVALID_VL)
1476 return 0;
1477 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1478 }
1479
1480 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1481 void *context, int vl, int mode, u64 data)
1482 {
1483 struct hfi1_pportdata *ppd = context;
1484
1485 if (vl != CNTR_INVALID_VL)
1486 return 0;
1487
1488 return read_write_sw(ppd->dd, &ppd->port_xmit_discards, mode, data);
1489 }
1490
1491 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1492 void *context, int vl, int mode, u64 data)
1493 {
1494 struct hfi1_pportdata *ppd = context;
1495
1496 if (vl != CNTR_INVALID_VL)
1497 return 0;
1498
1499 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1500 mode, data);
1501 }
1502
1503 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1504 void *context, int vl, int mode, u64 data)
1505 {
1506 struct hfi1_pportdata *ppd = context;
1507
1508 if (vl != CNTR_INVALID_VL)
1509 return 0;
1510
1511 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1512 mode, data);
1513 }
1514
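/*
 * Per-CPU counters: get_all_cpu_total() sums the per-CPU contributions
 * and read_write_cpu() reports that total relative to the saved zero
 * value *z_val.  Writing 0 "zeroes" the counter by recording the current
 * total as the new baseline; any other write is rejected with an error.
 */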
1515 u64 get_all_cpu_total(u64 __percpu *cntr)
1516 {
1517 int cpu;
1518 u64 counter = 0;
1519
1520 for_each_possible_cpu(cpu)
1521 counter += *per_cpu_ptr(cntr, cpu);
1522 return counter;
1523 }
1524
1525 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1526 u64 __percpu *cntr,
1527 int vl, int mode, u64 data)
1528 {
1530 u64 ret = 0;
1531
1532 if (vl != CNTR_INVALID_VL)
1533 return 0;
1534
1535 if (mode == CNTR_MODE_R) {
1536 ret = get_all_cpu_total(cntr) - *z_val;
1537 } else if (mode == CNTR_MODE_W) {
1538 /* A write can only zero the counter */
1539 if (data == 0)
1540 *z_val = get_all_cpu_total(cntr);
1541 else
1542 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1543 } else {
1544 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1545 return 0;
1546 }
1547
1548 return ret;
1549 }
1550
1551 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1552 void *context, int vl, int mode, u64 data)
1553 {
1554 struct hfi1_devdata *dd = context;
1555
1556 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1557 mode, data);
1558 }
1559
1560 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1561 void *context, int vl, int mode, u64 data)
1562 {
1563 struct hfi1_devdata *dd = context;
1564
1565 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1566 mode, data);
1567 }
1568
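/*
 * Snapshot accessors for the wait/scheduling counters kept in
 * dd->verbs_dev; these ignore the vl, mode and data arguments.
 */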
1569 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1570 void *context, int vl, int mode, u64 data)
1571 {
1572 struct hfi1_devdata *dd = context;
1573
1574 return dd->verbs_dev.n_piowait;
1575 }
1576
1577 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1578 void *context, int vl, int mode, u64 data)
1579 {
1580 struct hfi1_devdata *dd = context;
1581
1582 return dd->verbs_dev.n_txwait;
1583 }
1584
1585 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1586 void *context, int vl, int mode, u64 data)
1587 {
1588 struct hfi1_devdata *dd = context;
1589
1590 return dd->verbs_dev.n_kmem_wait;
1591 }
1592
1593 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1594 void *context, int vl, int mode, u64 data)
1595 {
1596 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1597
1598 return dd->verbs_dev.n_send_schedule;
1599 }
1600
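/*
 * Each *_err_status_cnt[] entry below shadows a single error status bit;
 * the array index corresponds to that bit's position in the register
 * named in the comment above each group.
 */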
1601 /* Software counters for the error status bits within MISC_ERR_STATUS */
1602 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1603 void *context, int vl, int mode,
1604 u64 data)
1605 {
1606 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1607
1608 return dd->misc_err_status_cnt[12];
1609 }
1610
1611 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1612 void *context, int vl, int mode,
1613 u64 data)
1614 {
1615 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1616
1617 return dd->misc_err_status_cnt[11];
1618 }
1619
1620 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1621 void *context, int vl, int mode,
1622 u64 data)
1623 {
1624 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1625
1626 return dd->misc_err_status_cnt[10];
1627 }
1628
1629 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1630 void *context, int vl,
1631 int mode, u64 data)
1632 {
1633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1634
1635 return dd->misc_err_status_cnt[9];
1636 }
1637
1638 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1639 void *context, int vl, int mode,
1640 u64 data)
1641 {
1642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1643
1644 return dd->misc_err_status_cnt[8];
1645 }
1646
1647 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1648 const struct cntr_entry *entry,
1649 void *context, int vl, int mode, u64 data)
1650 {
1651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1652
1653 return dd->misc_err_status_cnt[7];
1654 }
1655
1656 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1657 void *context, int vl,
1658 int mode, u64 data)
1659 {
1660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1661
1662 return dd->misc_err_status_cnt[6];
1663 }
1664
1665 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1666 void *context, int vl, int mode,
1667 u64 data)
1668 {
1669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1670
1671 return dd->misc_err_status_cnt[5];
1672 }
1673
1674 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1675 void *context, int vl, int mode,
1676 u64 data)
1677 {
1678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1679
1680 return dd->misc_err_status_cnt[4];
1681 }
1682
1683 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1684 void *context, int vl,
1685 int mode, u64 data)
1686 {
1687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1688
1689 return dd->misc_err_status_cnt[3];
1690 }
1691
1692 static u64 access_misc_csr_write_bad_addr_err_cnt(
1693 const struct cntr_entry *entry,
1694 void *context, int vl, int mode, u64 data)
1695 {
1696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1697
1698 return dd->misc_err_status_cnt[2];
1699 }
1700
1701 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1702 void *context, int vl,
1703 int mode, u64 data)
1704 {
1705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1706
1707 return dd->misc_err_status_cnt[1];
1708 }
1709
1710 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1711 void *context, int vl, int mode,
1712 u64 data)
1713 {
1714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1715
1716 return dd->misc_err_status_cnt[0];
1717 }
1718
1719 /*
1720 * Software counter for the aggregate of
1721 * individual CceErrStatus counters
1722 */
1723 static u64 access_sw_cce_err_status_aggregated_cnt(
1724 const struct cntr_entry *entry,
1725 void *context, int vl, int mode, u64 data)
1726 {
1727 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728
1729 return dd->sw_cce_err_status_aggregate;
1730 }
1731
1732 /*
1733 * Software counters corresponding to each of the
1734 * error status bits within CceErrStatus
1735 */
1736 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1737 void *context, int vl, int mode,
1738 u64 data)
1739 {
1740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1741
1742 return dd->cce_err_status_cnt[40];
1743 }
1744
1745 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1746 void *context, int vl, int mode,
1747 u64 data)
1748 {
1749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1750
1751 return dd->cce_err_status_cnt[39];
1752 }
1753
1754 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1755 void *context, int vl, int mode,
1756 u64 data)
1757 {
1758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1759
1760 return dd->cce_err_status_cnt[38];
1761 }
1762
1763 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1764 void *context, int vl, int mode,
1765 u64 data)
1766 {
1767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1768
1769 return dd->cce_err_status_cnt[37];
1770 }
1771
1772 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1773 void *context, int vl, int mode,
1774 u64 data)
1775 {
1776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1777
1778 return dd->cce_err_status_cnt[36];
1779 }
1780
1781 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1782 const struct cntr_entry *entry,
1783 void *context, int vl, int mode, u64 data)
1784 {
1785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1786
1787 return dd->cce_err_status_cnt[35];
1788 }
1789
1790 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1791 const struct cntr_entry *entry,
1792 void *context, int vl, int mode, u64 data)
1793 {
1794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1795
1796 return dd->cce_err_status_cnt[34];
1797 }
1798
1799 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1800 void *context, int vl,
1801 int mode, u64 data)
1802 {
1803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1804
1805 return dd->cce_err_status_cnt[33];
1806 }
1807
1808 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1809 void *context, int vl, int mode,
1810 u64 data)
1811 {
1812 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1813
1814 return dd->cce_err_status_cnt[32];
1815 }
1816
1817 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1818 void *context, int vl, int mode, u64 data)
1819 {
1820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1821
1822 return dd->cce_err_status_cnt[31];
1823 }
1824
1825 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1826 void *context, int vl, int mode,
1827 u64 data)
1828 {
1829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1830
1831 return dd->cce_err_status_cnt[30];
1832 }
1833
1834 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1835 void *context, int vl, int mode,
1836 u64 data)
1837 {
1838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1839
1840 return dd->cce_err_status_cnt[29];
1841 }
1842
1843 static u64 access_pcic_transmit_back_parity_err_cnt(
1844 const struct cntr_entry *entry,
1845 void *context, int vl, int mode, u64 data)
1846 {
1847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1848
1849 return dd->cce_err_status_cnt[28];
1850 }
1851
1852 static u64 access_pcic_transmit_front_parity_err_cnt(
1853 const struct cntr_entry *entry,
1854 void *context, int vl, int mode, u64 data)
1855 {
1856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1857
1858 return dd->cce_err_status_cnt[27];
1859 }
1860
1861 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1862 void *context, int vl, int mode,
1863 u64 data)
1864 {
1865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1866
1867 return dd->cce_err_status_cnt[26];
1868 }
1869
1870 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1871 void *context, int vl, int mode,
1872 u64 data)
1873 {
1874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1875
1876 return dd->cce_err_status_cnt[25];
1877 }
1878
1879 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1880 void *context, int vl, int mode,
1881 u64 data)
1882 {
1883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1884
1885 return dd->cce_err_status_cnt[24];
1886 }
1887
1888 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1889 void *context, int vl, int mode,
1890 u64 data)
1891 {
1892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1893
1894 return dd->cce_err_status_cnt[23];
1895 }
1896
1897 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1898 void *context, int vl,
1899 int mode, u64 data)
1900 {
1901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1902
1903 return dd->cce_err_status_cnt[22];
1904 }
1905
1906 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1907 void *context, int vl, int mode,
1908 u64 data)
1909 {
1910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1911
1912 return dd->cce_err_status_cnt[21];
1913 }
1914
1915 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1916 const struct cntr_entry *entry,
1917 void *context, int vl, int mode, u64 data)
1918 {
1919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1920
1921 return dd->cce_err_status_cnt[20];
1922 }
1923
1924 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1925 void *context, int vl,
1926 int mode, u64 data)
1927 {
1928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1929
1930 return dd->cce_err_status_cnt[19];
1931 }
1932
1933 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1934 void *context, int vl, int mode,
1935 u64 data)
1936 {
1937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1938
1939 return dd->cce_err_status_cnt[18];
1940 }
1941
1942 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1943 void *context, int vl, int mode,
1944 u64 data)
1945 {
1946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1947
1948 return dd->cce_err_status_cnt[17];
1949 }
1950
1951 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1952 void *context, int vl, int mode,
1953 u64 data)
1954 {
1955 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1956
1957 return dd->cce_err_status_cnt[16];
1958 }
1959
1960 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1961 void *context, int vl, int mode,
1962 u64 data)
1963 {
1964 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1965
1966 return dd->cce_err_status_cnt[15];
1967 }
1968
1969 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1970 void *context, int vl,
1971 int mode, u64 data)
1972 {
1973 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1974
1975 return dd->cce_err_status_cnt[14];
1976 }
1977
1978 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1979 void *context, int vl, int mode,
1980 u64 data)
1981 {
1982 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1983
1984 return dd->cce_err_status_cnt[13];
1985 }
1986
1987 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
1988 const struct cntr_entry *entry,
1989 void *context, int vl, int mode, u64 data)
1990 {
1991 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1992
1993 return dd->cce_err_status_cnt[12];
1994 }
1995
1996 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
1997 const struct cntr_entry *entry,
1998 void *context, int vl, int mode, u64 data)
1999 {
2000 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2001
2002 return dd->cce_err_status_cnt[11];
2003 }
2004
2005 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2006 const struct cntr_entry *entry,
2007 void *context, int vl, int mode, u64 data)
2008 {
2009 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2010
2011 return dd->cce_err_status_cnt[10];
2012 }
2013
2014 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2015 const struct cntr_entry *entry,
2016 void *context, int vl, int mode, u64 data)
2017 {
2018 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2019
2020 return dd->cce_err_status_cnt[9];
2021 }
2022
2023 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2024 const struct cntr_entry *entry,
2025 void *context, int vl, int mode, u64 data)
2026 {
2027 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2028
2029 return dd->cce_err_status_cnt[8];
2030 }
2031
2032 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2033 void *context, int vl,
2034 int mode, u64 data)
2035 {
2036 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2037
2038 return dd->cce_err_status_cnt[7];
2039 }
2040
2041 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2042 const struct cntr_entry *entry,
2043 void *context, int vl, int mode, u64 data)
2044 {
2045 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2046
2047 return dd->cce_err_status_cnt[6];
2048 }
2049
2050 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2051 void *context, int vl, int mode,
2052 u64 data)
2053 {
2054 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2055
2056 return dd->cce_err_status_cnt[5];
2057 }
2058
2059 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2060 void *context, int vl, int mode,
2061 u64 data)
2062 {
2063 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2064
2065 return dd->cce_err_status_cnt[4];
2066 }
2067
2068 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2069 const struct cntr_entry *entry,
2070 void *context, int vl, int mode, u64 data)
2071 {
2072 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2073
2074 return dd->cce_err_status_cnt[3];
2075 }
2076
2077 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2078 void *context, int vl,
2079 int mode, u64 data)
2080 {
2081 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2082
2083 return dd->cce_err_status_cnt[2];
2084 }
2085
2086 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2087 void *context, int vl,
2088 int mode, u64 data)
2089 {
2090 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2091
2092 return dd->cce_err_status_cnt[1];
2093 }
2094
2095 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2096 void *context, int vl, int mode,
2097 u64 data)
2098 {
2099 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2100
2101 return dd->cce_err_status_cnt[0];
2102 }
2103
2104 /*
2105 * Software counters corresponding to each of the
2106 * error status bits within RcvErrStatus
2107 */
2108 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2109 void *context, int vl, int mode,
2110 u64 data)
2111 {
2112 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113
2114 return dd->rcv_err_status_cnt[63];
2115 }
2116
2117 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2118 void *context, int vl,
2119 int mode, u64 data)
2120 {
2121 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2122
2123 return dd->rcv_err_status_cnt[62];
2124 }
2125
2126 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2127 void *context, int vl, int mode,
2128 u64 data)
2129 {
2130 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131
2132 return dd->rcv_err_status_cnt[61];
2133 }
2134
2135 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2136 void *context, int vl, int mode,
2137 u64 data)
2138 {
2139 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141 return dd->rcv_err_status_cnt[60];
2142 }
2143
2144 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2145 void *context, int vl,
2146 int mode, u64 data)
2147 {
2148 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150 return dd->rcv_err_status_cnt[59];
2151 }
2152
2153 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2154 void *context, int vl,
2155 int mode, u64 data)
2156 {
2157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159 return dd->rcv_err_status_cnt[58];
2160 }
2161
2162 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2163 void *context, int vl, int mode,
2164 u64 data)
2165 {
2166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168 return dd->rcv_err_status_cnt[57];
2169 }
2170
2171 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2172 void *context, int vl, int mode,
2173 u64 data)
2174 {
2175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177 return dd->rcv_err_status_cnt[56];
2178 }
2179
2180 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2181 void *context, int vl, int mode,
2182 u64 data)
2183 {
2184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186 return dd->rcv_err_status_cnt[55];
2187 }
2188
2189 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2190 const struct cntr_entry *entry,
2191 void *context, int vl, int mode, u64 data)
2192 {
2193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195 return dd->rcv_err_status_cnt[54];
2196 }
2197
2198 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2199 const struct cntr_entry *entry,
2200 void *context, int vl, int mode, u64 data)
2201 {
2202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204 return dd->rcv_err_status_cnt[53];
2205 }
2206
2207 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2208 void *context, int vl,
2209 int mode, u64 data)
2210 {
2211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213 return dd->rcv_err_status_cnt[52];
2214 }
2215
2216 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2217 void *context, int vl,
2218 int mode, u64 data)
2219 {
2220 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2221
2222 return dd->rcv_err_status_cnt[51];
2223 }
2224
2225 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2226 void *context, int vl,
2227 int mode, u64 data)
2228 {
2229 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2230
2231 return dd->rcv_err_status_cnt[50];
2232 }
2233
2234 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2235 void *context, int vl,
2236 int mode, u64 data)
2237 {
2238 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2239
2240 return dd->rcv_err_status_cnt[49];
2241 }
2242
2243 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2244 void *context, int vl,
2245 int mode, u64 data)
2246 {
2247 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2248
2249 return dd->rcv_err_status_cnt[48];
2250 }
2251
2252 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2253 void *context, int vl,
2254 int mode, u64 data)
2255 {
2256 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2257
2258 return dd->rcv_err_status_cnt[47];
2259 }
2260
2261 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2262 void *context, int vl, int mode,
2263 u64 data)
2264 {
2265 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2266
2267 return dd->rcv_err_status_cnt[46];
2268 }
2269
2270 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2271 const struct cntr_entry *entry,
2272 void *context, int vl, int mode, u64 data)
2273 {
2274 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2275
2276 return dd->rcv_err_status_cnt[45];
2277 }
2278
2279 static u64 access_rx_lookup_csr_parity_err_cnt(
2280 const struct cntr_entry *entry,
2281 void *context, int vl, int mode, u64 data)
2282 {
2283 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2284
2285 return dd->rcv_err_status_cnt[44];
2286 }
2287
2288 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2289 const struct cntr_entry *entry,
2290 void *context, int vl, int mode, u64 data)
2291 {
2292 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2293
2294 return dd->rcv_err_status_cnt[43];
2295 }
2296
2297 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2298 const struct cntr_entry *entry,
2299 void *context, int vl, int mode, u64 data)
2300 {
2301 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2302
2303 return dd->rcv_err_status_cnt[42];
2304 }
2305
2306 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2307 const struct cntr_entry *entry,
2308 void *context, int vl, int mode, u64 data)
2309 {
2310 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2311
2312 return dd->rcv_err_status_cnt[41];
2313 }
2314
2315 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2316 const struct cntr_entry *entry,
2317 void *context, int vl, int mode, u64 data)
2318 {
2319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2320
2321 return dd->rcv_err_status_cnt[40];
2322 }
2323
2324 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2325 const struct cntr_entry *entry,
2326 void *context, int vl, int mode, u64 data)
2327 {
2328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2329
2330 return dd->rcv_err_status_cnt[39];
2331 }
2332
2333 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2334 const struct cntr_entry *entry,
2335 void *context, int vl, int mode, u64 data)
2336 {
2337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2338
2339 return dd->rcv_err_status_cnt[38];
2340 }
2341
2342 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2343 const struct cntr_entry *entry,
2344 void *context, int vl, int mode, u64 data)
2345 {
2346 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2347
2348 return dd->rcv_err_status_cnt[37];
2349 }
2350
2351 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2352 const struct cntr_entry *entry,
2353 void *context, int vl, int mode, u64 data)
2354 {
2355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2356
2357 return dd->rcv_err_status_cnt[36];
2358 }
2359
2360 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2361 const struct cntr_entry *entry,
2362 void *context, int vl, int mode, u64 data)
2363 {
2364 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2365
2366 return dd->rcv_err_status_cnt[35];
2367 }
2368
2369 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2370 const struct cntr_entry *entry,
2371 void *context, int vl, int mode, u64 data)
2372 {
2373 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2374
2375 return dd->rcv_err_status_cnt[34];
2376 }
2377
2378 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2379 const struct cntr_entry *entry,
2380 void *context, int vl, int mode, u64 data)
2381 {
2382 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2383
2384 return dd->rcv_err_status_cnt[33];
2385 }
2386
2387 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2388 void *context, int vl, int mode,
2389 u64 data)
2390 {
2391 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2392
2393 return dd->rcv_err_status_cnt[32];
2394 }
2395
2396 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2397 void *context, int vl, int mode,
2398 u64 data)
2399 {
2400 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2401
2402 return dd->rcv_err_status_cnt[31];
2403 }
2404
2405 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2406 void *context, int vl, int mode,
2407 u64 data)
2408 {
2409 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2410
2411 return dd->rcv_err_status_cnt[30];
2412 }
2413
2414 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2415 void *context, int vl, int mode,
2416 u64 data)
2417 {
2418 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2419
2420 return dd->rcv_err_status_cnt[29];
2421 }
2422
2423 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2424 void *context, int vl,
2425 int mode, u64 data)
2426 {
2427 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2428
2429 return dd->rcv_err_status_cnt[28];
2430 }
2431
2432 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2433 const struct cntr_entry *entry,
2434 void *context, int vl, int mode, u64 data)
2435 {
2436 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2437
2438 return dd->rcv_err_status_cnt[27];
2439 }
2440
2441 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2442 const struct cntr_entry *entry,
2443 void *context, int vl, int mode, u64 data)
2444 {
2445 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2446
2447 return dd->rcv_err_status_cnt[26];
2448 }
2449
2450 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2451 const struct cntr_entry *entry,
2452 void *context, int vl, int mode, u64 data)
2453 {
2454 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2455
2456 return dd->rcv_err_status_cnt[25];
2457 }
2458
2459 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2460 const struct cntr_entry *entry,
2461 void *context, int vl, int mode, u64 data)
2462 {
2463 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2464
2465 return dd->rcv_err_status_cnt[24];
2466 }
2467
2468 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2469 const struct cntr_entry *entry,
2470 void *context, int vl, int mode, u64 data)
2471 {
2472 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2473
2474 return dd->rcv_err_status_cnt[23];
2475 }
2476
2477 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2478 const struct cntr_entry *entry,
2479 void *context, int vl, int mode, u64 data)
2480 {
2481 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2482
2483 return dd->rcv_err_status_cnt[22];
2484 }
2485
2486 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2487 const struct cntr_entry *entry,
2488 void *context, int vl, int mode, u64 data)
2489 {
2490 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2491
2492 return dd->rcv_err_status_cnt[21];
2493 }
2494
2495 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2496 const struct cntr_entry *entry,
2497 void *context, int vl, int mode, u64 data)
2498 {
2499 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2500
2501 return dd->rcv_err_status_cnt[20];
2502 }
2503
2504 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2505 const struct cntr_entry *entry,
2506 void *context, int vl, int mode, u64 data)
2507 {
2508 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2509
2510 return dd->rcv_err_status_cnt[19];
2511 }
2512
2513 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2514 void *context, int vl,
2515 int mode, u64 data)
2516 {
2517 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2518
2519 return dd->rcv_err_status_cnt[18];
2520 }
2521
2522 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2523 void *context, int vl,
2524 int mode, u64 data)
2525 {
2526 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2527
2528 return dd->rcv_err_status_cnt[17];
2529 }
2530
2531 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2532 const struct cntr_entry *entry,
2533 void *context, int vl, int mode, u64 data)
2534 {
2535 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2536
2537 return dd->rcv_err_status_cnt[16];
2538 }
2539
2540 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2541 const struct cntr_entry *entry,
2542 void *context, int vl, int mode, u64 data)
2543 {
2544 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2545
2546 return dd->rcv_err_status_cnt[15];
2547 }
2548
2549 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2550 void *context, int vl,
2551 int mode, u64 data)
2552 {
2553 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2554
2555 return dd->rcv_err_status_cnt[14];
2556 }
2557
2558 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2559 void *context, int vl,
2560 int mode, u64 data)
2561 {
2562 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2563
2564 return dd->rcv_err_status_cnt[13];
2565 }
2566
2567 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2568 void *context, int vl, int mode,
2569 u64 data)
2570 {
2571 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2572
2573 return dd->rcv_err_status_cnt[12];
2574 }
2575
2576 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2577 void *context, int vl, int mode,
2578 u64 data)
2579 {
2580 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2581
2582 return dd->rcv_err_status_cnt[11];
2583 }
2584
2585 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2586 void *context, int vl, int mode,
2587 u64 data)
2588 {
2589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2590
2591 return dd->rcv_err_status_cnt[10];
2592 }
2593
2594 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2595 void *context, int vl, int mode,
2596 u64 data)
2597 {
2598 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2599
2600 return dd->rcv_err_status_cnt[9];
2601 }
2602
2603 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2604 void *context, int vl, int mode,
2605 u64 data)
2606 {
2607 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2608
2609 return dd->rcv_err_status_cnt[8];
2610 }
2611
2612 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2613 const struct cntr_entry *entry,
2614 void *context, int vl, int mode, u64 data)
2615 {
2616 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2617
2618 return dd->rcv_err_status_cnt[7];
2619 }
2620
2621 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2622 const struct cntr_entry *entry,
2623 void *context, int vl, int mode, u64 data)
2624 {
2625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2626
2627 return dd->rcv_err_status_cnt[6];
2628 }
2629
2630 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2631 void *context, int vl, int mode,
2632 u64 data)
2633 {
2634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2635
2636 return dd->rcv_err_status_cnt[5];
2637 }
2638
2639 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2640 void *context, int vl, int mode,
2641 u64 data)
2642 {
2643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2644
2645 return dd->rcv_err_status_cnt[4];
2646 }
2647
2648 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2649 void *context, int vl, int mode,
2650 u64 data)
2651 {
2652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2653
2654 return dd->rcv_err_status_cnt[3];
2655 }
2656
2657 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2658 void *context, int vl, int mode,
2659 u64 data)
2660 {
2661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2662
2663 return dd->rcv_err_status_cnt[2];
2664 }
2665
2666 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2667 void *context, int vl, int mode,
2668 u64 data)
2669 {
2670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2671
2672 return dd->rcv_err_status_cnt[1];
2673 }
2674
2675 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2676 void *context, int vl, int mode,
2677 u64 data)
2678 {
2679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2680
2681 return dd->rcv_err_status_cnt[0];
2682 }
2683
2684 /*
2685 * Software counters corresponding to each of the
2686 * error status bits within SendPioErrStatus
2687 */
2688 static u64 access_pio_pec_sop_head_parity_err_cnt(
2689 const struct cntr_entry *entry,
2690 void *context, int vl, int mode, u64 data)
2691 {
2692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693
2694 return dd->send_pio_err_status_cnt[35];
2695 }
2696
2697 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2698 const struct cntr_entry *entry,
2699 void *context, int vl, int mode, u64 data)
2700 {
2701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2702
2703 return dd->send_pio_err_status_cnt[34];
2704 }
2705
2706 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2707 const struct cntr_entry *entry,
2708 void *context, int vl, int mode, u64 data)
2709 {
2710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711
2712 return dd->send_pio_err_status_cnt[33];
2713 }
2714
2715 static u64 access_pio_current_free_cnt_parity_err_cnt(
2716 const struct cntr_entry *entry,
2717 void *context, int vl, int mode, u64 data)
2718 {
2719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721 return dd->send_pio_err_status_cnt[32];
2722 }
2723
2724 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2725 void *context, int vl, int mode,
2726 u64 data)
2727 {
2728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730 return dd->send_pio_err_status_cnt[31];
2731 }
2732
2733 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2734 void *context, int vl, int mode,
2735 u64 data)
2736 {
2737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739 return dd->send_pio_err_status_cnt[30];
2740 }
2741
2742 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2743 void *context, int vl, int mode,
2744 u64 data)
2745 {
2746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748 return dd->send_pio_err_status_cnt[29];
2749 }
2750
2751 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2752 const struct cntr_entry *entry,
2753 void *context, int vl, int mode, u64 data)
2754 {
2755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757 return dd->send_pio_err_status_cnt[28];
2758 }
2759
2760 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2761 void *context, int vl, int mode,
2762 u64 data)
2763 {
2764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766 return dd->send_pio_err_status_cnt[27];
2767 }
2768
2769 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2770 void *context, int vl, int mode,
2771 u64 data)
2772 {
2773 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775 return dd->send_pio_err_status_cnt[26];
2776 }
2777
2778 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2779 void *context, int vl,
2780 int mode, u64 data)
2781 {
2782 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784 return dd->send_pio_err_status_cnt[25];
2785 }
2786
2787 static u64 access_pio_block_qw_count_parity_err_cnt(
2788 const struct cntr_entry *entry,
2789 void *context, int vl, int mode, u64 data)
2790 {
2791 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793 return dd->send_pio_err_status_cnt[24];
2794 }
2795
2796 static u64 access_pio_write_qw_valid_parity_err_cnt(
2797 const struct cntr_entry *entry,
2798 void *context, int vl, int mode, u64 data)
2799 {
2800 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2801
2802 return dd->send_pio_err_status_cnt[23];
2803 }
2804
2805 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2806 void *context, int vl, int mode,
2807 u64 data)
2808 {
2809 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2810
2811 return dd->send_pio_err_status_cnt[22];
2812 }
2813
2814 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2815 void *context, int vl,
2816 int mode, u64 data)
2817 {
2818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2819
2820 return dd->send_pio_err_status_cnt[21];
2821 }
2822
2823 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2824 void *context, int vl,
2825 int mode, u64 data)
2826 {
2827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2828
2829 return dd->send_pio_err_status_cnt[20];
2830 }
2831
2832 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2833 void *context, int vl,
2834 int mode, u64 data)
2835 {
2836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2837
2838 return dd->send_pio_err_status_cnt[19];
2839 }
2840
2841 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2842 const struct cntr_entry *entry,
2843 void *context, int vl, int mode, u64 data)
2844 {
2845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2846
2847 return dd->send_pio_err_status_cnt[18];
2848 }
2849
2850 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2851 void *context, int vl, int mode,
2852 u64 data)
2853 {
2854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2855
2856 return dd->send_pio_err_status_cnt[17];
2857 }
2858
2859 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2860 void *context, int vl, int mode,
2861 u64 data)
2862 {
2863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2864
2865 return dd->send_pio_err_status_cnt[16];
2866 }
2867
2868 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2869 const struct cntr_entry *entry,
2870 void *context, int vl, int mode, u64 data)
2871 {
2872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2873
2874 return dd->send_pio_err_status_cnt[15];
2875 }
2876
2877 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2878 const struct cntr_entry *entry,
2879 void *context, int vl, int mode, u64 data)
2880 {
2881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2882
2883 return dd->send_pio_err_status_cnt[14];
2884 }
2885
2886 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2887 const struct cntr_entry *entry,
2888 void *context, int vl, int mode, u64 data)
2889 {
2890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2891
2892 return dd->send_pio_err_status_cnt[13];
2893 }
2894
2895 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2896 const struct cntr_entry *entry,
2897 void *context, int vl, int mode, u64 data)
2898 {
2899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2900
2901 return dd->send_pio_err_status_cnt[12];
2902 }
2903
2904 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2905 const struct cntr_entry *entry,
2906 void *context, int vl, int mode, u64 data)
2907 {
2908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2909
2910 return dd->send_pio_err_status_cnt[11];
2911 }
2912
2913 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2914 const struct cntr_entry *entry,
2915 void *context, int vl, int mode, u64 data)
2916 {
2917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2918
2919 return dd->send_pio_err_status_cnt[10];
2920 }
2921
2922 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2923 const struct cntr_entry *entry,
2924 void *context, int vl, int mode, u64 data)
2925 {
2926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2927
2928 return dd->send_pio_err_status_cnt[9];
2929 }
2930
2931 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2932 const struct cntr_entry *entry,
2933 void *context, int vl, int mode, u64 data)
2934 {
2935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2936
2937 return dd->send_pio_err_status_cnt[8];
2938 }
2939
2940 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2941 const struct cntr_entry *entry,
2942 void *context, int vl, int mode, u64 data)
2943 {
2944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2945
2946 return dd->send_pio_err_status_cnt[7];
2947 }
2948
2949 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2950 void *context, int vl, int mode,
2951 u64 data)
2952 {
2953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2954
2955 return dd->send_pio_err_status_cnt[6];
2956 }
2957
2958 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2959 void *context, int vl, int mode,
2960 u64 data)
2961 {
2962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2963
2964 return dd->send_pio_err_status_cnt[5];
2965 }
2966
2967 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2968 void *context, int vl, int mode,
2969 u64 data)
2970 {
2971 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2972
2973 return dd->send_pio_err_status_cnt[4];
2974 }
2975
2976 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2977 void *context, int vl, int mode,
2978 u64 data)
2979 {
2980 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2981
2982 return dd->send_pio_err_status_cnt[3];
2983 }
2984
2985 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
2986 void *context, int vl, int mode,
2987 u64 data)
2988 {
2989 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2990
2991 return dd->send_pio_err_status_cnt[2];
2992 }
2993
2994 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
2995 void *context, int vl,
2996 int mode, u64 data)
2997 {
2998 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2999
3000 return dd->send_pio_err_status_cnt[1];
3001 }
3002
3003 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3004 void *context, int vl, int mode,
3005 u64 data)
3006 {
3007 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3008
3009 return dd->send_pio_err_status_cnt[0];
3010 }
3011
3012 /*
3013 * Software counters corresponding to each of the
3014 * error status bits within SendDmaErrStatus
3015 */
3016 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3017 const struct cntr_entry *entry,
3018 void *context, int vl, int mode, u64 data)
3019 {
3020 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021
3022 return dd->send_dma_err_status_cnt[3];
3023 }
3024
3025 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3026 const struct cntr_entry *entry,
3027 void *context, int vl, int mode, u64 data)
3028 {
3029 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3030
3031 return dd->send_dma_err_status_cnt[2];
3032 }
3033
3034 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3035 void *context, int vl, int mode,
3036 u64 data)
3037 {
3038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039
3040 return dd->send_dma_err_status_cnt[1];
3041 }
3042
3043 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3044 void *context, int vl, int mode,
3045 u64 data)
3046 {
3047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049 return dd->send_dma_err_status_cnt[0];
3050 }
3051
3052 /*
3053 * Software counters corresponding to each of the
3054 * error status bits within SendEgressErrStatus
3055 */
3056 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3057 const struct cntr_entry *entry,
3058 void *context, int vl, int mode, u64 data)
3059 {
3060 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3061
3062 return dd->send_egress_err_status_cnt[63];
3063 }
3064
3065 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3066 const struct cntr_entry *entry,
3067 void *context, int vl, int mode, u64 data)
3068 {
3069 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3070
3071 return dd->send_egress_err_status_cnt[62];
3072 }
3073
3074 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3075 void *context, int vl, int mode,
3076 u64 data)
3077 {
3078 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3079
3080 return dd->send_egress_err_status_cnt[61];
3081 }
3082
3083 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3084 void *context, int vl,
3085 int mode, u64 data)
3086 {
3087 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3088
3089 return dd->send_egress_err_status_cnt[60];
3090 }
3091
3092 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3093 const struct cntr_entry *entry,
3094 void *context, int vl, int mode, u64 data)
3095 {
3096 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3097
3098 return dd->send_egress_err_status_cnt[59];
3099 }
3100
3101 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3102 void *context, int vl, int mode,
3103 u64 data)
3104 {
3105 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3106
3107 return dd->send_egress_err_status_cnt[58];
3108 }
3109
3110 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3111 void *context, int vl, int mode,
3112 u64 data)
3113 {
3114 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3115
3116 return dd->send_egress_err_status_cnt[57];
3117 }
3118
3119 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3120 void *context, int vl, int mode,
3121 u64 data)
3122 {
3123 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3124
3125 return dd->send_egress_err_status_cnt[56];
3126 }
3127
3128 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3129 void *context, int vl, int mode,
3130 u64 data)
3131 {
3132 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134 return dd->send_egress_err_status_cnt[55];
3135 }
3136
3137 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3138 void *context, int vl, int mode,
3139 u64 data)
3140 {
3141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143 return dd->send_egress_err_status_cnt[54];
3144 }
3145
3146 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3147 void *context, int vl, int mode,
3148 u64 data)
3149 {
3150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152 return dd->send_egress_err_status_cnt[53];
3153 }
3154
3155 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3156 void *context, int vl, int mode,
3157 u64 data)
3158 {
3159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161 return dd->send_egress_err_status_cnt[52];
3162 }
3163
3164 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3165 void *context, int vl, int mode,
3166 u64 data)
3167 {
3168 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3169
3170 return dd->send_egress_err_status_cnt[51];
3171 }
3172
3173 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3174 void *context, int vl, int mode,
3175 u64 data)
3176 {
3177 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3178
3179 return dd->send_egress_err_status_cnt[50];
3180 }
3181
3182 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3183 void *context, int vl, int mode,
3184 u64 data)
3185 {
3186 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3187
3188 return dd->send_egress_err_status_cnt[49];
3189 }
3190
3191 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3192 void *context, int vl, int mode,
3193 u64 data)
3194 {
3195 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3196
3197 return dd->send_egress_err_status_cnt[48];
3198 }
3199
3200 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3201 void *context, int vl, int mode,
3202 u64 data)
3203 {
3204 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3205
3206 return dd->send_egress_err_status_cnt[47];
3207 }
3208
3209 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3210 void *context, int vl, int mode,
3211 u64 data)
3212 {
3213 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3214
3215 return dd->send_egress_err_status_cnt[46];
3216 }
3217
3218 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3219 void *context, int vl, int mode,
3220 u64 data)
3221 {
3222 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3223
3224 return dd->send_egress_err_status_cnt[45];
3225 }
3226
3227 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3228 void *context, int vl,
3229 int mode, u64 data)
3230 {
3231 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3232
3233 return dd->send_egress_err_status_cnt[44];
3234 }
3235
3236 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3237 const struct cntr_entry *entry,
3238 void *context, int vl, int mode, u64 data)
3239 {
3240 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3241
3242 return dd->send_egress_err_status_cnt[43];
3243 }
3244
3245 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3246 void *context, int vl, int mode,
3247 u64 data)
3248 {
3249 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3250
3251 return dd->send_egress_err_status_cnt[42];
3252 }
3253
3254 static u64 access_tx_credit_return_partiy_err_cnt(
3255 const struct cntr_entry *entry,
3256 void *context, int vl, int mode, u64 data)
3257 {
3258 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3259
3260 return dd->send_egress_err_status_cnt[41];
3261 }
3262
3263 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3264 const struct cntr_entry *entry,
3265 void *context, int vl, int mode, u64 data)
3266 {
3267 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3268
3269 return dd->send_egress_err_status_cnt[40];
3270 }
3271
3272 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3273 const struct cntr_entry *entry,
3274 void *context, int vl, int mode, u64 data)
3275 {
3276 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3277
3278 return dd->send_egress_err_status_cnt[39];
3279 }
3280
3281 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3282 const struct cntr_entry *entry,
3283 void *context, int vl, int mode, u64 data)
3284 {
3285 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3286
3287 return dd->send_egress_err_status_cnt[38];
3288 }
3289
3290 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3291 const struct cntr_entry *entry,
3292 void *context, int vl, int mode, u64 data)
3293 {
3294 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3295
3296 return dd->send_egress_err_status_cnt[37];
3297 }
3298
3299 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3300 const struct cntr_entry *entry,
3301 void *context, int vl, int mode, u64 data)
3302 {
3303 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3304
3305 return dd->send_egress_err_status_cnt[36];
3306 }
3307
3308 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3309 const struct cntr_entry *entry,
3310 void *context, int vl, int mode, u64 data)
3311 {
3312 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3313
3314 return dd->send_egress_err_status_cnt[35];
3315 }
3316
3317 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3318 const struct cntr_entry *entry,
3319 void *context, int vl, int mode, u64 data)
3320 {
3321 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3322
3323 return dd->send_egress_err_status_cnt[34];
3324 }
3325
3326 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3327 const struct cntr_entry *entry,
3328 void *context, int vl, int mode, u64 data)
3329 {
3330 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3331
3332 return dd->send_egress_err_status_cnt[33];
3333 }
3334
3335 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3336 const struct cntr_entry *entry,
3337 void *context, int vl, int mode, u64 data)
3338 {
3339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3340
3341 return dd->send_egress_err_status_cnt[32];
3342 }
3343
3344 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3345 const struct cntr_entry *entry,
3346 void *context, int vl, int mode, u64 data)
3347 {
3348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3349
3350 return dd->send_egress_err_status_cnt[31];
3351 }
3352
3353 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3354 const struct cntr_entry *entry,
3355 void *context, int vl, int mode, u64 data)
3356 {
3357 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3358
3359 return dd->send_egress_err_status_cnt[30];
3360 }
3361
3362 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3363 const struct cntr_entry *entry,
3364 void *context, int vl, int mode, u64 data)
3365 {
3366 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3367
3368 return dd->send_egress_err_status_cnt[29];
3369 }
3370
3371 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3372 const struct cntr_entry *entry,
3373 void *context, int vl, int mode, u64 data)
3374 {
3375 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3376
3377 return dd->send_egress_err_status_cnt[28];
3378 }
3379
3380 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3381 const struct cntr_entry *entry,
3382 void *context, int vl, int mode, u64 data)
3383 {
3384 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3385
3386 return dd->send_egress_err_status_cnt[27];
3387 }
3388
3389 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3390 const struct cntr_entry *entry,
3391 void *context, int vl, int mode, u64 data)
3392 {
3393 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3394
3395 return dd->send_egress_err_status_cnt[26];
3396 }
3397
3398 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3399 const struct cntr_entry *entry,
3400 void *context, int vl, int mode, u64 data)
3401 {
3402 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3403
3404 return dd->send_egress_err_status_cnt[25];
3405 }
3406
3407 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3408 const struct cntr_entry *entry,
3409 void *context, int vl, int mode, u64 data)
3410 {
3411 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3412
3413 return dd->send_egress_err_status_cnt[24];
3414 }
3415
3416 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3417 const struct cntr_entry *entry,
3418 void *context, int vl, int mode, u64 data)
3419 {
3420 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3421
3422 return dd->send_egress_err_status_cnt[23];
3423 }
3424
3425 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3426 const struct cntr_entry *entry,
3427 void *context, int vl, int mode, u64 data)
3428 {
3429 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3430
3431 return dd->send_egress_err_status_cnt[22];
3432 }
3433
3434 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3435 const struct cntr_entry *entry,
3436 void *context, int vl, int mode, u64 data)
3437 {
3438 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3439
3440 return dd->send_egress_err_status_cnt[21];
3441 }
3442
3443 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3444 const struct cntr_entry *entry,
3445 void *context, int vl, int mode, u64 data)
3446 {
3447 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3448
3449 return dd->send_egress_err_status_cnt[20];
3450 }
3451
3452 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3453 const struct cntr_entry *entry,
3454 void *context, int vl, int mode, u64 data)
3455 {
3456 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3457
3458 return dd->send_egress_err_status_cnt[19];
3459 }
3460
3461 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3462 const struct cntr_entry *entry,
3463 void *context, int vl, int mode, u64 data)
3464 {
3465 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3466
3467 return dd->send_egress_err_status_cnt[18];
3468 }
3469
3470 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3471 const struct cntr_entry *entry,
3472 void *context, int vl, int mode, u64 data)
3473 {
3474 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3475
3476 return dd->send_egress_err_status_cnt[17];
3477 }
3478
3479 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3480 const struct cntr_entry *entry,
3481 void *context, int vl, int mode, u64 data)
3482 {
3483 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3484
3485 return dd->send_egress_err_status_cnt[16];
3486 }
3487
3488 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3489 void *context, int vl, int mode,
3490 u64 data)
3491 {
3492 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3493
3494 return dd->send_egress_err_status_cnt[15];
3495 }
3496
3497 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3498 void *context, int vl,
3499 int mode, u64 data)
3500 {
3501 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3502
3503 return dd->send_egress_err_status_cnt[14];
3504 }
3505
3506 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3507 void *context, int vl, int mode,
3508 u64 data)
3509 {
3510 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3511
3512 return dd->send_egress_err_status_cnt[13];
3513 }
3514
3515 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3516 void *context, int vl, int mode,
3517 u64 data)
3518 {
3519 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3520
3521 return dd->send_egress_err_status_cnt[12];
3522 }
3523
3524 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3525 const struct cntr_entry *entry,
3526 void *context, int vl, int mode, u64 data)
3527 {
3528 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3529
3530 return dd->send_egress_err_status_cnt[11];
3531 }
3532
3533 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3534 void *context, int vl, int mode,
3535 u64 data)
3536 {
3537 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3538
3539 return dd->send_egress_err_status_cnt[10];
3540 }
3541
3542 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3543 void *context, int vl, int mode,
3544 u64 data)
3545 {
3546 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3547
3548 return dd->send_egress_err_status_cnt[9];
3549 }
3550
3551 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3552 const struct cntr_entry *entry,
3553 void *context, int vl, int mode, u64 data)
3554 {
3555 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3556
3557 return dd->send_egress_err_status_cnt[8];
3558 }
3559
3560 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3561 const struct cntr_entry *entry,
3562 void *context, int vl, int mode, u64 data)
3563 {
3564 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3565
3566 return dd->send_egress_err_status_cnt[7];
3567 }
3568
3569 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3570 void *context, int vl, int mode,
3571 u64 data)
3572 {
3573 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3574
3575 return dd->send_egress_err_status_cnt[6];
3576 }
3577
3578 static u64 access_tx_incorrect_link_state_err_cnt(
3579 const struct cntr_entry *entry,
3580 void *context, int vl, int mode, u64 data)
3581 {
3582 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3583
3584 return dd->send_egress_err_status_cnt[5];
3585 }
3586
3587 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3588 void *context, int vl, int mode,
3589 u64 data)
3590 {
3591 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3592
3593 return dd->send_egress_err_status_cnt[4];
3594 }
3595
3596 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3597 const struct cntr_entry *entry,
3598 void *context, int vl, int mode, u64 data)
3599 {
3600 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3601
3602 return dd->send_egress_err_status_cnt[3];
3603 }
3604
3605 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3606 void *context, int vl, int mode,
3607 u64 data)
3608 {
3609 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3610
3611 return dd->send_egress_err_status_cnt[2];
3612 }
3613
3614 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3615 const struct cntr_entry *entry,
3616 void *context, int vl, int mode, u64 data)
3617 {
3618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3619
3620 return dd->send_egress_err_status_cnt[1];
3621 }
3622
3623 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3624 const struct cntr_entry *entry,
3625 void *context, int vl, int mode, u64 data)
3626 {
3627 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3628
3629 return dd->send_egress_err_status_cnt[0];
3630 }
3631
3632 /*
3633 * Software counters corresponding to each of the
3634 * error status bits within SendErrStatus
3635 */
3636 static u64 access_send_csr_write_bad_addr_err_cnt(
3637 const struct cntr_entry *entry,
3638 void *context, int vl, int mode, u64 data)
3639 {
3640 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641
3642 return dd->send_err_status_cnt[2];
3643 }
3644
3645 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3646 void *context, int vl,
3647 int mode, u64 data)
3648 {
3649 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3650
3651 return dd->send_err_status_cnt[1];
3652 }
3653
3654 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3655 void *context, int vl, int mode,
3656 u64 data)
3657 {
3658 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659
3660 return dd->send_err_status_cnt[0];
3661 }
3662
3663 /*
3664 * Software counters corresponding to each of the
3665 * error status bits within SendCtxtErrStatus
3666 */
3667 static u64 access_pio_write_out_of_bounds_err_cnt(
3668 const struct cntr_entry *entry,
3669 void *context, int vl, int mode, u64 data)
3670 {
3671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3672
3673 return dd->sw_ctxt_err_status_cnt[4];
3674 }
3675
3676 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3677 void *context, int vl, int mode,
3678 u64 data)
3679 {
3680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3681
3682 return dd->sw_ctxt_err_status_cnt[3];
3683 }
3684
3685 static u64 access_pio_write_crosses_boundary_err_cnt(
3686 const struct cntr_entry *entry,
3687 void *context, int vl, int mode, u64 data)
3688 {
3689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3690
3691 return dd->sw_ctxt_err_status_cnt[2];
3692 }
3693
3694 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3695 void *context, int vl,
3696 int mode, u64 data)
3697 {
3698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3699
3700 return dd->sw_ctxt_err_status_cnt[1];
3701 }
3702
3703 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3704 void *context, int vl, int mode,
3705 u64 data)
3706 {
3707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3708
3709 return dd->sw_ctxt_err_status_cnt[0];
3710 }
3711
3712 /*
3713 * Software counters corresponding to each of the
3714 * error status bits within SendDmaEngErrStatus
3715 */
3716 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3717 const struct cntr_entry *entry,
3718 void *context, int vl, int mode, u64 data)
3719 {
3720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3721
3722 return dd->sw_send_dma_eng_err_status_cnt[23];
3723 }
3724
3725 static u64 access_sdma_header_storage_cor_err_cnt(
3726 const struct cntr_entry *entry,
3727 void *context, int vl, int mode, u64 data)
3728 {
3729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3730
3731 return dd->sw_send_dma_eng_err_status_cnt[22];
3732 }
3733
3734 static u64 access_sdma_packet_tracking_cor_err_cnt(
3735 const struct cntr_entry *entry,
3736 void *context, int vl, int mode, u64 data)
3737 {
3738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3739
3740 return dd->sw_send_dma_eng_err_status_cnt[21];
3741 }
3742
3743 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3744 void *context, int vl, int mode,
3745 u64 data)
3746 {
3747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3748
3749 return dd->sw_send_dma_eng_err_status_cnt[20];
3750 }
3751
3752 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3753 void *context, int vl, int mode,
3754 u64 data)
3755 {
3756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3757
3758 return dd->sw_send_dma_eng_err_status_cnt[19];
3759 }
3760
3761 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3762 const struct cntr_entry *entry,
3763 void *context, int vl, int mode, u64 data)
3764 {
3765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3766
3767 return dd->sw_send_dma_eng_err_status_cnt[18];
3768 }
3769
3770 static u64 access_sdma_header_storage_unc_err_cnt(
3771 const struct cntr_entry *entry,
3772 void *context, int vl, int mode, u64 data)
3773 {
3774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3775
3776 return dd->sw_send_dma_eng_err_status_cnt[17];
3777 }
3778
3779 static u64 access_sdma_packet_tracking_unc_err_cnt(
3780 const struct cntr_entry *entry,
3781 void *context, int vl, int mode, u64 data)
3782 {
3783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785 return dd->sw_send_dma_eng_err_status_cnt[16];
3786 }
3787
3788 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3789 void *context, int vl, int mode,
3790 u64 data)
3791 {
3792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794 return dd->sw_send_dma_eng_err_status_cnt[15];
3795 }
3796
3797 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3798 void *context, int vl, int mode,
3799 u64 data)
3800 {
3801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803 return dd->sw_send_dma_eng_err_status_cnt[14];
3804 }
3805
3806 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3807 void *context, int vl, int mode,
3808 u64 data)
3809 {
3810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812 return dd->sw_send_dma_eng_err_status_cnt[13];
3813 }
3814
3815 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3816 void *context, int vl, int mode,
3817 u64 data)
3818 {
3819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821 return dd->sw_send_dma_eng_err_status_cnt[12];
3822 }
3823
3824 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3825 void *context, int vl, int mode,
3826 u64 data)
3827 {
3828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3829
3830 return dd->sw_send_dma_eng_err_status_cnt[11];
3831 }
3832
3833 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3834 void *context, int vl, int mode,
3835 u64 data)
3836 {
3837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3838
3839 return dd->sw_send_dma_eng_err_status_cnt[10];
3840 }
3841
3842 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3843 void *context, int vl, int mode,
3844 u64 data)
3845 {
3846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3847
3848 return dd->sw_send_dma_eng_err_status_cnt[9];
3849 }
3850
3851 static u64 access_sdma_packet_desc_overflow_err_cnt(
3852 const struct cntr_entry *entry,
3853 void *context, int vl, int mode, u64 data)
3854 {
3855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3856
3857 return dd->sw_send_dma_eng_err_status_cnt[8];
3858 }
3859
3860 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3861 void *context, int vl,
3862 int mode, u64 data)
3863 {
3864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3865
3866 return dd->sw_send_dma_eng_err_status_cnt[7];
3867 }
3868
3869 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3870 void *context, int vl, int mode, u64 data)
3871 {
3872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3873
3874 return dd->sw_send_dma_eng_err_status_cnt[6];
3875 }
3876
3877 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3878 void *context, int vl, int mode,
3879 u64 data)
3880 {
3881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3882
3883 return dd->sw_send_dma_eng_err_status_cnt[5];
3884 }
3885
3886 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3887 void *context, int vl, int mode,
3888 u64 data)
3889 {
3890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3891
3892 return dd->sw_send_dma_eng_err_status_cnt[4];
3893 }
3894
3895 static u64 access_sdma_tail_out_of_bounds_err_cnt(
3896 const struct cntr_entry *entry,
3897 void *context, int vl, int mode, u64 data)
3898 {
3899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3900
3901 return dd->sw_send_dma_eng_err_status_cnt[3];
3902 }
3903
3904 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3905 void *context, int vl, int mode,
3906 u64 data)
3907 {
3908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3909
3910 return dd->sw_send_dma_eng_err_status_cnt[2];
3911 }
3912
3913 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3914 void *context, int vl, int mode,
3915 u64 data)
3916 {
3917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3918
3919 return dd->sw_send_dma_eng_err_status_cnt[1];
3920 }
3921
3922 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3923 void *context, int vl, int mode,
3924 u64 data)
3925 {
3926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3927
3928 return dd->sw_send_dma_eng_err_status_cnt[0];
3929 }
3930
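/*
 * Generate an accessor for a per-CPU software counter kept in the
 * port's ibport_data. The generated function passes the per-CPU
 * counter and its corresponding z_ value to read_write_cpu(), which
 * performs the read or write selected by 'mode'.
 */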
3931 #define def_access_sw_cpu(cntr) \
3932 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3933 void *context, int vl, int mode, u64 data) \
3934 { \
3935 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3936 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3937 ppd->ibport_data.rvp.cntr, vl, \
3938 mode, data); \
3939 }
3940
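/*
 * e.g. def_access_sw_cpu(rc_acks) defines access_sw_cpu_rc_acks(),
 * backed by ibport_data.rvp.rc_acks.
 */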
3941 def_access_sw_cpu(rc_acks);
3942 def_access_sw_cpu(rc_qacks);
3943 def_access_sw_cpu(rc_delayed_comp);
3944
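/*
 * Generate an accessor for an ibport_data software counter. These
 * counters are not kept per VL, so a per-VL query returns 0;
 * otherwise the request is handed to read_write_sw().
 */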
3945 #define def_access_ibp_counter(cntr) \
3946 static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3947 void *context, int vl, int mode, u64 data) \
3948 { \
3949 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3950 \
3951 if (vl != CNTR_INVALID_VL) \
3952 return 0; \
3953 \
3954 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3955 mode, data); \
3956 }
3957
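/* e.g. def_access_ibp_counter(loop_pkts) defines access_ibp_loop_pkts() */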
3958 def_access_ibp_counter(loop_pkts);
3959 def_access_ibp_counter(rc_resends);
3960 def_access_ibp_counter(rnr_naks);
3961 def_access_ibp_counter(other_naks);
3962 def_access_ibp_counter(rc_timeouts);
3963 def_access_ibp_counter(pkt_drops);
3964 def_access_ibp_counter(dmawait);
3965 def_access_ibp_counter(rc_seqnak);
3966 def_access_ibp_counter(rc_dupreq);
3967 def_access_ibp_counter(rdma_seq);
3968 def_access_ibp_counter(unaligned);
3969 def_access_ibp_counter(seq_naks);
3970
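/*
 * Device counter table, indexed by the C_* device counter enum values.
 * CSR-backed counters are declared with the *_DEV_CNTR_ELEM and
 * DC_PERF_CNTR* helper macros; software counters use CNTR_ELEM()
 * with an access_* routine.
 */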
3971 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3972 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3973 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3974 CNTR_NORMAL),
3975 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3976 CNTR_NORMAL),
3977 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3978 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3979 CNTR_NORMAL),
3980 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3981 CNTR_NORMAL),
3982 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
3983 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
3984 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
3985 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
3986 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
3987 CNTR_NORMAL),
3988 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
3989 CNTR_NORMAL),
3990 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
3991 CNTR_NORMAL),
3992 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
3993 CNTR_NORMAL),
3994 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
3995 CNTR_NORMAL),
3996 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
3997 CNTR_NORMAL),
3998 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
3999 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4000 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4001 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4002 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4003 CNTR_SYNTH),
4004 [C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4005 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4006 CNTR_SYNTH),
4007 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4008 CNTR_SYNTH),
4009 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4010 CNTR_SYNTH),
4011 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4012 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4013 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4014 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4015 CNTR_SYNTH),
4016 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4017 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4018 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4019 CNTR_SYNTH),
4020 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4021 CNTR_SYNTH),
4022 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4023 CNTR_SYNTH),
4024 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4025 CNTR_SYNTH),
4026 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4027 CNTR_SYNTH),
4028 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4029 CNTR_SYNTH),
4030 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4031 CNTR_SYNTH),
4032 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4033 CNTR_SYNTH | CNTR_VL),
4034 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4035 CNTR_SYNTH | CNTR_VL),
4036 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4037 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4038 CNTR_SYNTH | CNTR_VL),
4039 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4040 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4041 CNTR_SYNTH | CNTR_VL),
4042 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4043 CNTR_SYNTH),
4044 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4045 CNTR_SYNTH | CNTR_VL),
4046 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4047 CNTR_SYNTH),
4048 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4049 CNTR_SYNTH | CNTR_VL),
4050 [C_DC_TOTAL_CRC] =
4051 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4052 CNTR_SYNTH),
4053 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4054 CNTR_SYNTH),
4055 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4056 CNTR_SYNTH),
4057 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4058 CNTR_SYNTH),
4059 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4060 CNTR_SYNTH),
4061 [C_DC_CRC_MULT_LN] =
4062 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4063 CNTR_SYNTH),
4064 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4065 CNTR_SYNTH),
4066 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4067 CNTR_SYNTH),
4068 [C_DC_SEQ_CRC_CNT] =
4069 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4070 CNTR_SYNTH),
4071 [C_DC_ESC0_ONLY_CNT] =
4072 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4073 CNTR_SYNTH),
4074 [C_DC_ESC0_PLUS1_CNT] =
4075 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4076 CNTR_SYNTH),
4077 [C_DC_ESC0_PLUS2_CNT] =
4078 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4079 CNTR_SYNTH),
4080 [C_DC_REINIT_FROM_PEER_CNT] =
4081 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4082 CNTR_SYNTH),
4083 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4084 CNTR_SYNTH),
4085 [C_DC_MISC_FLG_CNT] =
4086 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4087 CNTR_SYNTH),
4088 [C_DC_PRF_GOOD_LTP_CNT] =
4089 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4090 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4091 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4092 CNTR_SYNTH),
4093 [C_DC_PRF_RX_FLIT_CNT] =
4094 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4095 [C_DC_PRF_TX_FLIT_CNT] =
4096 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4097 [C_DC_PRF_CLK_CNTR] =
4098 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4099 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4100 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4101 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4102 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4103 CNTR_SYNTH),
4104 [C_DC_PG_STS_TX_SBE_CNT] =
4105 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4106 [C_DC_PG_STS_TX_MBE_CNT] =
4107 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4108 CNTR_SYNTH),
4109 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4110 access_sw_cpu_intr),
4111 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4112 access_sw_cpu_rcv_limit),
4113 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4114 access_sw_vtx_wait),
4115 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4116 access_sw_pio_wait),
4117 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4118 access_sw_kmem_wait),
4119 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4120 access_sw_send_schedule),
4121 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4122 SEND_DMA_DESC_FETCHED_CNT, 0,
4123 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4124 dev_access_u32_csr),
4125 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4126 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4127 access_sde_int_cnt),
4128 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4129 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4130 access_sde_err_cnt),
4131 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4132 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4133 access_sde_idle_int_cnt),
4134 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4135 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4136 access_sde_progress_int_cnt),
4137 /* MiscErrStatus */
4138 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4139 CNTR_NORMAL,
4140 access_misc_pll_lock_fail_err_cnt),
4141 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4142 CNTR_NORMAL,
4143 access_misc_mbist_fail_err_cnt),
4144 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4145 CNTR_NORMAL,
4146 access_misc_invalid_eep_cmd_err_cnt),
4147 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4148 CNTR_NORMAL,
4149 access_misc_efuse_done_parity_err_cnt),
4150 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4151 CNTR_NORMAL,
4152 access_misc_efuse_write_err_cnt),
4153 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4154 0, CNTR_NORMAL,
4155 access_misc_efuse_read_bad_addr_err_cnt),
4156 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4157 CNTR_NORMAL,
4158 access_misc_efuse_csr_parity_err_cnt),
4159 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4160 CNTR_NORMAL,
4161 access_misc_fw_auth_failed_err_cnt),
4162 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4163 CNTR_NORMAL,
4164 access_misc_key_mismatch_err_cnt),
4165 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4166 CNTR_NORMAL,
4167 access_misc_sbus_write_failed_err_cnt),
4168 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4169 CNTR_NORMAL,
4170 access_misc_csr_write_bad_addr_err_cnt),
4171 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4172 CNTR_NORMAL,
4173 access_misc_csr_read_bad_addr_err_cnt),
4174 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4175 CNTR_NORMAL,
4176 access_misc_csr_parity_err_cnt),
4177 /* CceErrStatus */
4178 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4179 CNTR_NORMAL,
4180 access_sw_cce_err_status_aggregated_cnt),
4181 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4182 CNTR_NORMAL,
4183 access_cce_msix_csr_parity_err_cnt),
4184 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4185 CNTR_NORMAL,
4186 access_cce_int_map_unc_err_cnt),
4187 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4188 CNTR_NORMAL,
4189 access_cce_int_map_cor_err_cnt),
4190 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4191 CNTR_NORMAL,
4192 access_cce_msix_table_unc_err_cnt),
4193 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4194 CNTR_NORMAL,
4195 access_cce_msix_table_cor_err_cnt),
4196 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4197 0, CNTR_NORMAL,
4198 access_cce_rxdma_conv_fifo_parity_err_cnt),
4199 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4200 0, CNTR_NORMAL,
4201 access_cce_rcpl_async_fifo_parity_err_cnt),
4202 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4203 CNTR_NORMAL,
4204 access_cce_seg_write_bad_addr_err_cnt),
4205 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4206 CNTR_NORMAL,
4207 access_cce_seg_read_bad_addr_err_cnt),
4208 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4209 CNTR_NORMAL,
4210 access_la_triggered_cnt),
4211 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4212 CNTR_NORMAL,
4213 access_cce_trgt_cpl_timeout_err_cnt),
4214 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4215 CNTR_NORMAL,
4216 access_pcic_receive_parity_err_cnt),
4217 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4218 CNTR_NORMAL,
4219 access_pcic_transmit_back_parity_err_cnt),
4220 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4221 0, CNTR_NORMAL,
4222 access_pcic_transmit_front_parity_err_cnt),
4223 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4224 CNTR_NORMAL,
4225 access_pcic_cpl_dat_q_unc_err_cnt),
4226 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4227 CNTR_NORMAL,
4228 access_pcic_cpl_hd_q_unc_err_cnt),
4229 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4230 CNTR_NORMAL,
4231 access_pcic_post_dat_q_unc_err_cnt),
4232 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4233 CNTR_NORMAL,
4234 access_pcic_post_hd_q_unc_err_cnt),
4235 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4236 CNTR_NORMAL,
4237 access_pcic_retry_sot_mem_unc_err_cnt),
4238 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4239 CNTR_NORMAL,
4240 access_pcic_retry_mem_unc_err),
4241 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4242 CNTR_NORMAL,
4243 access_pcic_n_post_dat_q_parity_err_cnt),
4244 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4245 CNTR_NORMAL,
4246 access_pcic_n_post_h_q_parity_err_cnt),
4247 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4248 CNTR_NORMAL,
4249 access_pcic_cpl_dat_q_cor_err_cnt),
4250 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4251 CNTR_NORMAL,
4252 access_pcic_cpl_hd_q_cor_err_cnt),
4253 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4254 CNTR_NORMAL,
4255 access_pcic_post_dat_q_cor_err_cnt),
4256 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4257 CNTR_NORMAL,
4258 access_pcic_post_hd_q_cor_err_cnt),
4259 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4260 CNTR_NORMAL,
4261 access_pcic_retry_sot_mem_cor_err_cnt),
4262 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4263 CNTR_NORMAL,
4264 access_pcic_retry_mem_cor_err_cnt),
4265 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4266 "CceCli1AsyncFifoDbgParityError", 0, 0,
4267 CNTR_NORMAL,
4268 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4269 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4270 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4271 CNTR_NORMAL,
4272 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4273 ),
4274 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4275 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4276 CNTR_NORMAL,
4277 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4278 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4279 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4280 CNTR_NORMAL,
4281 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4282 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4283 0, CNTR_NORMAL,
4284 access_cce_cli2_async_fifo_parity_err_cnt),
4285 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4286 CNTR_NORMAL,
4287 access_cce_csr_cfg_bus_parity_err_cnt),
4288 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4289 0, CNTR_NORMAL,
4290 access_cce_cli0_async_fifo_parity_err_cnt),
4291 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4292 CNTR_NORMAL,
4293 access_cce_rspd_data_parity_err_cnt),
4294 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4295 CNTR_NORMAL,
4296 access_cce_trgt_access_err_cnt),
4297 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4298 0, CNTR_NORMAL,
4299 access_cce_trgt_async_fifo_parity_err_cnt),
4300 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4301 CNTR_NORMAL,
4302 access_cce_csr_write_bad_addr_err_cnt),
4303 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4304 CNTR_NORMAL,
4305 access_cce_csr_read_bad_addr_err_cnt),
4306 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4307 CNTR_NORMAL,
4308 access_ccs_csr_parity_err_cnt),
4309
4310 /* RcvErrStatus */
4311 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4312 CNTR_NORMAL,
4313 access_rx_csr_parity_err_cnt),
4314 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4315 CNTR_NORMAL,
4316 access_rx_csr_write_bad_addr_err_cnt),
4317 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4318 CNTR_NORMAL,
4319 access_rx_csr_read_bad_addr_err_cnt),
4320 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4321 CNTR_NORMAL,
4322 access_rx_dma_csr_unc_err_cnt),
4323 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4324 CNTR_NORMAL,
4325 access_rx_dma_dq_fsm_encoding_err_cnt),
4326 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4327 CNTR_NORMAL,
4328 access_rx_dma_eq_fsm_encoding_err_cnt),
4329 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4330 CNTR_NORMAL,
4331 access_rx_dma_csr_parity_err_cnt),
4332 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4333 CNTR_NORMAL,
4334 access_rx_rbuf_data_cor_err_cnt),
4335 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4336 CNTR_NORMAL,
4337 access_rx_rbuf_data_unc_err_cnt),
4338 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4339 CNTR_NORMAL,
4340 access_rx_dma_data_fifo_rd_cor_err_cnt),
4341 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4342 CNTR_NORMAL,
4343 access_rx_dma_data_fifo_rd_unc_err_cnt),
4344 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4345 CNTR_NORMAL,
4346 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4347 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4348 CNTR_NORMAL,
4349 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4350 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4351 CNTR_NORMAL,
4352 access_rx_rbuf_desc_part2_cor_err_cnt),
4353 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4354 CNTR_NORMAL,
4355 access_rx_rbuf_desc_part2_unc_err_cnt),
4356 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4357 CNTR_NORMAL,
4358 access_rx_rbuf_desc_part1_cor_err_cnt),
4359 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4360 CNTR_NORMAL,
4361 access_rx_rbuf_desc_part1_unc_err_cnt),
4362 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4363 CNTR_NORMAL,
4364 access_rx_hq_intr_fsm_err_cnt),
4365 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4366 CNTR_NORMAL,
4367 access_rx_hq_intr_csr_parity_err_cnt),
4368 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4369 CNTR_NORMAL,
4370 access_rx_lookup_csr_parity_err_cnt),
4371 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4372 CNTR_NORMAL,
4373 access_rx_lookup_rcv_array_cor_err_cnt),
4374 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4375 CNTR_NORMAL,
4376 access_rx_lookup_rcv_array_unc_err_cnt),
4377 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4378 0, CNTR_NORMAL,
4379 access_rx_lookup_des_part2_parity_err_cnt),
4380 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4381 0, CNTR_NORMAL,
4382 access_rx_lookup_des_part1_unc_cor_err_cnt),
4383 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4384 CNTR_NORMAL,
4385 access_rx_lookup_des_part1_unc_err_cnt),
4386 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4387 CNTR_NORMAL,
4388 access_rx_rbuf_next_free_buf_cor_err_cnt),
4389 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4390 CNTR_NORMAL,
4391 access_rx_rbuf_next_free_buf_unc_err_cnt),
4392 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4393 "RxRbufFlInitWrAddrParityErr", 0, 0,
4394 CNTR_NORMAL,
4395 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4396 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4397 0, CNTR_NORMAL,
4398 access_rx_rbuf_fl_initdone_parity_err_cnt),
4399 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4400 0, CNTR_NORMAL,
4401 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4402 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4403 CNTR_NORMAL,
4404 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4405 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4406 CNTR_NORMAL,
4407 access_rx_rbuf_empty_err_cnt),
4408 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4409 CNTR_NORMAL,
4410 access_rx_rbuf_full_err_cnt),
4411 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4412 CNTR_NORMAL,
4413 access_rbuf_bad_lookup_err_cnt),
4414 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4415 CNTR_NORMAL,
4416 access_rbuf_ctx_id_parity_err_cnt),
4417 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4418 CNTR_NORMAL,
4419 access_rbuf_csr_qeopdw_parity_err_cnt),
4420 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4421 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4422 CNTR_NORMAL,
4423 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4424 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4425 "RxRbufCsrQTlPtrParityErr", 0, 0,
4426 CNTR_NORMAL,
4427 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4428 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4429 0, CNTR_NORMAL,
4430 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4431 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4432 0, CNTR_NORMAL,
4433 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4434 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4435 0, 0, CNTR_NORMAL,
4436 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4437 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4438 0, CNTR_NORMAL,
4439 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4440 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4441 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4442 CNTR_NORMAL,
4443 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4444 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4445 0, CNTR_NORMAL,
4446 access_rx_rbuf_block_list_read_cor_err_cnt),
4447 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4448 0, CNTR_NORMAL,
4449 access_rx_rbuf_block_list_read_unc_err_cnt),
4450 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4451 CNTR_NORMAL,
4452 access_rx_rbuf_lookup_des_cor_err_cnt),
4453 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4454 CNTR_NORMAL,
4455 access_rx_rbuf_lookup_des_unc_err_cnt),
4456 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4457 "RxRbufLookupDesRegUncCorErr", 0, 0,
4458 CNTR_NORMAL,
4459 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4460 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4461 CNTR_NORMAL,
4462 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4463 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4464 CNTR_NORMAL,
4465 access_rx_rbuf_free_list_cor_err_cnt),
4466 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4467 CNTR_NORMAL,
4468 access_rx_rbuf_free_list_unc_err_cnt),
4469 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4470 CNTR_NORMAL,
4471 access_rx_rcv_fsm_encoding_err_cnt),
4472 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4473 CNTR_NORMAL,
4474 access_rx_dma_flag_cor_err_cnt),
4475 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4476 CNTR_NORMAL,
4477 access_rx_dma_flag_unc_err_cnt),
4478 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4479 CNTR_NORMAL,
4480 access_rx_dc_sop_eop_parity_err_cnt),
4481 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4482 CNTR_NORMAL,
4483 access_rx_rcv_csr_parity_err_cnt),
4484 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4485 CNTR_NORMAL,
4486 access_rx_rcv_qp_map_table_cor_err_cnt),
4487 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4488 CNTR_NORMAL,
4489 access_rx_rcv_qp_map_table_unc_err_cnt),
4490 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4491 CNTR_NORMAL,
4492 access_rx_rcv_data_cor_err_cnt),
4493 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4494 CNTR_NORMAL,
4495 access_rx_rcv_data_unc_err_cnt),
4496 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4497 CNTR_NORMAL,
4498 access_rx_rcv_hdr_cor_err_cnt),
4499 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4500 CNTR_NORMAL,
4501 access_rx_rcv_hdr_unc_err_cnt),
4502 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4503 CNTR_NORMAL,
4504 access_rx_dc_intf_parity_err_cnt),
4505 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4506 CNTR_NORMAL,
4507 access_rx_dma_csr_cor_err_cnt),
4508 /* SendPioErrStatus */
4509 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4510 CNTR_NORMAL,
4511 access_pio_pec_sop_head_parity_err_cnt),
4512 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4513 CNTR_NORMAL,
4514 access_pio_pcc_sop_head_parity_err_cnt),
4515 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4516 0, 0, CNTR_NORMAL,
4517 access_pio_last_returned_cnt_parity_err_cnt),
4518 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4519 0, CNTR_NORMAL,
4520 access_pio_current_free_cnt_parity_err_cnt),
4521 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4522 CNTR_NORMAL,
4523 access_pio_reserved_31_err_cnt),
4524 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4525 CNTR_NORMAL,
4526 access_pio_reserved_30_err_cnt),
4527 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4528 CNTR_NORMAL,
4529 access_pio_ppmc_sop_len_err_cnt),
4530 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4531 CNTR_NORMAL,
4532 access_pio_ppmc_bqc_mem_parity_err_cnt),
4533 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4534 CNTR_NORMAL,
4535 access_pio_vl_fifo_parity_err_cnt),
4536 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4537 CNTR_NORMAL,
4538 access_pio_vlf_sop_parity_err_cnt),
4539 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4540 CNTR_NORMAL,
4541 access_pio_vlf_v1_len_parity_err_cnt),
4542 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4543 CNTR_NORMAL,
4544 access_pio_block_qw_count_parity_err_cnt),
4545 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4546 CNTR_NORMAL,
4547 access_pio_write_qw_valid_parity_err_cnt),
4548 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4549 CNTR_NORMAL,
4550 access_pio_state_machine_err_cnt),
4551 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4552 CNTR_NORMAL,
4553 access_pio_write_data_parity_err_cnt),
4554 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4555 CNTR_NORMAL,
4556 access_pio_host_addr_mem_cor_err_cnt),
4557 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4558 CNTR_NORMAL,
4559 access_pio_host_addr_mem_unc_err_cnt),
4560 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4561 CNTR_NORMAL,
4562 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4563 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4564 CNTR_NORMAL,
4565 access_pio_init_sm_in_err_cnt),
4566 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4567 CNTR_NORMAL,
4568 access_pio_ppmc_pbl_fifo_err_cnt),
4569 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4570 0, CNTR_NORMAL,
4571 access_pio_credit_ret_fifo_parity_err_cnt),
4572 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4573 CNTR_NORMAL,
4574 access_pio_v1_len_mem_bank1_cor_err_cnt),
4575 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4576 CNTR_NORMAL,
4577 access_pio_v1_len_mem_bank0_cor_err_cnt),
4578 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4579 CNTR_NORMAL,
4580 access_pio_v1_len_mem_bank1_unc_err_cnt),
4581 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4582 CNTR_NORMAL,
4583 access_pio_v1_len_mem_bank0_unc_err_cnt),
4584 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4585 CNTR_NORMAL,
4586 access_pio_sm_pkt_reset_parity_err_cnt),
4587 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4588 CNTR_NORMAL,
4589 access_pio_pkt_evict_fifo_parity_err_cnt),
4590 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4591 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4592 CNTR_NORMAL,
4593 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4594 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4595 CNTR_NORMAL,
4596 access_pio_sbrdctl_crrel_parity_err_cnt),
4597 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4598 CNTR_NORMAL,
4599 access_pio_pec_fifo_parity_err_cnt),
4600 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4601 CNTR_NORMAL,
4602 access_pio_pcc_fifo_parity_err_cnt),
4603 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4604 CNTR_NORMAL,
4605 access_pio_sb_mem_fifo1_err_cnt),
4606 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4607 CNTR_NORMAL,
4608 access_pio_sb_mem_fifo0_err_cnt),
4609 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4610 CNTR_NORMAL,
4611 access_pio_csr_parity_err_cnt),
4612 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4613 CNTR_NORMAL,
4614 access_pio_write_addr_parity_err_cnt),
4615 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4616 CNTR_NORMAL,
4617 access_pio_write_bad_ctxt_err_cnt),
4618 /* SendDmaErrStatus */
4619 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4620 0, CNTR_NORMAL,
4621 access_sdma_pcie_req_tracking_cor_err_cnt),
4622 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4623 0, CNTR_NORMAL,
4624 access_sdma_pcie_req_tracking_unc_err_cnt),
4625 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4626 CNTR_NORMAL,
4627 access_sdma_csr_parity_err_cnt),
4628 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4629 CNTR_NORMAL,
4630 access_sdma_rpy_tag_err_cnt),
4631 /* SendEgressErrStatus */
4632 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4633 CNTR_NORMAL,
4634 access_tx_read_pio_memory_csr_unc_err_cnt),
4635 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4636 0, CNTR_NORMAL,
4637 access_tx_read_sdma_memory_csr_err_cnt),
4638 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4639 CNTR_NORMAL,
4640 access_tx_egress_fifo_cor_err_cnt),
4641 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4642 CNTR_NORMAL,
4643 access_tx_read_pio_memory_cor_err_cnt),
4644 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4645 CNTR_NORMAL,
4646 access_tx_read_sdma_memory_cor_err_cnt),
4647 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4648 CNTR_NORMAL,
4649 access_tx_sb_hdr_cor_err_cnt),
4650 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4651 CNTR_NORMAL,
4652 access_tx_credit_overrun_err_cnt),
4653 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4654 CNTR_NORMAL,
4655 access_tx_launch_fifo8_cor_err_cnt),
4656 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4657 CNTR_NORMAL,
4658 access_tx_launch_fifo7_cor_err_cnt),
4659 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4660 CNTR_NORMAL,
4661 access_tx_launch_fifo6_cor_err_cnt),
4662 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4663 CNTR_NORMAL,
4664 access_tx_launch_fifo5_cor_err_cnt),
4665 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4666 CNTR_NORMAL,
4667 access_tx_launch_fifo4_cor_err_cnt),
4668 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4669 CNTR_NORMAL,
4670 access_tx_launch_fifo3_cor_err_cnt),
4671 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4672 CNTR_NORMAL,
4673 access_tx_launch_fifo2_cor_err_cnt),
4674 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4675 CNTR_NORMAL,
4676 access_tx_launch_fifo1_cor_err_cnt),
4677 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4678 CNTR_NORMAL,
4679 access_tx_launch_fifo0_cor_err_cnt),
4680 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4681 CNTR_NORMAL,
4682 access_tx_credit_return_vl_err_cnt),
4683 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4684 CNTR_NORMAL,
4685 access_tx_hcrc_insertion_err_cnt),
4686 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4687 CNTR_NORMAL,
4688 access_tx_egress_fifo_unc_err_cnt),
4689 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4690 CNTR_NORMAL,
4691 access_tx_read_pio_memory_unc_err_cnt),
4692 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4693 CNTR_NORMAL,
4694 access_tx_read_sdma_memory_unc_err_cnt),
4695 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4696 CNTR_NORMAL,
4697 access_tx_sb_hdr_unc_err_cnt),
4698 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4699 CNTR_NORMAL,
4700 access_tx_credit_return_parity_err_cnt),
4701 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4702 0, 0, CNTR_NORMAL,
4703 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4704 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4705 0, 0, CNTR_NORMAL,
4706 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4707 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4708 0, 0, CNTR_NORMAL,
4709 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4710 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4711 0, 0, CNTR_NORMAL,
4712 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4713 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4714 0, 0, CNTR_NORMAL,
4715 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4716 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4717 0, 0, CNTR_NORMAL,
4718 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4719 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4720 0, 0, CNTR_NORMAL,
4721 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4722 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4723 0, 0, CNTR_NORMAL,
4724 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4725 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4726 0, 0, CNTR_NORMAL,
4727 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4728 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4729 0, 0, CNTR_NORMAL,
4730 access_tx_sdma15_disallowed_packet_err_cnt),
4731 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4732 0, 0, CNTR_NORMAL,
4733 access_tx_sdma14_disallowed_packet_err_cnt),
4734 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4735 0, 0, CNTR_NORMAL,
4736 access_tx_sdma13_disallowed_packet_err_cnt),
4737 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4738 0, 0, CNTR_NORMAL,
4739 access_tx_sdma12_disallowed_packet_err_cnt),
4740 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4741 0, 0, CNTR_NORMAL,
4742 access_tx_sdma11_disallowed_packet_err_cnt),
4743 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4744 0, 0, CNTR_NORMAL,
4745 access_tx_sdma10_disallowed_packet_err_cnt),
4746 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4747 0, 0, CNTR_NORMAL,
4748 access_tx_sdma9_disallowed_packet_err_cnt),
4749 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4750 0, 0, CNTR_NORMAL,
4751 access_tx_sdma8_disallowed_packet_err_cnt),
4752 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4753 0, 0, CNTR_NORMAL,
4754 access_tx_sdma7_disallowed_packet_err_cnt),
4755 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4756 0, 0, CNTR_NORMAL,
4757 access_tx_sdma6_disallowed_packet_err_cnt),
4758 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4759 0, 0, CNTR_NORMAL,
4760 access_tx_sdma5_disallowed_packet_err_cnt),
4761 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4762 0, 0, CNTR_NORMAL,
4763 access_tx_sdma4_disallowed_packet_err_cnt),
4764 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4765 0, 0, CNTR_NORMAL,
4766 access_tx_sdma3_disallowed_packet_err_cnt),
4767 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4768 0, 0, CNTR_NORMAL,
4769 access_tx_sdma2_disallowed_packet_err_cnt),
4770 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4771 0, 0, CNTR_NORMAL,
4772 access_tx_sdma1_disallowed_packet_err_cnt),
4773 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4774 0, 0, CNTR_NORMAL,
4775 access_tx_sdma0_disallowed_packet_err_cnt),
4776 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4777 CNTR_NORMAL,
4778 access_tx_config_parity_err_cnt),
4779 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4780 CNTR_NORMAL,
4781 access_tx_sbrd_ctl_csr_parity_err_cnt),
4782 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4783 CNTR_NORMAL,
4784 access_tx_launch_csr_parity_err_cnt),
4785 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4786 CNTR_NORMAL,
4787 access_tx_illegal_vl_err_cnt),
4788 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4789 "TxSbrdCtlStateMachineParityErr", 0, 0,
4790 CNTR_NORMAL,
4791 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4792 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4793 CNTR_NORMAL,
4794 access_egress_reserved_10_err_cnt),
4795 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4796 CNTR_NORMAL,
4797 access_egress_reserved_9_err_cnt),
4798 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4799 0, 0, CNTR_NORMAL,
4800 access_tx_sdma_launch_intf_parity_err_cnt),
4801 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4802 CNTR_NORMAL,
4803 access_tx_pio_launch_intf_parity_err_cnt),
4804 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4805 CNTR_NORMAL,
4806 access_egress_reserved_6_err_cnt),
4807 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4808 CNTR_NORMAL,
4809 access_tx_incorrect_link_state_err_cnt),
4810 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4811 CNTR_NORMAL,
4812 access_tx_linkdown_err_cnt),
4813 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4814 "EgressFifoUnderrunOrParityErr", 0, 0,
4815 CNTR_NORMAL,
4816 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4817 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4818 CNTR_NORMAL,
4819 access_egress_reserved_2_err_cnt),
4820 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4821 CNTR_NORMAL,
4822 access_tx_pkt_integrity_mem_unc_err_cnt),
4823 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4824 CNTR_NORMAL,
4825 access_tx_pkt_integrity_mem_cor_err_cnt),
4826 /* SendErrStatus */
4827 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4828 CNTR_NORMAL,
4829 access_send_csr_write_bad_addr_err_cnt),
4830 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4831 CNTR_NORMAL,
4832 access_send_csr_read_bad_addr_err_cnt),
4833 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4834 CNTR_NORMAL,
4835 access_send_csr_parity_cnt),
4836 /* SendCtxtErrStatus */
4837 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4838 CNTR_NORMAL,
4839 access_pio_write_out_of_bounds_err_cnt),
4840 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4841 CNTR_NORMAL,
4842 access_pio_write_overflow_err_cnt),
4843 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4844 0, 0, CNTR_NORMAL,
4845 access_pio_write_crosses_boundary_err_cnt),
4846 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4847 CNTR_NORMAL,
4848 access_pio_disallowed_packet_err_cnt),
4849 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4850 CNTR_NORMAL,
4851 access_pio_inconsistent_sop_err_cnt),
4852 /* SendDmaEngErrStatus */
4853 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4854 0, 0, CNTR_NORMAL,
4855 access_sdma_header_request_fifo_cor_err_cnt),
4856 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4857 CNTR_NORMAL,
4858 access_sdma_header_storage_cor_err_cnt),
4859 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4860 CNTR_NORMAL,
4861 access_sdma_packet_tracking_cor_err_cnt),
4862 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4863 CNTR_NORMAL,
4864 access_sdma_assembly_cor_err_cnt),
4865 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4866 CNTR_NORMAL,
4867 access_sdma_desc_table_cor_err_cnt),
4868 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4869 0, 0, CNTR_NORMAL,
4870 access_sdma_header_request_fifo_unc_err_cnt),
4871 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4872 CNTR_NORMAL,
4873 access_sdma_header_storage_unc_err_cnt),
4874 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4875 CNTR_NORMAL,
4876 access_sdma_packet_tracking_unc_err_cnt),
4877 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4878 CNTR_NORMAL,
4879 access_sdma_assembly_unc_err_cnt),
4880 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4881 CNTR_NORMAL,
4882 access_sdma_desc_table_unc_err_cnt),
4883 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4884 CNTR_NORMAL,
4885 access_sdma_timeout_err_cnt),
4886 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4887 CNTR_NORMAL,
4888 access_sdma_header_length_err_cnt),
4889 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4890 CNTR_NORMAL,
4891 access_sdma_header_address_err_cnt),
4892 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4893 CNTR_NORMAL,
4894 access_sdma_header_select_err_cnt),
4895 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4896 CNTR_NORMAL,
4897 access_sdma_reserved_9_err_cnt),
4898 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4899 CNTR_NORMAL,
4900 access_sdma_packet_desc_overflow_err_cnt),
4901 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4902 CNTR_NORMAL,
4903 access_sdma_length_mismatch_err_cnt),
4904 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4905 CNTR_NORMAL,
4906 access_sdma_halt_err_cnt),
4907 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4908 CNTR_NORMAL,
4909 access_sdma_mem_read_err_cnt),
4910 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4911 CNTR_NORMAL,
4912 access_sdma_first_desc_err_cnt),
4913 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4914 CNTR_NORMAL,
4915 access_sdma_tail_out_of_bounds_err_cnt),
4916 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4917 CNTR_NORMAL,
4918 access_sdma_too_long_err_cnt),
4919 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4920 CNTR_NORMAL,
4921 access_sdma_gen_mismatch_err_cnt),
4922 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4923 CNTR_NORMAL,
4924 access_sdma_wrong_dw_err_cnt),
4925 };
4926
4927 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4928 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4929 CNTR_NORMAL),
4930 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4931 CNTR_NORMAL),
4932 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4933 CNTR_NORMAL),
4934 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4935 CNTR_NORMAL),
4936 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4937 CNTR_NORMAL),
4938 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4939 CNTR_NORMAL),
4940 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4941 CNTR_NORMAL),
4942 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4943 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4944 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4945 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4946 CNTR_SYNTH | CNTR_VL),
4947 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4948 CNTR_SYNTH | CNTR_VL),
4949 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4950 CNTR_SYNTH | CNTR_VL),
4951 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4952 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4953 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4954 access_sw_link_dn_cnt),
4955 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4956 access_sw_link_up_cnt),
4957 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4958 access_sw_unknown_frame_cnt),
4959 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4960 access_sw_xmit_discards),
4961 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4962 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4963 access_sw_xmit_discards),
4964 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4965 access_xmit_constraint_errs),
4966 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4967 access_rcv_constraint_errs),
4968 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4969 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4970 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4971 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4972 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4973 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4974 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4975 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4976 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4977 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4978 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4979 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
4980 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
4981 access_sw_cpu_rc_acks),
4982 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
4983 access_sw_cpu_rc_qacks),
4984 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
4985 access_sw_cpu_rc_delayed_comp),
4986 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
4987 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
4988 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
4989 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
4990 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
4991 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
4992 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
4993 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
4994 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
4995 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
4996 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
4997 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
4998 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
4999 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5000 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5001 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5002 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5003 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5004 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5005 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5006 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5007 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5008 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5009 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5010 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5011 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5012 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5013 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5014 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5015 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5016 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5017 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5018 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5019 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5020 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5021 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5022 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5023 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5024 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5025 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5026 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5027 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5028 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5029 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5030 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5031 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5032 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5033 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5034 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5035 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5036 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5037 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5038 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5039 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5040 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5041 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5042 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5043 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5044 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5045 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5046 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5047 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5048 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5049 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5050 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5051 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5052 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5053 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5054 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5055 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5056 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5057 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5058 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5059 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5060 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5061 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5062 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5063 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5064 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5065 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5066 };
5067
5068 /* ======================================================================== */
5069
5070 /* return true if this is chip revision A */
5071 int is_ax(struct hfi1_devdata *dd)
5072 {
5073 u8 chip_rev_minor =
5074 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5075 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5076 return (chip_rev_minor & 0xf0) == 0;
5077 }
5078
5079 /* return true if this is chip revision B */
5080 int is_bx(struct hfi1_devdata *dd)
5081 {
5082 u8 chip_rev_minor =
5083 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5084 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5085 return (chip_rev_minor & 0xF0) == 0x10;
5086 }
5087
5088 /*
5089 * Append string s to buffer buf. Arguments curp and lenp are the current
5090 * position and remaining length, respectively.
5091 *
5092 * return 0 on success, 1 on out of room
5093 */
5094 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5095 {
5096 char *p = *curp;
5097 int len = *lenp;
5098 int result = 0; /* success */
5099 char c;
5100
5101 /* add a comma, if this is not the first entry in the buffer */
5102 if (p != buf) {
5103 if (len == 0) {
5104 result = 1; /* out of room */
5105 goto done;
5106 }
5107 *p++ = ',';
5108 len--;
5109 }
5110
5111 /* copy the string */
5112 while ((c = *s++) != 0) {
5113 if (len == 0) {
5114 result = 1; /* out of room */
5115 goto done;
5116 }
5117 *p++ = c;
5118 len--;
5119 }
5120
5121 done:
5122 /* write return values */
5123 *curp = p;
5124 *lenp = len;
5125
5126 return result;
5127 }
5128
5129 /*
5130 * Using the given flag table, print a comma separated string into
5131 * the buffer. End in '*' if the buffer is too short.
5132 */
5133 static char *flag_string(char *buf, int buf_len, u64 flags,
5134 struct flag_table *table, int table_size)
5135 {
5136 char extra[32];
5137 char *p = buf;
5138 int len = buf_len;
5139 int no_room = 0;
5140 int i;
5141
5142 /* make sure there are at least 2 bytes so we can form "*" */
5143 if (len < 2)
5144 return "";
5145
5146 len--; /* leave room for a nul */
5147 for (i = 0; i < table_size; i++) {
5148 if (flags & table[i].flag) {
5149 no_room = append_str(buf, &p, &len, table[i].str);
5150 if (no_room)
5151 break;
5152 flags &= ~table[i].flag;
5153 }
5154 }
5155
5156 /* any undocumented bits left? */
5157 if (!no_room && flags) {
5158 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5159 no_room = append_str(buf, &p, &len, extra);
5160 }
5161
5162 /* add * if ran out of room */
5163 if (no_room) {
5164 /* may need to back up to add space for a '*' */
5165 if (len == 0)
5166 --p;
5167 *p++ = '*';
5168 }
5169
5170 /* add final nul - space already allocated above */
5171 *p = 0;
5172 return buf;
5173 }
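/*
 * Example with illustrative values (not an actual flag table from this
 * driver): given a two-entry table { {0x1, "ErrA"}, {0x2, "ErrB"} } and
 * flags == 0x7, flag_string() produces "ErrA,ErrB,bits 0x4".  If buf_len
 * is too small for the full string, the output ends in '*'.
 */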
5174
5175 /* first 8 CCE error interrupt source names */
5176 static const char * const cce_misc_names[] = {
5177 "CceErrInt", /* 0 */
5178 "RxeErrInt", /* 1 */
5179 "MiscErrInt", /* 2 */
5180 "Reserved3", /* 3 */
5181 "PioErrInt", /* 4 */
5182 "SDmaErrInt", /* 5 */
5183 "EgressErrInt", /* 6 */
5184 "TxeErrInt" /* 7 */
5185 };
5186
5187 /*
5188 * Return the miscellaneous error interrupt name.
5189 */
5190 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5191 {
5192 if (source < ARRAY_SIZE(cce_misc_names))
5193 snprintf(buf, bsize, "%s", cce_misc_names[source]);
5194 else
5195 snprintf(buf,
5196 bsize,
5197 "Reserved%u",
5198 source + IS_GENERAL_ERR_START);
5199
5200 return buf;
5201 }
5202
5203 /*
5204 * Return the SDMA engine error interrupt name.
5205 */
5206 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5207 {
5208 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5209 return buf;
5210 }
5211
5212 /*
5213 * Return the send context error interrupt name.
5214 */
5215 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5216 {
5217 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5218 return buf;
5219 }
5220
5221 static const char * const various_names[] = {
5222 "PbcInt",
5223 "GpioAssertInt",
5224 "Qsfp1Int",
5225 "Qsfp2Int",
5226 "TCritInt"
5227 };
5228
5229 /*
5230 * Return the various interrupt name.
5231 */
5232 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5233 {
5234 if (source < ARRAY_SIZE(various_names))
5235 snprintf(buf, bsize, "%s", various_names[source]);
5236 else
5237 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5238 return buf;
5239 }
5240
5241 /*
5242 * Return the DC interrupt name.
5243 */
5244 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5245 {
5246 static const char * const dc_int_names[] = {
5247 "common",
5248 "lcb",
5249 "8051",
5250 "lbm" /* local block merge */
5251 };
5252
5253 if (source < ARRAY_SIZE(dc_int_names))
5254 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5255 else
5256 snprintf(buf, bsize, "DCInt%u", source);
5257 return buf;
5258 }
5259
5260 static const char * const sdma_int_names[] = {
5261 "SDmaInt",
5262 "SdmaIdleInt",
5263 "SdmaProgressInt",
5264 };
5265
5266 /*
5267 * Return the SDMA engine interrupt name.
5268 */
5269 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5270 {
5271 /* what interrupt */
5272 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5273 /* which engine */
5274 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5275
5276 if (likely(what < 3))
5277 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5278 else
5279 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5280 return buf;
5281 }
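/*
 * Example: with 16 SDMA engines (TXE_NUM_SDMA_ENGINES), source 17 decodes
 * as what = 1, which = 1, i.e. "SDmaIdleInt1".
 */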
5282
5283 /*
5284 * Return the receive available interrupt name.
5285 */
5286 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5287 {
5288 snprintf(buf, bsize, "RcvAvailInt%u", source);
5289 return buf;
5290 }
5291
5292 /*
5293 * Return the receive urgent interrupt name.
5294 */
5295 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5296 {
5297 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5298 return buf;
5299 }
5300
5301 /*
5302 * Return the send credit interrupt name.
5303 */
5304 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5305 {
5306 snprintf(buf, bsize, "SendCreditInt%u", source);
5307 return buf;
5308 }
5309
5310 /*
5311 * Return the reserved interrupt name.
5312 */
5313 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5314 {
5315 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5316 return buf;
5317 }
5318
5319 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5320 {
5321 return flag_string(buf, buf_len, flags,
5322 cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5323 }
5324
5325 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5326 {
5327 return flag_string(buf, buf_len, flags,
5328 rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5329 }
5330
5331 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5332 {
5333 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5334 ARRAY_SIZE(misc_err_status_flags));
5335 }
5336
5337 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5338 {
5339 return flag_string(buf, buf_len, flags,
5340 pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5341 }
5342
5343 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5344 {
5345 return flag_string(buf, buf_len, flags,
5346 sdma_err_status_flags,
5347 ARRAY_SIZE(sdma_err_status_flags));
5348 }
5349
5350 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5351 {
5352 return flag_string(buf, buf_len, flags,
5353 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5354 }
5355
5356 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5357 {
5358 return flag_string(buf, buf_len, flags,
5359 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5360 }
5361
5362 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5363 {
5364 return flag_string(buf, buf_len, flags,
5365 send_err_status_flags,
5366 ARRAY_SIZE(send_err_status_flags));
5367 }
5368
5369 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5370 {
5371 char buf[96];
5372 int i = 0;
5373
5374 /*
5375 * For most of these errors, there is nothing that can be done except
5376 * report or record it.
5377 */
5378 dd_dev_info(dd, "CCE Error: %s\n",
5379 cce_err_status_string(buf, sizeof(buf), reg));
5380
5381 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5382 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5383 /* this error requires a manual drop into SPC freeze mode */
5384 /* then a fix up */
5385 start_freeze_handling(dd->pport, FREEZE_SELF);
5386 }
5387
5388 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5389 if (reg & (1ull << i)) {
5390 incr_cntr64(&dd->cce_err_status_cnt[i]);
5391 /* maintain a counter over all cce_err_status errors */
5392 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5393 }
5394 }
5395 }
5396
5397 /*
5398 * Check counters for receive errors that do not have an interrupt
5399 * associated with them.
5400 */
5401 #define RCVERR_CHECK_TIME 10
5402 static void update_rcverr_timer(unsigned long opaque)
5403 {
5404 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5405 struct hfi1_pportdata *ppd = dd->pport;
5406 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5407
5408 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5409 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5410 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5411 set_link_down_reason(ppd,
5412 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5413 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5414 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5415 }
5416 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5417
5418 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5419 }
5420
5421 static int init_rcverr(struct hfi1_devdata *dd)
5422 {
5423 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5424 /* Assume the hardware counter has been reset */
5425 dd->rcv_ovfl_cnt = 0;
5426 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5427 }
5428
5429 static void free_rcverr(struct hfi1_devdata *dd)
5430 {
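/* .data is non-zero only if init_rcverr() set up the timer */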
5431 if (dd->rcverr_timer.data)
5432 del_timer_sync(&dd->rcverr_timer);
5433 dd->rcverr_timer.data = 0;
5434 }
5435
5436 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5437 {
5438 char buf[96];
5439 int i = 0;
5440
5441 dd_dev_info(dd, "Receive Error: %s\n",
5442 rxe_err_status_string(buf, sizeof(buf), reg));
5443
5444 if (reg & ALL_RXE_FREEZE_ERR) {
5445 int flags = 0;
5446
5447 /*
5448 * Freeze mode recovery is disabled for the errors
5449 * in RXE_FREEZE_ABORT_MASK
5450 */
5451 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5452 flags = FREEZE_ABORT;
5453
5454 start_freeze_handling(dd->pport, flags);
5455 }
5456
5457 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5458 if (reg & (1ull << i))
5459 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5460 }
5461 }
5462
5463 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5464 {
5465 char buf[96];
5466 int i = 0;
5467
5468 dd_dev_info(dd, "Misc Error: %s",
5469 misc_err_status_string(buf, sizeof(buf), reg));
5470 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5471 if (reg & (1ull << i))
5472 incr_cntr64(&dd->misc_err_status_cnt[i]);
5473 }
5474 }
5475
5476 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5477 {
5478 char buf[96];
5479 int i = 0;
5480
5481 dd_dev_info(dd, "PIO Error: %s\n",
5482 pio_err_status_string(buf, sizeof(buf), reg));
5483
5484 if (reg & ALL_PIO_FREEZE_ERR)
5485 start_freeze_handling(dd->pport, 0);
5486
5487 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5488 if (reg & (1ull << i))
5489 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5490 }
5491 }
5492
5493 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5494 {
5495 char buf[96];
5496 int i = 0;
5497
5498 dd_dev_info(dd, "SDMA Error: %s\n",
5499 sdma_err_status_string(buf, sizeof(buf), reg));
5500
5501 if (reg & ALL_SDMA_FREEZE_ERR)
5502 start_freeze_handling(dd->pport, 0);
5503
5504 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5505 if (reg & (1ull << i))
5506 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5507 }
5508 }
5509
5510 static void count_port_inactive(struct hfi1_devdata *dd)
5511 {
5512 struct hfi1_pportdata *ppd = dd->pport;
5513
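/* saturate the discard counter rather than letting it wrap */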
5514 if (ppd->port_xmit_discards < ~(u64)0)
5515 ppd->port_xmit_discards++;
5516 }
5517
5518 /*
5519 * We have had a "disallowed packet" error during egress. Determine the
5520 * integrity check that failed, and update the relevant error counter, etc.
5521 *
5522 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5523 * bit of state per integrity check, and so we can miss the reason for an
5524 * egress error if more than one packet fails the same integrity check
5525 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5526 */
5527 static void handle_send_egress_err_info(struct hfi1_devdata *dd)
5528 {
5529 struct hfi1_pportdata *ppd = dd->pport;
5530 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5531 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5532 char buf[96];
5533
5534 /* clear down all observed info as quickly as possible after read */
5535 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5536
5537 dd_dev_info(dd,
5538 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5539 info, egress_err_info_string(buf, sizeof(buf), info), src);
5540
5541 /* Eventually add other counters for each bit */
5542
5543 if (info & SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK) {
5544 if (ppd->port_xmit_discards < ~(u64)0)
5545 ppd->port_xmit_discards++;
5546 }
5547 }
5548
5549 /*
5550 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5551 * register. Does it represent a 'port inactive' error?
5552 */
5553 static inline int port_inactive_err(u64 posn)
5554 {
5555 return (posn >= SEES(TX_LINKDOWN) &&
5556 posn <= SEES(TX_INCORRECT_LINK_STATE));
5557 }
5558
5559 /*
5560 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5561 * register. Does it represent a 'disallowed packet' error?
5562 */
5563 static inline int disallowed_pkt_err(u64 posn)
5564 {
5565 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5566 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5567 }
5568
5569 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5570 {
5571 u64 reg_copy = reg, handled = 0;
5572 char buf[96];
5573 int i = 0;
5574
5575 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5576 start_freeze_handling(dd->pport, 0);
5577 if (is_ax(dd) && (reg &
5578 SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
5579 && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5580 start_freeze_handling(dd->pport, 0);
5581
5582 while (reg_copy) {
5583 int posn = fls64(reg_copy);
5584 /*
5585 * fls64() returns a 1-based offset, but we generally
5586 * want 0-based offsets.
5587 */
5588 int shift = posn - 1;
5589
5590 if (port_inactive_err(shift)) {
5591 count_port_inactive(dd);
5592 handled |= (1ULL << shift);
5593 } else if (disallowed_pkt_err(shift)) {
5594 handle_send_egress_err_info(dd);
5595 handled |= (1ULL << shift);
5596 }
5597 clear_bit(shift, (unsigned long *)&reg_copy);
5598 }
5599
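/* report and count only the bits not handled above */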
5600 reg &= ~handled;
5601
5602 if (reg)
5603 dd_dev_info(dd, "Egress Error: %s\n",
5604 egress_err_status_string(buf, sizeof(buf), reg));
5605
5606 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5607 if (reg & (1ull << i))
5608 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5609 }
5610 }
5611
5612 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5613 {
5614 char buf[96];
5615 int i = 0;
5616
5617 dd_dev_info(dd, "Send Error: %s\n",
5618 send_err_status_string(buf, sizeof(buf), reg));
5619
5620 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5621 if (reg & (1ull << i))
5622 incr_cntr64(&dd->send_err_status_cnt[i]);
5623 }
5624 }
5625
5626 /*
5627 * The maximum number of times the error clear down will loop before
5628 * blocking a repeating error. This value is arbitrary.
5629 */
5630 #define MAX_CLEAR_COUNT 20
5631
5632 /*
5633 * Clear and handle an error register. All error interrupts are funneled
5634 * through here to have a central location to correctly handle single-
5635 * or multi-shot errors.
5636 *
5637 * For non per-context registers, call this routine with a context value
5638 * of 0 so the per-context offset is zero.
5639 *
5640 * If the handler loops too many times, assume that something is wrong
5641 * and can't be fixed, so mask the error bits.
5642 */
5643 static void interrupt_clear_down(struct hfi1_devdata *dd,
5644 u32 context,
5645 const struct err_reg_info *eri)
5646 {
5647 u64 reg;
5648 u32 count;
5649
5650 /* read in a loop until no more errors are seen */
5651 count = 0;
5652 while (1) {
5653 reg = read_kctxt_csr(dd, context, eri->status);
5654 if (reg == 0)
5655 break;
5656 write_kctxt_csr(dd, context, eri->clear, reg);
5657 if (likely(eri->handler))
5658 eri->handler(dd, context, reg);
5659 count++;
5660 if (count > MAX_CLEAR_COUNT) {
5661 u64 mask;
5662
5663 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5664 eri->desc, reg);
5665 /*
5666 * Read-modify-write so any other masked bits
5667 * remain masked.
5668 */
5669 mask = read_kctxt_csr(dd, context, eri->mask);
5670 mask &= ~reg;
5671 write_kctxt_csr(dd, context, eri->mask, mask);
5672 break;
5673 }
5674 }
5675 }
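/*
 * Callers pass an err_reg_info that supplies the status, clear, and mask
 * CSR offsets plus the handler; see misc_errs[], sdma_eng_err, and
 * various_err[] used by the is_*_int() routines below.
 */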
5676
5677 /*
5678 * CCE block "misc" interrupt. Source is < 16.
5679 */
5680 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5681 {
5682 const struct err_reg_info *eri = &misc_errs[source];
5683
5684 if (eri->handler) {
5685 interrupt_clear_down(dd, 0, eri);
5686 } else {
5687 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5688 source);
5689 }
5690 }
5691
5692 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5693 {
5694 return flag_string(buf, buf_len, flags,
5695 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5696 }
5697
5698 /*
5699 * Send context error interrupt. Source (hw_context) is < 160.
5700 *
5701 * All send context errors cause the send context to halt. The normal
5702 * clear-down mechanism cannot be used because we cannot clear the
5703 * error bits until several other long-running items are done first.
5704 * This is OK because with the context halted, nothing else is going
5705 * to happen on it anyway.
5706 */
5707 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5708 unsigned int hw_context)
5709 {
5710 struct send_context_info *sci;
5711 struct send_context *sc;
5712 char flags[96];
5713 u64 status;
5714 u32 sw_index;
5715 int i = 0;
5716
5717 sw_index = dd->hw_to_sw[hw_context];
5718 if (sw_index >= dd->num_send_contexts) {
5719 dd_dev_err(dd,
5720 "out of range sw index %u for send context %u\n",
5721 sw_index, hw_context);
5722 return;
5723 }
5724 sci = &dd->send_contexts[sw_index];
5725 sc = sci->sc;
5726 if (!sc) {
5727 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5728 sw_index, hw_context);
5729 return;
5730 }
5731
5732 /* tell the software that a halt has begun */
5733 sc_stop(sc, SCF_HALTED);
5734
5735 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5736
5737 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5738 send_context_err_status_string(flags, sizeof(flags), status));
5739
5740 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5741 handle_send_egress_err_info(dd);
5742
5743 /*
5744 * Automatically restart halted kernel contexts out of interrupt
5745 * context. User contexts must ask the driver to restart the context.
5746 */
5747 if (sc->type != SC_USER)
5748 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5749
5750 /*
5751 * Update the counters for the corresponding status bits.
5752 * Note that these particular counters are aggregated over all
5753 * 160 contexts.
5754 */
5755 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5756 if (status & (1ull << i))
5757 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5758 }
5759 }
5760
5761 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5762 unsigned int source, u64 status)
5763 {
5764 struct sdma_engine *sde;
5765 int i = 0;
5766
5767 sde = &dd->per_sdma[source];
5768 #ifdef CONFIG_SDMA_VERBOSITY
5769 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5770 slashstrip(__FILE__), __LINE__, __func__);
5771 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5772 sde->this_idx, source, (unsigned long long)status);
5773 #endif
5774 sde->err_cnt++;
5775 sdma_engine_error(sde, status);
5776
5777 /*
5778 * Update the counters for the corresponding status bits.
5779 * Note that these particular counters are aggregated over
5780 * all 16 DMA engines.
5781 */
5782 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5783 if (status & (1ull << i))
5784 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5785 }
5786 }
5787
5788 /*
5789 * CCE block SDMA error interrupt. Source is < 16.
5790 */
5791 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5792 {
5793 #ifdef CONFIG_SDMA_VERBOSITY
5794 struct sdma_engine *sde = &dd->per_sdma[source];
5795
5796 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5797 slashstrip(__FILE__), __LINE__, __func__);
5798 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5799 source);
5800 sdma_dumpstate(sde);
5801 #endif
5802 interrupt_clear_down(dd, source, &sdma_eng_err);
5803 }
5804
5805 /*
5806 * CCE block "various" interrupt. Source is < 8.
5807 */
5808 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5809 {
5810 const struct err_reg_info *eri = &various_err[source];
5811
5812 /*
5813 * TCritInt cannot go through interrupt_clear_down()
5814 * because it is not a second tier interrupt. The handler
5815 * should be called directly.
5816 */
5817 if (source == TCRIT_INT_SOURCE)
5818 handle_temp_err(dd);
5819 else if (eri->handler)
5820 interrupt_clear_down(dd, 0, eri);
5821 else
5822 dd_dev_info(dd,
5823 "%s: Unimplemented/reserved interrupt %d\n",
5824 __func__, source);
5825 }
5826
5827 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5828 {
5829 /* source is always zero */
5830 struct hfi1_pportdata *ppd = dd->pport;
5831 unsigned long flags;
5832 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5833
5834 if (reg & QSFP_HFI0_MODPRST_N) {
5835
5836 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5837 __func__);
5838
5839 if (!qsfp_mod_present(ppd)) {
5840 ppd->driver_link_ready = 0;
5841 /*
5842 * Cable removed, reset all our information about the
5843 * cache and cable capabilities
5844 */
5845
5846 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5847 /*
5848 * We don't set cache_refresh_required here as we expect
5849 * an interrupt when a cable is inserted
5850 */
5851 ppd->qsfp_info.cache_valid = 0;
5852 ppd->qsfp_info.qsfp_interrupt_functional = 0;
5853 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5854 flags);
5855 write_csr(dd,
5856 dd->hfi1_id ?
5857 ASIC_QSFP2_INVERT :
5858 ASIC_QSFP1_INVERT,
5859 qsfp_int_mgmt);
5860
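/*
 * Latch "media not installed" as the offline reason unless a
 * lower-valued reason (other than "none") is already recorded.
 */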
5861 if ((ppd->offline_disabled_reason >
5862 HFI1_ODR_MASK(
5863 OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED)) ||
5864 (ppd->offline_disabled_reason ==
5865 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5866 ppd->offline_disabled_reason =
5867 HFI1_ODR_MASK(
5868 OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED);
5869
5870 if (ppd->host_link_state == HLS_DN_POLL) {
5871 /*
5872 * The link is still in POLL. This means
5873 * that the normal link down processing
5874 * will not happen. We have to do it here
5875 * before turning the DC off.
5876 */
5877 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5878 }
5879 } else {
5880 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5881 ppd->qsfp_info.cache_valid = 0;
5882 ppd->qsfp_info.cache_refresh_required = 1;
5883 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5884 flags);
5885
5886 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
5887 write_csr(dd,
5888 dd->hfi1_id ?
5889 ASIC_QSFP2_INVERT :
5890 ASIC_QSFP1_INVERT,
5891 qsfp_int_mgmt);
5892 }
5893 }
5894
5895 if (reg & QSFP_HFI0_INT_N) {
5896
5897 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5898 __func__);
5899 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5900 ppd->qsfp_info.check_interrupt_flags = 1;
5901 ppd->qsfp_info.qsfp_interrupt_functional = 1;
5902 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
5903 }
5904
5905 /* Schedule the QSFP work only if there is a cable attached. */
5906 if (qsfp_mod_present(ppd))
5907 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
5908 }
5909
5910 static int request_host_lcb_access(struct hfi1_devdata *dd)
5911 {
5912 int ret;
5913
5914 ret = do_8051_command(dd, HCMD_MISC,
5915 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5916 NULL);
5917 if (ret != HCMD_SUCCESS) {
5918 dd_dev_err(dd, "%s: command failed with error %d\n",
5919 __func__, ret);
5920 }
5921 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5922 }
5923
5924 static int request_8051_lcb_access(struct hfi1_devdata *dd)
5925 {
5926 int ret;
5927
5928 ret = do_8051_command(dd, HCMD_MISC,
5929 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5930 NULL);
5931 if (ret != HCMD_SUCCESS) {
5932 dd_dev_err(dd, "%s: command failed with error %d\n",
5933 __func__, ret);
5934 }
5935 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5936 }
5937
5938 /*
5939 * Set the LCB selector - allow host access. The DCC selector always
5940 * points to the host.
5941 */
5942 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
5943 {
5944 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5945 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
5946 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
5947 }
5948
5949 /*
5950 * Clear the LCB selector - allow 8051 access. The DCC selector always
5951 * points to the host.
5952 */
5953 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
5954 {
5955 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5956 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
5957 }
5958
5959 /*
5960 * Acquire LCB access from the 8051. If the host already has access,
5961 * just increment a counter. Otherwise, inform the 8051 that the
5962 * host is taking access.
5963 *
5964 * Returns:
5965 * 0 on success
5966 * -EBUSY if the 8051 has control and cannot be disturbed
5967 * -errno if unable to acquire access from the 8051
5968 */
5969 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
5970 {
5971 struct hfi1_pportdata *ppd = dd->pport;
5972 int ret = 0;
5973
5974 /*
5975 * Use the host link state lock so the operation of this routine
5976 * { link state check, selector change, count increment } can occur
5977 * as a unit against a link state change. Otherwise there is a
5978 * race between the state change and the count increment.
5979 */
5980 if (sleep_ok) {
5981 mutex_lock(&ppd->hls_lock);
5982 } else {
5983 while (!mutex_trylock(&ppd->hls_lock))
5984 udelay(1);
5985 }
5986
5987 /* this access is valid only when the link is up */
5988 if ((ppd->host_link_state & HLS_UP) == 0) {
5989 dd_dev_info(dd, "%s: link state %s not up\n",
5990 __func__, link_state_name(ppd->host_link_state));
5991 ret = -EBUSY;
5992 goto done;
5993 }
5994
5995 if (dd->lcb_access_count == 0) {
5996 ret = request_host_lcb_access(dd);
5997 if (ret) {
5998 dd_dev_err(dd,
5999 "%s: unable to acquire LCB access, err %d\n",
6000 __func__, ret);
6001 goto done;
6002 }
6003 set_host_lcb_access(dd);
6004 }
6005 dd->lcb_access_count++;
6006 done:
6007 mutex_unlock(&ppd->hls_lock);
6008 return ret;
6009 }
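/*
 * Illustrative pairing (not a call site in this file): a caller needing
 * host LCB access would do
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		... access LCB CSRs ...
 *		release_lcb_access(dd, 1);
 *	}
 */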
6010
6011 /*
6012 * Release LCB access by decrementing the use count. If the count is moving
6013 * from 1 to 0, inform 8051 that it has control back.
6014 *
6015 * Returns:
6016 * 0 on success
6017 * -errno if unable to release access to the 8051
6018 */
6019 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6020 {
6021 int ret = 0;
6022
6023 /*
6024 * Use the host link state lock because the acquire needed it.
6025 * Here, we only need to keep { selector change, count decrement }
6026 * as a unit.
6027 */
6028 if (sleep_ok) {
6029 mutex_lock(&dd->pport->hls_lock);
6030 } else {
6031 while (!mutex_trylock(&dd->pport->hls_lock))
6032 udelay(1);
6033 }
6034
6035 if (dd->lcb_access_count == 0) {
6036 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6037 __func__);
6038 goto done;
6039 }
6040
6041 if (dd->lcb_access_count == 1) {
6042 set_8051_lcb_access(dd);
6043 ret = request_8051_lcb_access(dd);
6044 if (ret) {
6045 dd_dev_err(dd,
6046 "%s: unable to release LCB access, err %d\n",
6047 __func__, ret);
6048 /* restore host access if the grant didn't work */
6049 set_host_lcb_access(dd);
6050 goto done;
6051 }
6052 }
6053 dd->lcb_access_count--;
6054 done:
6055 mutex_unlock(&dd->pport->hls_lock);
6056 return ret;
6057 }
6058
6059 /*
6060 * Initialize LCB access variables and state. Called during driver load,
6061 * after most of the initialization is finished.
6062 *
6063 * The DC default is LCB access on for the host. The driver defaults to
6064 * leaving access to the 8051. Assign access now - this constrains the call
6065 * to this routine to be after all LCB set-up is done. In particular, after
6066 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6067 */
6068 static void init_lcb_access(struct hfi1_devdata *dd)
6069 {
6070 dd->lcb_access_count = 0;
6071 }
6072
6073 /*
6074 * Write a response back to a 8051 request.
6075 */
6076 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6077 {
6078 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6079 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6080 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6081 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6082 }
6083
6084 /*
6085 * Handle requests from the 8051.
6086 */
6087 static void handle_8051_request(struct hfi1_devdata *dd)
6088 {
6089 u64 reg;
6090 u16 data;
6091 u8 type;
6092
6093 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6094 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6095 return; /* no request */
6096
6097 /* zero out COMPLETED so the response is seen */
6098 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6099
6100 /* extract request details */
6101 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6102 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6103 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6104 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6105
6106 switch (type) {
6107 case HREQ_LOAD_CONFIG:
6108 case HREQ_SAVE_CONFIG:
6109 case HREQ_READ_CONFIG:
6110 case HREQ_SET_TX_EQ_ABS:
6111 case HREQ_SET_TX_EQ_REL:
6112 case HREQ_ENABLE:
6113 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6114 type);
6115 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6116 break;
6117
6118 case HREQ_CONFIG_DONE:
6119 hreq_response(dd, HREQ_SUCCESS, 0);
6120 break;
6121
6122 case HREQ_INTERFACE_TEST:
6123 hreq_response(dd, HREQ_SUCCESS, data);
6124 break;
6125
6126 default:
6127 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6128 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6129 break;
6130 }
6131 }
6132
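/*
 * Pack the AU, total credit limit, and shared credit limit fields into
 * SEND_CM_GLOBAL_CREDIT.
 */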
6133 static void write_global_credit(struct hfi1_devdata *dd,
6134 u8 vau, u16 total, u16 shared)
6135 {
6136 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6137 ((u64)total
6138 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6139 | ((u64)shared
6140 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6141 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6142 }
6143
6144 /*
6145 * Set up initial VL15 credits of the remote. Assumes the rest of
6146 * the CM credit registers are zero from a previous global or credit reset.
6147 */
6148 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6149 {
6150 /* leave shared count at zero for both global and VL15 */
6151 write_global_credit(dd, vau, vl15buf, 0);
6152
6153 /* We may need some credits for another VL when sending packets
6154 * with the snoop interface. Dividing it down the middle for VL15
6155 * and VL0 should suffice.
6156 */
6157 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6158 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6159 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6160 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6161 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6162 } else {
6163 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6164 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6165 }
6166 }
6167
6168 /*
6169 * Zero all credit details from the previous connection and
6170 * reset the CM manager's internal counters.
6171 */
6172 void reset_link_credits(struct hfi1_devdata *dd)
6173 {
6174 int i;
6175
6176 /* remove all previous VL credit limits */
6177 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6178 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6179 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6180 write_global_credit(dd, 0, 0, 0);
6181 /* reset the CM block */
6182 pio_send_control(dd, PSC_CM_RESET);
6183 }
6184
6185 /* convert a vCU to a CU */
6186 static u32 vcu_to_cu(u8 vcu)
6187 {
6188 return 1 << vcu;
6189 }
6190
6191 /* convert a CU to a vCU */
6192 static u8 cu_to_vcu(u32 cu)
6193 {
6194 return ilog2(cu);
6195 }
6196
6197 /* convert a vAU to an AU */
6198 static u32 vau_to_au(u8 vau)
6199 {
6200 return 8 * (1 << vau);
6201 }
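/*
 * For example, vcu_to_cu(3) == 8 and vau_to_au(2) == 32; cu_to_vcu()
 * inverts vcu_to_cu() for power-of-two credit units.
 */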
6202
6203 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6204 {
6205 ppd->sm_trap_qp = 0x0;
6206 ppd->sa_qp = 0x1;
6207 }
6208
6209 /*
6210 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6211 */
6212 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6213 {
6214 u64 reg;
6215
6216 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6217 write_csr(dd, DC_LCB_CFG_RUN, 0);
6218 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6219 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6220 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6221 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6222 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6223 reg = read_csr(dd, DCC_CFG_RESET);
6224 write_csr(dd, DCC_CFG_RESET,
6225 reg
6226 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6227 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6228 (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6229 if (!abort) {
6230 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6231 write_csr(dd, DCC_CFG_RESET, reg);
6232 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6233 }
6234 }
6235
6236 /*
6237 * This routine should be called after the link has been transitioned to
6238 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6239 * reset).
6240 *
6241 * The expectation is that the caller of this routine would have taken
6242 * care of properly transitioning the link into the correct state.
6243 */
6244 static void dc_shutdown(struct hfi1_devdata *dd)
6245 {
6246 unsigned long flags;
6247
6248 spin_lock_irqsave(&dd->dc8051_lock, flags);
6249 if (dd->dc_shutdown) {
6250 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6251 return;
6252 }
6253 dd->dc_shutdown = 1;
6254 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6255 /* Shutdown the LCB */
6256 lcb_shutdown(dd, 1);
6257 /* Going to OFFLINE would have causes the 8051 to put the
6258 * SerDes into reset already. Just need to shut down the 8051,
6259 * itself. */
6260 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6261 }
6262
6263 /* Calling this after the DC has been brought out of reset should not
6264 * do any damage. */
6265 static void dc_start(struct hfi1_devdata *dd)
6266 {
6267 unsigned long flags;
6268 int ret;
6269
6270 spin_lock_irqsave(&dd->dc8051_lock, flags);
6271 if (!dd->dc_shutdown)
6272 goto done;
6273 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6274 /* Take the 8051 out of reset */
6275 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6276 /* Wait until 8051 is ready */
6277 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6278 if (ret) {
6279 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6280 __func__);
6281 }
6282 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6283 write_csr(dd, DCC_CFG_RESET, 0x10);
6284 /* lcb_shutdown() with abort=1 does not restore these */
6285 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6286 spin_lock_irqsave(&dd->dc8051_lock, flags);
6287 dd->dc_shutdown = 0;
6288 done:
6289 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6290 }
6291
6292 /*
6293 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6294 */
6295 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6296 {
6297 u64 rx_radr, tx_radr;
6298 u32 version;
6299
6300 if (dd->icode != ICODE_FPGA_EMULATION)
6301 return;
6302
6303 /*
6304 * These LCB defaults on emulator _s are good, nothing to do here:
6305 * LCB_CFG_TX_FIFOS_RADR
6306 * LCB_CFG_RX_FIFOS_RADR
6307 * LCB_CFG_LN_DCLK
6308 * LCB_CFG_IGNORE_LOST_RCLK
6309 */
6310 if (is_emulator_s(dd))
6311 return;
6312 /* else this is _p */
6313
6314 version = emulator_rev(dd);
6315 if (!is_ax(dd))
6316 version = 0x2d; /* all B0 use 0x2d or higher settings */
6317
6318 if (version <= 0x12) {
6319 /* release 0x12 and below */
6320
6321 /*
6322 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6323 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6324 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6325 */
6326 rx_radr =
6327 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6328 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6329 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6330 /*
6331 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6332 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6333 */
6334 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6335 } else if (version <= 0x18) {
6336 /* release 0x13 up to 0x18 */
6337 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6338 rx_radr =
6339 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6340 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6341 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6342 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6343 } else if (version == 0x19) {
6344 /* release 0x19 */
6345 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6346 rx_radr =
6347 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6348 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6349 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6350 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6351 } else if (version == 0x1a) {
6352 /* release 0x1a */
6353 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6354 rx_radr =
6355 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6356 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6357 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6358 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6359 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6360 } else {
6361 /* release 0x1b and higher */
6362 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6363 rx_radr =
6364 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6365 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6366 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6367 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6368 }
6369
6370 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6371 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6372 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6373 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6374 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6375 }
6376
6377 /*
6378 * Handle a SMA idle message
6379 *
6380 * This is a work-queue function outside of the interrupt.
6381 */
6382 void handle_sma_message(struct work_struct *work)
6383 {
6384 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6385 sma_message_work);
6386 struct hfi1_devdata *dd = ppd->dd;
6387 u64 msg;
6388 int ret;
6389
6390 	/* msg is bytes 1-4 of the 40-bit idle message - the command code
6391 	 * is stripped off */
6392 ret = read_idle_sma(dd, &msg);
6393 if (ret)
6394 return;
6395 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6396 /*
6397 * React to the SMA message. Byte[1] (0 for us) is the command.
6398 */
6399 switch (msg & 0xff) {
6400 case SMA_IDLE_ARM:
6401 /*
6402 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6403 * State Transitions
6404 *
6405 * Only expected in INIT or ARMED, discard otherwise.
6406 */
6407 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6408 ppd->neighbor_normal = 1;
6409 break;
6410 case SMA_IDLE_ACTIVE:
6411 /*
6412 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6413 * State Transitions
6414 *
6415 * Can activate the node. Discard otherwise.
6416 */
6417 if (ppd->host_link_state == HLS_UP_ARMED
6418 && ppd->is_active_optimize_enabled) {
6419 ppd->neighbor_normal = 1;
6420 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6421 if (ret)
6422 dd_dev_err(
6423 dd,
6424 "%s: received Active SMA idle message, couldn't set link to Active\n",
6425 __func__);
6426 }
6427 break;
6428 default:
6429 dd_dev_err(dd,
6430 "%s: received unexpected SMA idle message 0x%llx\n",
6431 __func__, msg);
6432 break;
6433 }
6434 }
6435
6436 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6437 {
6438 u64 rcvctrl;
6439 unsigned long flags;
6440
6441 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6442 rcvctrl = read_csr(dd, RCV_CTRL);
6443 rcvctrl |= add;
6444 rcvctrl &= ~clear;
6445 write_csr(dd, RCV_CTRL, rcvctrl);
6446 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6447 }
6448
6449 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6450 {
6451 adjust_rcvctrl(dd, add, 0);
6452 }
6453
6454 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6455 {
6456 adjust_rcvctrl(dd, 0, clear);
6457 }
6458
6459 /*
6460 * Called from all interrupt handlers to start handling an SPC freeze.
6461 */
6462 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6463 {
6464 struct hfi1_devdata *dd = ppd->dd;
6465 struct send_context *sc;
6466 int i;
6467
6468 if (flags & FREEZE_SELF)
6469 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6470
6471 /* enter frozen mode */
6472 dd->flags |= HFI1_FROZEN;
6473
6474 /* notify all SDMA engines that they are going into a freeze */
6475 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6476
6477 /* do halt pre-handling on all enabled send contexts */
6478 for (i = 0; i < dd->num_send_contexts; i++) {
6479 sc = dd->send_contexts[i].sc;
6480 if (sc && (sc->flags & SCF_ENABLED))
6481 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6482 }
6483
6484 	/* Send contexts are frozen. Notify user space */
6485 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6486
6487 if (flags & FREEZE_ABORT) {
6488 dd_dev_err(dd,
6489 "Aborted freeze recovery. Please REBOOT system\n");
6490 return;
6491 }
6492 /* queue non-interrupt handler */
6493 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6494 }
6495
6496 /*
6497 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6498 * depending on the "freeze" parameter.
6499 *
6500 * No need to return an error if it times out, our only option
6501 * is to proceed anyway.
6502 */
6503 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6504 {
6505 unsigned long timeout;
6506 u64 reg;
6507
6508 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6509 while (1) {
6510 reg = read_csr(dd, CCE_STATUS);
6511 if (freeze) {
6512 /* waiting until all indicators are set */
6513 if ((reg & ALL_FROZE) == ALL_FROZE)
6514 return; /* all done */
6515 } else {
6516 /* waiting until all indicators are clear */
6517 if ((reg & ALL_FROZE) == 0)
6518 return; /* all done */
6519 }
6520
6521 if (time_after(jiffies, timeout)) {
6522 dd_dev_err(dd,
6523 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6524 freeze ? "" : "un",
6525 reg & ALL_FROZE,
6526 freeze ? ALL_FROZE : 0ull);
6527 return;
6528 }
6529 usleep_range(80, 120);
6530 }
6531 }
6532
6533 /*
6534 * Do all freeze handling for the RXE block.
6535 */
6536 static void rxe_freeze(struct hfi1_devdata *dd)
6537 {
6538 int i;
6539
6540 /* disable port */
6541 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6542
6543 /* disable all receive contexts */
6544 for (i = 0; i < dd->num_rcv_contexts; i++)
6545 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6546 }
6547
6548 /*
6549 * Unfreeze handling for the RXE block - kernel contexts only.
6550 * This will also enable the port. User contexts will do unfreeze
6551 * handling on a per-context basis as they call into the driver.
6552 *
6553 */
6554 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6555 {
6556 int i;
6557
6558 /* enable all kernel contexts */
6559 for (i = 0; i < dd->n_krcv_queues; i++)
6560 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, i);
6561
6562 /* enable port */
6563 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6564 }
6565
6566 /*
6567 * Non-interrupt SPC freeze handling.
6568 *
6569 * This is a work-queue function outside of the triggering interrupt.
6570 */
6571 void handle_freeze(struct work_struct *work)
6572 {
6573 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6574 freeze_work);
6575 struct hfi1_devdata *dd = ppd->dd;
6576
6577 /* wait for freeze indicators on all affected blocks */
6578 wait_for_freeze_status(dd, 1);
6579
6580 /* SPC is now frozen */
6581
6582 /* do send PIO freeze steps */
6583 pio_freeze(dd);
6584
6585 /* do send DMA freeze steps */
6586 sdma_freeze(dd);
6587
6588 /* do send egress freeze steps - nothing to do */
6589
6590 /* do receive freeze steps */
6591 rxe_freeze(dd);
6592
6593 /*
6594 * Unfreeze the hardware - clear the freeze, wait for each
6595 * block's frozen bit to clear, then clear the frozen flag.
6596 */
6597 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6598 wait_for_freeze_status(dd, 0);
6599
6600 if (is_ax(dd)) {
6601 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6602 wait_for_freeze_status(dd, 1);
6603 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6604 wait_for_freeze_status(dd, 0);
6605 }
6606
6607 /* do send PIO unfreeze steps for kernel contexts */
6608 pio_kernel_unfreeze(dd);
6609
6610 /* do send DMA unfreeze steps */
6611 sdma_unfreeze(dd);
6612
6613 /* do send egress unfreeze steps - nothing to do */
6614
6615 /* do receive unfreeze steps for kernel contexts */
6616 rxe_kernel_unfreeze(dd);
6617
6618 /*
6619 * The unfreeze procedure touches global device registers when
6620 * it disables and re-enables RXE. Mark the device unfrozen
6621 * after all that is done so other parts of the driver waiting
6622 * for the device to unfreeze don't do things out of order.
6623 *
6624 * The above implies that the meaning of HFI1_FROZEN flag is
6625 * "Device has gone into freeze mode and freeze mode handling
6626 * is still in progress."
6627 *
6628 * The flag will be removed when freeze mode processing has
6629 * completed.
6630 */
6631 dd->flags &= ~HFI1_FROZEN;
6632 wake_up(&dd->event_queue);
6633
6634 /* no longer frozen */
6635 }
6636
6637 /*
6638 * Handle a link up interrupt from the 8051.
6639 *
6640 * This is a work-queue function outside of the interrupt.
6641 */
6642 void handle_link_up(struct work_struct *work)
6643 {
6644 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6645 link_up_work);
6646 set_link_state(ppd, HLS_UP_INIT);
6647
6648 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6649 read_ltp_rtt(ppd->dd);
6650 /*
6651 * OPA specifies that certain counters are cleared on a transition
6652 * to link up, so do that.
6653 */
6654 clear_linkup_counters(ppd->dd);
6655 /*
6656 * And (re)set link up default values.
6657 */
6658 set_linkup_defaults(ppd);
6659
6660 /* enforce link speed enabled */
6661 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6662 /* oops - current speed is not enabled, bounce */
6663 dd_dev_err(ppd->dd,
6664 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6665 ppd->link_speed_active, ppd->link_speed_enabled);
6666 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6667 OPA_LINKDOWN_REASON_SPEED_POLICY);
6668 set_link_state(ppd, HLS_DN_OFFLINE);
6669 start_link(ppd);
6670 }
6671 }
6672
6673 /* Several pieces of LNI information were cached for SMA in ppd.
6674 * Reset these on link down */
6675 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6676 {
6677 ppd->neighbor_guid = 0;
6678 ppd->neighbor_port_number = 0;
6679 ppd->neighbor_type = 0;
6680 ppd->neighbor_fm_security = 0;
6681 }
6682
6683 /*
6684 * Handle a link down interrupt from the 8051.
6685 *
6686 * This is a work-queue function outside of the interrupt.
6687 */
6688 void handle_link_down(struct work_struct *work)
6689 {
6690 u8 lcl_reason, neigh_reason = 0;
6691 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6692 link_down_work);
6693
6694 /* go offline first, then deal with reasons */
6695 set_link_state(ppd, HLS_DN_OFFLINE);
6696
6697 lcl_reason = 0;
6698 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6699
6700 /*
6701 * If no reason, assume peer-initiated but missed
6702 * LinkGoingDown idle flits.
6703 */
6704 if (neigh_reason == 0)
6705 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6706
6707 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6708
6709 reset_neighbor_info(ppd);
6710
6711 /* disable the port */
6712 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6713
6714 /* If there is no cable attached, turn the DC off. Otherwise,
6715 * start the link bring up. */
6716 if (!qsfp_mod_present(ppd))
6717 dc_shutdown(ppd->dd);
6718 else
6719 start_link(ppd);
6720 }
6721
6722 void handle_link_bounce(struct work_struct *work)
6723 {
6724 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6725 link_bounce_work);
6726
6727 /*
6728 * Only do something if the link is currently up.
6729 */
6730 if (ppd->host_link_state & HLS_UP) {
6731 set_link_state(ppd, HLS_DN_OFFLINE);
6732 start_link(ppd);
6733 } else {
6734 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6735 __func__, link_state_name(ppd->host_link_state));
6736 }
6737 }
6738
6739 /*
6740 * Mask conversion: Capability exchange to Port LTP. The capability
6741 * exchange has an implicit 16b CRC that is mandatory.
6742 */
6743 static int cap_to_port_ltp(int cap)
6744 {
6745 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6746
6747 if (cap & CAP_CRC_14B)
6748 port_ltp |= PORT_LTP_CRC_MODE_14;
6749 if (cap & CAP_CRC_48B)
6750 port_ltp |= PORT_LTP_CRC_MODE_48;
6751 if (cap & CAP_CRC_12B_16B_PER_LANE)
6752 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6753
6754 return port_ltp;
6755 }
6756
6757 /*
6758 * Convert an OPA Port LTP mask to capability mask
6759 */
6760 int port_ltp_to_cap(int port_ltp)
6761 {
6762 int cap_mask = 0;
6763
6764 if (port_ltp & PORT_LTP_CRC_MODE_14)
6765 cap_mask |= CAP_CRC_14B;
6766 if (port_ltp & PORT_LTP_CRC_MODE_48)
6767 cap_mask |= CAP_CRC_48B;
6768 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6769 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6770
6771 return cap_mask;
6772 }
6773
6774 /*
6775 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6776 */
6777 static int lcb_to_port_ltp(int lcb_crc)
6778 {
6779 int port_ltp = 0;
6780
6781 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6782 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6783 else if (lcb_crc == LCB_CRC_48B)
6784 port_ltp = PORT_LTP_CRC_MODE_48;
6785 else if (lcb_crc == LCB_CRC_14B)
6786 port_ltp = PORT_LTP_CRC_MODE_14;
6787 else
6788 port_ltp = PORT_LTP_CRC_MODE_16;
6789
6790 return port_ltp;
6791 }
6792
6793 /*
6794 * Our neighbor has indicated that we are allowed to act as a fabric
6795 * manager, so place the full management partition key in the second
6796 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6797 * that we should already have the limited management partition key in
6798 * array element 1, and also that the port is not yet up when
6799 * add_full_mgmt_pkey() is invoked.
6800 */
6801 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6802 {
6803 struct hfi1_devdata *dd = ppd->dd;
6804
6805 	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6806 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6807 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6808 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
6809 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6810 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6811 }
6812
6813 /*
6814 * Convert the given link width to the OPA link width bitmask.
6815 */
6816 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6817 {
6818 switch (width) {
6819 case 0:
6820 /*
6821 * Simulator and quick linkup do not set the width.
6822 * Just set it to 4x without complaint.
6823 */
6824 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6825 return OPA_LINK_WIDTH_4X;
6826 return 0; /* no lanes up */
6827 case 1: return OPA_LINK_WIDTH_1X;
6828 case 2: return OPA_LINK_WIDTH_2X;
6829 case 3: return OPA_LINK_WIDTH_3X;
6830 default:
6831 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6832 __func__, width);
6833 /* fall through */
6834 case 4: return OPA_LINK_WIDTH_4X;
6835 }
6836 }
6837
6838 /*
6839 * Do a population count on the bottom nibble.
6840 */
6841 static const u8 bit_counts[16] = {
6842 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
6843 };
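/*
 * Example: an enable_lane nibble of 0xb (binary 1011) has three bits set,
 * so nibble_to_count(0xb) == bit_counts[0xb] == 3 active lanes.
 */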
6844 static inline u8 nibble_to_count(u8 nibble)
6845 {
6846 return bit_counts[nibble & 0xf];
6847 }
6848
6849 /*
6850 * Read the active lane information from the 8051 registers and return
6851 * their widths.
6852 *
6853 * Active lane information is found in these 8051 registers:
6854 * enable_lane_tx
6855 * enable_lane_rx
6856 */
6857 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
6858 u16 *rx_width)
6859 {
6860 u16 tx, rx;
6861 u8 enable_lane_rx;
6862 u8 enable_lane_tx;
6863 u8 tx_polarity_inversion;
6864 u8 rx_polarity_inversion;
6865 u8 max_rate;
6866
6867 /* read the active lanes */
6868 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
6869 &rx_polarity_inversion, &max_rate);
6870 read_local_lni(dd, &enable_lane_rx);
6871
6872 /* convert to counts */
6873 tx = nibble_to_count(enable_lane_tx);
6874 rx = nibble_to_count(enable_lane_rx);
6875
6876 /*
6877 * Set link_speed_active here, overriding what was set in
6878 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
6879 * set the max_rate field in handle_verify_cap until v0.19.
6880 */
6881 if ((dd->icode == ICODE_RTL_SILICON)
6882 && (dd->dc8051_ver < dc8051_ver(0, 19))) {
6883 /* max_rate: 0 = 12.5G, 1 = 25G */
6884 switch (max_rate) {
6885 case 0:
6886 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
6887 break;
6888 default:
6889 dd_dev_err(dd,
6890 "%s: unexpected max rate %d, using 25Gb\n",
6891 __func__, (int)max_rate);
6892 /* fall through */
6893 case 1:
6894 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
6895 break;
6896 }
6897 }
6898
6899 dd_dev_info(dd,
6900 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
6901 enable_lane_tx, tx, enable_lane_rx, rx);
6902 *tx_width = link_width_to_bits(dd, tx);
6903 *rx_width = link_width_to_bits(dd, rx);
6904 }
6905
6906 /*
6907 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
6908 * Valid after the end of VerifyCap and during LinkUp. Does not change
6909 * after link up. I.e. look elsewhere for downgrade information.
6910 *
6911 * Bits are:
6912 * + bits [7:4] contain the number of active transmitters
6913 * + bits [3:0] contain the number of active receivers
6914 * These are numbers 1 through 4 and can be different values if the
6915 * link is asymmetric.
6916 *
6917 * verify_cap_local_fm_link_width[0] retains its original value.
6918 */
6919 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
6920 u16 *rx_width)
6921 {
6922 u16 widths, tx, rx;
6923 u8 misc_bits, local_flags;
6924 u16 active_tx, active_rx;
6925
6926 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
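	/*
	 * The two counts described above land in the upper byte of the
	 * 16-bit widths value: active transmitters in bits [15:12] and
	 * active receivers in bits [11:8], matching the shifts below.
	 */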
6927 tx = widths >> 12;
6928 rx = (widths >> 8) & 0xf;
6929
6930 *tx_width = link_width_to_bits(dd, tx);
6931 *rx_width = link_width_to_bits(dd, rx);
6932
6933 /* print the active widths */
6934 get_link_widths(dd, &active_tx, &active_rx);
6935 }
6936
6937 /*
6938 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
6939 * hardware information when the link first comes up.
6940 *
6941 * The link width is not available until after VerifyCap.AllFramesReceived
6942 * (the trigger for handle_verify_cap), so this is outside that routine
6943 * and should be called when the 8051 signals linkup.
6944 */
6945 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
6946 {
6947 u16 tx_width, rx_width;
6948
6949 /* get end-of-LNI link widths */
6950 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
6951
6952 /* use tx_width as the link is supposed to be symmetric on link up */
6953 ppd->link_width_active = tx_width;
6954 /* link width downgrade active (LWD.A) starts out matching LW.A */
6955 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
6956 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
6957 /* per OPA spec, on link up LWD.E resets to LWD.S */
6958 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
6959 	/* cache the active egress rate (units [10^6 bits/sec]) */
6960 ppd->current_egress_rate = active_egress_rate(ppd);
6961 }
6962
6963 /*
6964 * Handle a verify capabilities interrupt from the 8051.
6965 *
6966 * This is a work-queue function outside of the interrupt.
6967 */
6968 void handle_verify_cap(struct work_struct *work)
6969 {
6970 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6971 link_vc_work);
6972 struct hfi1_devdata *dd = ppd->dd;
6973 u64 reg;
6974 u8 power_management;
6975 	u8 continuous;
6976 u8 vcu;
6977 u8 vau;
6978 u8 z;
6979 u16 vl15buf;
6980 u16 link_widths;
6981 u16 crc_mask;
6982 u16 crc_val;
6983 u16 device_id;
6984 u16 active_tx, active_rx;
6985 u8 partner_supported_crc;
6986 u8 remote_tx_rate;
6987 u8 device_rev;
6988
6989 set_link_state(ppd, HLS_VERIFY_CAP);
6990
6991 lcb_shutdown(dd, 0);
6992 adjust_lcb_for_fpga_serdes(dd);
6993
6994 /*
6995 * These are now valid:
6996 * remote VerifyCap fields in the general LNI config
6997 * CSR DC8051_STS_REMOTE_GUID
6998 * CSR DC8051_STS_REMOTE_NODE_TYPE
6999 * CSR DC8051_STS_REMOTE_FM_SECURITY
7000 * CSR DC8051_STS_REMOTE_PORT_NO
7001 */
7002
7003 	read_vc_remote_phy(dd, &power_management, &continuous);
7004 read_vc_remote_fabric(
7005 dd,
7006 &vau,
7007 &z,
7008 &vcu,
7009 &vl15buf,
7010 &partner_supported_crc);
7011 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7012 read_remote_device_id(dd, &device_id, &device_rev);
7013 /*
7014 * And the 'MgmtAllowed' information, which is exchanged during
7015 	 * LNI, is also available at this point.
7016 */
7017 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7018 /* print the active widths */
7019 get_link_widths(dd, &active_tx, &active_rx);
7020 dd_dev_info(dd,
7021 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7022 		(int)power_management, (int)continuous);
7023 dd_dev_info(dd,
7024 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7025 (int)vau,
7026 (int)z,
7027 (int)vcu,
7028 (int)vl15buf,
7029 (int)partner_supported_crc);
7030 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7031 (u32)remote_tx_rate, (u32)link_widths);
7032 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7033 (u32)device_id, (u32)device_rev);
7034 /*
7035 * The peer vAU value just read is the peer receiver value. HFI does
7036 * not support a transmit vAU of 0 (AU == 8). We advertised that
7037 * with Z=1 in the fabric capabilities sent to the peer. The peer
7038 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7039 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7040 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7041 * subject to the Z value exception.
7042 */
7043 if (vau == 0)
7044 vau = 1;
7045 set_up_vl15(dd, vau, vl15buf);
7046
7047 /* set up the LCB CRC mode */
7048 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7049
7050 /* order is important: use the lowest bit in common */
7051 if (crc_mask & CAP_CRC_14B)
7052 crc_val = LCB_CRC_14B;
7053 else if (crc_mask & CAP_CRC_48B)
7054 crc_val = LCB_CRC_48B;
7055 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7056 crc_val = LCB_CRC_12B_16B_PER_LANE;
7057 else
7058 crc_val = LCB_CRC_16B;
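	/*
	 * Example: if both ends enable 14B and 48B CRC, the chain above
	 * selects LCB_CRC_14B; 16B is the fallback since that mode is
	 * always supported.
	 */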
7059
7060 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7061 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7062 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7063
7064 /* set (14b only) or clear sideband credit */
7065 reg = read_csr(dd, SEND_CM_CTRL);
7066 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7067 write_csr(dd, SEND_CM_CTRL,
7068 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7069 } else {
7070 write_csr(dd, SEND_CM_CTRL,
7071 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7072 }
7073
7074 ppd->link_speed_active = 0; /* invalid value */
7075 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7076 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7077 switch (remote_tx_rate) {
7078 case 0:
7079 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7080 break;
7081 case 1:
7082 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7083 break;
7084 }
7085 } else {
7086 /* actual rate is highest bit of the ANDed rates */
7087 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7088
7089 if (rate & 2)
7090 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7091 else if (rate & 1)
7092 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7093 }
7094 if (ppd->link_speed_active == 0) {
7095 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7096 __func__, (int)remote_tx_rate);
7097 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7098 }
7099
7100 /*
7101 * Cache the values of the supported, enabled, and active
7102 * LTP CRC modes to return in 'portinfo' queries. But the bit
7103 * flags that are returned in the portinfo query differ from
7104 * what's in the link_crc_mask, crc_sizes, and crc_val
7105 * variables. Convert these here.
7106 */
7107 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7108 /* supported crc modes */
7109 ppd->port_ltp_crc_mode |=
7110 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7111 /* enabled crc modes */
7112 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7113 /* active crc mode */
7114
7115 /* set up the remote credit return table */
7116 assign_remote_cm_au_table(dd, vcu);
7117
7118 /*
7119 * The LCB is reset on entry to handle_verify_cap(), so this must
7120 * be applied on every link up.
7121 *
7122 * Adjust LCB error kill enable to kill the link if
7123 * these RBUF errors are seen:
7124 * REPLAY_BUF_MBE_SMASK
7125 * FLIT_INPUT_BUF_MBE_SMASK
7126 */
7127 if (is_ax(dd)) { /* fixed in B0 */
7128 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7129 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7130 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7131 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7132 }
7133
7134 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7135 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7136
7137 /* give 8051 access to the LCB CSRs */
7138 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7139 set_8051_lcb_access(dd);
7140
7141 ppd->neighbor_guid =
7142 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7143 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7144 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7145 ppd->neighbor_type =
7146 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7147 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7148 ppd->neighbor_fm_security =
7149 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7150 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7151 dd_dev_info(dd,
7152 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7153 ppd->neighbor_guid, ppd->neighbor_type,
7154 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7155 if (ppd->mgmt_allowed)
7156 add_full_mgmt_pkey(ppd);
7157
7158 /* tell the 8051 to go to LinkUp */
7159 set_link_state(ppd, HLS_GOING_UP);
7160 }
7161
7162 /*
7163 * Apply the link width downgrade enabled policy against the current active
7164 * link widths.
7165 *
7166 * Called when the enabled policy changes or the active link widths change.
7167 */
7168 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7169 {
7170 int do_bounce = 0;
7171 int tries;
7172 u16 lwde;
7173 u16 tx, rx;
7174
7175 /* use the hls lock to avoid a race with actual link up */
7176 tries = 0;
7177 retry:
7178 mutex_lock(&ppd->hls_lock);
7179 /* only apply if the link is up */
7180 if (!(ppd->host_link_state & HLS_UP)) {
7181 /* still going up..wait and retry */
7182 if (ppd->host_link_state & HLS_GOING_UP) {
7183 if (++tries < 1000) {
7184 mutex_unlock(&ppd->hls_lock);
7185 usleep_range(100, 120); /* arbitrary */
7186 goto retry;
7187 }
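			/*
			 * 1000 retries at 100-120us each bounds the wait to
			 * roughly 100-120ms before giving up.
			 */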
7188 dd_dev_err(ppd->dd,
7189 "%s: giving up waiting for link state change\n",
7190 __func__);
7191 }
7192 goto done;
7193 }
7194
7195 lwde = ppd->link_width_downgrade_enabled;
7196
7197 if (refresh_widths) {
7198 get_link_widths(ppd->dd, &tx, &rx);
7199 ppd->link_width_downgrade_tx_active = tx;
7200 ppd->link_width_downgrade_rx_active = rx;
7201 }
7202
7203 if (lwde == 0) {
7204 /* downgrade is disabled */
7205
7206 /* bounce if not at starting active width */
7207 if ((ppd->link_width_active !=
7208 ppd->link_width_downgrade_tx_active)
7209 || (ppd->link_width_active !=
7210 ppd->link_width_downgrade_rx_active)) {
7211 dd_dev_err(ppd->dd,
7212 "Link downgrade is disabled and link has downgraded, downing link\n");
7213 dd_dev_err(ppd->dd,
7214 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7215 ppd->link_width_active,
7216 ppd->link_width_downgrade_tx_active,
7217 ppd->link_width_downgrade_rx_active);
7218 do_bounce = 1;
7219 }
7220 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
7221 || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7222 /* Tx or Rx is outside the enabled policy */
7223 dd_dev_err(ppd->dd,
7224 "Link is outside of downgrade allowed, downing link\n");
7225 dd_dev_err(ppd->dd,
7226 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7227 lwde,
7228 ppd->link_width_downgrade_tx_active,
7229 ppd->link_width_downgrade_rx_active);
7230 do_bounce = 1;
7231 }
7232
7233 done:
7234 mutex_unlock(&ppd->hls_lock);
7235
7236 if (do_bounce) {
7237 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7238 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7239 set_link_state(ppd, HLS_DN_OFFLINE);
7240 start_link(ppd);
7241 }
7242 }
7243
7244 /*
7245 * Handle a link downgrade interrupt from the 8051.
7246 *
7247 * This is a work-queue function outside of the interrupt.
7248 */
7249 void handle_link_downgrade(struct work_struct *work)
7250 {
7251 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7252 link_downgrade_work);
7253
7254 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7255 apply_link_downgrade_policy(ppd, 1);
7256 }
7257
7258 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7259 {
7260 return flag_string(buf, buf_len, flags, dcc_err_flags,
7261 ARRAY_SIZE(dcc_err_flags));
7262 }
7263
7264 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7265 {
7266 return flag_string(buf, buf_len, flags, lcb_err_flags,
7267 ARRAY_SIZE(lcb_err_flags));
7268 }
7269
7270 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7271 {
7272 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7273 ARRAY_SIZE(dc8051_err_flags));
7274 }
7275
7276 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7277 {
7278 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7279 ARRAY_SIZE(dc8051_info_err_flags));
7280 }
7281
7282 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7283 {
7284 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7285 ARRAY_SIZE(dc8051_info_host_msg_flags));
7286 }
7287
7288 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7289 {
7290 struct hfi1_pportdata *ppd = dd->pport;
7291 u64 info, err, host_msg;
7292 int queue_link_down = 0;
7293 char buf[96];
7294
7295 /* look at the flags */
7296 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7297 /* 8051 information set by firmware */
7298 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7299 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7300 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7301 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7302 host_msg = (info >>
7303 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7304 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7305
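		/*
		 * Each recognized bit below is cleared from err or host_msg
		 * as it is handled; whatever remains afterwards is only
		 * reported, not acted on.
		 */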
7306 /*
7307 * Handle error flags.
7308 */
7309 if (err & FAILED_LNI) {
7310 /*
7311 * LNI error indications are cleared by the 8051
7312 * only when starting polling. Only pay attention
7313 * to them when in the states that occur during
7314 * LNI.
7315 */
7316 if (ppd->host_link_state
7317 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7318 queue_link_down = 1;
7319 dd_dev_info(dd, "Link error: %s\n",
7320 dc8051_info_err_string(buf,
7321 sizeof(buf),
7322 err & FAILED_LNI));
7323 }
7324 err &= ~(u64)FAILED_LNI;
7325 }
7326 		/* unknown frames can happen during LNI, just count */
7327 if (err & UNKNOWN_FRAME) {
7328 ppd->unknown_frame_count++;
7329 err &= ~(u64)UNKNOWN_FRAME;
7330 }
7331 if (err) {
7332 /* report remaining errors, but do not do anything */
7333 dd_dev_err(dd, "8051 info error: %s\n",
7334 dc8051_info_err_string(buf, sizeof(buf), err));
7335 }
7336
7337 /*
7338 * Handle host message flags.
7339 */
7340 if (host_msg & HOST_REQ_DONE) {
7341 /*
7342 * Presently, the driver does a busy wait for
7343 * host requests to complete. This is only an
7344 * informational message.
7345 * NOTE: The 8051 clears the host message
7346 * information *on the next 8051 command*.
7347 * Therefore, when linkup is achieved,
7348 * this flag will still be set.
7349 */
7350 host_msg &= ~(u64)HOST_REQ_DONE;
7351 }
7352 if (host_msg & BC_SMA_MSG) {
7353 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7354 host_msg &= ~(u64)BC_SMA_MSG;
7355 }
7356 if (host_msg & LINKUP_ACHIEVED) {
7357 dd_dev_info(dd, "8051: Link up\n");
7358 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7359 host_msg &= ~(u64)LINKUP_ACHIEVED;
7360 }
7361 if (host_msg & EXT_DEVICE_CFG_REQ) {
7362 handle_8051_request(dd);
7363 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7364 }
7365 if (host_msg & VERIFY_CAP_FRAME) {
7366 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7367 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7368 }
7369 if (host_msg & LINK_GOING_DOWN) {
7370 const char *extra = "";
7371 /* no downgrade action needed if going down */
7372 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7373 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7374 extra = " (ignoring downgrade)";
7375 }
7376 dd_dev_info(dd, "8051: Link down%s\n", extra);
7377 queue_link_down = 1;
7378 host_msg &= ~(u64)LINK_GOING_DOWN;
7379 }
7380 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7381 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7382 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7383 }
7384 if (host_msg) {
7385 /* report remaining messages, but do not do anything */
7386 dd_dev_info(dd, "8051 info host message: %s\n",
7387 dc8051_info_host_msg_string(buf, sizeof(buf),
7388 host_msg));
7389 }
7390
7391 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7392 }
7393 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7394 /*
7395 * Lost the 8051 heartbeat. If this happens, we
7396 * receive constant interrupts about it. Disable
7397 * the interrupt after the first.
7398 */
7399 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7400 write_csr(dd, DC_DC8051_ERR_EN,
7401 read_csr(dd, DC_DC8051_ERR_EN)
7402 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7403
7404 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7405 }
7406 if (reg) {
7407 /* report the error, but do not do anything */
7408 dd_dev_err(dd, "8051 error: %s\n",
7409 dc8051_err_string(buf, sizeof(buf), reg));
7410 }
7411
7412 if (queue_link_down) {
7413 /* if the link is already going down or disabled, do not
7414 * queue another */
7415 if ((ppd->host_link_state
7416 & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
7417 || ppd->link_enabled == 0) {
7418 dd_dev_info(dd, "%s: not queuing link down\n",
7419 __func__);
7420 } else {
7421 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7422 }
7423 }
7424 }
7425
7426 static const char * const fm_config_txt[] = {
7427 [0] =
7428 "BadHeadDist: Distance violation between two head flits",
7429 [1] =
7430 "BadTailDist: Distance violation between two tail flits",
7431 [2] =
7432 "BadCtrlDist: Distance violation between two credit control flits",
7433 [3] =
7434 "BadCrdAck: Credits return for unsupported VL",
7435 [4] =
7436 "UnsupportedVLMarker: Received VL Marker",
7437 [5] =
7438 "BadPreempt: Exceeded the preemption nesting level",
7439 [6] =
7440 "BadControlFlit: Received unsupported control flit",
7441 /* no 7 */
7442 [8] =
7443 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7444 };
7445
7446 static const char * const port_rcv_txt[] = {
7447 [1] =
7448 "BadPktLen: Illegal PktLen",
7449 [2] =
7450 "PktLenTooLong: Packet longer than PktLen",
7451 [3] =
7452 "PktLenTooShort: Packet shorter than PktLen",
7453 [4] =
7454 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7455 [5] =
7456 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7457 [6] =
7458 "BadL2: Illegal L2 opcode",
7459 [7] =
7460 "BadSC: Unsupported SC",
7461 [9] =
7462 "BadRC: Illegal RC",
7463 [11] =
7464 "PreemptError: Preempting with same VL",
7465 [12] =
7466 "PreemptVL15: Preempting a VL15 packet",
7467 };
7468
7469 #define OPA_LDR_FMCONFIG_OFFSET 16
7470 #define OPA_LDR_PORTRCV_OFFSET 0
7471 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7472 {
7473 u64 info, hdr0, hdr1;
7474 const char *extra;
7475 char buf[96];
7476 struct hfi1_pportdata *ppd = dd->pport;
7477 u8 lcl_reason = 0;
7478 int do_bounce = 0;
7479
7480 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7481 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7482 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7483 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7484 /* set status bit */
7485 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7486 }
7487 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7488 }
7489
7490 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7491 struct hfi1_pportdata *ppd = dd->pport;
7492 /* this counter saturates at (2^32) - 1 */
7493 if (ppd->link_downed < (u32)UINT_MAX)
7494 ppd->link_downed++;
7495 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7496 }
7497
7498 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7499 u8 reason_valid = 1;
7500
7501 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7502 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7503 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7504 /* set status bit */
7505 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7506 }
7507 switch (info) {
7508 case 0:
7509 case 1:
7510 case 2:
7511 case 3:
7512 case 4:
7513 case 5:
7514 case 6:
7515 extra = fm_config_txt[info];
7516 break;
7517 case 8:
7518 extra = fm_config_txt[info];
7519 if (ppd->port_error_action &
7520 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7521 do_bounce = 1;
7522 /*
7523 * lcl_reason cannot be derived from info
7524 * for this error
7525 */
7526 lcl_reason =
7527 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7528 }
7529 break;
7530 default:
7531 reason_valid = 0;
7532 snprintf(buf, sizeof(buf), "reserved%lld", info);
7533 extra = buf;
7534 break;
7535 }
7536
7537 if (reason_valid && !do_bounce) {
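			/*
			 * FMConfig error codes map to PortErrorAction bits
			 * starting at OPA_LDR_FMCONFIG_OFFSET (16), so error
			 * code 'info' selects bit (16 + info).
			 */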
7538 do_bounce = ppd->port_error_action &
7539 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7540 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7541 }
7542
7543 /* just report this */
7544 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7545 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7546 }
7547
7548 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7549 u8 reason_valid = 1;
7550
7551 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7552 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7553 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7554 if (!(dd->err_info_rcvport.status_and_code &
7555 OPA_EI_STATUS_SMASK)) {
7556 dd->err_info_rcvport.status_and_code =
7557 info & OPA_EI_CODE_SMASK;
7558 /* set status bit */
7559 dd->err_info_rcvport.status_and_code |=
7560 OPA_EI_STATUS_SMASK;
7561 /* save first 2 flits in the packet that caused
7562 * the error */
7563 dd->err_info_rcvport.packet_flit1 = hdr0;
7564 dd->err_info_rcvport.packet_flit2 = hdr1;
7565 }
7566 switch (info) {
7567 case 1:
7568 case 2:
7569 case 3:
7570 case 4:
7571 case 5:
7572 case 6:
7573 case 7:
7574 case 9:
7575 case 11:
7576 case 12:
7577 extra = port_rcv_txt[info];
7578 break;
7579 default:
7580 reason_valid = 0;
7581 snprintf(buf, sizeof(buf), "reserved%lld", info);
7582 extra = buf;
7583 break;
7584 }
7585
7586 if (reason_valid && !do_bounce) {
7587 do_bounce = ppd->port_error_action &
7588 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7589 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7590 }
7591
7592 /* just report this */
7593 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7594 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7595 hdr0, hdr1);
7596
7597 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7598 }
7599
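	/*
	 * A0 (is_ax) parts run an extra freeze/unfreeze cycle here;
	 * later steppings skip this second pass.
	 */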
7600 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7601 /* informative only */
7602 dd_dev_info(dd, "8051 access to LCB blocked\n");
7603 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7604 }
7605 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7606 /* informative only */
7607 dd_dev_info(dd, "host access to LCB blocked\n");
7608 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7609 }
7610
7611 /* report any remaining errors */
7612 if (reg)
7613 dd_dev_info(dd, "DCC Error: %s\n",
7614 dcc_err_string(buf, sizeof(buf), reg));
7615
7616 if (lcl_reason == 0)
7617 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7618
7619 if (do_bounce) {
7620 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7621 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7622 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7623 }
7624 }
7625
7626 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7627 {
7628 char buf[96];
7629
7630 dd_dev_info(dd, "LCB Error: %s\n",
7631 lcb_err_string(buf, sizeof(buf), reg));
7632 }
7633
7634 /*
7635 * CCE block DC interrupt. Source is < 8.
7636 */
7637 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7638 {
7639 const struct err_reg_info *eri = &dc_errs[source];
7640
7641 if (eri->handler) {
7642 interrupt_clear_down(dd, 0, eri);
7643 } else if (source == 3 /* dc_lbm_int */) {
7644 /*
7645 * This indicates that a parity error has occurred on the
7646 * address/control lines presented to the LBM. The error
7647 * is a single pulse, there is no associated error flag,
7648 * and it is non-maskable. This is because if a parity
7649 * error occurs on the request the request is dropped.
7650 * This should never occur, but it is nice to know if it
7651 * ever does.
7652 */
7653 dd_dev_err(dd, "Parity error in DC LBM block\n");
7654 } else {
7655 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7656 }
7657 }
7658
7659 /*
7660 * TX block send credit interrupt. Source is < 160.
7661 */
7662 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7663 {
7664 sc_group_release_update(dd, source);
7665 }
7666
7667 /*
7668 * TX block SDMA interrupt. Source is < 48.
7669 *
7670 * SDMA interrupts are grouped by type:
7671 *
7672 * 0 - N-1 = SDma
7673 * N - 2N-1 = SDmaProgress
7674 * 2N - 3N-1 = SDmaIdle
7675 */
7676 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7677 {
7678 /* what interrupt */
7679 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7680 /* which engine */
7681 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
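	/*
	 * With 48 sources split into three groups, N above is
	 * TXE_NUM_SDMA_ENGINES (16): e.g. source 20 decodes to
	 * what = 1 (SDmaProgress) on engine 4.
	 */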
7682
7683 #ifdef CONFIG_SDMA_VERBOSITY
7684 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7685 slashstrip(__FILE__), __LINE__, __func__);
7686 sdma_dumpstate(&dd->per_sdma[which]);
7687 #endif
7688
7689 if (likely(what < 3 && which < dd->num_sdma)) {
7690 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7691 } else {
7692 /* should not happen */
7693 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7694 }
7695 }
7696
7697 /*
7698 * RX block receive available interrupt. Source is < 160.
7699 */
7700 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7701 {
7702 struct hfi1_ctxtdata *rcd;
7703 char *err_detail;
7704
7705 if (likely(source < dd->num_rcv_contexts)) {
7706 rcd = dd->rcd[source];
7707 if (rcd) {
7708 if (source < dd->first_user_ctxt)
7709 rcd->do_interrupt(rcd, 0);
7710 else
7711 handle_user_interrupt(rcd);
7712 return; /* OK */
7713 }
7714 /* received an interrupt, but no rcd */
7715 err_detail = "dataless";
7716 } else {
7717 /* received an interrupt, but are not using that context */
7718 err_detail = "out of range";
7719 }
7720 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7721 err_detail, source);
7722 }
7723
7724 /*
7725 * RX block receive urgent interrupt. Source is < 160.
7726 */
7727 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7728 {
7729 struct hfi1_ctxtdata *rcd;
7730 char *err_detail;
7731
7732 if (likely(source < dd->num_rcv_contexts)) {
7733 rcd = dd->rcd[source];
7734 if (rcd) {
7735 /* only pay attention to user urgent interrupts */
7736 if (source >= dd->first_user_ctxt)
7737 handle_user_interrupt(rcd);
7738 return; /* OK */
7739 }
7740 /* received an interrupt, but no rcd */
7741 err_detail = "dataless";
7742 } else {
7743 /* received an interrupt, but are not using that context */
7744 err_detail = "out of range";
7745 }
7746 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7747 err_detail, source);
7748 }
7749
7750 /*
7751 * Reserved range interrupt. Should not be called in normal operation.
7752 */
7753 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7754 {
7755 char name[64];
7756
7757 dd_dev_err(dd, "unexpected %s interrupt\n",
7758 is_reserved_name(name, sizeof(name), source));
7759 }
7760
7761 static const struct is_table is_table[] = {
7762 /* start end
7763 name func interrupt func */
7764 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7765 is_misc_err_name, is_misc_err_int },
7766 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7767 is_sdma_eng_err_name, is_sdma_eng_err_int },
7768 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7769 is_sendctxt_err_name, is_sendctxt_err_int },
7770 { IS_SDMA_START, IS_SDMA_END,
7771 is_sdma_eng_name, is_sdma_eng_int },
7772 { IS_VARIOUS_START, IS_VARIOUS_END,
7773 is_various_name, is_various_int },
7774 { IS_DC_START, IS_DC_END,
7775 is_dc_name, is_dc_int },
7776 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7777 is_rcv_avail_name, is_rcv_avail_int },
7778 { IS_RCVURGENT_START, IS_RCVURGENT_END,
7779 is_rcv_urgent_name, is_rcv_urgent_int },
7780 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7781 is_send_credit_name, is_send_credit_int},
7782 { IS_RESERVED_START, IS_RESERVED_END,
7783 is_reserved_name, is_reserved_int},
7784 };
7785
7786 /*
7787 * Interrupt source interrupt - called when the given source has an interrupt.
7788 * Source is a bit index into an array of 64-bit integers.
7789 */
7790 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7791 {
7792 const struct is_table *entry;
7793
7794 /* avoids a double compare by walking the table in-order */
7795 for (entry = &is_table[0]; entry->is_name; entry++) {
7796 if (source < entry->end) {
7797 trace_hfi1_interrupt(dd, entry, source);
7798 entry->is_int(dd, source - entry->start);
7799 return;
7800 }
7801 }
7802 /* fell off the end */
7803 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7804 }
7805
7806 /*
7807 * General interrupt handler. This is able to correctly handle
7808 * all interrupts in case INTx is used.
7809 */
7810 static irqreturn_t general_interrupt(int irq, void *data)
7811 {
7812 struct hfi1_devdata *dd = data;
7813 u64 regs[CCE_NUM_INT_CSRS];
7814 u32 bit;
7815 int i;
7816
7817 this_cpu_inc(*dd->int_counter);
7818
7819 /* phase 1: scan and clear all handled interrupts */
7820 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7821 if (dd->gi_mask[i] == 0) {
7822 regs[i] = 0; /* used later */
7823 continue;
7824 }
7825 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7826 dd->gi_mask[i];
7827 /* only clear if anything is set */
7828 if (regs[i])
7829 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7830 }
7831
7832 /* phase 2: call the appropriate handler */
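	/*
	 * regs[] is walked as one contiguous bitmap of
	 * CCE_NUM_INT_CSRS * 64 bits; each set bit is a global source
	 * index handed to is_interrupt().
	 */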
7833 for_each_set_bit(bit, (unsigned long *)&regs[0],
7834 CCE_NUM_INT_CSRS*64) {
7835 is_interrupt(dd, bit);
7836 }
7837
7838 return IRQ_HANDLED;
7839 }
7840
7841 static irqreturn_t sdma_interrupt(int irq, void *data)
7842 {
7843 struct sdma_engine *sde = data;
7844 struct hfi1_devdata *dd = sde->dd;
7845 u64 status;
7846
7847 #ifdef CONFIG_SDMA_VERBOSITY
7848 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
7849 slashstrip(__FILE__), __LINE__, __func__);
7850 sdma_dumpstate(sde);
7851 #endif
7852
7853 this_cpu_inc(*dd->int_counter);
7854
7855 /* This read_csr is really bad in the hot path */
7856 status = read_csr(dd,
7857 CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
7858 & sde->imask;
7859 if (likely(status)) {
7860 /* clear the interrupt(s) */
7861 write_csr(dd,
7862 CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
7863 status);
7864
7865 /* handle the interrupt(s) */
7866 sdma_engine_interrupt(sde, status);
7867 } else
7868 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
7869 sde->this_idx);
7870
7871 return IRQ_HANDLED;
7872 }
7873
7874 /*
7875 * Clear the receive interrupt, forcing the write and making sure
7876 * we have data from the chip, pushing everything in front of it
7877 * back to the host.
7878 */
7879 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
7880 {
7881 struct hfi1_devdata *dd = rcd->dd;
7882 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
7883
7884 mmiowb(); /* make sure everything before is written */
7885 write_csr(dd, addr, rcd->imask);
7886 /* force the above write on the chip and get a value back */
7887 (void)read_csr(dd, addr);
7888 }
7889
7890 /* force the receive interrupt */
7891 void force_recv_intr(struct hfi1_ctxtdata *rcd)
7892 {
7893 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
7894 }
7895
7896 /* return non-zero if a packet is present */
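/*
 * Without DMA_RTAIL, a packet is detected by checking that the next RHF
 * sequence number matches the expected rcd->seq_cnt; with DMA_RTAIL, the
 * software head is compared against the DMA'd tail instead.
 */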
7897 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
7898 {
7899 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
7900 return (rcd->seq_cnt ==
7901 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
7902
7903 /* else is RDMA rtail */
7904 return (rcd->head != get_rcvhdrtail(rcd));
7905 }
7906
7907 /*
7908 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
7909 * This routine will try to handle packets immediately (latency), but if
7910 * it finds too many, it will invoke the thread handler (bandwitdh). The
7911  * it finds too many, it will invoke the thread handler (bandwidth). The
7912  * chip receive interrupt is *not* cleared down until this or the thread (if
7913 * are processing packets anyway.
7914 */
7915 static irqreturn_t receive_context_interrupt(int irq, void *data)
7916 {
7917 struct hfi1_ctxtdata *rcd = data;
7918 struct hfi1_devdata *dd = rcd->dd;
7919 int disposition;
7920 int present;
7921
7922 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
7923 this_cpu_inc(*dd->int_counter);
7924
7925 /* receive interrupt remains blocked while processing packets */
7926 disposition = rcd->do_interrupt(rcd, 0);
7927
7928 /*
7929 * Too many packets were seen while processing packets in this
7930 * IRQ handler. Invoke the handler thread. The receive interrupt
7931 * remains blocked.
7932 */
7933 if (disposition == RCV_PKT_LIMIT)
7934 return IRQ_WAKE_THREAD;
7935
7936 /*
7937 * The packet processor detected no more packets. Clear the receive
7938 	 * interrupt and recheck for a packet that may have arrived
7939 * after the previous check and interrupt clear. If a packet arrived,
7940 * force another interrupt.
7941 */
7942 clear_recv_intr(rcd);
7943 present = check_packet_present(rcd);
7944 if (present)
7945 force_recv_intr(rcd);
7946
7947 return IRQ_HANDLED;
7948 }
7949
7950 /*
7951 * Receive packet thread handler. This expects to be invoked with the
7952 * receive interrupt still blocked.
7953 */
7954 static irqreturn_t receive_context_thread(int irq, void *data)
7955 {
7956 struct hfi1_ctxtdata *rcd = data;
7957 int present;
7958
7959 /* receive interrupt is still blocked from the IRQ handler */
7960 (void)rcd->do_interrupt(rcd, 1);
7961
7962 /*
7963 * The packet processor will only return if it detected no more
7964 * packets. Hold IRQs here so we can safely clear the interrupt and
7965 * recheck for a packet that may have arrived after the previous
7966 * check and the interrupt clear. If a packet arrived, force another
7967 * interrupt.
7968 */
7969 local_irq_disable();
7970 clear_recv_intr(rcd);
7971 present = check_packet_present(rcd);
7972 if (present)
7973 force_recv_intr(rcd);
7974 local_irq_enable();
7975
7976 return IRQ_HANDLED;
7977 }
7978
7979 /* ========================================================================= */
7980
7981 u32 read_physical_state(struct hfi1_devdata *dd)
7982 {
7983 u64 reg;
7984
7985 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
7986 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
7987 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
7988 }
7989
7990 u32 read_logical_state(struct hfi1_devdata *dd)
7991 {
7992 u64 reg;
7993
7994 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
7995 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
7996 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
7997 }
7998
7999 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8000 {
8001 u64 reg;
8002
8003 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8004 /* clear current state, set new state */
8005 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8006 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8007 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8008 }
8009
8010 /*
8011 * Use the 8051 to read a LCB CSR.
8012 */
8013 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8014 {
8015 u32 regno;
8016 int ret;
8017
8018 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8019 if (acquire_lcb_access(dd, 0) == 0) {
8020 *data = read_csr(dd, addr);
8021 release_lcb_access(dd, 0);
8022 return 0;
8023 }
8024 return -EBUSY;
8025 }
8026
8027 /* register is an index of LCB registers: (offset - base) / 8 */
8028 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8029 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8030 if (ret != HCMD_SUCCESS)
8031 return -EBUSY;
8032 return 0;
8033 }
8034
8035 /*
8036 * Read an LCB CSR. Access may not be in host control, so check.
8037 * Return 0 on success, -EBUSY on failure.
8038 */
8039 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8040 {
8041 struct hfi1_pportdata *ppd = dd->pport;
8042
8043 /* if up, go through the 8051 for the value */
8044 if (ppd->host_link_state & HLS_UP)
8045 return read_lcb_via_8051(dd, addr, data);
8046 /* if going up or down, no access */
8047 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8048 return -EBUSY;
8049 /* otherwise, host has access */
8050 *data = read_csr(dd, addr);
8051 return 0;
8052 }
8053
8054 /*
8055 * Use the 8051 to write a LCB CSR.
8056 */
8057 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8058 {
8059 u32 regno;
8060 int ret;
8061
8062 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8063 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8064 if (acquire_lcb_access(dd, 0) == 0) {
8065 write_csr(dd, addr, data);
8066 release_lcb_access(dd, 0);
8067 return 0;
8068 }
8069 return -EBUSY;
8070 }
8071
8072 /* register is an index of LCB registers: (offset - base) / 8 */
8073 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8074 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8075 if (ret != HCMD_SUCCESS)
8076 return -EBUSY;
8077 return 0;
8078 }
8079
8080 /*
8081 * Write an LCB CSR. Access may not be in host control, so check.
8082 * Return 0 on success, -EBUSY on failure.
8083 */
8084 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8085 {
8086 struct hfi1_pportdata *ppd = dd->pport;
8087
8088 /* if up, go through the 8051 for the value */
8089 if (ppd->host_link_state & HLS_UP)
8090 return write_lcb_via_8051(dd, addr, data);
8091 /* if going up or down, no access */
8092 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8093 return -EBUSY;
8094 /* otherwise, host has access */
8095 write_csr(dd, addr, data);
8096 return 0;
8097 }
8098
8099 /*
8100 * Returns:
8101 * < 0 = Linux error, not able to get access
8102 * > 0 = 8051 command RETURN_CODE
8103 */
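/*
 * Command sequence, as implemented below: write the request type and data
 * to DC_DC8051_CFG_HOST_CMD_0, set REQ_NEW, poll HOST_CMD_1 for COMPLETED,
 * then extract RETURN_CODE and any RSP_DATA and clear the command register
 * for the next user.
 */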
8104 static int do_8051_command(
8105 struct hfi1_devdata *dd,
8106 u32 type,
8107 u64 in_data,
8108 u64 *out_data)
8109 {
8110 u64 reg, completed;
8111 int return_code;
8112 unsigned long flags;
8113 unsigned long timeout;
8114
8115 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8116
8117 /*
8118 * Alternative to holding the lock for a long time:
8119 * - keep busy wait - have other users bounce off
8120 */
8121 spin_lock_irqsave(&dd->dc8051_lock, flags);
8122
8123 /* We can't send any commands to the 8051 if it's in reset */
8124 if (dd->dc_shutdown) {
8125 return_code = -ENODEV;
8126 goto fail;
8127 }
8128
8129 /*
8130 * If an 8051 host command timed out previously, then the 8051 is
8131 * stuck.
8132 *
8133 * On first timeout, attempt to reset and restart the entire DC
8134 * block (including 8051). (Is this too big of a hammer?)
8135 *
8136 * If the 8051 times out a second time, the reset did not bring it
8137 * back to healthy life. In that case, fail any subsequent commands.
8138 */
8139 if (dd->dc8051_timed_out) {
8140 if (dd->dc8051_timed_out > 1) {
8141 dd_dev_err(dd,
8142 "Previous 8051 host command timed out, skipping command %u\n",
8143 type);
8144 return_code = -ENXIO;
8145 goto fail;
8146 }
8147 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8148 dc_shutdown(dd);
8149 dc_start(dd);
8150 spin_lock_irqsave(&dd->dc8051_lock, flags);
8151 }
8152
8153 /*
8154 * If there is no timeout, then the 8051 command interface is
8155 * waiting for a command.
8156 */
8157
8158 /*
8159 * When writing a LCB CSR, out_data contains the full value to
8160 	 * be written, while in_data contains the relative LCB
8161 * address in 7:0. Do the work here, rather than the caller,
8162 	 * of distributing the write data to where it needs to go:
8163 *
8164 * Write data
8165 * 39:00 -> in_data[47:8]
8166 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8167 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8168 */
8169 if (type == HCMD_WRITE_LCB_CSR) {
8170 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8171 reg = ((((*out_data) >> 40) & 0xff) <<
8172 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8173 | ((((*out_data) >> 48) & 0xffff) <<
8174 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8175 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8176 }
8177
8178 /*
8179 * Do two writes: the first to stabilize the type and req_data, the
8180 * second to activate.
8181 */
8182 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8183 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8184 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8185 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8186 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8187 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8188 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8189
8190 /* wait for completion, alternate: interrupt */
8191 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8192 while (1) {
8193 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8194 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8195 if (completed)
8196 break;
8197 if (time_after(jiffies, timeout)) {
8198 dd->dc8051_timed_out++;
8199 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8200 if (out_data)
8201 *out_data = 0;
8202 return_code = -ETIMEDOUT;
8203 goto fail;
8204 }
8205 udelay(2);
8206 }
8207
8208 if (out_data) {
8209 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8210 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8211 if (type == HCMD_READ_LCB_CSR) {
8212 /* top 16 bits are in a different register */
8213 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8214 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8215 << (48
8216 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8217 }
8218 }
8219 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8220 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8221 dd->dc8051_timed_out = 0;
8222 /*
8223 * Clear command for next user.
8224 */
8225 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8226
8227 fail:
8228 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8229
8230 return return_code;
8231 }
8232
8233 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8234 {
8235 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8236 }
8237
8238 static int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8239 u8 lane_id, u32 config_data)
8240 {
8241 u64 data;
8242 int ret;
8243
8244 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8245 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8246 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8247 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8248 if (ret != HCMD_SUCCESS) {
8249 dd_dev_err(dd,
8250 "load 8051 config: field id %d, lane %d, err %d\n",
8251 (int)field_id, (int)lane_id, ret);
8252 }
8253 return ret;
8254 }
8255
8256 /*
8257 * Read the 8051 firmware "registers". Use the RAM directly. Always
8258 * set the result, even on error.
8259 * Return 0 on success, -errno on failure
8260 */
8261 static int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8262 u32 *result)
8263 {
8264 u64 big_data;
8265 u32 addr;
8266 int ret;
8267
8268 /* address start depends on the lane_id */
8269 if (lane_id < 4)
8270 addr = (4 * NUM_GENERAL_FIELDS)
8271 + (lane_id * 4 * NUM_LANE_FIELDS);
8272 else
8273 addr = 0;
8274 addr += field_id * 4;
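/*
 * Assumed layout, inferred from the math above: the 4-byte general
 * fields sit at offset 0, followed by NUM_LANE_FIELDS fields for
 * each of the 4 lanes. A lane_id of 0-3 indexes the per-lane
 * region; any other lane_id (such as GENERAL_CONFIG) selects the
 * general region.
 */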
8275
8276 /* read is in 8-byte chunks, hardware will truncate the address down */
8277 ret = read_8051_data(dd, addr, 8, &big_data);
8278
8279 if (ret == 0) {
8280 /* extract the 4 bytes we want */
8281 if (addr & 0x4)
8282 *result = (u32)(big_data >> 32);
8283 else
8284 *result = (u32)big_data;
8285 } else {
8286 *result = 0;
8287 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8288 __func__, lane_id, field_id);
8289 }
8290
8291 return ret;
8292 }
8293
8294 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8295 u8 continuous)
8296 {
8297 u32 frame;
8298
8299 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8300 | power_management << POWER_MANAGEMENT_SHIFT;
8301 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8302 GENERAL_CONFIG, frame);
8303 }
8304
8305 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8306 u16 vl15buf, u8 crc_sizes)
8307 {
8308 u32 frame;
8309
8310 frame = (u32)vau << VAU_SHIFT
8311 | (u32)z << Z_SHIFT
8312 | (u32)vcu << VCU_SHIFT
8313 | (u32)vl15buf << VL15BUF_SHIFT
8314 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8315 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8316 GENERAL_CONFIG, frame);
8317 }
8318
8319 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8320 u8 *flag_bits, u16 *link_widths)
8321 {
8322 u32 frame;
8323
8324 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8325 &frame);
8326 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8327 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8328 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8329 }
8330
8331 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8332 u8 misc_bits,
8333 u8 flag_bits,
8334 u16 link_widths)
8335 {
8336 u32 frame;
8337
8338 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8339 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8340 | (u32)link_widths << LINK_WIDTH_SHIFT;
8341 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8342 frame);
8343 }
8344
8345 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8346 u8 device_rev)
8347 {
8348 u32 frame;
8349
8350 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8351 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8352 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8353 }
8354
8355 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8356 u8 *device_rev)
8357 {
8358 u32 frame;
8359
8360 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8361 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8362 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8363 & REMOTE_DEVICE_REV_MASK;
8364 }
8365
8366 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8367 {
8368 u32 frame;
8369
8370 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8371 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8372 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8373 }
8374
8375 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8376 u8 *continuous)
8377 {
8378 u32 frame;
8379
8380 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8381 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8382 & POWER_MANAGEMENT_MASK;
8383 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8384 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8385 }
8386
8387 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8388 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8389 {
8390 u32 frame;
8391
8392 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8393 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8394 *z = (frame >> Z_SHIFT) & Z_MASK;
8395 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8396 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8397 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8398 }
8399
8400 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8401 u8 *remote_tx_rate,
8402 u16 *link_widths)
8403 {
8404 u32 frame;
8405
8406 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8407 &frame);
8408 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8409 & REMOTE_TX_RATE_MASK;
8410 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8411 }
8412
8413 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8414 {
8415 u32 frame;
8416
8417 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8418 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8419 }
8420
8421 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8422 {
8423 u32 frame;
8424
8425 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8426 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8427 }
8428
8429 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8430 {
8431 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8432 }
8433
8434 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8435 {
8436 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8437 }
8438
8439 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8440 {
8441 u32 frame;
8442 int ret;
8443
8444 *link_quality = 0;
8445 if (dd->pport->host_link_state & HLS_UP) {
8446 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8447 &frame);
8448 if (ret == 0)
8449 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8450 & LINK_QUALITY_MASK;
8451 }
8452 }
8453
8454 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8455 {
8456 u32 frame;
8457
8458 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8459 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8460 }
8461
8462 static int read_tx_settings(struct hfi1_devdata *dd,
8463 u8 *enable_lane_tx,
8464 u8 *tx_polarity_inversion,
8465 u8 *rx_polarity_inversion,
8466 u8 *max_rate)
8467 {
8468 u32 frame;
8469 int ret;
8470
8471 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8472 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8473 & ENABLE_LANE_TX_MASK;
8474 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8475 & TX_POLARITY_INVERSION_MASK;
8476 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8477 & RX_POLARITY_INVERSION_MASK;
8478 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8479 return ret;
8480 }
8481
8482 static int write_tx_settings(struct hfi1_devdata *dd,
8483 u8 enable_lane_tx,
8484 u8 tx_polarity_inversion,
8485 u8 rx_polarity_inversion,
8486 u8 max_rate)
8487 {
8488 u32 frame;
8489
8490 /* no need to mask, all variable sizes match field widths */
8491 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8492 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8493 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8494 | max_rate << MAX_RATE_SHIFT;
8495 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8496 }
8497
8498 static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8499 {
8500 u32 frame, version, prod_id;
8501 int ret, lane;
8502
8503 /* 4 lanes */
8504 for (lane = 0; lane < 4; lane++) {
8505 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8506 if (ret) {
8507 dd_dev_err(
8508 dd,
8509 "Unable to read lane %d firmware details\n",
8510 lane);
8511 continue;
8512 }
8513 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8514 & SPICO_ROM_VERSION_MASK;
8515 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8516 & SPICO_ROM_PROD_ID_MASK;
8517 dd_dev_info(dd,
8518 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8519 lane, version, prod_id);
8520 }
8521 }
8522
8523 /*
8524 * Read an idle LCB message.
8525 *
8526 * Returns 0 on success, -EINVAL on error
8527 */
8528 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8529 {
8530 int ret;
8531
8532 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8533 type, data_out);
8534 if (ret != HCMD_SUCCESS) {
8535 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8536 (u32)type, ret);
8537 return -EINVAL;
8538 }
8539 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8540 /* return only the payload as we already know the type */
8541 *data_out >>= IDLE_PAYLOAD_SHIFT;
8542 return 0;
8543 }
8544
8545 /*
8546 * Read an idle SMA message. To be done in response to a notification from
8547 * the 8051.
8548 *
8549 * Returns 0 on success, -EINVAL on error
8550 */
8551 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8552 {
8553 return read_idle_message(dd,
8554 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8555 }
8556
8557 /*
8558 * Send an idle LCB message.
8559 *
8560 * Returns 0 on success, -EINVAL on error
8561 */
8562 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8563 {
8564 int ret;
8565
8566 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8567 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8568 if (ret != HCMD_SUCCESS) {
8569 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8570 data, ret);
8571 return -EINVAL;
8572 }
8573 return 0;
8574 }
8575
8576 /*
8577 * Send an idle SMA message.
8578 *
8579 * Returns 0 on success, -EINVAL on error
8580 */
8581 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8582 {
8583 u64 data;
8584
8585 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8586 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8587 return send_idle_message(dd, data);
8588 }
8589
8590 /*
8591 * Initialize the LCB then do a quick link up. This may or may not be
8592 * in loopback.
8593 *
8594 * return 0 on success, -errno on error
8595 */
8596 static int do_quick_linkup(struct hfi1_devdata *dd)
8597 {
8598 u64 reg;
8599 unsigned long timeout;
8600 int ret;
8601
8602 lcb_shutdown(dd, 0);
8603
8604 if (loopback) {
8605 /* LCB_CFG_LOOPBACK.VAL = 2 */
8606 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8607 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8608 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8609 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8610 }
8611
8612 /* start the LCBs */
8613 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8614 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8615
8616 /* simulator only loopback steps */
8617 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8618 /* LCB_CFG_RUN.EN = 1 */
8619 write_csr(dd, DC_LCB_CFG_RUN,
8620 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8621
8622 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8623 timeout = jiffies + msecs_to_jiffies(10);
8624 while (1) {
8625 reg = read_csr(dd,
8626 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8627 if (reg)
8628 break;
8629 if (time_after(jiffies, timeout)) {
8630 dd_dev_err(dd,
8631 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8632 return -ETIMEDOUT;
8633 }
8634 udelay(2);
8635 }
8636
8637 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8638 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8639 }
8640
8641 if (!loopback) {
8642 /*
8643 * When doing quick linkup and not in loopback, both
8644 * sides must be done with LCB set-up before either
8645 * starts the quick linkup. Put a delay here so that
8646 * both sides can be started and have a chance to be
8647 * done with LCB set up before resuming.
8648 */
8649 dd_dev_err(dd,
8650 "Pausing for peer to be finished with LCB set up\n");
8651 msleep(5000);
8652 dd_dev_err(dd,
8653 "Continuing with quick linkup\n");
8654 }
8655
8656 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8657 set_8051_lcb_access(dd);
8658
8659 /*
8660 * State "quick" LinkUp request sets the physical link state to
8661 * LinkUp without a verify capability sequence.
8662 * This state is in simulator v37 and later.
8663 */
8664 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8665 if (ret != HCMD_SUCCESS) {
8666 dd_dev_err(dd,
8667 "%s: set physical link state to quick LinkUp failed with return %d\n",
8668 __func__, ret);
8669
8670 set_host_lcb_access(dd);
8671 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8672
8673 if (ret >= 0)
8674 ret = -EINVAL;
8675 return ret;
8676 }
8677
8678 return 0; /* success */
8679 }
8680
8681 /*
8682 * Set the SerDes to internal loopback mode.
8683 * Returns 0 on success, -errno on error.
8684 */
8685 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8686 {
8687 int ret;
8688
8689 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8690 if (ret == HCMD_SUCCESS)
8691 return 0;
8692 dd_dev_err(dd,
8693 "Set physical link state to SerDes Loopback failed with return %d\n",
8694 ret);
8695 if (ret >= 0)
8696 ret = -EINVAL;
8697 return ret;
8698 }
8699
8700 /*
8701 * Do all special steps to set up loopback.
8702 */
8703 static int init_loopback(struct hfi1_devdata *dd)
8704 {
8705 dd_dev_info(dd, "Entering loopback mode\n");
8706
8707 /* all loopbacks should disable self GUID check */
8708 write_csr(dd, DC_DC8051_CFG_MODE,
8709 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8710
8711 /*
8712 * The simulator has only one loopback option - LCB. Switch
8713 * to that option, which includes quick link up.
8714 *
8715 * Accept all valid loopback values.
8716 */
8717 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8718 && (loopback == LOOPBACK_SERDES
8719 || loopback == LOOPBACK_LCB
8720 || loopback == LOOPBACK_CABLE)) {
8721 loopback = LOOPBACK_LCB;
8722 quick_linkup = 1;
8723 return 0;
8724 }
8725
8726 /* handle serdes loopback */
8727 if (loopback == LOOPBACK_SERDES) {
8728 /* internal serdes loopback needs quick linkup on RTL */
8729 if (dd->icode == ICODE_RTL_SILICON)
8730 quick_linkup = 1;
8731 return set_serdes_loopback_mode(dd);
8732 }
8733
8734 /* LCB loopback - handled at poll time */
8735 if (loopback == LOOPBACK_LCB) {
8736 quick_linkup = 1; /* LCB is always quick linkup */
8737
8738 /* not supported in emulation due to emulation RTL changes */
8739 if (dd->icode == ICODE_FPGA_EMULATION) {
8740 dd_dev_err(dd,
8741 "LCB loopback not supported in emulation\n");
8742 return -EINVAL;
8743 }
8744 return 0;
8745 }
8746
8747 /* external cable loopback requires no extra steps */
8748 if (loopback == LOOPBACK_CABLE)
8749 return 0;
8750
8751 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8752 return -EINVAL;
8753 }
8754
8755 /*
8756 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8757 * used in the Verify Capability link width attribute.
8758 */
8759 static u16 opa_to_vc_link_widths(u16 opa_widths)
8760 {
8761 int i;
8762 u16 result = 0;
8763
8764 static const struct link_bits {
8765 u16 from;
8766 u16 to;
8767 } opa_link_xlate[] = {
8768 { OPA_LINK_WIDTH_1X, 1 << (1-1) },
8769 { OPA_LINK_WIDTH_2X, 1 << (2-1) },
8770 { OPA_LINK_WIDTH_3X, 1 << (3-1) },
8771 { OPA_LINK_WIDTH_4X, 1 << (4-1) },
8772 };
8773
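/*
 * Each OPA width bit maps to a single VC link width bit
 * (1X..4X -> bits 0..3). For example, an enabled mask of 1X|4X
 * translates to 0x9. (Example value is illustrative.)
 */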
8774 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8775 if (opa_widths & opa_link_xlate[i].from)
8776 result |= opa_link_xlate[i].to;
8777 }
8778 return result;
8779 }
8780
8781 /*
8782 * Set link attributes before moving to polling.
8783 */
8784 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8785 {
8786 struct hfi1_devdata *dd = ppd->dd;
8787 u8 enable_lane_tx;
8788 u8 tx_polarity_inversion;
8789 u8 rx_polarity_inversion;
8790 int ret;
8791
8792 /* reset our fabric serdes to clear any lingering problems */
8793 fabric_serdes_reset(dd);
8794
8795 /* set the local tx rate - need to read-modify-write */
8796 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8797 &rx_polarity_inversion, &ppd->local_tx_rate);
8798 if (ret)
8799 goto set_local_link_attributes_fail;
8800
8801 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8802 /* set the tx rate to the fastest enabled */
8803 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8804 ppd->local_tx_rate = 1;
8805 else
8806 ppd->local_tx_rate = 0;
8807 } else {
8808 /* set the tx rate to all enabled */
8809 ppd->local_tx_rate = 0;
8810 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8811 ppd->local_tx_rate |= 2;
8812 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8813 ppd->local_tx_rate |= 1;
8814 }
8815
8816 enable_lane_tx = 0xF; /* enable all four lanes */
8817 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8818 rx_polarity_inversion, ppd->local_tx_rate);
8819 if (ret != HCMD_SUCCESS)
8820 goto set_local_link_attributes_fail;
8821
8822 /*
8823 * DC supports continuous updates.
8824 */
8825 ret = write_vc_local_phy(dd, 0 /* no power management */,
8826 1 /* continuous updates */);
8827 if (ret != HCMD_SUCCESS)
8828 goto set_local_link_attributes_fail;
8829
8830 /* z=1 in the next call: AU of 0 is not supported by the hardware */
8831 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
8832 ppd->port_crc_mode_enabled);
8833 if (ret != HCMD_SUCCESS)
8834 goto set_local_link_attributes_fail;
8835
8836 ret = write_vc_local_link_width(dd, 0, 0,
8837 opa_to_vc_link_widths(ppd->link_width_enabled));
8838 if (ret != HCMD_SUCCESS)
8839 goto set_local_link_attributes_fail;
8840
8841 /* let peer know who we are */
8842 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
8843 if (ret == HCMD_SUCCESS)
8844 return 0;
8845
8846 set_local_link_attributes_fail:
8847 dd_dev_err(dd,
8848 "Failed to set local link attributes, return 0x%x\n",
8849 ret);
8850 return ret;
8851 }
8852
8853 /*
8854 * Call this to start the link. Schedule a retry if the cable is not
8855 * present or if unable to start polling. Do not do anything if the
8856 * link is disabled. Returns 0 if link is disabled or moved to polling
8857 */
8858 int start_link(struct hfi1_pportdata *ppd)
8859 {
8860 if (!ppd->link_enabled) {
8861 dd_dev_info(ppd->dd,
8862 "%s: stopping link start because link is disabled\n",
8863 __func__);
8864 return 0;
8865 }
8866 if (!ppd->driver_link_ready) {
8867 dd_dev_info(ppd->dd,
8868 "%s: stopping link start because driver is not ready\n",
8869 __func__);
8870 return 0;
8871 }
8872
8873 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
8874 loopback == LOOPBACK_LCB ||
8875 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8876 return set_link_state(ppd, HLS_DN_POLL);
8877
8878 dd_dev_info(ppd->dd,
8879 "%s: stopping link start because no cable is present\n",
8880 __func__);
8881 return -EAGAIN;
8882 }
8883
8884 static void reset_qsfp(struct hfi1_pportdata *ppd)
8885 {
8886 struct hfi1_devdata *dd = ppd->dd;
8887 u64 mask, qsfp_mask;
8888
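/*
 * RESET_N is active low: enable the pin as an output, drive it low
 * to assert reset, hold briefly, then drive it high to release the
 * module from reset.
 */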
8889 mask = (u64)QSFP_HFI0_RESET_N;
8890 qsfp_mask = read_csr(dd,
8891 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
8892 qsfp_mask |= mask;
8893 write_csr(dd,
8894 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE,
8895 qsfp_mask);
8896
8897 qsfp_mask = read_csr(dd,
8898 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
8899 qsfp_mask &= ~mask;
8900 write_csr(dd,
8901 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
8902 qsfp_mask);
8903
8904 udelay(10);
8905
8906 qsfp_mask |= mask;
8907 write_csr(dd,
8908 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
8909 qsfp_mask);
8910 }
8911
8912 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
8913 u8 *qsfp_interrupt_status)
8914 {
8915 struct hfi1_devdata *dd = ppd->dd;
8916
8917 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
8918 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
8919 dd_dev_info(dd,
8920 "%s: QSFP cable temperature too high\n",
8921 __func__);
8922
8923 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
8924 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
8925 dd_dev_info(dd,
8926 "%s: QSFP cable temperature too low\n",
8927 __func__);
8928
8929 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
8930 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
8931 dd_dev_info(dd,
8932 "%s: QSFP supply voltage too high\n",
8933 __func__);
8934
8935 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
8936 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
8937 dd_dev_info(dd,
8938 "%s: QSFP supply voltage too low\n",
8939 __func__);
8940
8941 /* Byte 2 is vendor specific */
8942
8943 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
8944 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
8945 dd_dev_info(dd,
8946 "%s: Cable RX channel 1/2 power too high\n",
8947 __func__);
8948
8949 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
8950 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
8951 dd_dev_info(dd,
8952 "%s: Cable RX channel 1/2 power too low\n",
8953 __func__);
8954
8955 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
8956 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
8957 dd_dev_info(dd,
8958 "%s: Cable RX channel 3/4 power too high\n",
8959 __func__);
8960
8961 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
8962 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
8963 dd_dev_info(dd,
8964 "%s: Cable RX channel 3/4 power too low\n",
8965 __func__);
8966
8967 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
8968 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
8969 dd_dev_info(dd,
8970 "%s: Cable TX channel 1/2 bias too high\n",
8971 __func__);
8972
8973 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
8974 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
8975 dd_dev_info(dd,
8976 "%s: Cable TX channel 1/2 bias too low\n",
8977 __func__);
8978
8979 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
8980 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
8981 dd_dev_info(dd,
8982 "%s: Cable TX channel 3/4 bias too high\n",
8983 __func__);
8984
8985 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
8986 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
8987 dd_dev_info(dd,
8988 "%s: Cable TX channel 3/4 bias too low\n",
8989 __func__);
8990
8991 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
8992 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
8993 dd_dev_info(dd,
8994 "%s: Cable TX channel 1/2 power too high\n",
8995 __func__);
8996
8997 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
8998 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
8999 dd_dev_info(dd,
9000 "%s: Cable TX channel 1/2 power too low\n",
9001 __func__);
9002
9003 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9004 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9005 dd_dev_info(dd,
9006 "%s: Cable TX channel 3/4 power too high\n",
9007 __func__);
9008
9009 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9010 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9011 dd_dev_info(dd,
9012 "%s: Cable TX channel 3/4 power too low\n",
9013 __func__);
9014
9015 /* Bytes 9-10 and 11-12 are reserved */
9016 /* Bytes 13-15 are vendor specific */
9017
9018 return 0;
9019 }
9020
9021 static int do_pre_lni_host_behaviors(struct hfi1_pportdata *ppd)
9022 {
9023 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
9024
9025 return 0;
9026 }
9027
9028 static int do_qsfp_intr_fallback(struct hfi1_pportdata *ppd)
9029 {
9030 struct hfi1_devdata *dd = ppd->dd;
9031 u8 qsfp_interrupt_status = 0;
9032
9033 if (qsfp_read(ppd, dd->hfi1_id, 2, &qsfp_interrupt_status, 1)
9034 != 1) {
9035 dd_dev_info(dd,
9036 "%s: Failed to read status of QSFP module\n",
9037 __func__);
9038 return -EIO;
9039 }
9040
9041 /* We don't care about alarms & warnings with a non-functional INT_N */
9042 if (!(qsfp_interrupt_status & QSFP_DATA_NOT_READY))
9043 do_pre_lni_host_behaviors(ppd);
9044
9045 return 0;
9046 }
9047
9048 /* This routine will only be scheduled if the QSFP module is present */
9049 static void qsfp_event(struct work_struct *work)
9050 {
9051 struct qsfp_data *qd;
9052 struct hfi1_pportdata *ppd;
9053 struct hfi1_devdata *dd;
9054
9055 qd = container_of(work, struct qsfp_data, qsfp_work);
9056 ppd = qd->ppd;
9057 dd = ppd->dd;
9058
9059 /* Sanity check */
9060 if (!qsfp_mod_present(ppd))
9061 return;
9062
9063 /*
9064 * Turn DC back on after the cable has been
9065 * re-inserted. Up until now, the DC has been in
9066 * reset to save power.
9067 */
9068 dc_start(dd);
9069
9070 if (qd->cache_refresh_required) {
9071 msleep(3000);
9072 reset_qsfp(ppd);
9073
9074 /* Check for QSFP interrupt after t_init (SFF 8679)
9075 * + extra
9076 */
9077 msleep(3000);
9078 if (!qd->qsfp_interrupt_functional) {
9079 if (do_qsfp_intr_fallback(ppd) < 0)
9080 dd_dev_info(dd, "%s: QSFP fallback failed\n",
9081 __func__);
9082 ppd->driver_link_ready = 1;
9083 start_link(ppd);
9084 }
9085 }
9086
9087 if (qd->check_interrupt_flags) {
9088 u8 qsfp_interrupt_status[16] = {0,};
9089
9090 if (qsfp_read(ppd, dd->hfi1_id, 6,
9091 &qsfp_interrupt_status[0], 16) != 16) {
9092 dd_dev_info(dd,
9093 "%s: Failed to read status of QSFP module\n",
9094 __func__);
9095 } else {
9096 unsigned long flags;
9097 u8 data_status;
9098
9099 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9100 ppd->qsfp_info.check_interrupt_flags = 0;
9101 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9102 flags);
9103
9104 if (qsfp_read(ppd, dd->hfi1_id, 2, &data_status, 1)
9105 != 1) {
9106 dd_dev_info(dd,
9107 "%s: Failed to read status of QSFP module\n",
9108 __func__);
9109 }
9110 if (!(data_status & QSFP_DATA_NOT_READY)) {
9111 do_pre_lni_host_behaviors(ppd);
9112 start_link(ppd);
9113 } else
9114 handle_qsfp_error_conditions(ppd,
9115 qsfp_interrupt_status);
9116 }
9117 }
9118 }
9119
9120 void init_qsfp(struct hfi1_pportdata *ppd)
9121 {
9122 struct hfi1_devdata *dd = ppd->dd;
9123 u64 qsfp_mask;
9124
9125 if (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9126 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9127 ppd->driver_link_ready = 1;
9128 return;
9129 }
9130
9131 ppd->qsfp_info.ppd = ppd;
9132 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
9133
9134 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9135 /* Clear current status to avoid spurious interrupts */
9136 write_csr(dd,
9137 dd->hfi1_id ?
9138 ASIC_QSFP2_CLEAR :
9139 ASIC_QSFP1_CLEAR,
9140 qsfp_mask);
9141
9142 /* Handle active low nature of INT_N and MODPRST_N pins */
9143 if (qsfp_mod_present(ppd))
9144 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9145 write_csr(dd,
9146 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9147 qsfp_mask);
9148
9149 /* Allow only INT_N and MODPRST_N to trigger QSFP interrupts */
9150 qsfp_mask |= (u64)QSFP_HFI0_MODPRST_N;
9151 write_csr(dd,
9152 dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9153 qsfp_mask);
9154
9155 if (qsfp_mod_present(ppd)) {
9156 msleep(3000);
9157 reset_qsfp(ppd);
9158
9159 /* Check for QSFP interrupt after t_init (SFF 8679)
9160 * + extra
9161 */
9162 msleep(3000);
9163 if (!ppd->qsfp_info.qsfp_interrupt_functional) {
9164 if (do_qsfp_intr_fallback(ppd) < 0)
9165 dd_dev_info(dd,
9166 "%s: QSFP fallback failed\n",
9167 __func__);
9168 ppd->driver_link_ready = 1;
9169 }
9170 }
9171 }
9172
9173 /*
9174 * Do a one-time initialize of the LCB block.
9175 */
9176 static void init_lcb(struct hfi1_devdata *dd)
9177 {
9178 /* the DC has been reset earlier in the driver load */
9179
9180 /* set LCB for cclk loopback on the port */
9181 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9182 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9183 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9184 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9185 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9186 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9187 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9188 }
9189
9190 int bringup_serdes(struct hfi1_pportdata *ppd)
9191 {
9192 struct hfi1_devdata *dd = ppd->dd;
9193 u64 guid;
9194 int ret;
9195
9196 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9197 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9198
9199 guid = ppd->guid;
9200 if (!guid) {
9201 if (dd->base_guid)
9202 guid = dd->base_guid + ppd->port - 1;
9203 ppd->guid = guid;
9204 }
9205
9206 /* the link defaults to enabled */
9207 ppd->link_enabled = 1;
9208 /* Set linkinit_reason on power up per OPA spec */
9209 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9210
9211 /* one-time init of the LCB */
9212 init_lcb(dd);
9213
9214 if (loopback) {
9215 ret = init_loopback(dd);
9216 if (ret < 0)
9217 return ret;
9218 }
9219
9220 return start_link(ppd);
9221 }
9222
9223 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9224 {
9225 struct hfi1_devdata *dd = ppd->dd;
9226
9227 /*
9228 * Shut down the link and keep it down. First clear the flag that
9229 * says the driver wants to allow the link to be up (driver_link_ready).
9230 * Then make sure the link is not automatically restarted
9231 * (link_enabled). Cancel any pending restart. And finally
9232 * go offline.
9233 */
9234 ppd->driver_link_ready = 0;
9235 ppd->link_enabled = 0;
9236
9237 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9238 OPA_LINKDOWN_REASON_SMA_DISABLED);
9239 set_link_state(ppd, HLS_DN_OFFLINE);
9240
9241 /* disable the port */
9242 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9243 }
9244
9245 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9246 {
9247 struct hfi1_pportdata *ppd;
9248 int i;
9249
9250 ppd = (struct hfi1_pportdata *)(dd + 1);
9251 for (i = 0; i < dd->num_pports; i++, ppd++) {
9252 ppd->ibport_data.rvp.rc_acks = NULL;
9253 ppd->ibport_data.rvp.rc_qacks = NULL;
9254 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9255 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9256 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9257 if (!ppd->ibport_data.rvp.rc_acks ||
9258 !ppd->ibport_data.rvp.rc_delayed_comp ||
9259 !ppd->ibport_data.rvp.rc_qacks)
9260 return -ENOMEM;
9261 }
9262
9263 return 0;
9264 }
9265
9266 static const char * const pt_names[] = {
9267 "expected",
9268 "eager",
9269 "invalid"
9270 };
9271
9272 static const char *pt_name(u32 type)
9273 {
9274 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9275 }
9276
9277 /*
9278 * index is the index into the receive array
9279 */
9280 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9281 u32 type, unsigned long pa, u16 order)
9282 {
9283 u64 reg;
9284 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9285 (dd->kregbase + RCV_ARRAY));
9286
9287 if (!(dd->flags & HFI1_PRESENT))
9288 goto done;
9289
9290 if (type == PT_INVALID) {
9291 pa = 0;
9292 } else if (type > PT_INVALID) {
9293 dd_dev_err(dd,
9294 "unexpected receive array type %u for index %u, not handled\n",
9295 type, index);
9296 goto done;
9297 }
9298
9299 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9300 pt_name(type), index, pa, (unsigned long)order);
9301
9302 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
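/*
 * Each RcvArray entry packs a write-enable bit, the buffer size
 * encoding (order), and the buffer's physical address expressed in
 * 4KB units (pa >> RT_ADDR_SHIFT).
 */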
9303 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9304 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9305 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9306 << RCV_ARRAY_RT_ADDR_SHIFT;
9307 writeq(reg, base + (index * 8));
9308
9309 if (type == PT_EAGER)
9310 /*
9311 * Eager entries are written one-by-one so we have to push them
9312 * after we write the entry.
9313 */
9314 flush_wc();
9315 done:
9316 return;
9317 }
9318
9319 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9320 {
9321 struct hfi1_devdata *dd = rcd->dd;
9322 u32 i;
9323
9324 /* this could be optimized */
9325 for (i = rcd->eager_base; i < rcd->eager_base +
9326 rcd->egrbufs.alloced; i++)
9327 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9328
9329 for (i = rcd->expected_base;
9330 i < rcd->expected_base + rcd->expected_count; i++)
9331 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9332 }
9333
9334 int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9335 struct hfi1_ctxt_info *kinfo)
9336 {
9337 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9338 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9339 return 0;
9340 }
9341
9342 struct hfi1_message_header *hfi1_get_msgheader(
9343 struct hfi1_devdata *dd, __le32 *rhf_addr)
9344 {
9345 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9346
9347 return (struct hfi1_message_header *)
9348 (rhf_addr - dd->rhf_offset + offset);
9349 }
9350
9351 static const char * const ib_cfg_name_strings[] = {
9352 "HFI1_IB_CFG_LIDLMC",
9353 "HFI1_IB_CFG_LWID_DG_ENB",
9354 "HFI1_IB_CFG_LWID_ENB",
9355 "HFI1_IB_CFG_LWID",
9356 "HFI1_IB_CFG_SPD_ENB",
9357 "HFI1_IB_CFG_SPD",
9358 "HFI1_IB_CFG_RXPOL_ENB",
9359 "HFI1_IB_CFG_LREV_ENB",
9360 "HFI1_IB_CFG_LINKLATENCY",
9361 "HFI1_IB_CFG_HRTBT",
9362 "HFI1_IB_CFG_OP_VLS",
9363 "HFI1_IB_CFG_VL_HIGH_CAP",
9364 "HFI1_IB_CFG_VL_LOW_CAP",
9365 "HFI1_IB_CFG_OVERRUN_THRESH",
9366 "HFI1_IB_CFG_PHYERR_THRESH",
9367 "HFI1_IB_CFG_LINKDEFAULT",
9368 "HFI1_IB_CFG_PKEYS",
9369 "HFI1_IB_CFG_MTU",
9370 "HFI1_IB_CFG_LSTATE",
9371 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9372 "HFI1_IB_CFG_PMA_TICKS",
9373 "HFI1_IB_CFG_PORT"
9374 };
9375
9376 static const char *ib_cfg_name(int which)
9377 {
9378 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9379 return "invalid";
9380 return ib_cfg_name_strings[which];
9381 }
9382
9383 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9384 {
9385 struct hfi1_devdata *dd = ppd->dd;
9386 int val = 0;
9387
9388 switch (which) {
9389 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9390 val = ppd->link_width_enabled;
9391 break;
9392 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9393 val = ppd->link_width_active;
9394 break;
9395 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9396 val = ppd->link_speed_enabled;
9397 break;
9398 case HFI1_IB_CFG_SPD: /* current Link speed */
9399 val = ppd->link_speed_active;
9400 break;
9401
9402 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9403 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9404 case HFI1_IB_CFG_LINKLATENCY:
9405 goto unimplemented;
9406
9407 case HFI1_IB_CFG_OP_VLS:
9408 val = ppd->vls_operational;
9409 break;
9410 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9411 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9412 break;
9413 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9414 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9415 break;
9416 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9417 val = ppd->overrun_threshold;
9418 break;
9419 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9420 val = ppd->phy_error_threshold;
9421 break;
9422 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9423 val = dd->link_default;
9424 break;
9425
9426 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9427 case HFI1_IB_CFG_PMA_TICKS:
9428 default:
9429 unimplemented:
9430 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9431 dd_dev_info(
9432 dd,
9433 "%s: which %s: not implemented\n",
9434 __func__,
9435 ib_cfg_name(which));
9436 break;
9437 }
9438
9439 return val;
9440 }
9441
9442 /*
9443 * The largest MAD packet size.
9444 */
9445 #define MAX_MAD_PACKET 2048
9446
9447 /*
9448 * Return the maximum header bytes that can go on the _wire_
9449 * for this device. This count includes the ICRC which is
9450 * not part of the packet held in memory but it is appended
9451 * by the HW.
9452 * This is dependent on the device's receive header entry size.
9453 * HFI allows this to be set per-receive context, but the
9454 * driver presently enforces a global value.
9455 */
9456 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9457 {
9458 /*
9459 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9460 * the Receive Header Entry Size minus the PBC (or RHF) size
9461 * plus one DW for the ICRC appended by HW.
9462 *
9463 * dd->rcd[0].rcvhdrqentsize is in DW.
9464 * We use rcd[0] as all contexts will have the same value. Also,
9465 * the first kernel context would have been allocated by now so
9466 * we are guaranteed a valid value.
9467 */
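/*
 * For example (entry size illustrative), a 32 DW receive header
 * entry size gives (32 - 2 + 1) * 4 = 124 max header bytes on the
 * wire.
 */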
9468 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9469 }
9470
9471 /*
9472 * Set Send Length
9473 * @ppd - per port data
9474 *
9475 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9476 * registers compare against LRH.PktLen, so use the max bytes included
9477 * in the LRH.
9478 *
9479 * This routine changes all VL values except VL15, which it maintains at
9480 * the same value.
9481 */
9482 static void set_send_length(struct hfi1_pportdata *ppd)
9483 {
9484 struct hfi1_devdata *dd = ppd->dd;
9485 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9486 u32 maxvlmtu = dd->vld[15].mtu;
9487 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9488 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9489 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9490 int i;
9491
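/*
 * VLs 0-3 are packed into SEND_LEN_CHECK0 and VLs 4-7 into
 * SEND_LEN_CHECK1 (alongside VL15, set above). Each per-VL field
 * holds the largest allowed LRH.PktLen in DWs: (MTU + max header
 * bytes) >> 2.
 */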
9492 for (i = 0; i < ppd->vls_supported; i++) {
9493 if (dd->vld[i].mtu > maxvlmtu)
9494 maxvlmtu = dd->vld[i].mtu;
9495 if (i <= 3)
9496 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9497 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9498 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9499 else
9500 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9501 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9502 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9503 }
9504 write_csr(dd, SEND_LEN_CHECK0, len1);
9505 write_csr(dd, SEND_LEN_CHECK1, len2);
9506 /* adjust kernel credit return thresholds based on new MTUs */
9507 /* all kernel receive contexts have the same hdrqentsize */
9508 for (i = 0; i < ppd->vls_supported; i++) {
9509 sc_set_cr_threshold(dd->vld[i].sc,
9510 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9511 dd->rcd[0]->rcvhdrqentsize));
9512 }
9513 sc_set_cr_threshold(dd->vld[15].sc,
9514 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9515 dd->rcd[0]->rcvhdrqentsize));
9516
9517 /* Adjust maximum MTU for the port in DC */
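/*
 * The cap is encoded as ilog2(MTU >> 8) + 1, with 10240 using its
 * own code. For example, a 4096-byte maximum MTU encodes as
 * ilog2(16) + 1 = 5. (Example value is illustrative.)
 */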
9518 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9519 (ilog2(maxvlmtu >> 8) + 1);
9520 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9521 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9522 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9523 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9524 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9525 }
9526
9527 static void set_lidlmc(struct hfi1_pportdata *ppd)
9528 {
9529 int i;
9530 u64 sreg = 0;
9531 struct hfi1_devdata *dd = ppd->dd;
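/*
 * The LMC mask below clears the low 'lmc' bits: e.g. an LMC of 2
 * yields a mask of 0xfffffffc, so the bottom two LID bits are
 * ignored by the DLID/SLID checks. (LMC value is illustrative.)
 */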
9532 u32 mask = ~((1U << ppd->lmc) - 1);
9533 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9534
9535 if (dd->hfi1_snoop.mode_flag)
9536 dd_dev_info(dd, "Set lid/lmc while snooping");
9537
9538 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9539 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9540 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9541 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
9542 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9543 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9544 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9545
9546 /*
9547 * Iterate over all the send contexts and set their SLID check
9548 */
9549 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9550 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9551 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9552 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9553
9554 for (i = 0; i < dd->chip_send_contexts; i++) {
9555 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9556 i, (u32)sreg);
9557 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9558 }
9559
9560 /* Now we have to do the same thing for the sdma engines */
9561 sdma_update_lmc(dd, mask, ppd->lid);
9562 }
9563
9564 static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9565 {
9566 unsigned long timeout;
9567 u32 curr_state;
9568
9569 timeout = jiffies + msecs_to_jiffies(msecs);
9570 while (1) {
9571 curr_state = read_physical_state(dd);
9572 if (curr_state == state)
9573 break;
9574 if (time_after(jiffies, timeout)) {
9575 dd_dev_err(dd,
9576 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9577 state, curr_state);
9578 return -ETIMEDOUT;
9579 }
9580 usleep_range(1950, 2050); /* sleep 2ms-ish */
9581 }
9582
9583 return 0;
9584 }
9585
9586 /*
9587 * Helper for set_link_state(). Do not call except from that routine.
9588 * Expects ppd->hls_mutex to be held.
9589 *
9590 * @rem_reason value to be sent to the neighbor
9591 *
9592 * LinkDownReasons only set if transition succeeds.
9593 */
9594 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9595 {
9596 struct hfi1_devdata *dd = ppd->dd;
9597 u32 pstate, previous_state;
9598 u32 last_local_state;
9599 u32 last_remote_state;
9600 int ret;
9601 int do_transition;
9602 int do_wait;
9603
9604 previous_state = ppd->host_link_state;
9605 ppd->host_link_state = HLS_GOING_OFFLINE;
9606 pstate = read_physical_state(dd);
9607 if (pstate == PLS_OFFLINE) {
9608 do_transition = 0; /* in right state */
9609 do_wait = 0; /* ...no need to wait */
9610 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9611 do_transition = 0; /* in an offline transient state */
9612 do_wait = 1; /* ...wait for it to settle */
9613 } else {
9614 do_transition = 1; /* need to move to offline */
9615 do_wait = 1; /* ...will need to wait */
9616 }
9617
9618 if (do_transition) {
9619 ret = set_physical_link_state(dd,
9620 PLS_OFFLINE | (rem_reason << 8));
9621
9622 if (ret != HCMD_SUCCESS) {
9623 dd_dev_err(dd,
9624 "Failed to transition to Offline link state, return %d\n",
9625 ret);
9626 return -EINVAL;
9627 }
9628 if (ppd->offline_disabled_reason ==
9629 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
9630 ppd->offline_disabled_reason =
9631 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
9632 }
9633
9634 if (do_wait) {
9635 /* it can take a while for the link to go down */
9636 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9637 if (ret < 0)
9638 return ret;
9639 }
9640
9641 /* make sure the logical state is also down */
9642 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9643
9644 /*
9645 * Now in charge of LCB - must be after the physical state is
9646 * offline.quiet and before host_link_state is changed.
9647 */
9648 set_host_lcb_access(dd);
9649 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9650 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9651
9652 /*
9653 * The LNI has a mandatory wait time after the physical state
9654 * moves to Offline.Quiet. The wait time may be different
9655 * depending on how the link went down. The 8051 firmware
9656 * will observe the needed wait time and only move to ready
9657 * when that is completed. The largest of the quiet timeouts
9658 * is 6s, so wait that long and then at least 0.5s more for
9659 * other transitions, and another 0.5s for a buffer.
9660 */
9661 ret = wait_fm_ready(dd, 7000);
9662 if (ret) {
9663 dd_dev_err(dd,
9664 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9665 /* state is really offline, so make it so */
9666 ppd->host_link_state = HLS_DN_OFFLINE;
9667 return ret;
9668 }
9669
9670 /*
9671 * The state is now offline and the 8051 is ready to accept host
9672 * requests.
9673 * - change our state
9674 * - notify others if we were previously in a linkup state
9675 */
9676 ppd->host_link_state = HLS_DN_OFFLINE;
9677 if (previous_state & HLS_UP) {
9678 /* went down while link was up */
9679 handle_linkup_change(dd, 0);
9680 } else if (previous_state
9681 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9682 /* went down while attempting link up */
9683 /* byte 1 of last_*_state is the failure reason */
9684 read_last_local_state(dd, &last_local_state);
9685 read_last_remote_state(dd, &last_remote_state);
9686 dd_dev_err(dd,
9687 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9688 last_local_state, last_remote_state);
9689 }
9690
9691 /* the active link width (downgrade) is 0 on link down */
9692 ppd->link_width_active = 0;
9693 ppd->link_width_downgrade_tx_active = 0;
9694 ppd->link_width_downgrade_rx_active = 0;
9695 ppd->current_egress_rate = 0;
9696 return 0;
9697 }
9698
9699 /* return the link state name */
9700 static const char *link_state_name(u32 state)
9701 {
9702 const char *name;
9703 int n = ilog2(state);
9704 static const char * const names[] = {
9705 [__HLS_UP_INIT_BP] = "INIT",
9706 [__HLS_UP_ARMED_BP] = "ARMED",
9707 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9708 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9709 [__HLS_DN_POLL_BP] = "POLL",
9710 [__HLS_DN_DISABLE_BP] = "DISABLE",
9711 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9712 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9713 [__HLS_GOING_UP_BP] = "GOING_UP",
9714 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9715 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9716 };
9717
9718 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9719 return name ? name : "unknown";
9720 }
9721
9722 /* return the link state reason name */
9723 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9724 {
9725 if (state == HLS_UP_INIT) {
9726 switch (ppd->linkinit_reason) {
9727 case OPA_LINKINIT_REASON_LINKUP:
9728 return "(LINKUP)";
9729 case OPA_LINKINIT_REASON_FLAPPING:
9730 return "(FLAPPING)";
9731 case OPA_LINKINIT_OUTSIDE_POLICY:
9732 return "(OUTSIDE_POLICY)";
9733 case OPA_LINKINIT_QUARANTINED:
9734 return "(QUARANTINED)";
9735 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9736 return "(INSUFIC_CAPABILITY)";
9737 default:
9738 break;
9739 }
9740 }
9741 return "";
9742 }
9743
9744 /*
9745 * driver_physical_state - convert the driver's notion of a port's
9746 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9747 * Return -1 (converted to a u32) to indicate error.
9748 */
9749 u32 driver_physical_state(struct hfi1_pportdata *ppd)
9750 {
9751 switch (ppd->host_link_state) {
9752 case HLS_UP_INIT:
9753 case HLS_UP_ARMED:
9754 case HLS_UP_ACTIVE:
9755 return IB_PORTPHYSSTATE_LINKUP;
9756 case HLS_DN_POLL:
9757 return IB_PORTPHYSSTATE_POLLING;
9758 case HLS_DN_DISABLE:
9759 return IB_PORTPHYSSTATE_DISABLED;
9760 case HLS_DN_OFFLINE:
9761 return OPA_PORTPHYSSTATE_OFFLINE;
9762 case HLS_VERIFY_CAP:
9763 return IB_PORTPHYSSTATE_POLLING;
9764 case HLS_GOING_UP:
9765 return IB_PORTPHYSSTATE_POLLING;
9766 case HLS_GOING_OFFLINE:
9767 return OPA_PORTPHYSSTATE_OFFLINE;
9768 case HLS_LINK_COOLDOWN:
9769 return OPA_PORTPHYSSTATE_OFFLINE;
9770 case HLS_DN_DOWNDEF:
9771 default:
9772 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9773 ppd->host_link_state);
9774 return -1;
9775 }
9776 }
9777
9778 /*
9779 * driver_logical_state - convert the driver's notion of a port's
9780 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9781 * (converted to a u32) to indicate error.
9782 */
9783 u32 driver_logical_state(struct hfi1_pportdata *ppd)
9784 {
9785 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9786 return IB_PORT_DOWN;
9787
9788 switch (ppd->host_link_state & HLS_UP) {
9789 case HLS_UP_INIT:
9790 return IB_PORT_INIT;
9791 case HLS_UP_ARMED:
9792 return IB_PORT_ARMED;
9793 case HLS_UP_ACTIVE:
9794 return IB_PORT_ACTIVE;
9795 default:
9796 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9797 ppd->host_link_state);
9798 return -1;
9799 }
9800 }
9801
9802 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9803 u8 neigh_reason, u8 rem_reason)
9804 {
9805 if (ppd->local_link_down_reason.latest == 0 &&
9806 ppd->neigh_link_down_reason.latest == 0) {
9807 ppd->local_link_down_reason.latest = lcl_reason;
9808 ppd->neigh_link_down_reason.latest = neigh_reason;
9809 ppd->remote_link_down_reason = rem_reason;
9810 }
9811 }
9812
9813 /*
9814 * Change the physical and/or logical link state.
9815 *
9816 * Do not call this routine while inside an interrupt. It contains
9817 * calls to routines that can take multiple seconds to finish.
9818 *
9819 * Returns 0 on success, -errno on failure.
9820 */
9821 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
9822 {
9823 struct hfi1_devdata *dd = ppd->dd;
9824 struct ib_event event = {.device = NULL};
9825 int ret1, ret = 0;
9826 int was_up, is_down;
9827 int orig_new_state, poll_bounce;
9828
9829 mutex_lock(&ppd->hls_lock);
9830
9831 orig_new_state = state;
9832 if (state == HLS_DN_DOWNDEF)
9833 state = dd->link_default;
9834
9835 /* interpret poll -> poll as a link bounce */
9836 poll_bounce = ppd->host_link_state == HLS_DN_POLL
9837 && state == HLS_DN_POLL;
9838
9839 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
9840 link_state_name(ppd->host_link_state),
9841 link_state_name(orig_new_state),
9842 poll_bounce ? "(bounce) " : "",
9843 link_state_reason_name(ppd, state));
9844
9845 was_up = !!(ppd->host_link_state & HLS_UP);
9846
9847 /*
9848 * If we're going to a (HLS_*) link state that implies the logical
9849 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
9850 * reset is_sm_config_started to 0.
9851 */
9852 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
9853 ppd->is_sm_config_started = 0;
9854
9855 /*
9856 * Do nothing if the states match. Let a poll to poll link bounce
9857 * go through.
9858 */
9859 if (ppd->host_link_state == state && !poll_bounce)
9860 goto done;
9861
9862 switch (state) {
9863 case HLS_UP_INIT:
9864 if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
9865 || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
9866 /*
9867 * Quick link up jumps from polling to here.
9868 *
9869 * Whether in normal or loopback mode, the
9870 * simulator jumps from polling to link up.
9871 * Accept that here.
9872 */
9873 /* OK */;
9874 } else if (ppd->host_link_state != HLS_GOING_UP) {
9875 goto unexpected;
9876 }
9877
9878 ppd->host_link_state = HLS_UP_INIT;
9879 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
9880 if (ret) {
9881 /* logical state didn't change, stay at going_up */
9882 ppd->host_link_state = HLS_GOING_UP;
9883 dd_dev_err(dd,
9884 "%s: logical state did not change to INIT\n",
9885 __func__);
9886 } else {
9887 /* clear old transient LINKINIT_REASON code */
9888 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
9889 ppd->linkinit_reason =
9890 OPA_LINKINIT_REASON_LINKUP;
9891
9892 /* enable the port */
9893 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9894
9895 handle_linkup_change(dd, 1);
9896 }
9897 break;
9898 case HLS_UP_ARMED:
9899 if (ppd->host_link_state != HLS_UP_INIT)
9900 goto unexpected;
9901
9902 ppd->host_link_state = HLS_UP_ARMED;
9903 set_logical_state(dd, LSTATE_ARMED);
9904 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
9905 if (ret) {
9906 /* logical state didn't change, stay at init */
9907 ppd->host_link_state = HLS_UP_INIT;
9908 dd_dev_err(dd,
9909 "%s: logical state did not change to ARMED\n",
9910 __func__);
9911 }
9912 /*
9913 * The simulator does not currently implement SMA messages,
9914 * so neighbor_normal is not set. Set it here when we first
9915 * move to Armed.
9916 */
9917 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9918 ppd->neighbor_normal = 1;
9919 break;
9920 case HLS_UP_ACTIVE:
9921 if (ppd->host_link_state != HLS_UP_ARMED)
9922 goto unexpected;
9923
9924 ppd->host_link_state = HLS_UP_ACTIVE;
9925 set_logical_state(dd, LSTATE_ACTIVE);
9926 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
9927 if (ret) {
9928 /* logical state didn't change, stay at armed */
9929 ppd->host_link_state = HLS_UP_ARMED;
9930 dd_dev_err(dd,
9931 "%s: logical state did not change to ACTIVE\n",
9932 __func__);
9933 } else {
9934
9935 /* tell all engines to go running */
9936 sdma_all_running(dd);
9937
9938 /* Signal the IB layer that the port has gone active */
9939 event.device = &dd->verbs_dev.rdi.ibdev;
9940 event.element.port_num = ppd->port;
9941 event.event = IB_EVENT_PORT_ACTIVE;
9942 }
9943 break;
9944 case HLS_DN_POLL:
9945 if ((ppd->host_link_state == HLS_DN_DISABLE ||
9946 ppd->host_link_state == HLS_DN_OFFLINE) &&
9947 dd->dc_shutdown)
9948 dc_start(dd);
9949 /* Hand LED control to the DC */
9950 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
9951
9952 if (ppd->host_link_state != HLS_DN_OFFLINE) {
9953 u8 tmp = ppd->link_enabled;
9954
9955 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9956 if (ret) {
9957 ppd->link_enabled = tmp;
9958 break;
9959 }
9960 ppd->remote_link_down_reason = 0;
9961
9962 if (ppd->driver_link_ready)
9963 ppd->link_enabled = 1;
9964 }
9965
9966 set_all_slowpath(ppd->dd);
9967 ret = set_local_link_attributes(ppd);
9968 if (ret)
9969 break;
9970
9971 ppd->port_error_action = 0;
9972 ppd->host_link_state = HLS_DN_POLL;
9973
9974 if (quick_linkup) {
9975 /* quick linkup does not go into polling */
9976 ret = do_quick_linkup(dd);
9977 } else {
9978 ret1 = set_physical_link_state(dd, PLS_POLLING);
9979 if (ret1 != HCMD_SUCCESS) {
9980 dd_dev_err(dd,
9981 "Failed to transition to Polling link state, return 0x%x\n",
9982 ret1);
9983 ret = -EINVAL;
9984 }
9985 }
9986 ppd->offline_disabled_reason =
9987 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
9988 /*
9989 * If an error occurred above, go back to offline. The
9990 * caller may reschedule another attempt.
9991 */
9992 if (ret)
9993 goto_offline(ppd, 0);
9994 break;
9995 case HLS_DN_DISABLE:
9996 /* link is disabled */
9997 ppd->link_enabled = 0;
9998
9999 /* allow any state to transition to disabled */
10000
10001 /* must transition to offline first */
10002 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10003 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10004 if (ret)
10005 break;
10006 ppd->remote_link_down_reason = 0;
10007 }
10008
10009 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10010 if (ret1 != HCMD_SUCCESS) {
10011 dd_dev_err(dd,
10012 "Failed to transition to Disabled link state, return 0x%x\n",
10013 ret1);
10014 ret = -EINVAL;
10015 break;
10016 }
10017 ppd->host_link_state = HLS_DN_DISABLE;
10018 dc_shutdown(dd);
10019 break;
10020 case HLS_DN_OFFLINE:
10021 if (ppd->host_link_state == HLS_DN_DISABLE)
10022 dc_start(dd);
10023
10024 /* allow any state to transition to offline */
10025 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10026 if (!ret)
10027 ppd->remote_link_down_reason = 0;
10028 break;
10029 case HLS_VERIFY_CAP:
10030 if (ppd->host_link_state != HLS_DN_POLL)
10031 goto unexpected;
10032 ppd->host_link_state = HLS_VERIFY_CAP;
10033 break;
10034 case HLS_GOING_UP:
10035 if (ppd->host_link_state != HLS_VERIFY_CAP)
10036 goto unexpected;
10037
10038 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10039 if (ret1 != HCMD_SUCCESS) {
10040 dd_dev_err(dd,
10041 "Failed to transition to link up state, return 0x%x\n",
10042 ret1);
10043 ret = -EINVAL;
10044 break;
10045 }
10046 ppd->host_link_state = HLS_GOING_UP;
10047 break;
10048
10049 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10050 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10051 default:
10052 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10053 __func__, state);
10054 ret = -EINVAL;
10055 break;
10056 }
10057
10058 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10059 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10060
10061 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10062 ppd->neigh_link_down_reason.sma == 0) {
10063 ppd->local_link_down_reason.sma =
10064 ppd->local_link_down_reason.latest;
10065 ppd->neigh_link_down_reason.sma =
10066 ppd->neigh_link_down_reason.latest;
10067 }
10068
10069 goto done;
10070
10071 unexpected:
10072 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10073 __func__, link_state_name(ppd->host_link_state),
10074 link_state_name(state));
10075 ret = -EINVAL;
10076
10077 done:
10078 mutex_unlock(&ppd->hls_lock);
10079
10080 if (event.device)
10081 ib_dispatch_event(&event);
10082
10083 return ret;
10084 }
10085
10086 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10087 {
10088 u64 reg;
10089 int ret = 0;
10090
10091 switch (which) {
10092 case HFI1_IB_CFG_LIDLMC:
10093 set_lidlmc(ppd);
10094 break;
10095 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10096 /*
10097 * The VL Arbitrator high limit is sent in units of 4k
10098 * bytes, while HFI stores it in units of 64 bytes.
10099 */
10100 val *= 4096/64;
10101 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10102 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10103 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10104 break;
10105 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10106 /* HFI only supports POLL as the default link down state */
10107 if (val != HLS_DN_POLL)
10108 ret = -EINVAL;
10109 break;
10110 case HFI1_IB_CFG_OP_VLS:
10111 if (ppd->vls_operational != val) {
10112 ppd->vls_operational = val;
10113 if (!ppd->port)
10114 ret = -EINVAL;
10115 else
10116 ret = sdma_map_init(
10117 ppd->dd,
10118 ppd->port - 1,
10119 val,
10120 NULL);
10121 }
10122 break;
10123 /*
10124 * For link width, link width downgrade, and speed enable, always AND
10125 * the setting with what is actually supported. This has two benefits.
10126 * First, enabled can't have unsupported values, no matter what the
10127 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10128 * "fill in with your supported value" have all the bits in the
10129 * field set, so simply ANDing with supported has the desired result.
10130 */
10131 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10132 ppd->link_width_enabled = val & ppd->link_width_supported;
10133 break;
10134 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10135 ppd->link_width_downgrade_enabled =
10136 val & ppd->link_width_downgrade_supported;
10137 break;
10138 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10139 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10140 break;
10141 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10142 /*
10143		 * HFI does not follow IB specs; save this value
10144		 * so we can report it if asked.
10145 */
10146 ppd->overrun_threshold = val;
10147 break;
10148 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10149 /*
10150		 * HFI does not follow IB specs; save this value
10151		 * so we can report it if asked.
10152 */
10153 ppd->phy_error_threshold = val;
10154 break;
10155
10156 case HFI1_IB_CFG_MTU:
10157 set_send_length(ppd);
10158 break;
10159
10160 case HFI1_IB_CFG_PKEYS:
10161 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10162 set_partition_keys(ppd);
10163 break;
10164
10165 default:
10166 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10167 dd_dev_info(ppd->dd,
10168 "%s: which %s, val 0x%x: not implemented\n",
10169 __func__, ib_cfg_name(which), val);
10170 break;
10171 }
10172 return ret;
10173 }
10174
10175 /* begin functions related to vl arbitration table caching */
10176 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10177 {
10178 int i;
10179
10180 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10181 VL_ARB_LOW_PRIO_TABLE_SIZE);
10182 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10183 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10184
10185 /*
10186 * Note that we always return values directly from the
10187 * 'vl_arb_cache' (and do no CSR reads) in response to a
10188 * 'Get(VLArbTable)'. This is obviously correct after a
10189 * 'Set(VLArbTable)', since the cache will then be up to
10190 * date. But it's also correct prior to any 'Set(VLArbTable)'
10191	 * since then both the cache and the relevant h/w registers
10192 * will be zeroed.
10193 */
10194
10195 for (i = 0; i < MAX_PRIO_TABLE; i++)
10196 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10197 }
10198
10199 /*
10200 * vl_arb_lock_cache
10201 *
10202 * All other vl_arb_* functions should be called only after locking
10203 * the cache.
10204 */
10205 static inline struct vl_arb_cache *
10206 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10207 {
10208 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10209 return NULL;
10210 spin_lock(&ppd->vl_arb_cache[idx].lock);
10211 return &ppd->vl_arb_cache[idx];
10212 }
10213
10214 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10215 {
10216 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10217 }
10218
10219 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10220 struct ib_vl_weight_elem *vl)
10221 {
10222 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10223 }
10224
10225 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10226 struct ib_vl_weight_elem *vl)
10227 {
10228 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10229 }
10230
10231 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10232 struct ib_vl_weight_elem *vl)
10233 {
10234 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10235 }
10236 /* end functions related to vl arbitration table caching */
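
/*
 * Illustrative use of the cache helpers above (a sketch only, mirroring the
 * pattern used by fm_set_table() later in this file; "new_table" is a
 * hypothetical ib_vl_weight_elem array, not part of the driver):
 *
 *	struct vl_arb_cache *vlc;
 *
 *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	if (vl_arb_match_cache(vlc, new_table)) {
 *		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 *		return 0;	table unchanged, nothing to do
 *	}
 *	vl_arb_set_cache(vlc, new_table);
 *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 *	...then push the new weights to the hardware (e.g. set_vl_weights())
 */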
10237
10238 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10239 u32 size, struct ib_vl_weight_elem *vl)
10240 {
10241 struct hfi1_devdata *dd = ppd->dd;
10242 u64 reg;
10243 unsigned int i, is_up = 0;
10244 int drain, ret = 0;
10245
10246 mutex_lock(&ppd->hls_lock);
10247
10248 if (ppd->host_link_state & HLS_UP)
10249 is_up = 1;
10250
10251 drain = !is_ax(dd) && is_up;
10252
10253 if (drain)
10254 /*
10255 * Before adjusting VL arbitration weights, empty per-VL
10256 * FIFOs, otherwise a packet whose VL weight is being
10257 * set to 0 could get stuck in a FIFO with no chance to
10258 * egress.
10259 */
10260 ret = stop_drain_data_vls(dd);
10261
10262 if (ret) {
10263 dd_dev_err(
10264 dd,
10265 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10266 __func__);
10267 goto err;
10268 }
10269
10270 for (i = 0; i < size; i++, vl++) {
10271 /*
10272 * NOTE: The low priority shift and mask are used here, but
10273 * they are the same for both the low and high registers.
10274 */
10275 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10276 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10277 | (((u64)vl->weight
10278 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10279 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10280 write_csr(dd, target + (i * 8), reg);
10281 }
10282 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10283
10284 if (drain)
10285 open_fill_data_vls(dd); /* reopen all VLs */
10286
10287 err:
10288 mutex_unlock(&ppd->hls_lock);
10289
10290 return ret;
10291 }
10292
10293 /*
10294 * Read one credit merge VL register.
10295 */
10296 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10297 struct vl_limit *vll)
10298 {
10299 u64 reg = read_csr(dd, csr);
10300
10301 vll->dedicated = cpu_to_be16(
10302 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10303 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10304 vll->shared = cpu_to_be16(
10305 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10306 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10307 }
10308
10309 /*
10310 * Read the current credit merge limits.
10311 */
10312 static int get_buffer_control(struct hfi1_devdata *dd,
10313 struct buffer_control *bc, u16 *overall_limit)
10314 {
10315 u64 reg;
10316 int i;
10317
10318 /* not all entries are filled in */
10319 memset(bc, 0, sizeof(*bc));
10320
10321 /* OPA and HFI have a 1-1 mapping */
10322 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10323 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
10324
10325 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10326 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10327
10328 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10329 bc->overall_shared_limit = cpu_to_be16(
10330 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10331 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10332 if (overall_limit)
10333 *overall_limit = (reg
10334 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10335 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10336 return sizeof(struct buffer_control);
10337 }
10338
10339 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10340 {
10341 u64 reg;
10342 int i;
10343
10344 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10345 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10346 for (i = 0; i < sizeof(u64); i++) {
10347 u8 byte = *(((u8 *)&reg) + i);
10348
10349 dp->vlnt[2 * i] = byte & 0xf;
10350 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10351 }
10352
10353 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10354 for (i = 0; i < sizeof(u64); i++) {
10355 u8 byte = *(((u8 *)&reg) + i);
10356
10357 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10358 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10359 }
10360 return sizeof(struct sc2vlnt);
10361 }
10362
10363 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10364 struct ib_vl_weight_elem *vl)
10365 {
10366 unsigned int i;
10367
10368 for (i = 0; i < nelems; i++, vl++) {
10369 vl->vl = 0xf;
10370 vl->weight = 0;
10371 }
10372 }
10373
10374 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10375 {
10376 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10377 DC_SC_VL_VAL(15_0,
10378 0, dp->vlnt[0] & 0xf,
10379 1, dp->vlnt[1] & 0xf,
10380 2, dp->vlnt[2] & 0xf,
10381 3, dp->vlnt[3] & 0xf,
10382 4, dp->vlnt[4] & 0xf,
10383 5, dp->vlnt[5] & 0xf,
10384 6, dp->vlnt[6] & 0xf,
10385 7, dp->vlnt[7] & 0xf,
10386 8, dp->vlnt[8] & 0xf,
10387 9, dp->vlnt[9] & 0xf,
10388 10, dp->vlnt[10] & 0xf,
10389 11, dp->vlnt[11] & 0xf,
10390 12, dp->vlnt[12] & 0xf,
10391 13, dp->vlnt[13] & 0xf,
10392 14, dp->vlnt[14] & 0xf,
10393 15, dp->vlnt[15] & 0xf));
10394 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10395 DC_SC_VL_VAL(31_16,
10396 16, dp->vlnt[16] & 0xf,
10397 17, dp->vlnt[17] & 0xf,
10398 18, dp->vlnt[18] & 0xf,
10399 19, dp->vlnt[19] & 0xf,
10400 20, dp->vlnt[20] & 0xf,
10401 21, dp->vlnt[21] & 0xf,
10402 22, dp->vlnt[22] & 0xf,
10403 23, dp->vlnt[23] & 0xf,
10404 24, dp->vlnt[24] & 0xf,
10405 25, dp->vlnt[25] & 0xf,
10406 26, dp->vlnt[26] & 0xf,
10407 27, dp->vlnt[27] & 0xf,
10408 28, dp->vlnt[28] & 0xf,
10409 29, dp->vlnt[29] & 0xf,
10410 30, dp->vlnt[30] & 0xf,
10411 31, dp->vlnt[31] & 0xf));
10412 }
10413
10414 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10415 u16 limit)
10416 {
10417 if (limit != 0)
10418 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10419 what, (int)limit, idx);
10420 }
10421
10422 /* change only the shared limit portion of SendCmGlobalCredit */
10423 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10424 {
10425 u64 reg;
10426
10427 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10428 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10429 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10430 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10431 }
10432
10433 /* change only the total credit limit portion of SendCmGlobalCredit */
10434 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10435 {
10436 u64 reg;
10437
10438 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10439 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10440 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10441 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10442 }
10443
10444 /* set the given per-VL shared limit */
10445 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10446 {
10447 u64 reg;
10448 u32 addr;
10449
10450 if (vl < TXE_NUM_DATA_VL)
10451 addr = SEND_CM_CREDIT_VL + (8 * vl);
10452 else
10453 addr = SEND_CM_CREDIT_VL15;
10454
10455 reg = read_csr(dd, addr);
10456 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10457 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10458 write_csr(dd, addr, reg);
10459 }
10460
10461 /* set the given per-VL dedicated limit */
10462 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10463 {
10464 u64 reg;
10465 u32 addr;
10466
10467 if (vl < TXE_NUM_DATA_VL)
10468 addr = SEND_CM_CREDIT_VL + (8 * vl);
10469 else
10470 addr = SEND_CM_CREDIT_VL15;
10471
10472 reg = read_csr(dd, addr);
10473 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10474 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10475 write_csr(dd, addr, reg);
10476 }
10477
10478 /* spin until the given per-VL status mask bits clear */
10479 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10480 const char *which)
10481 {
10482 unsigned long timeout;
10483 u64 reg;
10484
10485 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10486 while (1) {
10487 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10488
10489 if (reg == 0)
10490 return; /* success */
10491 if (time_after(jiffies, timeout))
10492 break; /* timed out */
10493 udelay(1);
10494 }
10495
10496 dd_dev_err(dd,
10497 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10498 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10499 /*
10500 * If this occurs, it is likely there was a credit loss on the link.
10501 * The only recovery from that is a link bounce.
10502 */
10503 dd_dev_err(dd,
10504 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10505 }
10506
10507 /*
10508 * The number of credits on the VLs may be changed while everything
10509 * is "live", but the following algorithm must be followed due to
10510 * how the hardware is actually implemented. In particular,
10511 * Return_Credit_Status[] is the only correct status check.
10512 *
10513 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10514 * set Global_Shared_Credit_Limit = 0
10515 * use_all_vl = 1
10516 * mask0 = all VLs that are changing either dedicated or shared limits
10517 * set Shared_Limit[mask0] = 0
10518 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10519 * if (changing any dedicated limit)
10520 * mask1 = all VLs that are lowering dedicated limits
10521 * lower Dedicated_Limit[mask1]
10522 * spin until Return_Credit_Status[mask1] == 0
10523 * raise Dedicated_Limits
10524 * raise Shared_Limits
10525 * raise Global_Shared_Credit_Limit
10526 *
10527 * lower = if the new limit is lower, set the limit to the new value
10528 * raise = if the new limit is higher than the current value (may be changed
10529 * earlier in the algorithm), set the new limit to the new value
10530  *	   earlier in the algorithm), set the limit to the new value
10531 static int set_buffer_control(struct hfi1_devdata *dd,
10532 struct buffer_control *new_bc)
10533 {
10534 u64 changing_mask, ld_mask, stat_mask;
10535 int change_count;
10536 int i, use_all_mask;
10537 int this_shared_changing;
10538 /*
10539	 * A0: the variable any_shared_limit_changing below, and its use in the
10540	 * algorithm above, exist only for A0. If A0 support is removed, it can go.
10541 */
10542 int any_shared_limit_changing;
10543 struct buffer_control cur_bc;
10544 u8 changing[OPA_MAX_VLS];
10545 u8 lowering_dedicated[OPA_MAX_VLS];
10546 u16 cur_total;
10547 u32 new_total = 0;
10548 const u64 all_mask =
10549 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10550 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10551 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10552 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10553 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10554 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10555 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10556 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10557 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10558
10559 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10560 #define NUM_USABLE_VLS 16 /* look at VL15 and less */
10561
10562
10563 /* find the new total credits, do sanity check on unused VLs */
10564 for (i = 0; i < OPA_MAX_VLS; i++) {
10565 if (valid_vl(i)) {
10566 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10567 continue;
10568 }
10569 nonzero_msg(dd, i, "dedicated",
10570 be16_to_cpu(new_bc->vl[i].dedicated));
10571 nonzero_msg(dd, i, "shared",
10572 be16_to_cpu(new_bc->vl[i].shared));
10573 new_bc->vl[i].dedicated = 0;
10574 new_bc->vl[i].shared = 0;
10575 }
10576 new_total += be16_to_cpu(new_bc->overall_shared_limit);
10577
10578 /* fetch the current values */
10579 get_buffer_control(dd, &cur_bc, &cur_total);
10580
10581 /*
10582 * Create the masks we will use.
10583 */
10584 memset(changing, 0, sizeof(changing));
10585 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10586 /* NOTE: Assumes that the individual VL bits are adjacent and in
10587 increasing order */
10588 stat_mask =
10589 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10590 changing_mask = 0;
10591 ld_mask = 0;
10592 change_count = 0;
10593 any_shared_limit_changing = 0;
10594 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10595 if (!valid_vl(i))
10596 continue;
10597 this_shared_changing = new_bc->vl[i].shared
10598 != cur_bc.vl[i].shared;
10599 if (this_shared_changing)
10600 any_shared_limit_changing = 1;
10601 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
10602 || this_shared_changing) {
10603 changing[i] = 1;
10604 changing_mask |= stat_mask;
10605 change_count++;
10606 }
10607 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10608 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10609 lowering_dedicated[i] = 1;
10610 ld_mask |= stat_mask;
10611 }
10612 }
10613
10614 /* bracket the credit change with a total adjustment */
10615 if (new_total > cur_total)
10616 set_global_limit(dd, new_total);
10617
10618 /*
10619 * Start the credit change algorithm.
10620 */
10621 use_all_mask = 0;
10622 if ((be16_to_cpu(new_bc->overall_shared_limit) <
10623 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10624 (is_ax(dd) && any_shared_limit_changing)) {
10625 set_global_shared(dd, 0);
10626 cur_bc.overall_shared_limit = 0;
10627 use_all_mask = 1;
10628 }
10629
10630 for (i = 0; i < NUM_USABLE_VLS; i++) {
10631 if (!valid_vl(i))
10632 continue;
10633
10634 if (changing[i]) {
10635 set_vl_shared(dd, i, 0);
10636 cur_bc.vl[i].shared = 0;
10637 }
10638 }
10639
10640 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10641 "shared");
10642
10643 if (change_count > 0) {
10644 for (i = 0; i < NUM_USABLE_VLS; i++) {
10645 if (!valid_vl(i))
10646 continue;
10647
10648 if (lowering_dedicated[i]) {
10649 set_vl_dedicated(dd, i,
10650 be16_to_cpu(new_bc->vl[i].dedicated));
10651 cur_bc.vl[i].dedicated =
10652 new_bc->vl[i].dedicated;
10653 }
10654 }
10655
10656 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10657
10658 /* now raise all dedicated that are going up */
10659 for (i = 0; i < NUM_USABLE_VLS; i++) {
10660 if (!valid_vl(i))
10661 continue;
10662
10663 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10664 be16_to_cpu(cur_bc.vl[i].dedicated))
10665 set_vl_dedicated(dd, i,
10666 be16_to_cpu(new_bc->vl[i].dedicated));
10667 }
10668 }
10669
10670 /* next raise all shared that are going up */
10671 for (i = 0; i < NUM_USABLE_VLS; i++) {
10672 if (!valid_vl(i))
10673 continue;
10674
10675 if (be16_to_cpu(new_bc->vl[i].shared) >
10676 be16_to_cpu(cur_bc.vl[i].shared))
10677 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10678 }
10679
10680 /* finally raise the global shared */
10681 if (be16_to_cpu(new_bc->overall_shared_limit) >
10682 be16_to_cpu(cur_bc.overall_shared_limit))
10683 set_global_shared(dd,
10684 be16_to_cpu(new_bc->overall_shared_limit));
10685
10686 /* bracket the credit change with a total adjustment */
10687 if (new_total < cur_total)
10688 set_global_limit(dd, new_total);
10689 return 0;
10690 }
10691
10692 /*
10693 * Read the given fabric manager table. Return the size of the
10694 * table (in bytes) on success, and a negative error code on
10695 * failure.
10696 */
10697 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10699 {
10700 int size;
10701 struct vl_arb_cache *vlc;
10702
10703 switch (which) {
10704 case FM_TBL_VL_HIGH_ARB:
10705 size = 256;
10706 /*
10707 * OPA specifies 128 elements (of 2 bytes each), though
10708 * HFI supports only 16 elements in h/w.
10709 */
10710 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10711 vl_arb_get_cache(vlc, t);
10712 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10713 break;
10714 case FM_TBL_VL_LOW_ARB:
10715 size = 256;
10716 /*
10717 * OPA specifies 128 elements (of 2 bytes each), though
10718 * HFI supports only 16 elements in h/w.
10719 */
10720 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10721 vl_arb_get_cache(vlc, t);
10722 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10723 break;
10724 case FM_TBL_BUFFER_CONTROL:
10725 size = get_buffer_control(ppd->dd, t, NULL);
10726 break;
10727 case FM_TBL_SC2VLNT:
10728 size = get_sc2vlnt(ppd->dd, t);
10729 break;
10730 case FM_TBL_VL_PREEMPT_ELEMS:
10731 size = 256;
10732 /* OPA specifies 128 elements, of 2 bytes each */
10733 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10734 break;
10735 case FM_TBL_VL_PREEMPT_MATRIX:
10736 size = 256;
10737 /*
10738 * OPA specifies that this is the same size as the VL
10739 * arbitration tables (i.e., 256 bytes).
10740 */
10741 break;
10742 default:
10743 return -EINVAL;
10744 }
10745 return size;
10746 }
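
/*
 * Illustrative call of fm_get_table() (hypothetical caller, sketch only):
 * read the high priority VL arbitration table into a caller-supplied buffer
 * of at least 256 bytes, as implied by the sizes returned above.
 *
 *	u8 buf[256];
 *	int len;
 *
 *	len = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, buf);
 *	if (len < 0)
 *		return len;	a negative value means an unknown table selector
 */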
10747
10748 /*
10749 * Write the given fabric manager table.
10750 */
10751 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10752 {
10753 int ret = 0;
10754 struct vl_arb_cache *vlc;
10755
10756 switch (which) {
10757 case FM_TBL_VL_HIGH_ARB:
10758 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10759 if (vl_arb_match_cache(vlc, t)) {
10760 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10761 break;
10762 }
10763 vl_arb_set_cache(vlc, t);
10764 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10765 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10766 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10767 break;
10768 case FM_TBL_VL_LOW_ARB:
10769 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10770 if (vl_arb_match_cache(vlc, t)) {
10771 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10772 break;
10773 }
10774 vl_arb_set_cache(vlc, t);
10775 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10776 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10777 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10778 break;
10779 case FM_TBL_BUFFER_CONTROL:
10780 ret = set_buffer_control(ppd->dd, t);
10781 break;
10782 case FM_TBL_SC2VLNT:
10783 set_sc2vlnt(ppd->dd, t);
10784 break;
10785 default:
10786 ret = -EINVAL;
10787 }
10788 return ret;
10789 }
10790
10791 /*
10792 * Disable all data VLs.
10793 *
10794 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10795 */
10796 static int disable_data_vls(struct hfi1_devdata *dd)
10797 {
10798 if (is_ax(dd))
10799 return 1;
10800
10801 pio_send_control(dd, PSC_DATA_VL_DISABLE);
10802
10803 return 0;
10804 }
10805
10806 /*
10807 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
10808 * Just re-enables all data VLs (the "fill" part happens
10809 * automatically - the name was chosen for symmetry with
10810 * stop_drain_data_vls()).
10811 *
10812 * Return 0 if successful, non-zero if the VLs cannot be enabled.
10813 */
10814 int open_fill_data_vls(struct hfi1_devdata *dd)
10815 {
10816 if (is_ax(dd))
10817 return 1;
10818
10819 pio_send_control(dd, PSC_DATA_VL_ENABLE);
10820
10821 return 0;
10822 }
10823
10824 /*
10825 * drain_data_vls() - assumes that disable_data_vls() has been called,
10826  * then waits for the occupancy (of per-VL FIFOs) of all contexts and
10827  * SDMA engines to drop to 0.
10828 */
10829 static void drain_data_vls(struct hfi1_devdata *dd)
10830 {
10831 sc_wait(dd);
10832 sdma_wait(dd);
10833 pause_for_credit_return(dd);
10834 }
10835
10836 /*
10837 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
10838 *
10839 * Use open_fill_data_vls() to resume using data VLs. This pair is
10840 * meant to be used like this:
10841 *
10842 * stop_drain_data_vls(dd);
10843 * // do things with per-VL resources
10844 * open_fill_data_vls(dd);
10845 */
10846 int stop_drain_data_vls(struct hfi1_devdata *dd)
10847 {
10848 int ret;
10849
10850 ret = disable_data_vls(dd);
10851 if (ret == 0)
10852 drain_data_vls(dd);
10853
10854 return ret;
10855 }
10856
10857 /*
10858 * Convert a nanosecond time to a cclock count. No matter how slow
10859 * the cclock, a non-zero ns will always have a non-zero result.
10860 */
10861 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
10862 {
10863 u32 cclocks;
10864
10865 if (dd->icode == ICODE_FPGA_EMULATION)
10866 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
10867 else /* simulation pretends to be ASIC */
10868 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
10869 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
10870 cclocks = 1;
10871 return cclocks;
10872 }
10873
10874 /*
10875  * Convert a cclock count to nanoseconds. No matter how slow
10876  * the cclock, a non-zero cclock count will always have a non-zero result.
10877 */
10878 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
10879 {
10880 u32 ns;
10881
10882 if (dd->icode == ICODE_FPGA_EMULATION)
10883 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
10884 else /* simulation pretends to be ASIC */
10885 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
10886 if (cclocks && !ns)
10887 ns = 1;
10888 return ns;
10889 }
10890
10891 /*
10892 * Dynamically adjust the receive interrupt timeout for a context based on
10893 * incoming packet rate.
10894 *
10895 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
10896 */
10897 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
10898 {
10899 struct hfi1_devdata *dd = rcd->dd;
10900 u32 timeout = rcd->rcvavail_timeout;
10901
10902 /*
10903 * This algorithm doubles or halves the timeout depending on whether
10904	 * the number of packets received in this interrupt was less than or
10905	 * greater than or equal to the interrupt count.
10906	 *
10907	 * The calculations below do not allow a steady state to be achieved.
10908	 * Only at the endpoints is it possible to have an unchanging
10909 * timeout.
10910 */
10911 if (npkts < rcv_intr_count) {
10912 /*
10913 * Not enough packets arrived before the timeout, adjust
10914 * timeout downward.
10915 */
10916 if (timeout < 2) /* already at minimum? */
10917 return;
10918 timeout >>= 1;
10919 } else {
10920 /*
10921 * More than enough packets arrived before the timeout, adjust
10922 * timeout upward.
10923 */
10924 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
10925 return;
10926 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
10927 }
10928
10929 rcd->rcvavail_timeout = timeout;
10930 /* timeout cannot be larger than rcv_intr_timeout_csr which has already
10931 been verified to be in range */
10932 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
10933 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
10934 }
10935
10936 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
10937 u32 intr_adjust, u32 npkts)
10938 {
10939 struct hfi1_devdata *dd = rcd->dd;
10940 u64 reg;
10941 u32 ctxt = rcd->ctxt;
10942
10943 /*
10944 * Need to write timeout register before updating RcvHdrHead to ensure
10945 * that a new value is used when the HW decides to restart counting.
10946 */
10947 if (intr_adjust)
10948 adjust_rcv_timeout(rcd, npkts);
10949 if (updegr) {
10950 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
10951 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
10952 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
10953 }
10954 mmiowb();
10955 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
10956 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
10957 << RCV_HDR_HEAD_HEAD_SHIFT);
10958 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
10959 mmiowb();
10960 }
10961
10962 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
10963 {
10964 u32 head, tail;
10965
10966 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
10967 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
10968
10969 if (rcd->rcvhdrtail_kvaddr)
10970 tail = get_rcvhdrtail(rcd);
10971 else
10972 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
10973
10974 return head == tail;
10975 }
10976
10977 /*
10978 * Context Control and Receive Array encoding for buffer size:
10979 * 0x0 invalid
10980 * 0x1 4 KB
10981 * 0x2 8 KB
10982 * 0x3 16 KB
10983 * 0x4 32 KB
10984 * 0x5 64 KB
10985 * 0x6 128 KB
10986 * 0x7 256 KB
10987 * 0x8 512 KB (Receive Array only)
10988 * 0x9 1 MB (Receive Array only)
10989 * 0xa 2 MB (Receive Array only)
10990 *
10991 * 0xB-0xF - reserved (Receive Array only)
10992 *
10993 *
10994 * This routine assumes that the value has already been sanity checked.
10995 */
10996 static u32 encoded_size(u32 size)
10997 {
10998 switch (size) {
10999 case 4*1024: return 0x1;
11000 case 8*1024: return 0x2;
11001 case 16*1024: return 0x3;
11002 case 32*1024: return 0x4;
11003 case 64*1024: return 0x5;
11004 case 128*1024: return 0x6;
11005 case 256*1024: return 0x7;
11006 case 512*1024: return 0x8;
11007 case 1*1024*1024: return 0x9;
11008 case 2*1024*1024: return 0xa;
11009 }
11010 return 0x1; /* if invalid, go with the minimum size */
11011 }
11012
11013 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11014 {
11015 struct hfi1_ctxtdata *rcd;
11016 u64 rcvctrl, reg;
11017 int did_enable = 0;
11018
11019 rcd = dd->rcd[ctxt];
11020 if (!rcd)
11021 return;
11022
11023 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11024
11025 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11026	/* if the context is already enabled, don't do the extra steps */
11027 if ((op & HFI1_RCVCTRL_CTXT_ENB)
11028 && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11029 /* reset the tail and hdr addresses, and sequence count */
11030 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11031 rcd->rcvhdrq_phys);
11032 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11033 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11034 rcd->rcvhdrqtailaddr_phys);
11035 rcd->seq_cnt = 1;
11036
11037 /* reset the cached receive header queue head value */
11038 rcd->head = 0;
11039
11040 /*
11041 * Zero the receive header queue so we don't get false
11042 * positives when checking the sequence number. The
11043 * sequence numbers could land exactly on the same spot.
11044		 * E.g. an rcd restart before the receive header queue wrapped.
11045 */
11046 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11047
11048 /* starting timeout */
11049 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11050
11051 /* enable the context */
11052 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11053
11054 /* clean the egr buffer size first */
11055 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11056 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11057 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11058 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11059
11060 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11061 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11062 did_enable = 1;
11063
11064 /* zero RcvEgrIndexHead */
11065 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11066
11067 /* set eager count and base index */
11068 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11069 & RCV_EGR_CTRL_EGR_CNT_MASK)
11070 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11071 (((rcd->eager_base >> RCV_SHIFT)
11072 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11073 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11074 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11075
11076 /*
11077 * Set TID (expected) count and base index.
11078 * rcd->expected_count is set to individual RcvArray entries,
11079 * not pairs, and the CSR takes a pair-count in groups of
11080 * four, so divide by 8.
11081 */
11082 reg = (((rcd->expected_count >> RCV_SHIFT)
11083 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11084 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11085 (((rcd->expected_base >> RCV_SHIFT)
11086 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11087 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11088 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11089 if (ctxt == HFI1_CTRL_CTXT)
11090 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11091 }
11092 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11093 write_csr(dd, RCV_VL15, 0);
11094 /*
11095		 * When the receive context is being disabled, turn on tail
11096		 * update with a dummy tail address and then disable the
11097		 * receive context.
11098 */
11099 if (dd->rcvhdrtail_dummy_physaddr) {
11100 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11101 dd->rcvhdrtail_dummy_physaddr);
11102 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11103 }
11104
11105 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11106 }
11107 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11108 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11109 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11110 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11111 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11112 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11113 if (op & HFI1_RCVCTRL_TAILUPD_DIS)
11114 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11115 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11116 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11117 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11118 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11119 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11120 /* In one-packet-per-eager mode, the size comes from
11121 the RcvArray entry. */
11122 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11123 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11124 }
11125 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11126 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11127 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11128 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11129 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11130 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11131 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11132 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11133 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11134 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11135 rcd->rcvctrl = rcvctrl;
11136 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11137 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11138
11139 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11140 if (did_enable
11141 && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11142 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11143 if (reg != 0) {
11144 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11145 ctxt, reg);
11146 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11147 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11148 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11149 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11150 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11151 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11152 ctxt, reg, reg == 0 ? "not" : "still");
11153 }
11154 }
11155
11156 if (did_enable) {
11157 /*
11158 * The interrupt timeout and count must be set after
11159 * the context is enabled to take effect.
11160 */
11161 /* set interrupt timeout */
11162 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11163 (u64)rcd->rcvavail_timeout <<
11164 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11165
11166 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11167 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11168 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11169 }
11170
11171 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11172 /*
11173 * If the context has been disabled and the Tail Update has
11174		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
11175		 * address so it doesn't contain an invalid address.
11176 */
11177 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11178 dd->rcvhdrtail_dummy_physaddr);
11179 }
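
/*
 * Illustrative use of hfi1_rcvctrl() (sketch only; the exact op combination
 * is hypothetical and depends on the caller): enable a receive context with
 * receive-available interrupts and DMA'd tail updates, and later tear it
 * down again.
 *
 *	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB |
 *			 HFI1_RCVCTRL_TAILUPD_ENB, ctxt);
 *	...
 *	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
 *		     ctxt);
 */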
11180
11181 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11182 u64 **cntrp)
11183 {
11184 int ret;
11185 u64 val = 0;
11186
11187 if (namep) {
11188 ret = dd->cntrnameslen;
11189 if (pos != 0) {
11190 dd_dev_err(dd, "read_cntrs does not support indexing");
11191 return 0;
11192 }
11193 *namep = dd->cntrnames;
11194 } else {
11195 const struct cntr_entry *entry;
11196 int i, j;
11197
11198 ret = (dd->ndevcntrs) * sizeof(u64);
11199 if (pos != 0) {
11200 dd_dev_err(dd, "read_cntrs does not support indexing");
11201 return 0;
11202 }
11203
11204 /* Get the start of the block of counters */
11205 *cntrp = dd->cntrs;
11206
11207 /*
11208 * Now go and fill in each counter in the block.
11209 */
11210 for (i = 0; i < DEV_CNTR_LAST; i++) {
11211 entry = &dev_cntrs[i];
11212 hfi1_cdbg(CNTR, "reading %s", entry->name);
11213 if (entry->flags & CNTR_DISABLED) {
11214 /* Nothing */
11215 hfi1_cdbg(CNTR, "\tDisabled\n");
11216 } else {
11217 if (entry->flags & CNTR_VL) {
11218 hfi1_cdbg(CNTR, "\tPer VL\n");
11219 for (j = 0; j < C_VL_COUNT; j++) {
11220 val = entry->rw_cntr(entry,
11221 dd, j,
11222 CNTR_MODE_R,
11223 0);
11224 hfi1_cdbg(
11225 CNTR,
11226 "\t\tRead 0x%llx for %d\n",
11227 val, j);
11228 dd->cntrs[entry->offset + j] =
11229 val;
11230 }
11231 } else if (entry->flags & CNTR_SDMA) {
11232 hfi1_cdbg(CNTR,
11233 "\t Per SDMA Engine\n");
11234 for (j = 0; j < dd->chip_sdma_engines;
11235 j++) {
11236 val =
11237 entry->rw_cntr(entry, dd, j,
11238 CNTR_MODE_R, 0);
11239 hfi1_cdbg(CNTR,
11240 "\t\tRead 0x%llx for %d\n",
11241 val, j);
11242 dd->cntrs[entry->offset + j] =
11243 val;
11244 }
11245 } else {
11246 val = entry->rw_cntr(entry, dd,
11247 CNTR_INVALID_VL,
11248 CNTR_MODE_R, 0);
11249 dd->cntrs[entry->offset] = val;
11250 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11251 }
11252 }
11253 }
11254 }
11255 return ret;
11256 }
11257
11258 /*
11259 * Used by sysfs to create files for hfi stats to read
11260 */
11261 u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11262 char **namep, u64 **cntrp)
11263 {
11264 int ret;
11265 u64 val = 0;
11266
11267 if (namep) {
11268 ret = dd->portcntrnameslen;
11269 if (pos != 0) {
11270 dd_dev_err(dd, "index not supported");
11271 return 0;
11272 }
11273 *namep = dd->portcntrnames;
11274 } else {
11275 const struct cntr_entry *entry;
11276 struct hfi1_pportdata *ppd;
11277 int i, j;
11278
11279 ret = (dd->nportcntrs) * sizeof(u64);
11280 if (pos != 0) {
11281 dd_dev_err(dd, "indexing not supported");
11282 return 0;
11283 }
11284 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11285 *cntrp = ppd->cntrs;
11286
11287 for (i = 0; i < PORT_CNTR_LAST; i++) {
11288 entry = &port_cntrs[i];
11289 hfi1_cdbg(CNTR, "reading %s", entry->name);
11290 if (entry->flags & CNTR_DISABLED) {
11291 /* Nothing */
11292 hfi1_cdbg(CNTR, "\tDisabled\n");
11293 continue;
11294 }
11295
11296 if (entry->flags & CNTR_VL) {
11297 hfi1_cdbg(CNTR, "\tPer VL");
11298 for (j = 0; j < C_VL_COUNT; j++) {
11299 val = entry->rw_cntr(entry, ppd, j,
11300 CNTR_MODE_R,
11301 0);
11302 hfi1_cdbg(
11303 CNTR,
11304 "\t\tRead 0x%llx for %d",
11305 val, j);
11306 ppd->cntrs[entry->offset + j] = val;
11307 }
11308 } else {
11309 val = entry->rw_cntr(entry, ppd,
11310 CNTR_INVALID_VL,
11311 CNTR_MODE_R,
11312 0);
11313 ppd->cntrs[entry->offset] = val;
11314 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11315 }
11316 }
11317 }
11318 return ret;
11319 }
11320
11321 static void free_cntrs(struct hfi1_devdata *dd)
11322 {
11323 struct hfi1_pportdata *ppd;
11324 int i;
11325
11326 if (dd->synth_stats_timer.data)
11327 del_timer_sync(&dd->synth_stats_timer);
11328 dd->synth_stats_timer.data = 0;
11329 ppd = (struct hfi1_pportdata *)(dd + 1);
11330 for (i = 0; i < dd->num_pports; i++, ppd++) {
11331 kfree(ppd->cntrs);
11332 kfree(ppd->scntrs);
11333 free_percpu(ppd->ibport_data.rvp.rc_acks);
11334 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11335 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11336 ppd->cntrs = NULL;
11337 ppd->scntrs = NULL;
11338 ppd->ibport_data.rvp.rc_acks = NULL;
11339 ppd->ibport_data.rvp.rc_qacks = NULL;
11340 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11341 }
11342 kfree(dd->portcntrnames);
11343 dd->portcntrnames = NULL;
11344 kfree(dd->cntrs);
11345 dd->cntrs = NULL;
11346 kfree(dd->scntrs);
11347 dd->scntrs = NULL;
11348 kfree(dd->cntrnames);
11349 dd->cntrnames = NULL;
11350 }
11351
11352 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11353 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11354
11355 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11356 u64 *psval, void *context, int vl)
11357 {
11358 u64 val;
11359 u64 sval = *psval;
11360
11361 if (entry->flags & CNTR_DISABLED) {
11362 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11363 return 0;
11364 }
11365
11366 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11367
11368 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11369
11370	/* If it's a synthetic counter there is more work we need to do */
11371 if (entry->flags & CNTR_SYNTH) {
11372 if (sval == CNTR_MAX) {
11373 /* No need to read already saturated */
11374 return CNTR_MAX;
11375 }
11376
11377 if (entry->flags & CNTR_32BIT) {
11378 /* 32bit counters can wrap multiple times */
11379 u64 upper = sval >> 32;
11380 u64 lower = (sval << 32) >> 32;
11381
11382 if (lower > val) { /* hw wrapped */
11383 if (upper == CNTR_32BIT_MAX)
11384 val = CNTR_MAX;
11385 else
11386 upper++;
11387 }
11388
11389 if (val != CNTR_MAX)
11390 val = (upper << 32) | val;
11391
11392 } else {
11393 /* If we rolled we are saturated */
11394 if ((val < sval) || (val > CNTR_MAX))
11395 val = CNTR_MAX;
11396 }
11397 }
11398
11399 *psval = val;
11400
11401 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11402
11403 return val;
11404 }
11405
11406 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11407 struct cntr_entry *entry,
11408 u64 *psval, void *context, int vl, u64 data)
11409 {
11410 u64 val;
11411
11412 if (entry->flags & CNTR_DISABLED) {
11413 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11414 return 0;
11415 }
11416
11417 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11418
11419 if (entry->flags & CNTR_SYNTH) {
11420 *psval = data;
11421 if (entry->flags & CNTR_32BIT) {
11422 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11423 (data << 32) >> 32);
11424 val = data; /* return the full 64bit value */
11425 } else {
11426 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11427 data);
11428 }
11429 } else {
11430 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11431 }
11432
11433 *psval = val;
11434
11435 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11436
11437 return val;
11438 }
11439
11440 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11441 {
11442 struct cntr_entry *entry;
11443 u64 *sval;
11444
11445 entry = &dev_cntrs[index];
11446 sval = dd->scntrs + entry->offset;
11447
11448 if (vl != CNTR_INVALID_VL)
11449 sval += vl;
11450
11451 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11452 }
11453
11454 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11455 {
11456 struct cntr_entry *entry;
11457 u64 *sval;
11458
11459 entry = &dev_cntrs[index];
11460 sval = dd->scntrs + entry->offset;
11461
11462 if (vl != CNTR_INVALID_VL)
11463 sval += vl;
11464
11465 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11466 }
11467
11468 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11469 {
11470 struct cntr_entry *entry;
11471 u64 *sval;
11472
11473 entry = &port_cntrs[index];
11474 sval = ppd->scntrs + entry->offset;
11475
11476 if (vl != CNTR_INVALID_VL)
11477 sval += vl;
11478
11479 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11480 (index <= C_RCV_HDR_OVF_LAST)) {
11481 /* We do not want to bother for disabled contexts */
11482 return 0;
11483 }
11484
11485 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11486 }
11487
11488 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11489 {
11490 struct cntr_entry *entry;
11491 u64 *sval;
11492
11493 entry = &port_cntrs[index];
11494 sval = ppd->scntrs + entry->offset;
11495
11496 if (vl != CNTR_INVALID_VL)
11497 sval += vl;
11498
11499 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11500 (index <= C_RCV_HDR_OVF_LAST)) {
11501 /* We do not want to bother for disabled contexts */
11502 return 0;
11503 }
11504
11505 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11506 }
11507
11508 static void update_synth_timer(unsigned long opaque)
11509 {
11510 u64 cur_tx;
11511 u64 cur_rx;
11512 u64 total_flits;
11513 u8 update = 0;
11514 int i, j, vl;
11515 struct hfi1_pportdata *ppd;
11516 struct cntr_entry *entry;
11517
11518 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11519
11520 /*
11521	 * Rather than keep beating on the CSRs, pick a minimal set that we can
11522	 * check to watch for potential rollover. We can do this by looking at
11523	 * the number of flits sent/received. If the total flits exceeds 32 bits
11524	 * then we have to iterate all the counters and update.
11525 */
11526 entry = &dev_cntrs[C_DC_RCV_FLITS];
11527 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11528
11529 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11530 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11531
11532 hfi1_cdbg(
11533 CNTR,
11534 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11535 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11536
11537 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11538 /*
11539 * May not be strictly necessary to update but it won't hurt and
11540 * simplifies the logic here.
11541 */
11542 update = 1;
11543 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11544 dd->unit);
11545 } else {
11546 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11547 hfi1_cdbg(CNTR,
11548 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11549 total_flits, (u64)CNTR_32BIT_MAX);
11550 if (total_flits >= CNTR_32BIT_MAX) {
11551 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11552 dd->unit);
11553 update = 1;
11554 }
11555 }
11556
11557 if (update) {
11558 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11559 for (i = 0; i < DEV_CNTR_LAST; i++) {
11560 entry = &dev_cntrs[i];
11561 if (entry->flags & CNTR_VL) {
11562 for (vl = 0; vl < C_VL_COUNT; vl++)
11563 read_dev_cntr(dd, i, vl);
11564 } else {
11565 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11566 }
11567 }
11568 ppd = (struct hfi1_pportdata *)(dd + 1);
11569 for (i = 0; i < dd->num_pports; i++, ppd++) {
11570 for (j = 0; j < PORT_CNTR_LAST; j++) {
11571 entry = &port_cntrs[j];
11572 if (entry->flags & CNTR_VL) {
11573 for (vl = 0; vl < C_VL_COUNT; vl++)
11574 read_port_cntr(ppd, j, vl);
11575 } else {
11576 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11577 }
11578 }
11579 }
11580
11581 /*
11582 * We want the value in the register. The goal is to keep track
11583 * of the number of "ticks" not the counter value. In other
11584 * words if the register rolls we want to notice it and go ahead
11585 * and force an update.
11586 */
11587 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11588 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11589 CNTR_MODE_R, 0);
11590
11591 entry = &dev_cntrs[C_DC_RCV_FLITS];
11592 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11593 CNTR_MODE_R, 0);
11594
11595 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11596 dd->unit, dd->last_tx, dd->last_rx);
11597
11598 } else {
11599 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11600 }
11601
11602 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11603 }
11604
11605 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
11606 static int init_cntrs(struct hfi1_devdata *dd)
11607 {
11608 int i, rcv_ctxts, j;
11609 size_t sz;
11610 char *p;
11611 char name[C_MAX_NAME];
11612 struct hfi1_pportdata *ppd;
11613
11614 /* set up the stats timer; the add_timer is done at the end */
11615 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11616 (unsigned long)dd);
11617
11618 /***********************/
11619 /* per device counters */
11620 /***********************/
11621
11622	/* size names and determine how many we have */
11623 dd->ndevcntrs = 0;
11624 sz = 0;
11625
11626 for (i = 0; i < DEV_CNTR_LAST; i++) {
11627 hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
11628 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11629 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11630 continue;
11631 }
11632
11633 if (dev_cntrs[i].flags & CNTR_VL) {
11634 hfi1_dbg_early("\tProcessing VL cntr\n");
11635 dev_cntrs[i].offset = dd->ndevcntrs;
11636 for (j = 0; j < C_VL_COUNT; j++) {
11637 memset(name, '\0', C_MAX_NAME);
11638 snprintf(name, C_MAX_NAME, "%s%d",
11639 dev_cntrs[i].name,
11640 vl_from_idx(j));
11641 sz += strlen(name);
11642 sz++;
11643 hfi1_dbg_early("\t\t%s\n", name);
11644 dd->ndevcntrs++;
11645 }
11646 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11647 hfi1_dbg_early(
11648				"\tProcessing per SDE counters chip engines %u\n",
11649 dd->chip_sdma_engines);
11650 dev_cntrs[i].offset = dd->ndevcntrs;
11651 for (j = 0; j < dd->chip_sdma_engines; j++) {
11652 memset(name, '\0', C_MAX_NAME);
11653 snprintf(name, C_MAX_NAME, "%s%d",
11654 dev_cntrs[i].name, j);
11655 sz += strlen(name);
11656 sz++;
11657 hfi1_dbg_early("\t\t%s\n", name);
11658 dd->ndevcntrs++;
11659 }
11660 } else {
11661 /* +1 for newline */
11662 sz += strlen(dev_cntrs[i].name) + 1;
11663 dev_cntrs[i].offset = dd->ndevcntrs;
11664 dd->ndevcntrs++;
11665 hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
11666 }
11667 }
11668
11669 /* allocate space for the counter values */
11670 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11671 if (!dd->cntrs)
11672 goto bail;
11673
11674 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11675 if (!dd->scntrs)
11676 goto bail;
11677
11678
11679 /* allocate space for the counter names */
11680 dd->cntrnameslen = sz;
11681 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11682 if (!dd->cntrnames)
11683 goto bail;
11684
11685 /* fill in the names */
11686 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
11687 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11688 /* Nothing */
11689 } else {
11690 if (dev_cntrs[i].flags & CNTR_VL) {
11691 for (j = 0; j < C_VL_COUNT; j++) {
11692 memset(name, '\0', C_MAX_NAME);
11693 snprintf(name, C_MAX_NAME, "%s%d",
11694 dev_cntrs[i].name,
11695 vl_from_idx(j));
11696 memcpy(p, name, strlen(name));
11697 p += strlen(name);
11698 *p++ = '\n';
11699 }
11700 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11701 for (j = 0; j < TXE_NUM_SDMA_ENGINES;
11702 j++) {
11703 memset(name, '\0', C_MAX_NAME);
11704 snprintf(name, C_MAX_NAME, "%s%d",
11705 dev_cntrs[i].name, j);
11706 memcpy(p, name, strlen(name));
11707 p += strlen(name);
11708 *p++ = '\n';
11709 }
11710 } else {
11711 memcpy(p, dev_cntrs[i].name,
11712 strlen(dev_cntrs[i].name));
11713 p += strlen(dev_cntrs[i].name);
11714 *p++ = '\n';
11715 }
11716 }
11717 }
11718
11719 /*********************/
11720 /* per port counters */
11721 /*********************/
11722
11723 /*
11724 * Go through the counters for the overflows and disable the ones we
11725 * don't need. This varies based on platform so we need to do it
11726 * dynamically here.
11727 */
11728 rcv_ctxts = dd->num_rcv_contexts;
11729 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11730 i <= C_RCV_HDR_OVF_LAST; i++) {
11731 port_cntrs[i].flags |= CNTR_DISABLED;
11732 }
11733
11734	/* size port counter names and determine how many we have */
11735 sz = 0;
11736 dd->nportcntrs = 0;
11737 for (i = 0; i < PORT_CNTR_LAST; i++) {
11738 hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
11739 if (port_cntrs[i].flags & CNTR_DISABLED) {
11740 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11741 continue;
11742 }
11743
11744 if (port_cntrs[i].flags & CNTR_VL) {
11745 hfi1_dbg_early("\tProcessing VL cntr\n");
11746 port_cntrs[i].offset = dd->nportcntrs;
11747 for (j = 0; j < C_VL_COUNT; j++) {
11748 memset(name, '\0', C_MAX_NAME);
11749 snprintf(name, C_MAX_NAME, "%s%d",
11750 port_cntrs[i].name,
11751 vl_from_idx(j));
11752 sz += strlen(name);
11753 sz++;
11754 hfi1_dbg_early("\t\t%s\n", name);
11755 dd->nportcntrs++;
11756 }
11757 } else {
11758 /* +1 for newline */
11759 sz += strlen(port_cntrs[i].name) + 1;
11760 port_cntrs[i].offset = dd->nportcntrs;
11761 dd->nportcntrs++;
11762 hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
11763 }
11764 }
11765
11766 /* allocate space for the counter names */
11767 dd->portcntrnameslen = sz;
11768 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11769 if (!dd->portcntrnames)
11770 goto bail;
11771
11772 /* fill in port cntr names */
11773 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11774 if (port_cntrs[i].flags & CNTR_DISABLED)
11775 continue;
11776
11777 if (port_cntrs[i].flags & CNTR_VL) {
11778 for (j = 0; j < C_VL_COUNT; j++) {
11779 memset(name, '\0', C_MAX_NAME);
11780 snprintf(name, C_MAX_NAME, "%s%d",
11781 port_cntrs[i].name,
11782 vl_from_idx(j));
11783 memcpy(p, name, strlen(name));
11784 p += strlen(name);
11785 *p++ = '\n';
11786 }
11787 } else {
11788 memcpy(p, port_cntrs[i].name,
11789 strlen(port_cntrs[i].name));
11790 p += strlen(port_cntrs[i].name);
11791 *p++ = '\n';
11792 }
11793 }
11794
11795 /* allocate per port storage for counter values */
11796 ppd = (struct hfi1_pportdata *)(dd + 1);
11797 for (i = 0; i < dd->num_pports; i++, ppd++) {
11798 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11799 if (!ppd->cntrs)
11800 goto bail;
11801
11802 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11803 if (!ppd->scntrs)
11804 goto bail;
11805 }
11806
11807 /* CPU counters need to be allocated and zeroed */
11808 if (init_cpu_counters(dd))
11809 goto bail;
11810
11811 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11812 return 0;
11813 bail:
11814 free_cntrs(dd);
11815 return -ENOMEM;
11816 }
11817
11818
11819 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
11820 {
11821 switch (chip_lstate) {
11822 default:
11823 dd_dev_err(dd,
11824 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
11825 chip_lstate);
11826 /* fall through */
11827 case LSTATE_DOWN:
11828 return IB_PORT_DOWN;
11829 case LSTATE_INIT:
11830 return IB_PORT_INIT;
11831 case LSTATE_ARMED:
11832 return IB_PORT_ARMED;
11833 case LSTATE_ACTIVE:
11834 return IB_PORT_ACTIVE;
11835 }
11836 }
11837
11838 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
11839 {
11840 /* look at the HFI meta-states only */
11841 switch (chip_pstate & 0xf0) {
11842 default:
11843 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
11844 chip_pstate);
11845 /* fall through */
11846 case PLS_DISABLED:
11847 return IB_PORTPHYSSTATE_DISABLED;
11848 case PLS_OFFLINE:
11849 return OPA_PORTPHYSSTATE_OFFLINE;
11850 case PLS_POLLING:
11851 return IB_PORTPHYSSTATE_POLLING;
11852 case PLS_CONFIGPHY:
11853 return IB_PORTPHYSSTATE_TRAINING;
11854 case PLS_LINKUP:
11855 return IB_PORTPHYSSTATE_LINKUP;
11856 case PLS_PHYTEST:
11857 return IB_PORTPHYSSTATE_PHY_TEST;
11858 }
11859 }
11860
11861 /* return the OPA port logical state name */
11862 const char *opa_lstate_name(u32 lstate)
11863 {
11864 static const char * const port_logical_names[] = {
11865 "PORT_NOP",
11866 "PORT_DOWN",
11867 "PORT_INIT",
11868 "PORT_ARMED",
11869 "PORT_ACTIVE",
11870 "PORT_ACTIVE_DEFER",
11871 };
11872 if (lstate < ARRAY_SIZE(port_logical_names))
11873 return port_logical_names[lstate];
11874 return "unknown";
11875 }
11876
11877 /* return the OPA port physical state name */
11878 const char *opa_pstate_name(u32 pstate)
11879 {
11880 static const char * const port_physical_names[] = {
11881 "PHYS_NOP",
11882 "reserved1",
11883 "PHYS_POLL",
11884 "PHYS_DISABLED",
11885 "PHYS_TRAINING",
11886 "PHYS_LINKUP",
11887 "PHYS_LINK_ERR_RECOVER",
11888 "PHYS_PHY_TEST",
11889 "reserved8",
11890 "PHYS_OFFLINE",
11891 "PHYS_GANGED",
11892 "PHYS_TEST",
11893 };
11894 if (pstate < ARRAY_SIZE(port_physical_names))
11895 return port_physical_names[pstate];
11896 return "unknown";
11897 }
11898
11899 /*
11900 * Read the hardware link state and set the driver's cached value of it.
11901 * Return the (new) current value.
11902 */
11903 u32 get_logical_state(struct hfi1_pportdata *ppd)
11904 {
11905 u32 new_state;
11906
11907 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
11908 if (new_state != ppd->lstate) {
11909 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
11910 opa_lstate_name(new_state), new_state);
11911 ppd->lstate = new_state;
11912 }
11913 /*
11914 * Set port status flags in the page mapped into userspace
11915 * memory. Do it here to ensure a reliable state - this is
11916 * the only function called by all state handling code.
11917	 * Always set the flags because the cache value might have been
11918	 * changed explicitly outside of this function.
11920 */
11921 if (ppd->statusp) {
11922 switch (ppd->lstate) {
11923 case IB_PORT_DOWN:
11924 case IB_PORT_INIT:
11925 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
11926 HFI1_STATUS_IB_READY);
11927 break;
11928 case IB_PORT_ARMED:
11929 *ppd->statusp |= HFI1_STATUS_IB_CONF;
11930 break;
11931 case IB_PORT_ACTIVE:
11932 *ppd->statusp |= HFI1_STATUS_IB_READY;
11933 break;
11934 }
11935 }
11936 return ppd->lstate;
11937 }
11938
11939 /**
11940 * wait_logical_linkstate - wait for an IB link state change to occur
11941 * @ppd: port device
11942 * @state: the state to wait for
11943 * @msecs: the number of milliseconds to wait
11944 *
11945 * Wait up to msecs milliseconds for IB link state change to occur.
11946 * For now, take the easy polling route.
11947 * Returns 0 if state reached, otherwise -ETIMEDOUT.
11948 */
11949 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
11950 int msecs)
11951 {
11952 unsigned long timeout;
11953
11954 timeout = jiffies + msecs_to_jiffies(msecs);
11955 while (1) {
11956 if (get_logical_state(ppd) == state)
11957 return 0;
11958 if (time_after(jiffies, timeout))
11959 break;
11960 msleep(20);
11961 }
11962 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
11963
11964 return -ETIMEDOUT;
11965 }
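
/*
 * Illustrative use (values are examples only): a caller that needs the
 * link to reach INIT could do
 *
 *	ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
 *
 * which polls get_logical_state() every 20 ms for up to ~1 second and
 * returns -ETIMEDOUT if the state is never observed.
 */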
11966
11967 u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
11968 {
11969 static u32 remembered_state = 0xff;
11970 u32 pstate;
11971 u32 ib_pstate;
11972
11973 pstate = read_physical_state(ppd->dd);
11974 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
11975 if (remembered_state != ib_pstate) {
11976 dd_dev_info(ppd->dd,
11977 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
11978 __func__, opa_pstate_name(ib_pstate), ib_pstate,
11979 pstate);
11980 remembered_state = ib_pstate;
11981 }
11982 return ib_pstate;
11983 }
11984
11985 /*
11986 * Read/modify/write ASIC_QSFP register bits as selected by mask
11987 * data: 0 or 1 in the positions depending on what needs to be written
11988 * dir: 0 for read, 1 for write
11989 * mask: select by setting
11990 * I2CCLK (bit 0)
11991 * I2CDATA (bit 1)
11992 */
11993 u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
11994 u32 mask)
11995 {
11996 u64 qsfp_oe, target_oe;
11997
11998 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
11999 if (mask) {
12000 /* We are writing register bits, so lock access */
12001 dir &= mask;
12002 data &= mask;
12003
12004 qsfp_oe = read_csr(dd, target_oe);
12005 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12006 write_csr(dd, target_oe, qsfp_oe);
12007 }
12008 /* We are exclusively reading bits here, but it is unlikely
12009 * we'll get valid data when we set the direction of the pin
12010  * in the same call, so a caller that reads should call this
12011  * function again to get valid data.
12012 */
12013 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12014 }
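
/*
 * Illustrative two-step read (example only, not part of the driver
 * flow): per the note above, reading the I2CDATA pin (bit 1) would
 * first set the pin direction, then call again to sample the inputs,
 * assuming a cleared OE bit makes the pin an input:
 *
 *	(void)hfi1_gpio_mod(dd, target, 0, 0, 0x2);   clear I2CDATA OE
 *	in = hfi1_gpio_mod(dd, target, 0, 0, 0);      sample ASIC_QSFPn_IN
 */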
12015
12016 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12017 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12018
12019 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12020 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12021
12022 int hfi1_init_ctxt(struct send_context *sc)
12023 {
12024 if (sc != NULL) {
12025 struct hfi1_devdata *dd = sc->dd;
12026 u64 reg;
12027 u8 set = (sc->type == SC_USER ?
12028 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12029 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12030 reg = read_kctxt_csr(dd, sc->hw_context,
12031 SEND_CTXT_CHECK_ENABLE);
12032 if (set)
12033 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12034 else
12035 SET_STATIC_RATE_CONTROL_SMASK(reg);
12036 write_kctxt_csr(dd, sc->hw_context,
12037 SEND_CTXT_CHECK_ENABLE, reg);
12038 }
12039 return 0;
12040 }
12041
12042 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12043 {
12044 int ret = 0;
12045 u64 reg;
12046
12047 if (dd->icode != ICODE_RTL_SILICON) {
12048 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12049 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12050 __func__);
12051 return -EINVAL;
12052 }
12053 reg = read_csr(dd, ASIC_STS_THERM);
12054 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12055 ASIC_STS_THERM_CURR_TEMP_MASK);
12056 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12057 ASIC_STS_THERM_LO_TEMP_MASK);
12058 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12059 ASIC_STS_THERM_HI_TEMP_MASK);
12060 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12061 ASIC_STS_THERM_CRIT_TEMP_MASK);
12062 /* triggers is a 3-bit value - 1 bit per trigger. */
12063 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12064
12065 return ret;
12066 }
12067
12068 /* ========================================================================= */
12069
12070 /*
12071 * Enable/disable chip from delivering interrupts.
12072 */
12073 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12074 {
12075 int i;
12076
12077 /*
12078 * In HFI, the mask needs to be 1 to allow interrupts.
12079 */
12080 if (enable) {
12081 u64 cce_int_mask;
12082 const int qsfp1_int_smask = QSFP1_INT % 64;
12083 const int qsfp2_int_smask = QSFP2_INT % 64;
12084
12085 /* enable all interrupts */
12086 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12087 write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
12088
12089 /*
12090 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
12091 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
12092 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
12093 * the index of the appropriate CSR in the CCEIntMask CSR array
12094 */
12095 cce_int_mask = read_csr(dd, CCE_INT_MASK +
12096 (8*(QSFP1_INT/64)));
12097 if (dd->hfi1_id) {
12098 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
12099 write_csr(dd, CCE_INT_MASK + (8*(QSFP1_INT/64)),
12100 cce_int_mask);
12101 } else {
12102 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
12103 write_csr(dd, CCE_INT_MASK + (8*(QSFP2_INT/64)),
12104 cce_int_mask);
12105 }
12106 } else {
12107 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12108 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
12109 }
12110 }
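
/*
 * Worked example of the indexing above (source number is made up): if
 * QSFP1_INT happened to be chip source 242, the CCE_INT_MASK register
 * index would be 242 / 64 = 3 and the bit position 242 % 64 = 50.
 * Because Qsfp1Int and Qsfp2Int are adjacent bits, both land in the
 * same register; HFI 0 clears the QSFP2 bit and HFI 1 clears the QSFP1
 * bit, so each HFI services only the QSFP interrupt for its own port.
 */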
12111
12112 /*
12113 * Clear all interrupt sources on the chip.
12114 */
12115 static void clear_all_interrupts(struct hfi1_devdata *dd)
12116 {
12117 int i;
12118
12119 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12120 write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
12121
12122 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12123 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12124 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12125 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12126 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12127 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12128 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12129 for (i = 0; i < dd->chip_send_contexts; i++)
12130 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12131 for (i = 0; i < dd->chip_sdma_engines; i++)
12132 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12133
12134 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12135 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12136 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12137 }
12138
12139 /* Move to pcie.c? */
12140 static void disable_intx(struct pci_dev *pdev)
12141 {
12142 pci_intx(pdev, 0);
12143 }
12144
12145 static void clean_up_interrupts(struct hfi1_devdata *dd)
12146 {
12147 int i;
12148
12149 /* remove irqs - must happen before disabling/turning off */
12150 if (dd->num_msix_entries) {
12151 /* MSI-X */
12152 struct hfi1_msix_entry *me = dd->msix_entries;
12153
12154 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12155 if (me->arg == NULL) /* => no irq, no affinity */
12156 break;
12157 irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
12158 NULL);
12159 free_irq(me->msix.vector, me->arg);
12160 }
12161 } else {
12162 /* INTx */
12163 if (dd->requested_intx_irq) {
12164 free_irq(dd->pcidev->irq, dd);
12165 dd->requested_intx_irq = 0;
12166 }
12167 }
12168
12169 /* turn off interrupts */
12170 if (dd->num_msix_entries) {
12171 /* MSI-X */
12172 pci_disable_msix(dd->pcidev);
12173 } else {
12174 /* INTx */
12175 disable_intx(dd->pcidev);
12176 }
12177
12178 /* clean structures */
12179 for (i = 0; i < dd->num_msix_entries; i++)
12180 free_cpumask_var(dd->msix_entries[i].mask);
12181 kfree(dd->msix_entries);
12182 dd->msix_entries = NULL;
12183 dd->num_msix_entries = 0;
12184 }
12185
12186 /*
12187 * Remap the interrupt source from the general handler to the given MSI-X
12188 * interrupt.
12189 */
12190 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12191 {
12192 u64 reg;
12193 int m, n;
12194
12195 /* clear from the handled mask of the general interrupt */
12196 m = isrc / 64;
12197 n = isrc % 64;
12198 dd->gi_mask[m] &= ~((u64)1 << n);
12199
12200 /* direct the chip source to the given MSI-X interrupt */
12201 m = isrc / 8;
12202 n = isrc % 8;
12203 reg = read_csr(dd, CCE_INT_MAP + (8*m));
12204 reg &= ~((u64)0xff << (8*n));
12205 reg |= ((u64)msix_intr & 0xff) << (8*n);
12206 write_csr(dd, CCE_INT_MAP + (8*m), reg);
12207 }
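
/*
 * Worked example of the mapping math above (source number is made up):
 * for isrc = 73 and msix_intr = 5,
 *	gi_mask[73 / 64] = gi_mask[1] has bit 73 % 64 = 9 cleared, and
 *	CCE_INT_MAP register 73 / 8 = 9 gets byte 73 % 8 = 1 set to 5,
 * so chip source 73 is no longer handled by the general interrupt and
 * is steered to MSI-X vector 5 instead.
 */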
12208
12209 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12210 int engine, int msix_intr)
12211 {
12212 /*
12213  * SDMA engine interrupt sources are grouped by type, rather than
12214  * by engine. Per-engine interrupts are as follows:
12215 * SDMA
12216 * SDMAProgress
12217 * SDMAIdle
12218 */
12219 remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
12220 msix_intr);
12221 remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
12222 msix_intr);
12223 remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
12224 msix_intr);
12225 }
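
/*
 * Example (engine number and count are illustrative): with
 * TXE_NUM_SDMA_ENGINES == 16, engine 2 has its SDMA, SDMAProgress and
 * SDMAIdle sources at IS_SDMA_START + 2, + 18 and + 34, and all three
 * are steered to the same MSI-X vector by the calls above.
 */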
12226
12227 static int request_intx_irq(struct hfi1_devdata *dd)
12228 {
12229 int ret;
12230
12231 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12232 dd->unit);
12233 ret = request_irq(dd->pcidev->irq, general_interrupt,
12234 IRQF_SHARED, dd->intx_name, dd);
12235 if (ret)
12236 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12237 ret);
12238 else
12239 dd->requested_intx_irq = 1;
12240 return ret;
12241 }
12242
12243 static int request_msix_irqs(struct hfi1_devdata *dd)
12244 {
12245 const struct cpumask *local_mask;
12246 cpumask_var_t def, rcv;
12247 bool def_ret, rcv_ret;
12248 int first_general, last_general;
12249 int first_sdma, last_sdma;
12250 int first_rx, last_rx;
12251 int first_cpu, curr_cpu;
12252 int rcv_cpu, sdma_cpu;
12253 int i, ret = 0, possible;
12254 int ht;
12255
12256 /* calculate the ranges we are going to use */
12257 first_general = 0;
12258 first_sdma = last_general = first_general + 1;
12259 first_rx = last_sdma = first_sdma + dd->num_sdma;
12260 last_rx = first_rx + dd->n_krcv_queues;
12261
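/*
 * Resulting vector layout (counts are illustrative): with num_sdma = 4
 * and n_krcv_queues = 3, the ranges computed above give
 *	vector 0     - general interrupt
 *	vectors 1-4  - one per SDMA engine
 *	vectors 5-7  - one per kernel receive context
 */
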
12262 /*
12263 * Interrupt affinity.
12264 *
12265  * Non-receive interrupts get a default mask that starts as the
12266  * CPUs local to the device, with hyper-thread siblings and the
12267  * receive CPUs removed.
12268  *
12269  * Receive interrupts are assigned node-local CPUs in order,
12270  * wrapping back to the first receive CPU as necessary.
12271  *
12272 */
12273 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
12274 /* if first cpu is invalid, use NUMA 0 */
12275 if (cpumask_first(local_mask) >= nr_cpu_ids)
12276 local_mask = topology_core_cpumask(0);
12277
12278 def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
12279 rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
12280 if (!def_ret || !rcv_ret)
12281 goto bail;
12282 /* use local mask as default */
12283 cpumask_copy(def, local_mask);
12284 possible = cpumask_weight(def);
12285 /* disarm threads from default */
12286 ht = cpumask_weight(
12287 topology_sibling_cpumask(cpumask_first(local_mask)));
12288 for (i = possible/ht; i < possible; i++)
12289 cpumask_clear_cpu(i, def);
12290 /* def now has full cores on the chosen node */
12291 first_cpu = cpumask_first(def);
12292 if (nr_cpu_ids >= first_cpu)
12293 first_cpu++;
12294 curr_cpu = first_cpu;
12295
12296 /* One context is reserved as control context */
12297 for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
12298 cpumask_clear_cpu(curr_cpu, def);
12299 cpumask_set_cpu(curr_cpu, rcv);
12300 curr_cpu = cpumask_next(curr_cpu, def);
12301 if (curr_cpu >= nr_cpu_ids)
12302 break;
12303 }
12304 /* def mask has non-rcv, rcv has recv mask */
12305 rcv_cpu = cpumask_first(rcv);
12306 sdma_cpu = cpumask_first(def);
12307
12308 /*
12309 * Sanity check - the code expects all SDMA chip source
12310 * interrupts to be in the same CSR, starting at bit 0. Verify
12311 * that this is true by checking the bit location of the start.
12312 */
12313 BUILD_BUG_ON(IS_SDMA_START % 64);
12314
12315 for (i = 0; i < dd->num_msix_entries; i++) {
12316 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12317 const char *err_info;
12318 irq_handler_t handler;
12319 irq_handler_t thread = NULL;
12320 void *arg;
12321 int idx;
12322 struct hfi1_ctxtdata *rcd = NULL;
12323 struct sdma_engine *sde = NULL;
12324
12325 /* obtain the arguments to request_irq */
12326 if (first_general <= i && i < last_general) {
12327 idx = i - first_general;
12328 handler = general_interrupt;
12329 arg = dd;
12330 snprintf(me->name, sizeof(me->name),
12331 DRIVER_NAME "_%d", dd->unit);
12332 err_info = "general";
12333 } else if (first_sdma <= i && i < last_sdma) {
12334 idx = i - first_sdma;
12335 sde = &dd->per_sdma[idx];
12336 handler = sdma_interrupt;
12337 arg = sde;
12338 snprintf(me->name, sizeof(me->name),
12339 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12340 err_info = "sdma";
12341 remap_sdma_interrupts(dd, idx, i);
12342 } else if (first_rx <= i && i < last_rx) {
12343 idx = i - first_rx;
12344 rcd = dd->rcd[idx];
12345 /* no interrupt if no rcd */
12346 if (!rcd)
12347 continue;
12348 /*
12349 * Set the interrupt register and mask for this
12350 * context's interrupt.
12351 */
12352 rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
12353 rcd->imask = ((u64)1) <<
12354 ((IS_RCVAVAIL_START+idx) % 64);
12355 handler = receive_context_interrupt;
12356 thread = receive_context_thread;
12357 arg = rcd;
12358 snprintf(me->name, sizeof(me->name),
12359 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12360 err_info = "receive context";
12361 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12362 } else {
12363 /* not in our expected range - complain, then
12364 ignore it */
12365 dd_dev_err(dd,
12366 "Unexpected extra MSI-X interrupt %d\n", i);
12367 continue;
12368 }
12369 /* no argument, no interrupt */
12370 if (arg == NULL)
12371 continue;
12372 /* make sure the name is terminated */
12373 me->name[sizeof(me->name)-1] = 0;
12374
12375 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12376 me->name, arg);
12377 if (ret) {
12378 dd_dev_err(dd,
12379 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12380 err_info, me->msix.vector, idx, ret);
12381 return ret;
12382 }
12383 /*
12384 * assign arg after request_irq call, so it will be
12385 * cleaned up
12386 */
12387 me->arg = arg;
12388
12389 if (!zalloc_cpumask_var(
12390 &dd->msix_entries[i].mask,
12391 GFP_KERNEL))
12392 goto bail;
12393 if (handler == sdma_interrupt) {
12394 dd_dev_info(dd, "sdma engine %d cpu %d\n",
12395 sde->this_idx, sdma_cpu);
12396 sde->cpu = sdma_cpu;
12397 cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
12398 sdma_cpu = cpumask_next(sdma_cpu, def);
12399 if (sdma_cpu >= nr_cpu_ids)
12400 sdma_cpu = cpumask_first(def);
12401 } else if (handler == receive_context_interrupt) {
12402 dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
12403 (rcd->ctxt == HFI1_CTRL_CTXT) ?
12404 cpumask_first(def) : rcv_cpu);
12405 if (rcd->ctxt == HFI1_CTRL_CTXT) {
12406 /* map to first default */
12407 cpumask_set_cpu(cpumask_first(def),
12408 dd->msix_entries[i].mask);
12409 } else {
12410 cpumask_set_cpu(rcv_cpu,
12411 dd->msix_entries[i].mask);
12412 rcv_cpu = cpumask_next(rcv_cpu, rcv);
12413 if (rcv_cpu >= nr_cpu_ids)
12414 rcv_cpu = cpumask_first(rcv);
12415 }
12416 } else {
12417 /* otherwise first def */
12418 dd_dev_info(dd, "%s cpu %d\n",
12419 err_info, cpumask_first(def));
12420 cpumask_set_cpu(
12421 cpumask_first(def), dd->msix_entries[i].mask);
12422 }
12423 irq_set_affinity_hint(
12424 dd->msix_entries[i].msix.vector,
12425 dd->msix_entries[i].mask);
12426 }
12427
12428 out:
12429 free_cpumask_var(def);
12430 free_cpumask_var(rcv);
12431 return ret;
12432 bail:
12433 ret = -ENOMEM;
12434 goto out;
12435 }
12436
12437 /*
12438 * Set the general handler to accept all interrupts, remap all
12439 * chip interrupts back to MSI-X 0.
12440 */
12441 static void reset_interrupts(struct hfi1_devdata *dd)
12442 {
12443 int i;
12444
12445 /* all interrupts handled by the general handler */
12446 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12447 dd->gi_mask[i] = ~(u64)0;
12448
12449 /* all chip interrupts map to MSI-X 0 */
12450 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12451 write_csr(dd, CCE_INT_MAP + (8*i), 0);
12452 }
12453
12454 static int set_up_interrupts(struct hfi1_devdata *dd)
12455 {
12456 struct hfi1_msix_entry *entries;
12457 u32 total, request;
12458 int i, ret;
12459 int single_interrupt = 0; /* we expect to have all the interrupts */
12460
12461 /*
12462 * Interrupt count:
12463 * 1 general, "slow path" interrupt (includes the SDMA engines
12464 * slow source, SDMACleanupDone)
12465 * N interrupts - one per used SDMA engine
12466  * M interrupts - one per kernel receive context
12467 */
12468 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12469
12470 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12471 if (!entries) {
12472 ret = -ENOMEM;
12473 goto fail;
12474 }
12475 /* 1-1 MSI-X entry assignment */
12476 for (i = 0; i < total; i++)
12477 entries[i].msix.entry = i;
12478
12479 /* ask for MSI-X interrupts */
12480 request = total;
12481 request_msix(dd, &request, entries);
12482
12483 if (request == 0) {
12484 /* using INTx */
12485 /* dd->num_msix_entries already zero */
12486 kfree(entries);
12487 single_interrupt = 1;
12488 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12489 } else {
12490 /* using MSI-X */
12491 dd->num_msix_entries = request;
12492 dd->msix_entries = entries;
12493
12494 if (request != total) {
12495 /* using MSI-X, with reduced interrupts */
12496 dd_dev_err(
12497 dd,
12498 "cannot handle reduced interrupt case, want %u, got %u\n",
12499 total, request);
12500 ret = -EINVAL;
12501 goto fail;
12502 }
12503 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12504 }
12505
12506 /* mask all interrupts */
12507 set_intr_state(dd, 0);
12508 /* clear all pending interrupts */
12509 clear_all_interrupts(dd);
12510
12511 /* reset general handler mask, chip MSI-X mappings */
12512 reset_interrupts(dd);
12513
12514 if (single_interrupt)
12515 ret = request_intx_irq(dd);
12516 else
12517 ret = request_msix_irqs(dd);
12518 if (ret)
12519 goto fail;
12520
12521 return 0;
12522
12523 fail:
12524 clean_up_interrupts(dd);
12525 return ret;
12526 }
12527
12528 /*
12529 * Set up context values in dd. Sets:
12530 *
12531 * num_rcv_contexts - number of contexts being used
12532 * n_krcv_queues - number of kernel contexts
12533 * first_user_ctxt - first non-kernel context in array of contexts
12534 * freectxts - number of free user contexts
12535 * num_send_contexts - number of PIO send contexts being used
12536 */
12537 static int set_up_context_variables(struct hfi1_devdata *dd)
12538 {
12539 int num_kernel_contexts;
12540 int total_contexts;
12541 int ret;
12542 unsigned ngroups;
12543
12544 /*
12545 * Kernel contexts: (to be fixed later):
12546  * - min of 2 or 1 context/numa
12547 * - Context 0 - control context (VL15/multicast/error)
12548 * - Context 1 - default context
12549 */
12550 if (n_krcvqs)
12551 /*
12552 * Don't count context 0 in n_krcvqs since
12553  * it isn't used for normal verbs traffic.
12554 *
12555 * krcvqs will reflect number of kernel
12556 * receive contexts above 0.
12557 */
12558 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
12559 else
12560 num_kernel_contexts = num_online_nodes() + 1;
12561 num_kernel_contexts =
12562 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12563 /*
12564 * Every kernel receive context needs an ACK send context.
12565 * one send context is allocated for each VL{0-7} and VL15
12566 */
12567 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12568 dd_dev_err(dd,
12569 "Reducing # kernel rcv contexts to: %d, from %d\n",
12570 (int)(dd->chip_send_contexts - num_vls - 1),
12571 (int)num_kernel_contexts);
12572 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12573 }
12574 /*
12575 * User contexts: (to be fixed later)
12576 * - default to 1 user context per CPU if num_user_contexts is
12577 * negative
12578 */
12579 if (num_user_contexts < 0)
12580 num_user_contexts = num_online_cpus();
12581
12582 total_contexts = num_kernel_contexts + num_user_contexts;
12583
12584 /*
12585 * Adjust the counts given a global max.
12586 */
12587 if (total_contexts > dd->chip_rcv_contexts) {
12588 dd_dev_err(dd,
12589 "Reducing # user receive contexts to: %d, from %d\n",
12590 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12591 (int)num_user_contexts);
12592 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12593 /* recalculate */
12594 total_contexts = num_kernel_contexts + num_user_contexts;
12595 }
12596
12597 /* the first N are kernel contexts, the rest are user contexts */
12598 dd->num_rcv_contexts = total_contexts;
12599 dd->n_krcv_queues = num_kernel_contexts;
12600 dd->first_user_ctxt = num_kernel_contexts;
12601 dd->freectxts = num_user_contexts;
12602 dd_dev_info(dd,
12603 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12604 (int)dd->chip_rcv_contexts,
12605 (int)dd->num_rcv_contexts,
12606 (int)dd->n_krcv_queues,
12607 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12608
12609 /*
12610 * Receive array allocation:
12611 * All RcvArray entries are divided into groups of 8. This
12612 * is required by the hardware and will speed up writes to
12613 * consecutive entries by using write-combining of the entire
12614 * cacheline.
12615 *
12616  * The groups are divided evenly among all contexts; any
12617  * leftover groups will be given to the first N user
12618  * contexts.
12619 */
12620 dd->rcv_entries.group_size = RCV_INCREMENT;
12621 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12622 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12623 dd->rcv_entries.nctxt_extra = ngroups -
12624 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12625 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12626 dd->rcv_entries.ngroups,
12627 dd->rcv_entries.nctxt_extra);
12628 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12629 MAX_EAGER_ENTRIES * 2) {
12630 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12631 dd->rcv_entries.group_size;
12632 dd_dev_info(dd,
12633 "RcvArray group count too high, change to %u\n",
12634 dd->rcv_entries.ngroups);
12635 dd->rcv_entries.nctxt_extra = 0;
12636 }
12637 /*
12638 * PIO send contexts
12639 */
12640 ret = init_sc_pools_and_sizes(dd);
12641 if (ret >= 0) { /* success */
12642 dd->num_send_contexts = ret;
12643 dd_dev_info(
12644 dd,
12645 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12646 dd->chip_send_contexts,
12647 dd->num_send_contexts,
12648 dd->sc_sizes[SC_KERNEL].count,
12649 dd->sc_sizes[SC_ACK].count,
12650 dd->sc_sizes[SC_USER].count);
12651 ret = 0; /* success */
12652 }
12653
12654 return ret;
12655 }
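
/*
 * Illustrative sizing example (all numbers hypothetical): with
 * chip_rcv_array_count = 2048, group_size = 8 and num_rcv_contexts = 20,
 * the code above computes ngroups = 256, ngroups per context =
 * 256 / 20 = 12, and nctxt_extra = 256 - 20 * 12 = 16 leftover groups
 * that are handed to the first user contexts.
 */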
12656
12657 /*
12658 * Set the device/port partition key table. The MAD code
12659 * will ensure that, at least, the partial management
12660 * partition key is present in the table.
12661 */
12662 static void set_partition_keys(struct hfi1_pportdata *ppd)
12663 {
12664 struct hfi1_devdata *dd = ppd->dd;
12665 u64 reg = 0;
12666 int i;
12667
12668 dd_dev_info(dd, "Setting partition keys\n");
12669 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12670 reg |= (ppd->pkeys[i] &
12671 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12672 ((i % 4) *
12673 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12674 /* Each register holds 4 PKey values. */
12675 if ((i % 4) == 3) {
12676 write_csr(dd, RCV_PARTITION_KEY +
12677 ((i - 3) * 2), reg);
12678 reg = 0;
12679 }
12680 }
12681
12682 /* Always enable HW pkeys check when pkeys table is set */
12683 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12684 }
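
/*
 * Packing example (key values are made up): each 64-bit register holds
 * four 16-bit keys, so pkeys[4] through pkeys[7] all land in the second
 * RCV_PARTITION_KEY register (address offset (7 - 3) * 2 = 8), with
 * pkeys[4] in the lowest 16 bits and pkeys[7] in the highest, matching
 * the (i % 4) shift used above.
 */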
12685
12686 /*
12687 * These CSRs and memories are uninitialized on reset and must be
12688 * written before reading to set the ECC/parity bits.
12689 *
12690 * NOTE: All user context CSRs that are not mmaped write-only
12691 * (e.g. the TID flows) must be initialized even if the driver never
12692 * reads them.
12693 */
12694 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12695 {
12696 int i, j;
12697
12698 /* CceIntMap */
12699 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12700 write_csr(dd, CCE_INT_MAP+(8*i), 0);
12701
12702 /* SendCtxtCreditReturnAddr */
12703 for (i = 0; i < dd->chip_send_contexts; i++)
12704 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12705
12706 /* PIO Send buffers */
12707 /* SDMA Send buffers */
12708 /* These are not normally read, and (presently) have no method
12709  * to be read, so are not pre-initialized */
12710
12711 /* RcvHdrAddr */
12712 /* RcvHdrTailAddr */
12713 /* RcvTidFlowTable */
12714 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12715 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12716 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12717 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12718 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
12719 }
12720
12721 /* RcvArray */
12722 for (i = 0; i < dd->chip_rcv_array_count; i++)
12723 write_csr(dd, RCV_ARRAY + (8*i),
12724 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12725
12726 /* RcvQPMapTable */
12727 for (i = 0; i < 32; i++)
12728 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12729 }
12730
12731 /*
12732 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12733 */
12734 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12735 u64 ctrl_bits)
12736 {
12737 unsigned long timeout;
12738 u64 reg;
12739
12740 /* is the condition present? */
12741 reg = read_csr(dd, CCE_STATUS);
12742 if ((reg & status_bits) == 0)
12743 return;
12744
12745 /* clear the condition */
12746 write_csr(dd, CCE_CTRL, ctrl_bits);
12747
12748 /* wait for the condition to clear */
12749 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12750 while (1) {
12751 reg = read_csr(dd, CCE_STATUS);
12752 if ((reg & status_bits) == 0)
12753 return;
12754 if (time_after(jiffies, timeout)) {
12755 dd_dev_err(dd,
12756 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12757 status_bits, reg & status_bits);
12758 return;
12759 }
12760 udelay(1);
12761 }
12762 }
12763
12764 /* set CCE CSRs to chip reset defaults */
12765 static void reset_cce_csrs(struct hfi1_devdata *dd)
12766 {
12767 int i;
12768
12769 /* CCE_REVISION read-only */
12770 /* CCE_REVISION2 read-only */
12771 /* CCE_CTRL - bits clear automatically */
12772 /* CCE_STATUS read-only, use CceCtrl to clear */
12773 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12774 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12775 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12776 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12777 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12778 /* CCE_ERR_STATUS read-only */
12779 write_csr(dd, CCE_ERR_MASK, 0);
12780 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12781 /* CCE_ERR_FORCE leave alone */
12782 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12783 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12784 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12785 /* CCE_PCIE_CTRL leave alone */
12786 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12787 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12788 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12789 CCE_MSIX_TABLE_UPPER_RESETCSR);
12790 }
12791 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12792 /* CCE_MSIX_PBA read-only */
12793 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12794 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12795 }
12796 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12797 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12798 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12799 /* CCE_INT_STATUS read-only */
12800 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12801 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12802 /* CCE_INT_FORCE leave alone */
12803 /* CCE_INT_BLOCKED read-only */
12804 }
12805 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12806 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12807 }
12808
12809 /* set ASIC CSRs to chip reset defaults */
12810 static void reset_asic_csrs(struct hfi1_devdata *dd)
12811 {
12812 int i;
12813
12814 /*
12815 * If the HFIs are shared between separate nodes or VMs,
12816 * then more will need to be done here. One idea is a module
12817 * parameter that returns early, letting the first power-on or
12818 * a known first load do the reset and blocking all others.
12819 */
12820
12821 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12822 return;
12823
12824 if (dd->icode != ICODE_FPGA_EMULATION) {
12825 /* emulation does not have an SBus - leave these alone */
12826 /*
12827 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12828 * Notes:
12829 * o The reset is not zero if aimed at the core. See the
12830 * SBus documentation for details.
12831 * o If the SBus firmware has been updated (e.g. by the BIOS),
12832 * will the reset revert that?
12833 */
12834 /* ASIC_CFG_SBUS_REQUEST leave alone */
12835 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12836 }
12837 /* ASIC_SBUS_RESULT read-only */
12838 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12839 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12840 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12841 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
12842
12843 /* We might want to retain this state across FLR if we ever use it */
12844 write_csr(dd, ASIC_CFG_DRV_STR, 0);
12845
12846 /* ASIC_CFG_THERM_POLL_EN leave alone */
12847 /* ASIC_STS_THERM read-only */
12848 /* ASIC_CFG_RESET leave alone */
12849
12850 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12851 /* ASIC_PCIE_SD_HOST_STATUS read-only */
12852 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12853 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12854 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12855 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12856 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12857 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12858 for (i = 0; i < 16; i++)
12859 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12860
12861 /* ASIC_GPIO_IN read-only */
12862 write_csr(dd, ASIC_GPIO_OE, 0);
12863 write_csr(dd, ASIC_GPIO_INVERT, 0);
12864 write_csr(dd, ASIC_GPIO_OUT, 0);
12865 write_csr(dd, ASIC_GPIO_MASK, 0);
12866 /* ASIC_GPIO_STATUS read-only */
12867 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
12868 /* ASIC_GPIO_FORCE leave alone */
12869
12870 /* ASIC_QSFP1_IN read-only */
12871 write_csr(dd, ASIC_QSFP1_OE, 0);
12872 write_csr(dd, ASIC_QSFP1_INVERT, 0);
12873 write_csr(dd, ASIC_QSFP1_OUT, 0);
12874 write_csr(dd, ASIC_QSFP1_MASK, 0);
12875 /* ASIC_QSFP1_STATUS read-only */
12876 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
12877 /* ASIC_QSFP1_FORCE leave alone */
12878
12879 /* ASIC_QSFP2_IN read-only */
12880 write_csr(dd, ASIC_QSFP2_OE, 0);
12881 write_csr(dd, ASIC_QSFP2_INVERT, 0);
12882 write_csr(dd, ASIC_QSFP2_OUT, 0);
12883 write_csr(dd, ASIC_QSFP2_MASK, 0);
12884 /* ASIC_QSFP2_STATUS read-only */
12885 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
12886 /* ASIC_QSFP2_FORCE leave alone */
12887
12888 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
12889 /* this also writes a NOP command, clearing paging mode */
12890 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
12891 write_csr(dd, ASIC_EEP_DATA, 0);
12892 }
12893
12894 /* set MISC CSRs to chip reset defaults */
12895 static void reset_misc_csrs(struct hfi1_devdata *dd)
12896 {
12897 int i;
12898
12899 for (i = 0; i < 32; i++) {
12900 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
12901 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
12902 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
12903 }
12904 /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
12905 only be written 128-byte chunks */
12906 /* init RSA engine to clear lingering errors */
12907 write_csr(dd, MISC_CFG_RSA_CMD, 1);
12908 write_csr(dd, MISC_CFG_RSA_MU, 0);
12909 write_csr(dd, MISC_CFG_FW_CTRL, 0);
12910 /* MISC_STS_8051_DIGEST read-only */
12911 /* MISC_STS_SBM_DIGEST read-only */
12912 /* MISC_STS_PCIE_DIGEST read-only */
12913 /* MISC_STS_FAB_DIGEST read-only */
12914 /* MISC_ERR_STATUS read-only */
12915 write_csr(dd, MISC_ERR_MASK, 0);
12916 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
12917 /* MISC_ERR_FORCE leave alone */
12918 }
12919
12920 /* set TXE CSRs to chip reset defaults */
12921 static void reset_txe_csrs(struct hfi1_devdata *dd)
12922 {
12923 int i;
12924
12925 /*
12926 * TXE Kernel CSRs
12927 */
12928 write_csr(dd, SEND_CTRL, 0);
12929 __cm_reset(dd, 0); /* reset CM internal state */
12930 /* SEND_CONTEXTS read-only */
12931 /* SEND_DMA_ENGINES read-only */
12932 /* SEND_PIO_MEM_SIZE read-only */
12933 /* SEND_DMA_MEM_SIZE read-only */
12934 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
12935 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
12936 /* SEND_PIO_ERR_STATUS read-only */
12937 write_csr(dd, SEND_PIO_ERR_MASK, 0);
12938 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
12939 /* SEND_PIO_ERR_FORCE leave alone */
12940 /* SEND_DMA_ERR_STATUS read-only */
12941 write_csr(dd, SEND_DMA_ERR_MASK, 0);
12942 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
12943 /* SEND_DMA_ERR_FORCE leave alone */
12944 /* SEND_EGRESS_ERR_STATUS read-only */
12945 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
12946 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
12947 /* SEND_EGRESS_ERR_FORCE leave alone */
12948 write_csr(dd, SEND_BTH_QP, 0);
12949 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
12950 write_csr(dd, SEND_SC2VLT0, 0);
12951 write_csr(dd, SEND_SC2VLT1, 0);
12952 write_csr(dd, SEND_SC2VLT2, 0);
12953 write_csr(dd, SEND_SC2VLT3, 0);
12954 write_csr(dd, SEND_LEN_CHECK0, 0);
12955 write_csr(dd, SEND_LEN_CHECK1, 0);
12956 /* SEND_ERR_STATUS read-only */
12957 write_csr(dd, SEND_ERR_MASK, 0);
12958 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
12959 /* SEND_ERR_FORCE read-only */
12960 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
12961 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
12962 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
12963 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
12964 for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
12965 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
12966 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
12967 write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
12968 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
12969 write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
12970 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
12971 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
12972 SEND_CM_GLOBAL_CREDIT_RESETCSR);
12973 /* SEND_CM_CREDIT_USED_STATUS read-only */
12974 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
12975 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
12976 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
12977 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
12978 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
12979 for (i = 0; i < TXE_NUM_DATA_VL; i++)
12980 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
12981 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
12982 /* SEND_CM_CREDIT_USED_VL read-only */
12983 /* SEND_CM_CREDIT_USED_VL15 read-only */
12984 /* SEND_EGRESS_CTXT_STATUS read-only */
12985 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
12986 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
12987 /* SEND_EGRESS_ERR_INFO read-only */
12988 /* SEND_EGRESS_ERR_SOURCE read-only */
12989
12990 /*
12991 * TXE Per-Context CSRs
12992 */
12993 for (i = 0; i < dd->chip_send_contexts; i++) {
12994 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
12995 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
12996 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12997 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
12998 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
12999 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13000 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13001 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13002 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13003 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13004 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13005 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13006 }
13007
13008 /*
13009 * TXE Per-SDMA CSRs
13010 */
13011 for (i = 0; i < dd->chip_sdma_engines; i++) {
13012 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13013 /* SEND_DMA_STATUS read-only */
13014 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13015 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13016 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13017 /* SEND_DMA_HEAD read-only */
13018 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13019 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13020 /* SEND_DMA_IDLE_CNT read-only */
13021 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13022 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13023 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13024 /* SEND_DMA_ENG_ERR_STATUS read-only */
13025 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13026 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13027 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13028 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13029 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13030 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13031 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13032 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13033 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13034 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13035 }
13036 }
13037
13038 /*
13039 * Expect on entry:
13040 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13041 */
13042 static void init_rbufs(struct hfi1_devdata *dd)
13043 {
13044 u64 reg;
13045 int count;
13046
13047 /*
13048  * Wait for DMA to stop, i.e. until RxRbufPktPending and
13049  * RxPktInProgress are both clear.
13050 */
13051 count = 0;
13052 while (1) {
13053 reg = read_csr(dd, RCV_STATUS);
13054 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13055 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13056 break;
13057 /*
13058 * Give up after 1ms - maximum wait time.
13059 *
13060 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13061 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13062 * 148 KB / (66% * 250MB/s) = 920us
13063 */
13064 if (count++ > 500) {
13065 dd_dev_err(dd,
13066 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13067 __func__, reg);
13068 break;
13069 }
13070 udelay(2); /* do not busy-wait the CSR */
13071 }
13072
13073 /* start the init - expect RcvCtrl to be 0 */
13074 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13075
13076 /*
13077  * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13078  * period after the write before RcvStatus.RxRbufInitDone is valid.
13079  * The delay in the first run through the loop below is sufficient and
13080  * required before the first read of RcvStatus.RxRbufInitDone.
13081 */
13082 read_csr(dd, RCV_CTRL);
13083
13084 /* wait for the init to finish */
13085 count = 0;
13086 while (1) {
13087 /* delay is required first time through - see above */
13088 udelay(2); /* do not busy-wait the CSR */
13089 reg = read_csr(dd, RCV_STATUS);
13090 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13091 break;
13092
13093 /* give up after 100us - slowest possible at 33MHz is 73us */
13094 if (count++ > 50) {
13095 dd_dev_err(dd,
13096 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13097 __func__);
13098 break;
13099 }
13100 }
13101 }
13102
13103 /* set RXE CSRs to chip reset defaults */
13104 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13105 {
13106 int i, j;
13107
13108 /*
13109 * RXE Kernel CSRs
13110 */
13111 write_csr(dd, RCV_CTRL, 0);
13112 init_rbufs(dd);
13113 /* RCV_STATUS read-only */
13114 /* RCV_CONTEXTS read-only */
13115 /* RCV_ARRAY_CNT read-only */
13116 /* RCV_BUF_SIZE read-only */
13117 write_csr(dd, RCV_BTH_QP, 0);
13118 write_csr(dd, RCV_MULTICAST, 0);
13119 write_csr(dd, RCV_BYPASS, 0);
13120 write_csr(dd, RCV_VL15, 0);
13121 /* this is a clear-down */
13122 write_csr(dd, RCV_ERR_INFO,
13123 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13124 /* RCV_ERR_STATUS read-only */
13125 write_csr(dd, RCV_ERR_MASK, 0);
13126 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13127 /* RCV_ERR_FORCE leave alone */
13128 for (i = 0; i < 32; i++)
13129 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13130 for (i = 0; i < 4; i++)
13131 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13132 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13133 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13134 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13135 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13136 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13137 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13138 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13139 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13140 }
13141 for (i = 0; i < 32; i++)
13142 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13143
13144 /*
13145 * RXE Kernel and User Per-Context CSRs
13146 */
13147 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13148 /* kernel */
13149 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13150 /* RCV_CTXT_STATUS read-only */
13151 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13152 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13153 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13154 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13155 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13156 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13157 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13158 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13159 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13160 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13161
13162 /* user */
13163 /* RCV_HDR_TAIL read-only */
13164 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13165 /* RCV_EGR_INDEX_TAIL read-only */
13166 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13167 /* RCV_EGR_OFFSET_TAIL read-only */
13168 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13169 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13170 0);
13171 }
13172 }
13173 }
13174
13175 /*
13176 * Set sc2vl tables.
13177 *
13178 * They power on to zeros, so to avoid send context errors
13179 * they need to be set:
13180 *
13181 * SC 0-7 -> VL 0-7 (respectively)
13182 * SC 15 -> VL 15
13183 * otherwise
13184 * -> VL 0
13185 */
13186 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13187 {
13188 int i;
13189 /* init per architecture spec, constrained by hardware capability */
13190
13191 /* HFI maps sent packets */
13192 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13193 0,
13194 0, 0, 1, 1,
13195 2, 2, 3, 3,
13196 4, 4, 5, 5,
13197 6, 6, 7, 7));
13198 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13199 1,
13200 8, 0, 9, 0,
13201 10, 0, 11, 0,
13202 12, 0, 13, 0,
13203 14, 0, 15, 15));
13204 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13205 2,
13206 16, 0, 17, 0,
13207 18, 0, 19, 0,
13208 20, 0, 21, 0,
13209 22, 0, 23, 0));
13210 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13211 3,
13212 24, 0, 25, 0,
13213 26, 0, 27, 0,
13214 28, 0, 29, 0,
13215 30, 0, 31, 0));
13216
13217 /* DC maps received packets */
13218 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13219 15_0,
13220 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13221 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13222 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13223 31_16,
13224 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13225 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13226
13227 /* initialize the cached sc2vl values consistently with h/w */
13228 for (i = 0; i < 32; i++) {
13229 if (i < 8 || i == 15)
13230 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13231 else
13232 *((u8 *)(dd->sc2vl) + i) = 0;
13233 }
13234 }
13235
13236 /*
13237 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13238 * depend on the chip going through a power-on reset - a driver may be loaded
13239 * and unloaded many times.
13240 *
13241 * Do not write any CSR values to the chip in this routine - there may be
13242 * a reset following the (possible) FLR in this routine.
13243 *
13244 */
13245 static void init_chip(struct hfi1_devdata *dd)
13246 {
13247 int i;
13248
13249 /*
13250 * Put the HFI CSRs in a known state.
13251 * Combine this with a DC reset.
13252 *
13253 * Stop the device from doing anything while we do a
13254 * reset. We know there are no other active users of
13255  * the device since we are now in charge. Turn off
13256  * all outbound and inbound traffic and make sure
13257 * the device does not generate any interrupts.
13258 */
13259
13260 /* disable send contexts and SDMA engines */
13261 write_csr(dd, SEND_CTRL, 0);
13262 for (i = 0; i < dd->chip_send_contexts; i++)
13263 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13264 for (i = 0; i < dd->chip_sdma_engines; i++)
13265 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13266 /* disable port (turn off RXE inbound traffic) and contexts */
13267 write_csr(dd, RCV_CTRL, 0);
13268 for (i = 0; i < dd->chip_rcv_contexts; i++)
13269 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13270 /* mask all interrupt sources */
13271 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13272 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
13273
13274 /*
13275 * DC Reset: do a full DC reset before the register clear.
13276 * A recommended length of time to hold is one CSR read,
13277 * so reread the CceDcCtrl. Then, hold the DC in reset
13278 * across the clear.
13279 */
13280 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13281 (void) read_csr(dd, CCE_DC_CTRL);
13282
13283 if (use_flr) {
13284 /*
13285 * A FLR will reset the SPC core and part of the PCIe.
13286 * The parts that need to be restored have already been
13287 * saved.
13288 */
13289 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13290
13291 /* do the FLR, the DC reset will remain */
13292 hfi1_pcie_flr(dd);
13293
13294 /* restore command and BARs */
13295 restore_pci_variables(dd);
13296
13297 if (is_ax(dd)) {
13298 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13299 hfi1_pcie_flr(dd);
13300 restore_pci_variables(dd);
13301 }
13302
13303 reset_asic_csrs(dd);
13304 } else {
13305 dd_dev_info(dd, "Resetting CSRs with writes\n");
13306 reset_cce_csrs(dd);
13307 reset_txe_csrs(dd);
13308 reset_rxe_csrs(dd);
13309 reset_asic_csrs(dd);
13310 reset_misc_csrs(dd);
13311 }
13312 /* clear the DC reset */
13313 write_csr(dd, CCE_DC_CTRL, 0);
13314
13315 /* Set the LED off */
13316 if (is_ax(dd))
13317 setextled(dd, 0);
13318 /*
13319 * Clear the QSFP reset.
13320 * An FLR enforces a 0 on all out pins. The driver does not touch
13321  * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
13322  * anything plugged in constantly in reset if it pays attention
13323  * to RESET_N.  Prime examples of this are optical cables.
13324  * Set all pins high.
13325 * I2CCLK and I2CDAT will change per direction, and INT_N and
13326 * MODPRS_N are input only and their value is ignored.
13327 */
13328 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13329 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13330 }
13331
13332 static void init_early_variables(struct hfi1_devdata *dd)
13333 {
13334 int i;
13335
13336 /* assign link credit variables */
13337 dd->vau = CM_VAU;
13338 dd->link_credits = CM_GLOBAL_CREDITS;
13339 if (is_ax(dd))
13340 dd->link_credits--;
13341 dd->vcu = cu_to_vcu(hfi1_cu);
13342 /* enough room for 8 MAD packets plus header - 17K */
13343 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13344 if (dd->vl15_init > dd->link_credits)
13345 dd->vl15_init = dd->link_credits;
13346
13347 write_uninitialized_csrs_and_memories(dd);
13348
13349 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13350 for (i = 0; i < dd->num_pports; i++) {
13351 struct hfi1_pportdata *ppd = &dd->pport[i];
13352
13353 set_partition_keys(ppd);
13354 }
13355 init_sc2vl_tables(dd);
13356 }
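
/*
 * Credit math example: the vl15_init value above reserves room for
 * 8 * (2048 + 128) = 17408 bytes of MAD traffic.  Assuming, purely for
 * illustration, an allocation unit (AU) of 64 bytes, that works out to
 * 17408 / 64 = 272 credits, clamped to link_credits if it were larger.
 */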
13357
13358 static void init_kdeth_qp(struct hfi1_devdata *dd)
13359 {
13360 /* user changed the KDETH_QP */
13361 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13362 /* out of range or illegal value */
13363 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13364 kdeth_qp = 0;
13365 }
13366 if (kdeth_qp == 0) /* not set, or failed range check */
13367 kdeth_qp = DEFAULT_KDETH_QP;
13368
13369 write_csr(dd, SEND_BTH_QP,
13370 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13371 << SEND_BTH_QP_KDETH_QP_SHIFT);
13372
13373 write_csr(dd, RCV_BTH_QP,
13374 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13375 << RCV_BTH_QP_KDETH_QP_SHIFT);
13376 }
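
/*
 * Example of the range check above: a module parameter of kdeth_qp = 0
 * (or any value >= 0xff) falls back to DEFAULT_KDETH_QP, while values
 * 1 through 0xfe are accepted and written as the KDETH QP prefix in
 * both SendBthQP and RcvBthQP.
 */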
13377
13378 /**
13379 * init_qpmap_table
13380 * @dd - device data
13381 * @first_ctxt - first context
13382  * @last_ctxt - last context
13383 *
13384  * This routine sets the qpn mapping table that
13385 * is indexed by qpn[8:1].
13386 *
13387 * The routine will round robin the 256 settings
13388 * from first_ctxt to last_ctxt.
13389 *
13390 * The first/last looks ahead to having specialized
13391 * receive contexts for mgmt and bypass. Normal
13392  * verbs traffic is assumed to be on a range
13393 * of receive contexts.
13394 */
13395 static void init_qpmap_table(struct hfi1_devdata *dd,
13396 u32 first_ctxt,
13397 u32 last_ctxt)
13398 {
13399 u64 reg = 0;
13400 u64 regno = RCV_QP_MAP_TABLE;
13401 int i;
13402 u64 ctxt = first_ctxt;
13403
13404 for (i = 0; i < 256;) {
13405 reg |= ctxt << (8 * (i % 8));
13406 i++;
13407 ctxt++;
13408 if (ctxt > last_ctxt)
13409 ctxt = first_ctxt;
13410 if (i % 8 == 0) {
13411 write_csr(dd, regno, reg);
13412 reg = 0;
13413 regno += 8;
13414 }
13415 }
13416 if (i % 8)
13417 write_csr(dd, regno, reg);
13418
13419 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13420 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13421 }
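
/*
 * Mapping example (context numbers are illustrative): called with
 * first_ctxt = 2 and last_ctxt = 4, the 256 table entries become
 * 2, 3, 4, 2, 3, 4, ... and are written eight at a time, one byte per
 * entry, into consecutive RCV_QP_MAP_TABLE registers.  A QP with
 * qpn[8:1] == 5 would therefore be steered to receive context 4.
 */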
13422
13423 /**
13424 * init_qos - init RX qos
13425 * @dd - device data
13426  * @first_ctxt - first receive context to use for QOS
13427 *
13428 * This routine initializes Rule 0 and the
13429 * RSM map table to implement qos.
13430 *
13431 * If all of the limit tests succeed,
13432 * qos is applied based on the array
13433 * interpretation of krcvqs where
13434 * entry 0 is VL0.
13435 *
13436 * The number of vl bits (n) and the number of qpn
13437 * bits (m) are computed to feed both the RSM map table
13438 * and the single rule.
13439 *
13440 */
13441 static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13442 {
13443 u8 max_by_vl = 0;
13444 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13445 u64 *rsmmap;
13446 u64 reg;
13447 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13448
13449 /* validate */
13450 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13451 num_vls == 1 ||
13452 krcvqsset <= 1)
13453 goto bail;
13454 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13455 if (krcvqs[i] > max_by_vl)
13456 max_by_vl = krcvqs[i];
13457 if (max_by_vl > 32)
13458 goto bail;
13459 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13460 /* determine bits for vl */
13461 n = ilog2(num_vls);
13462 /* determine bits for qpn */
13463 m = ilog2(qpns_per_vl);
13464 if ((m + n) > 7)
13465 goto bail;
13466 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13467 goto bail;
13468 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
13469 if (!rsmmap)
13470 goto bail;
13471 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13472 /* init the local copy of the table */
13473 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13474 unsigned tctxt;
13475
13476 for (qpn = 0, tctxt = ctxt;
13477 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13478 unsigned idx, regoff, regidx;
13479
13480 /* generate index <= 128 */
13481 idx = (qpn << n) ^ i;
13482 regoff = (idx % 8) * 8;
13483 regidx = idx / 8;
13484 reg = rsmmap[regidx];
13485 /* replace 0xff with context number */
13486 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13487 << regoff);
13488 reg |= (u64)(tctxt++) << regoff;
13489 rsmmap[regidx] = reg;
13490 if (tctxt == ctxt + krcvqs[i])
13491 tctxt = ctxt;
13492 }
13493 ctxt += krcvqs[i];
13494 }
13495 /* flush cached copies to chip */
13496 for (i = 0; i < NUM_MAP_REGS; i++)
13497 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13498 /* add rule0 */
13499 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13500 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13501 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13502 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13503 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13504 LRH_BTH_MATCH_OFFSET
13505 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13506 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13507 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13508 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13509 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13510 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13511 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13512 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13513 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13514 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13515 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13516 /* Enable RSM */
13517 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13518 kfree(rsmmap);
13519 /* map everything else to first context */
13520 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
13521 dd->qos_shift = n + 1;
13522 return;
13523 bail:
13524 dd->qos_shift = 1;
13525 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13526 }
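
/*
 * Sizing example (krcvqs values are illustrative): with num_vls = 8 and
 * every krcvqs[] entry equal to 3, max_by_vl = 3, qpns_per_vl rounds up
 * to 4, n = ilog2(8) = 3 and m = ilog2(4) = 2.  Since n + m = 5 <= 7,
 * and provided the chip has at least 8 * 4 = 32 receive contexts, the
 * rule is programmed and qos_shift becomes n + 1 = 4.
 */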
13527
13528 static void init_rxe(struct hfi1_devdata *dd)
13529 {
13530 /* enable all receive errors */
13531 write_csr(dd, RCV_ERR_MASK, ~0ull);
13532 /* setup QPN map table - start where VL15 context leaves off */
13533 init_qos(
13534 dd,
13535 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13536 /*
13537 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13538 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13539 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13540 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13541 * Max_PayLoad_Size set to its minimum of 128.
13542 *
13543 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13544 * (64 bytes). Max_Payload_Size is possibly modified upward in
13545 * tune_pcie_caps() which is called after this routine.
13546 */
13547 }
13548
13549 static void init_other(struct hfi1_devdata *dd)
13550 {
13551 /* enable all CCE errors */
13552 write_csr(dd, CCE_ERR_MASK, ~0ull);
13553 /* enable *some* Misc errors */
13554 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13555 /* enable all DC errors, except LCB */
13556 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13557 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13558 }
13559
13560 /*
13561  * Fill out the given AU table using the given CU. A CU is defined in terms
13562  * of AUs. The table is an encoding: given the index, how many AUs does that
13563 * represent?
13564 *
13565 * NOTE: Assumes that the register layout is the same for the
13566 * local and remote tables.
13567 */
13568 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13569 u32 csr0to3, u32 csr4to7)
13570 {
13571 write_csr(dd, csr0to3,
13572 0ull <<
13573 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13574 | 1ull <<
13575 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13576 | 2ull * cu <<
13577 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13578 | 4ull * cu <<
13579 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13580 write_csr(dd, csr4to7,
13581 8ull * cu <<
13582 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13583 | 16ull * cu <<
13584 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13585 | 32ull * cu <<
13586 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13587 | 64ull * cu <<
13588 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13589
13590 }
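
/*
 * Resulting table for cu == 1 (illustrative): indices 0 through 7
 * encode 0, 1, 2, 4, 8, 16, 32 and 64 AUs respectively; a larger CU
 * simply scales entries 2 through 7 by that CU value, as the shifts
 * above show.
 */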
13591
13592 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13593 {
13594 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13595 SEND_CM_LOCAL_AU_TABLE4_TO7);
13596 }
13597
13598 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13599 {
13600 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13601 SEND_CM_REMOTE_AU_TABLE4_TO7);
13602 }
13603
13604 static void init_txe(struct hfi1_devdata *dd)
13605 {
13606 int i;
13607
13608 /* enable all PIO, SDMA, general, and Egress errors */
13609 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13610 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13611 write_csr(dd, SEND_ERR_MASK, ~0ull);
13612 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13613
13614 /* enable all per-context and per-SDMA engine errors */
13615 for (i = 0; i < dd->chip_send_contexts; i++)
13616 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13617 for (i = 0; i < dd->chip_sdma_engines; i++)
13618 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13619
13620 /* set the local CU to AU mapping */
13621 assign_local_cm_au_table(dd, dd->vcu);
13622
13623 /*
13624 * Set reasonable default for Credit Return Timer
13625 * Don't set on Simulator - causes it to choke.
13626 */
13627 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13628 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13629 }
13630
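/*
 * Program the job key on the given receive context and its paired send
 * context.  The send-side J_KEY integrity check is enabled only on
 * non-A0 hardware; the receive-side check is always enabled.
 */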
13631 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13632 {
13633 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13634 unsigned sctxt;
13635 int ret = 0;
13636 u64 reg;
13637
13638 if (!rcd || !rcd->sc) {
13639 ret = -EINVAL;
13640 goto done;
13641 }
13642 sctxt = rcd->sc->hw_context;
13643 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13644 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13645 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13646 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13647 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13648 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13649 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13650 /*
13651 * Enable send-side J_KEY integrity check, unless this is A0 h/w
13652 */
13653 if (!is_ax(dd)) {
13654 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13655 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13656 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13657 }
13658
13659 /* Enable J_KEY check on receive context. */
13660 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13661 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13662 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13663 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13664 done:
13665 return ret;
13666 }
13667
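/*
 * Undo hfi1_set_ctxt_jkey(): clear the job key and disable the J_KEY
 * checks on both the send and receive sides of the given context.
 */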
13668 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13669 {
13670 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13671 unsigned sctxt;
13672 int ret = 0;
13673 u64 reg;
13674
13675 if (!rcd || !rcd->sc) {
13676 ret = -EINVAL;
13677 goto done;
13678 }
13679 sctxt = rcd->sc->hw_context;
13680 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13681 /*
13682 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13683 * This check would not have been enabled for A0 h/w, see
13684 * set_ctxt_jkey().
13685 */
13686 if (!is_ax(dd)) {
13687 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13688 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13689 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13690 }
13691 /* Turn off the J_KEY on the receive side */
13692 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13693 done:
13694 return ret;
13695 }
13696
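/*
 * Program the partition key on the send context paired with the given
 * receive context and enable the send-side P_KEY check.
 */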
13697 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13698 {
13699 struct hfi1_ctxtdata *rcd;
13700 unsigned sctxt;
13701 int ret = 0;
13702 u64 reg;
13703
13704 	if (ctxt < dd->num_rcv_contexts) {
13705 		rcd = dd->rcd[ctxt];
13706 	} else {
13707 		ret = -EINVAL;
13708 		goto done;
13709 	}
13710 if (!rcd || !rcd->sc) {
13711 ret = -EINVAL;
13712 goto done;
13713 }
13714 sctxt = rcd->sc->hw_context;
13715 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13716 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13717 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13718 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13719 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13720 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13721 done:
13722 return ret;
13723 }
13724
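/*
 * Undo hfi1_set_ctxt_pkey(): disable the send-side P_KEY check and clear
 * the partition key for the given context.
 */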
13725 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13726 {
13727 struct hfi1_ctxtdata *rcd;
13728 unsigned sctxt;
13729 int ret = 0;
13730 u64 reg;
13731
13732 	if (ctxt < dd->num_rcv_contexts) {
13733 		rcd = dd->rcd[ctxt];
13734 	} else {
13735 		ret = -EINVAL;
13736 		goto done;
13737 	}
13738 if (!rcd || !rcd->sc) {
13739 ret = -EINVAL;
13740 goto done;
13741 }
13742 sctxt = rcd->sc->hw_context;
13743 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13744 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13745 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13746 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13747 done:
13748 return ret;
13749 }
13750
13751 /*
13752  * Start doing the clean up of the chip.  Our clean up happens in multiple
13753 * stages and this is just the first.
13754 */
13755 void hfi1_start_cleanup(struct hfi1_devdata *dd)
13756 {
13757 free_cntrs(dd);
13758 free_rcverr(dd);
13759 clean_up_interrupts(dd);
13760 }
13761
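/*
 * Strip the HFI index bit from the GUID: devices on the same ASIC share
 * the remaining "base" GUID, which asic_should_init() below uses to find
 * its peer device.
 */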
13762 #define HFI_BASE_GUID(dev) \
13763 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13764
13765 /*
13766 * Certain chip functions need to be initialized only once per asic
13767 * instead of per-device. This function finds the peer device and
13768 * checks whether that chip initialization needs to be done by this
13769 * device.
13770 */
13771 static void asic_should_init(struct hfi1_devdata *dd)
13772 {
13773 unsigned long flags;
13774 struct hfi1_devdata *tmp, *peer = NULL;
13775
13776 spin_lock_irqsave(&hfi1_devs_lock, flags);
13777 /* Find our peer device */
13778 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13779 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13780 dd->unit != tmp->unit) {
13781 peer = tmp;
13782 break;
13783 }
13784 }
13785
13786 /*
13787 * "Claim" the ASIC for initialization if it hasn't been
13788 	 * "claimed" yet.
13789 */
13790 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13791 dd->flags |= HFI1_DO_INIT_ASIC;
13792 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13793 }
13794
13795 /*
13796 * Set dd->boardname. Use a generic name if a name is not returned from
13797 * EFI variable space.
13798 *
13799 * Return 0 on success, -ENOMEM if space could not be allocated.
13800 */
13801 static int obtain_boardname(struct hfi1_devdata *dd)
13802 {
13803 /* generic board description */
13804 const char generic[] =
13805 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13806 unsigned long size;
13807 int ret;
13808
13809 ret = read_hfi1_efi_var(dd, "description", &size,
13810 (void **)&dd->boardname);
13811 if (ret) {
13812 dd_dev_err(dd, "Board description not found\n");
13813 /* use generic description */
13814 dd->boardname = kstrdup(generic, GFP_KERNEL);
13815 if (!dd->boardname)
13816 return -ENOMEM;
13817 }
13818 return 0;
13819 }
13820
13821 /**
13822  * hfi1_init_dd - Allocate and initialize the device structure for the hfi.
13823  * @pdev: the pci_dev for the hfi1_ib device
13824 * @ent: pci_device_id struct for this dev
13825 *
13826 * Also allocates, initializes, and returns the devdata struct for this
13827 * device instance
13828 *
13829 * This is global, and is called directly at init to set up the
13830 * chip-specific function pointers for later use.
13831 */
13832 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13833 const struct pci_device_id *ent)
13834 {
13835 struct hfi1_devdata *dd;
13836 struct hfi1_pportdata *ppd;
13837 u64 reg;
13838 int i, ret;
13839 static const char * const inames[] = { /* implementation names */
13840 "RTL silicon",
13841 "RTL VCS simulation",
13842 "RTL FPGA emulation",
13843 "Functional simulator"
13844 };
13845
13846 dd = hfi1_alloc_devdata(pdev,
13847 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
13848 if (IS_ERR(dd))
13849 goto bail;
13850 ppd = dd->pport;
13851 for (i = 0; i < dd->num_pports; i++, ppd++) {
13852 int vl;
13853 /* init common fields */
13854 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13855 /* DC supports 4 link widths */
13856 ppd->link_width_supported =
13857 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13858 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13859 ppd->link_width_downgrade_supported =
13860 ppd->link_width_supported;
13861 /* start out enabling only 4X */
13862 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13863 ppd->link_width_downgrade_enabled =
13864 ppd->link_width_downgrade_supported;
13865 /* link width active is 0 when link is down */
13866 /* link width downgrade active is 0 when link is down */
13867
13868 		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
13869 		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
13870 hfi1_early_err(&pdev->dev,
13871 "Invalid num_vls %u, using %u VLs\n",
13872 num_vls, HFI1_MAX_VLS_SUPPORTED);
13873 num_vls = HFI1_MAX_VLS_SUPPORTED;
13874 }
13875 ppd->vls_supported = num_vls;
13876 ppd->vls_operational = ppd->vls_supported;
13877 /* Set the default MTU. */
13878 for (vl = 0; vl < num_vls; vl++)
13879 dd->vld[vl].mtu = hfi1_max_mtu;
13880 dd->vld[15].mtu = MAX_MAD_PACKET;
13881 /*
13882 		 * Set the initial values to reasonable defaults; they will be
13883 		 * set for real when the link is up.
13884 */
13885 ppd->lstate = IB_PORT_DOWN;
13886 ppd->overrun_threshold = 0x4;
13887 ppd->phy_error_threshold = 0xf;
13888 ppd->port_crc_mode_enabled = link_crc_mask;
13889 /* initialize supported LTP CRC mode */
13890 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
13891 /* initialize enabled LTP CRC mode */
13892 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
13893 /* start in offline */
13894 ppd->host_link_state = HLS_DN_OFFLINE;
13895 init_vl_arb_caches(ppd);
13896 }
13897
13898 dd->link_default = HLS_DN_POLL;
13899
13900 /*
13901 * Do remaining PCIe setup and save PCIe values in dd.
13902 * Any error printing is already done by the init code.
13903 * On return, we have the chip mapped.
13904 */
13905 ret = hfi1_pcie_ddinit(dd, pdev, ent);
13906 if (ret < 0)
13907 goto bail_free;
13908
13909 /* verify that reads actually work, save revision for reset check */
13910 dd->revision = read_csr(dd, CCE_REVISION);
13911 if (dd->revision == ~(u64)0) {
13912 dd_dev_err(dd, "cannot read chip CSRs\n");
13913 ret = -EINVAL;
13914 goto bail_cleanup;
13915 }
13916 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
13917 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
13918 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
13919 & CCE_REVISION_CHIP_REV_MINOR_MASK;
13920
13921 /* obtain the hardware ID - NOT related to unit, which is a
13922 software enumeration */
13923 reg = read_csr(dd, CCE_REVISION2);
13924 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
13925 & CCE_REVISION2_HFI_ID_MASK;
13926 /* the variable size will remove unwanted bits */
13927 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
13928 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
13929 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
13930 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
13931 (int)dd->irev);
13932
13933 /* speeds the hardware can support */
13934 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
13935 /* speeds allowed to run at */
13936 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
13937 /* give a reasonable active value, will be set on link up */
13938 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
13939
13940 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
13941 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
13942 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
13943 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
13944 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
13945 /* fix up link widths for emulation _p */
13946 ppd = dd->pport;
13947 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
13948 ppd->link_width_supported =
13949 ppd->link_width_enabled =
13950 ppd->link_width_downgrade_supported =
13951 ppd->link_width_downgrade_enabled =
13952 OPA_LINK_WIDTH_1X;
13953 }
13954 	/* ensure num_vls isn't larger than the number of SDMA engines */
13955 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
13956 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
13957 num_vls, dd->chip_sdma_engines);
13958 num_vls = dd->chip_sdma_engines;
13959 ppd->vls_supported = dd->chip_sdma_engines;
13960 }
13961
13962 /*
13963 * Convert the ns parameter to the 64 * cclocks used in the CSR.
13964 * Limit the max if larger than the field holds. If timeout is
13965 * non-zero, then the calculated field will be at least 1.
13966 *
13967 * Must be after icode is set up - the cclock rate depends
13968 * on knowing the hardware being used.
13969 */
13970 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
13971 if (dd->rcv_intr_timeout_csr >
13972 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
13973 dd->rcv_intr_timeout_csr =
13974 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
13975 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
13976 dd->rcv_intr_timeout_csr = 1;
13977
13978 /* needs to be done before we look for the peer device */
13979 read_guid(dd);
13980
13981 /* should this device init the ASIC block? */
13982 asic_should_init(dd);
13983
13984 /* obtain chip sizes, reset chip CSRs */
13985 init_chip(dd);
13986
13987 /* read in the PCIe link speed information */
13988 ret = pcie_speeds(dd);
13989 if (ret)
13990 goto bail_cleanup;
13991
13992 /* read in firmware */
13993 ret = hfi1_firmware_init(dd);
13994 if (ret)
13995 goto bail_cleanup;
13996
13997 /*
13998 * In general, the PCIe Gen3 transition must occur after the
13999 	 * chip has been idled (so it won't initiate any PCIe transactions,
14000 * e.g. an interrupt) and before the driver changes any registers
14001 * (the transition will reset the registers).
14002 *
14003 * In particular, place this call after:
14004 * - init_chip() - the chip will not initiate any PCIe transactions
14005 * - pcie_speeds() - reads the current link speed
14006 * - hfi1_firmware_init() - the needed firmware is ready to be
14007 * downloaded
14008 */
14009 ret = do_pcie_gen3_transition(dd);
14010 if (ret)
14011 goto bail_cleanup;
14012
14013 /* start setting dd values and adjusting CSRs */
14014 init_early_variables(dd);
14015
14016 parse_platform_config(dd);
14017
14018 ret = obtain_boardname(dd);
14019 if (ret)
14020 goto bail_cleanup;
14021
14022 snprintf(dd->boardversion, BOARD_VERS_MAX,
14023 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14024 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14025 (u32)dd->majrev,
14026 (u32)dd->minrev,
14027 (dd->revision >> CCE_REVISION_SW_SHIFT)
14028 & CCE_REVISION_SW_MASK);
14029
14030 ret = set_up_context_variables(dd);
14031 if (ret)
14032 goto bail_cleanup;
14033
14034 /* set initial RXE CSRs */
14035 init_rxe(dd);
14036 /* set initial TXE CSRs */
14037 init_txe(dd);
14038 /* set initial non-RXE, non-TXE CSRs */
14039 init_other(dd);
14040 /* set up KDETH QP prefix in both RX and TX CSRs */
14041 init_kdeth_qp(dd);
14042
14043 /* send contexts must be set up before receive contexts */
14044 ret = init_send_contexts(dd);
14045 if (ret)
14046 goto bail_cleanup;
14047
14048 ret = hfi1_create_ctxts(dd);
14049 if (ret)
14050 goto bail_cleanup;
14051
14052 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14053 /*
14054 * rcd[0] is guaranteed to be valid by this point. Also, all
14055 	 * contexts are using the same value, as per the module parameter.
14056 */
14057 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14058
14059 ret = init_pervl_scs(dd);
14060 if (ret)
14061 goto bail_cleanup;
14062
14063 /* sdma init */
14064 for (i = 0; i < dd->num_pports; ++i) {
14065 ret = sdma_init(dd, i);
14066 if (ret)
14067 goto bail_cleanup;
14068 }
14069
14070 /* use contexts created by hfi1_create_ctxts */
14071 ret = set_up_interrupts(dd);
14072 if (ret)
14073 goto bail_cleanup;
14074
14075 /* set up LCB access - must be after set_up_interrupts() */
14076 init_lcb_access(dd);
14077
14078 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14079 dd->base_guid & 0xFFFFFF);
14080
14081 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14082 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14083 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14084
14085 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14086 if (ret)
14087 goto bail_clear_intr;
14088 check_fabric_firmware_versions(dd);
14089
14090 thermal_init(dd);
14091
14092 ret = init_cntrs(dd);
14093 if (ret)
14094 goto bail_clear_intr;
14095
14096 ret = init_rcverr(dd);
14097 if (ret)
14098 goto bail_free_cntrs;
14099
14100 ret = eprom_init(dd);
14101 if (ret)
14102 goto bail_free_rcverr;
14103
14104 goto bail;
14105
14106 bail_free_rcverr:
14107 free_rcverr(dd);
14108 bail_free_cntrs:
14109 free_cntrs(dd);
14110 bail_clear_intr:
14111 clean_up_interrupts(dd);
14112 bail_cleanup:
14113 hfi1_pcie_ddcleanup(dd);
14114 bail_free:
14115 hfi1_free_devdata(dd);
14116 dd = ERR_PTR(ret);
14117 bail:
14118 return dd;
14119 }
14120
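/*
 * Compute the extra egress cycles a packet of dw_len dwords needs at the
 * desired static rate compared to the port's current egress rate.  The
 * result feeds the PBC static rate control count; 0 means no throttling.
 */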
14121 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14122 u32 dw_len)
14123 {
14124 u32 delta_cycles;
14125 u32 current_egress_rate = ppd->current_egress_rate;
14126 /* rates here are in units of 10^6 bits/sec */
14127
14128 if (desired_egress_rate == -1)
14129 return 0; /* shouldn't happen */
14130
14131 if (desired_egress_rate >= current_egress_rate)
14132 		return 0; /* we can't help it go faster, only slower */
14133
14134 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14135 egress_cycles(dw_len * 4, current_egress_rate);
14136
14137 return (u16)delta_cycles;
14138 }
14139
14141 /**
14142 * create_pbc - build a pbc for transmission
14143  * @flags: special case flags or-ed into the built pbc
14144  * @srate_mbs: static rate in Mb/s
14145  * @vl: virtual lane
14146  * @dw_len: dword length (header words + data words + pbc words)
14147 *
14148 * Create a PBC with the given flags, rate, VL, and length.
14149 *
14150 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14151 * for verbs, which does not use this PSM feature. The lone other caller
14152 * is for the diagnostic interface which calls this if the user does not
14153 * supply their own PBC.
14154 */
14155 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14156 u32 dw_len)
14157 {
14158 u64 pbc, delay = 0;
14159
14160 if (unlikely(srate_mbs))
14161 delay = delay_cycles(ppd, srate_mbs, dw_len);
14162
14163 pbc = flags
14164 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14165 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14166 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14167 | (dw_len & PBC_LENGTH_DWS_MASK)
14168 << PBC_LENGTH_DWS_SHIFT;
14169
14170 return pbc;
14171 }
14172
14173 #define SBUS_THERMAL 0x4f
14174 #define SBUS_THERM_MONITOR_MODE 0x1
14175
14176 #define THERM_FAILURE(dd, ret, reason) \
14177 dd_dev_err((dd), \
14178 "Thermal sensor initialization failed: %s (%d)\n", \
14179 (reason), (ret))
14180
14181 /*
14182 * Initialize the Avago Thermal sensor.
14183 *
14184  * After initialization, enable polling of the thermal sensor through the
14185  * SBus interface.  For this to work, the SBus Master firmware has to be
14186  * loaded, because the HW polling logic uses SBus interrupts, which are
14187  * not supported by the default firmware.  Otherwise, no data will be
14188  * returned through the ASIC_STS_THERM CSR.
14190 */
14191 static int thermal_init(struct hfi1_devdata *dd)
14192 {
14193 int ret = 0;
14194
14195 if (dd->icode != ICODE_RTL_SILICON ||
14196 !(dd->flags & HFI1_DO_INIT_ASIC))
14197 return ret;
14198
14199 acquire_hw_mutex(dd);
14200 dd_dev_info(dd, "Initializing thermal sensor\n");
14201 /* Disable polling of thermal readings */
14202 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14203 msleep(100);
14204 /* Thermal Sensor Initialization */
14205 /* Step 1: Reset the Thermal SBus Receiver */
14206 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14207 RESET_SBUS_RECEIVER, 0);
14208 if (ret) {
14209 THERM_FAILURE(dd, ret, "Bus Reset");
14210 goto done;
14211 }
14212 /* Step 2: Set Reset bit in Thermal block */
14213 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14214 WRITE_SBUS_RECEIVER, 0x1);
14215 if (ret) {
14216 THERM_FAILURE(dd, ret, "Therm Block Reset");
14217 goto done;
14218 }
14219 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14220 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14221 WRITE_SBUS_RECEIVER, 0x32);
14222 if (ret) {
14223 THERM_FAILURE(dd, ret, "Write Clock Div");
14224 goto done;
14225 }
14226 /* Step 4: Select temperature mode */
14227 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14228 WRITE_SBUS_RECEIVER,
14229 SBUS_THERM_MONITOR_MODE);
14230 if (ret) {
14231 THERM_FAILURE(dd, ret, "Write Mode Sel");
14232 goto done;
14233 }
14234 /* Step 5: De-assert block reset and start conversion */
14235 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14236 WRITE_SBUS_RECEIVER, 0x2);
14237 if (ret) {
14238 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14239 goto done;
14240 }
14241 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14242 msleep(22);
14243
14244 /* Enable polling of thermal readings */
14245 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14246 done:
14247 release_hw_mutex(dd);
14248 return ret;
14249 }
14250
14251 static void handle_temp_err(struct hfi1_devdata *dd)
14252 {
14253 struct hfi1_pportdata *ppd = &dd->pport[0];
14254 /*
14255 * Thermal Critical Interrupt
14256 * Put the device into forced freeze mode, take link down to
14257 * offline, and put DC into reset.
14258 */
14259 dd_dev_emerg(dd,
14260 "Critical temperature reached! Forcing device into freeze mode!\n");
14261 dd->flags |= HFI1_FORCED_FREEZE;
14262 	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
14263 /*
14264 * Shut DC down as much and as quickly as possible.
14265 *
14266 * Step 1: Take the link down to OFFLINE. This will cause the
14267 * 8051 to put the Serdes in reset. However, we don't want to
14268 * go through the entire link state machine since we want to
14269 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14270 	 * shut down ASAP.  Furthermore, this is not a graceful shutdown
14271 * Code below is almost the same as quiet_serdes() but avoids
14272 * all the extra work and the sleeps.
14273 */
14274 ppd->driver_link_ready = 0;
14275 ppd->link_enabled = 0;
14276 set_physical_link_state(dd, PLS_OFFLINE |
14277 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14278 /*
14279 * Step 2: Shutdown LCB and 8051
14280 * After shutdown, do not restore DC_CFG_RESET value.
14281 */
14282 dc_shutdown(dd);
14283 }