/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
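
/* Soft-reset sequence: enable BIST, park a known pattern (0x1234) in the
 * SLI_SCRATCH1 CSR, then pulse the CIU soft-reset bit. A successful reset
 * clears the scratch register, so reading back 0x1234 afterwards means the
 * chip never actually went through reset.
 */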
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
	octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);

	lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

	/* Make sure that the reset is written before starting the timer */
	mmiowb();

	/* Wait for 100ms as Octeon resets */
	mdelay(100);

	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	return 0;
}
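
/* In the standard PCIe Device Control/Status dword, status bits 18-19
 * (0x000c0000) latch detected fatal and unsupported-request errors, while
 * control bits 0-3 (0xf) enable correctable, non-fatal, fatal and UR error
 * reporting; the check and the write below follow that layout.
 */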
void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
{
	u32 val;

	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
	if (val & 0x000c0000) {
		dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
			val);
	}

	val |= 0xf; /* Enable Link error reporting */

	dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n");
	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
}
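
/* MPS lives in bits 7:5 of the PCIe Device Control register and encodes a
 * payload of (128 << mps) bytes, so 0 = 128B, 1 = 256B, and so on.
 * PCIE_MPS_DEFAULT keeps whatever value the platform negotiated; any other
 * value is written to config space and mirrored into DPI_SLI_PRT0_CFG below.
 */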
void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
			       enum octeon_pcie_mps mps)
{
	u32 val;
	u64 r64;

	/* Read config register for MPS */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

	if (mps == PCIE_MPS_DEFAULT) {
		mps = ((val & (0x7 << 5)) >> 5);
	} else {
		val &= ~(0x7 << 5); /* Turn off any MPS bits */
		val |= (mps << 5);  /* Set MPS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= (mps << 4);
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}
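
/* MRRS is the same idea one field over: bits 14:12 of Device Control encode a
 * maximum read request of (128 << mrrs) bytes. The value is propagated to two
 * on-chip mirrors, SLI_S2M_PORT0_CTL and DPI_SLI_PRT0_CFG, so DMA reads
 * issued by the chip honor the same limit.
 */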
void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
				enum octeon_pcie_mrrs mrrs)
{
	u32 val;
	u64 r64;

	/* Read config register for MRRS */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

	if (mrrs == PCIE_MRRS_DEFAULT) {
		mrrs = ((val & (0x7 << 12)) >> 12);
	} else {
		val &= ~(0x7 << 12); /* Turn off any MRRS bits */
		val |= (mrrs << 12); /* Set MRRS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
	r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
	r64 |= mrrs;
	octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);

	/* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= mrrs;
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}
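
/* The multiplier field is applied to a 50 MHz reference, so a factor of 12
 * read from MIO_RST_BOOT, for example, yields a 600 MHz coprocessor clock.
 */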
u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
{
	/* Bits 29:24 of MIO_RST_BOOT hold the ref. clock multiplier
	 * for the SLI block.
	 */
	return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
}
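
/* Worked example of the tick conversion below, assuming a 600 MHz SLI clock:
 * 600 clocks/us * 1000 = 600000 clocks/ms; / 1024 = 585 OQ ticks/ms; for a
 * time_intr_in_us of 100, 585 * 100 / 1000 = 58 ticks. The multiply-first
 * ordering keeps every intermediate value integral.
 */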
u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
			    u32 time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);

	/* Core clock per us / oq ticks will be fractional. To avoid that
	 * we use the method below.
	 */

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/* time_intr is in microseconds. The next 2 steps give the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;

	return oqticks_per_us;
}
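
/* Several registers below pack one 2-bit PCIe-port field per ring.
 * Multiplying the port number by 0x5555555555555555ULL (binary 01 repeated)
 * replicates a port value of 0 or 1 into every 2-bit slot in a single write.
 */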
void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
{
	/* Select Round-Robin Arb, ES, RO, NS for Input Queues */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
			 CN6XXX_INPUT_CTL_MASK);

	/* Instruction Read Size - Max 4 instructions per PCIE Read */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
			   0xFFFFFFFFFFFFFFFFULL);

	/* Select PCIE Port for all Input rings. */
	octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
			   (oct->pcie_port * 0x5555555555555555ULL));
}
static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
	u64 pktctl;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

	if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
		/* Disable RING_EN if only up to 4 rings are used. */
		pktctl &= ~(1 << 4);
	else
		pktctl |= (1 << 4);

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
		pktctl |= 0xF;
	else
		/* Disable per-port backpressure. */
		pktctl &= ~0xF;
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}
void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
{
	u32 time_threshold;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* Select PCI-E Port for all Output queues */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
			   (oct->pcie_port * 0x5555555555555555ULL));

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
	} else {
		/* Set Output queue watermark to 0 to disable backpressure */
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
	}

	/* Select Info Ptr for length & data */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);

	/* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);

	/* Select ES, RO, NS setting from register for Output Queue Packet
	 * Address
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);

	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output
	 * Queue ScatterList
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);

	/* ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
#ifdef __BIG_ENDIAN_BITFIELD
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
			   0x5555555555555555ULL);
#else
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
#endif

	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
			   0x5555555555555555ULL);

	/* Set up interrupt packet and time threshold */
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
			 (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
	time_threshold = lio_cn6xxx_get_oq_ticks(oct, (u32)
			 CFG_GET_OQ_INTR_TIME(cn6xxx->conf));

	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
}
static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
{
	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
	lio_cn6xxx_enable_error_reporting(oct);

	lio_cn6xxx_setup_global_input_regs(oct);
	lio_cn66xx_setup_pkt_ctl_regs(oct);
	lio_cn6xxx_setup_global_output_regs(oct);

	/* Default error timeout value should be 0x200000 to avoid a host
	 * hang when an invalid register is read
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);

	return 0;
}
void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
	octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr for this
	 * queue
	 */
	iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio[0].hw_addr
			   + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter
	 * (used in flush_iq calculation)
	 */
	iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
}
static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	lio_cn6xxx_setup_iq_regs(oct, iq_no);

	/* Backpressure for this queue - WMARK set to all F's. This effectively
	 * disables the backpressure mechanism.
	 */
	octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
			   (0xFFFFFFFFULL << 32));
}
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	u32 intr;
	struct octeon_droq *droq = oct->droq[oq_no];

	octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 (droq->buffer_size | (OCT_RH_SIZE << 16)));

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);

	/* Enable this output queue to generate Packet Timer Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);

	/* Enable this output queue to generate Packet Count Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}
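
/* Enabling queues is three read-modify-write cycles: SLI_PKT_INSTR_SIZE marks
 * which rings use 64-byte instructions (the iq64B mask), SLI_PKT_INSTR_ENB
 * turns on the input rings, and SLI_PKT_OUT_ENB turns on the output rings.
 */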
void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
	u32 mask;

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
	mask |= oct->io_qmask.iq64B;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask |= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask |= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
}
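
/* The disable path XORs the enable register with io_qmask, which clears
 * exactly the bits this driver set during enable. It then polls
 * SLI_PORT_IN_RST_* until the hardware reports the rings in reset, rings
 * every doorbell to flush stale credits, and acknowledges any pending
 * count/timer interrupts by writing the observed bits back.
 */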
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
	u32 mask, i, loop = HZ;
	u32 d32;

	/* Reset the Enable bits for Input Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask ^= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	/* Wait until hardware indicates that the queues are out of reset. */
	mask = (u32)oct->io_qmask.iq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Input queue. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & (1ULL << i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
	}

	/* Reset the Enable bits for Output Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask ^= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	/* Wait until hardware indicates that the queues are out of reset. */
	loop = HZ;
	mask = (u32)oct->io_qmask.oq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Output queue. */
	/* for (i = 0; i < oct->num_oqs; i++) { */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & (1ULL << i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));

		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
	}

	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);

	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}
void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
{
	u32 i;

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & (1ULL << i)))
			continue;
		oct->fn_list.setup_iq_regs(oct, i);
	}

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & (1ULL << i)))
			continue;
		oct->fn_list.setup_oq_regs(oct, i);
	}

	oct->fn_list.setup_device_regs(oct);

	oct->fn_list.enable_interrupt(oct->chip);

	oct->fn_list.enable_io_queues(oct);

	/* for (i = 0; i < oct->num_oqs; i++) { */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & (1ULL << i)))
			continue;
		writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
	}
}
void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
			       u64 core_addr,
			       u32 idx,
			       int valid)
{
	u64 bar1;

	if (valid == 0) {
		bar1 = lio_pci_readq(oct,
				     CN6XXX_BAR1_REG(idx, oct->pcie_port));
		lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
			       CN6XXX_BAR1_REG(idx, oct->pcie_port));
		bar1 = lio_pci_readq(oct,
				     CN6XXX_BAR1_REG(idx, oct->pcie_port));
		return;
	}

	/* Bits 17:4 of the PCI_BAR1_INDEXx stores bits 35:22 of
	 * the Core Addr
	 */
	lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
		       CN6XXX_BAR1_REG(idx, oct->pcie_port));

	bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
			       u32 idx,
			       u32 mask)
{
	lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
	return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
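
/* Rollover example for the read-index math below: with
 * reset_instr_cnt = 0xfffffff0 and a current counter of 0x10, the counter
 * has wrapped, so new_idx becomes 0x10 + (0xffffffff - 0xfffffff0) + 1 = 0x20
 * instructions issued since init; the final modulo folds that into a ring
 * index.
 */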
static u32 lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
	u32 new_idx = readl(iq->inst_cnt_reg);

	/* The new instr cnt reg is a 32-bit counter that can roll over. We
	 * have noted the counter's initial value at init time into
	 * reset_instr_cnt.
	 */
	if (iq->reset_instr_cnt < new_idx)
		new_idx -= iq->reset_instr_cnt;
	else
		new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;

	/* Modulo of the new index with the IQ size will give us
	 * the new index.
	 */
	new_idx %= iq->max_count;

	return new_idx;
}
void lio_cn6xxx_enable_interrupt(void *chip)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
	u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;

	/* Enable Interrupt */
	writeq(mask, cn6xxx->intr_enb_reg64);
}
void lio_cn6xxx_disable_interrupt(void *chip)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

	/* Disable Interrupts */
	writeq(0, cn6xxx->intr_enb_reg64);

	/* Make sure interrupts are really disabled */
	mmiowb();
}
static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
{
	/* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register
	 * to determine the PCIE port #
	 */
	oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;

	dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}
static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
	dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
		CVM_CAST64(intr64));
}
static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
	struct octeon_droq *droq;
	u32 oq_no;
	u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
	u32 droq_cnt_enb, droq_cnt_mask;

	droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	droq_mask = droq_cnt_mask & droq_cnt_enb;

	droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	droq_mask |= (droq_time_mask & droq_int_enb);

	droq_mask &= oct->io_qmask.oq;

	oct->droq_intr = 0;

	/* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
	for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
		if (!(droq_mask & (1ULL << oq_no)))
			continue;

		droq = oct->droq[oq_no];
		pkt_count = octeon_droq_check_hw_for_pkts(droq);
		if (pkt_count) {
			oct->droq_intr |= (1ULL << oq_no);
			if (droq->ops.poll_mode) {
				u32 value;
				u32 reg;

				struct octeon_cn6xxx *cn6xxx =
					(struct octeon_cn6xxx *)oct->chip;

				/* disable interrupts for this droq */
				spin_lock(&cn6xxx->lock_for_droq_int_enb_reg);
				reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);
				reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);

				/* Ensure that the enable register is written.
				 */
				mmiowb();
				spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
			}
		}
	}

	droq_time_mask &= oct->io_qmask.oq;
	droq_cnt_mask &= oct->io_qmask.oq;

	/* Reset the PKT_CNT/TIME_INT registers. */
	if (droq_time_mask)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);

	if (droq_cnt_mask)	/* reset PKT_CNT register:66xx */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);

	return 0;
}
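
/* Top-level ISR body: read the interrupt summary register, bail out on 0
 * (not ours) or all-ones (PCI read failure), dispatch by cause bit, then
 * write the observed bits back to SLI_INT_SUM64 to clear them.
 */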
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 intr64;

	intr64 = readq(cn6xxx->intr_sum_reg64);

	/* If our device has interrupted, then proceed.
	 * Also check for all f's if interrupt was triggered on an error
	 * and the PCI read fails.
	 */
	if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
		return IRQ_NONE;

	oct->int_status = 0;

	if (intr64 & CN6XXX_INTR_ERR)
		lio_cn6xxx_process_pcie_error_intr(oct, intr64);

	if (intr64 & CN6XXX_INTR_PKT_DATA) {
		lio_cn6xxx_process_droq_intr_regs(oct);
		oct->int_status |= OCT_DEV_INTR_PKT_DATA;
	}

	if (intr64 & CN6XXX_INTR_DMA0_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;

	if (intr64 & CN6XXX_INTR_DMA1_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	writeq(intr64, cn6xxx->intr_sum_reg64);

	return IRQ_HANDLED;
}
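
/* BAR0 exposes a set of "window" registers (WIN_WR/RD ADDR, DATA and their
 * HI/LO halves) through which the host indirectly reads and writes Octeon
 * core addresses, the mechanism behind lio_pci_readq()/lio_pci_writeq().
 * This function just caches their mapped addresses in the reg_list.
 */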
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
				  void *chip,
				  struct octeon_reg_list *reg_list)
{
	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

	reg_list->pci_win_wr_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
	reg_list->pci_win_wr_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
	reg_list->pci_win_wr_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);

	reg_list->pci_win_rd_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
	reg_list->pci_win_rd_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
	reg_list->pci_win_rd_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);

	reg_list->pci_win_wr_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
	reg_list->pci_win_wr_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
	reg_list->pci_win_wr_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);

	reg_list->pci_win_rd_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
	reg_list->pci_win_rd_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
	reg_list->pci_win_rd_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);

	lio_cn6xxx_get_pcie_qlmport(oct);

	cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
	cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
	cn6xxx->intr_enb_reg64 =
		bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
}
int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);

	oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
	oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	cn6xxx->conf = (struct octeon_config *)
		       oct_get_config_info(oct, LIO_210SV);
	if (!cn6xxx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	return 0;
}
int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
				    struct octeon_config *conf6xxx)
{
	/* int total_instrs = 0; */

	if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_IQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_INPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_OQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_OUTPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
	    CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
			__func__);
		return 1;
	}

	if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
	    !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
			__func__);
		return 1;
	}

	if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
		dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
			__func__);
		return 1;
	}

	return 0;
}