Commit | Line | Data |
---|---|---|
f21fb3ed RV |
1 | /********************************************************************** |
2 | * Author: Cavium, Inc. | |
3 | * | |
4 | * Contact: support@cavium.com | |
5 | * Please include "LiquidIO" in the subject. | |
6 | * | |
7 | * Copyright (c) 2003-2015 Cavium, Inc. | |
8 | * | |
9 | * This file is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License, Version 2, as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This file is distributed in the hope that it will be useful, but | |
14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty | |
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or | |
16 | * NONINFRINGEMENT. See the GNU General Public License for more | |
17 | * details. | |
18 | * | |
19 | * This file may also be available under a different license from Cavium. | |
20 | * Contact Cavium, Inc. for more information | |
21 | **********************************************************************/ | |
f21fb3ed | 22 | #include <linux/pci.h> |
f21fb3ed | 23 | #include <linux/netdevice.h> |
f21fb3ed RV |
24 | #include "liquidio_common.h" |
25 | #include "octeon_droq.h" | |
26 | #include "octeon_iq.h" | |
27 | #include "response_manager.h" | |
28 | #include "octeon_device.h" | |
f21fb3ed | 29 | #include "octeon_main.h" |
f21fb3ed RV |
30 | #include "cn66xx_regs.h" |
31 | #include "cn66xx_device.h" | |
f21fb3ed RV |
32 | |
/* Soft-reset the Octeon and verify the reset took effect via a
 * scratch-register marker.  Returns 0 on success, 1 on failure.
 */
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
	/* Enable all byte lanes for 64-bit window register writes. */
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

	/* Request built-in self test on reset, and leave a known marker
	 * in SCRATCH1 so we can check afterwards whether the reset
	 * actually cleared the chip state.
	 */
	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
	octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);

	/* Read the reset register first to flush the posted writes,
	 * then trigger the soft reset.
	 */
	lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

	/* make sure that the reset is written before starting timer */
	mmiowb();

	/* Wait for 100ms as Octeon resets. */
	mdelay(100);

	/* If our marker survived in SCRATCH1, the reset did not take
	 * effect.
	 */
	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
	/* Re-enable window write byte lanes (cleared by the reset). */
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	return 0;
}
61 | ||
62 | void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct) | |
63 | { | |
64 | u32 val; | |
65 | ||
66 | pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); | |
1e0d30fe | 67 | if (val & 0x000c0000) { |
f21fb3ed | 68 | dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n", |
1e0d30fe | 69 | val & 0x000c0000); |
f21fb3ed RV |
70 | } |
71 | ||
72 | val |= 0xf; /* Enable Link error reporting */ | |
73 | ||
74 | dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n"); | |
75 | pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val); | |
76 | } | |
77 | ||
78 | void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct, | |
79 | enum octeon_pcie_mps mps) | |
80 | { | |
81 | u32 val; | |
82 | u64 r64; | |
83 | ||
84 | /* Read config register for MPS */ | |
85 | pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); | |
86 | ||
87 | if (mps == PCIE_MPS_DEFAULT) { | |
88 | mps = ((val & (0x7 << 5)) >> 5); | |
89 | } else { | |
90 | val &= ~(0x7 << 5); /* Turn off any MPS bits */ | |
91 | val |= (mps << 5); /* Set MPS */ | |
92 | pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val); | |
93 | } | |
94 | ||
95 | /* Set MPS in DPI_SLI_PRT0_CFG to the same value. */ | |
96 | r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); | |
97 | r64 |= (mps << 4); | |
98 | lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); | |
99 | } | |
100 | ||
101 | void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct, | |
102 | enum octeon_pcie_mrrs mrrs) | |
103 | { | |
104 | u32 val; | |
105 | u64 r64; | |
106 | ||
107 | /* Read config register for MRRS */ | |
108 | pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); | |
109 | ||
110 | if (mrrs == PCIE_MRRS_DEFAULT) { | |
111 | mrrs = ((val & (0x7 << 12)) >> 12); | |
112 | } else { | |
113 | val &= ~(0x7 << 12); /* Turn off any MRRS bits */ | |
114 | val |= (mrrs << 12); /* Set MRRS */ | |
115 | pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val); | |
116 | } | |
117 | ||
118 | /* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */ | |
119 | r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port)); | |
120 | r64 |= mrrs; | |
121 | octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64); | |
122 | ||
123 | /* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */ | |
124 | r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); | |
125 | r64 |= mrrs; | |
126 | lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); | |
127 | } | |
128 | ||
129 | u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct) | |
130 | { | |
131 | /* Bits 29:24 of MIO_RST_BOOT holds the ref. clock multiplier | |
132 | * for SLI. | |
133 | */ | |
134 | return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50; | |
135 | } | |
136 | ||
137 | u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, | |
138 | u32 time_intr_in_us) | |
139 | { | |
140 | /* This gives the SLI clock per microsec */ | |
141 | u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct); | |
142 | ||
143 | /* core clock per us / oq ticks will be fractional. TO avoid that | |
144 | * we use the method below. | |
145 | */ | |
146 | ||
147 | /* This gives the clock cycles per millisecond */ | |
148 | oqticks_per_us *= 1000; | |
149 | ||
150 | /* This gives the oq ticks (1024 core clock cycles) per millisecond */ | |
151 | oqticks_per_us /= 1024; | |
152 | ||
153 | /* time_intr is in microseconds. The next 2 steps gives the oq ticks | |
154 | * corressponding to time_intr. | |
155 | */ | |
156 | oqticks_per_us *= time_intr_in_us; | |
157 | oqticks_per_us /= 1000; | |
158 | ||
159 | return oqticks_per_us; | |
160 | } | |
161 | ||
/* Program the global (all-ring) input queue registers. */
void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
{
	/* Select Round-Robin Arb, ES, RO, NS for Input Queues */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
			 CN6XXX_INPUT_CTL_MASK);

	/* Instruction Read Size - Max 4 instructions per PCIE Read */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
			   0xFFFFFFFFFFFFFFFFULL);

	/* Select PCIE Port for all Input rings.  Multiplying the port
	 * number by the 01-repeating pattern replicates it into every
	 * per-ring 2-bit field.
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
			   (oct->pcie_port * 0x5555555555555555ULL));
}
176 | ||
177 | static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct) | |
178 | { | |
179 | u64 pktctl; | |
180 | ||
181 | struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; | |
182 | ||
183 | pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL); | |
184 | ||
185 | /* 66XX SPECIFIC */ | |
186 | if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4) | |
187 | /* Disable RING_EN if only upto 4 rings are used. */ | |
188 | pktctl &= ~(1 << 4); | |
189 | else | |
190 | pktctl |= (1 << 4); | |
191 | ||
192 | if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) | |
193 | pktctl |= 0xF; | |
194 | else | |
195 | /* Disable per-port backpressure. */ | |
196 | pktctl &= ~0xF; | |
197 | octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl); | |
198 | } | |
199 | ||
/* Program the global (all-ring) output queue registers: PCIe port
 * selection, backpressure watermark, info-pointer mode, endian swap
 * settings, and the interrupt packet/time thresholds.
 */
void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
{
	u32 time_threshold;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* Select PCI-E Port for all Output queues */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
			   (oct->pcie_port * 0x5555555555555555ULL));

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
	} else {
		/* Set Output queue watermark to 0 to disable backpressure */
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
	}

	/* Select Info Ptr for length & data */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);

	/* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);

	/* Select ES, RO, NS setting from register for Output Queue Packet
	 * Address
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);

	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output
	 * Queue ScatterList
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);

	/* ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
#ifdef __BIG_ENDIAN_BITFIELD
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
			   0x5555555555555555ULL);
#else
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
#endif

	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
			   0x5555555555555555ULL);

	/* Set up interrupt packet and time threshold */
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
			 (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
	time_threshold =
		lio_cn6xxx_get_oq_ticks(oct, (u32)
					CFG_GET_OQ_INTR_TIME(cn6xxx->conf));

	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
}
256 | ||
/* One-stop device register setup: PCIe link parameters, error
 * reporting, then the global input/output queue registers.
 * Always returns 0.
 */
static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
{
	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
	lio_cn6xxx_enable_error_reporting(oct);

	lio_cn6xxx_setup_global_input_regs(oct);
	lio_cn66xx_setup_pkt_ctl_regs(oct);
	lio_cn6xxx_setup_global_output_regs(oct);

	/* Default error timeout value should be 0x200000 to avoid host hang
	 * when reads invalid register
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
	return 0;
}
273 | ||
/* Program the per-queue registers for input queue @iq_no and record
 * the doorbell/instruction-count register addresses in the iq struct.
 */
void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
	octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr for this
	 * queue
	 */
	iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio[0].hw_addr
			   + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter
	 * (used in flush_iq calculation)
	 */
	iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
}
300 | ||
/* 66xx-specific IQ setup: common setup plus the per-queue
 * backpressure watermark register.
 */
static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	lio_cn6xxx_setup_iq_regs(oct, iq_no);

	/* Backpressure for this queue - WMARK set to all F's. This effectively
	 * disables the backpressure mechanism.
	 */
	octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
			   (0xFFFFFFFFULL << 32));
}
311 | ||
/* Program the per-queue registers for output queue @oq_no, record the
 * pkts_sent/pkts_credit register addresses, and enable its interrupts.
 */
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	u32 intr;
	struct octeon_droq *droq = oct->droq[oq_no];

	/* Descriptor ring base address and size. */
	octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);

	/* Low 16 bits: buffer size; high 16 bits: response header size. */
	octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 (droq->buffer_size | (OCT_RH_SIZE << 16)));

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);

	/* Enable this output queue to generate Packet Timer Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);

	/* Enable this output queue to generate Packet Count Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}
340 | ||
341 | void lio_cn6xxx_enable_io_queues(struct octeon_device *oct) | |
342 | { | |
343 | u32 mask; | |
344 | ||
345 | mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE); | |
346 | mask |= oct->io_qmask.iq64B; | |
347 | octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask); | |
348 | ||
349 | mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB); | |
350 | mask |= oct->io_qmask.iq; | |
351 | octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask); | |
352 | ||
353 | mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB); | |
354 | mask |= oct->io_qmask.oq; | |
355 | octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask); | |
356 | } | |
357 | ||
358 | void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) | |
359 | { | |
63da8404 RV |
360 | int i; |
361 | u32 mask, loop = HZ; | |
f21fb3ed RV |
362 | u32 d32; |
363 | ||
364 | /* Reset the Enable bits for Input Queues. */ | |
365 | mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB); | |
366 | mask ^= oct->io_qmask.iq; | |
367 | octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask); | |
368 | ||
369 | /* Wait until hardware indicates that the queues are out of reset. */ | |
63da8404 | 370 | mask = (u32)oct->io_qmask.iq; |
f21fb3ed RV |
371 | d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); |
372 | while (((d32 & mask) != mask) && loop--) { | |
373 | d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); | |
374 | schedule_timeout_uninterruptible(1); | |
375 | } | |
376 | ||
377 | /* Reset the doorbell register for each Input queue. */ | |
63da8404 RV |
378 | for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { |
379 | if (!(oct->io_qmask.iq & (1ULL << i))) | |
f21fb3ed RV |
380 | continue; |
381 | octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF); | |
382 | d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i)); | |
383 | } | |
384 | ||
385 | /* Reset the Enable bits for Output Queues. */ | |
386 | mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB); | |
387 | mask ^= oct->io_qmask.oq; | |
388 | octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask); | |
389 | ||
390 | /* Wait until hardware indicates that the queues are out of reset. */ | |
391 | loop = HZ; | |
63da8404 | 392 | mask = (u32)oct->io_qmask.oq; |
f21fb3ed RV |
393 | d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); |
394 | while (((d32 & mask) != mask) && loop--) { | |
395 | d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); | |
396 | schedule_timeout_uninterruptible(1); | |
397 | } | |
398 | ; | |
399 | ||
400 | /* Reset the doorbell register for each Output queue. */ | |
401 | /* for (i = 0; i < oct->num_oqs; i++) { */ | |
63da8404 RV |
402 | for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { |
403 | if (!(oct->io_qmask.oq & (1ULL << i))) | |
f21fb3ed RV |
404 | continue; |
405 | octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF); | |
406 | d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i)); | |
407 | ||
408 | d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i)); | |
409 | octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32); | |
410 | } | |
411 | ||
412 | d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT); | |
413 | if (d32) | |
414 | octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32); | |
415 | ||
416 | d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT); | |
417 | if (d32) | |
418 | octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32); | |
419 | } | |
420 | ||
/* Reprogram all queue and device registers (e.g. after a reset) and
 * restore each active output queue's credit count.
 */
void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
{
	int i;

	/* Reprogram registers for every active input queue. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & (1ULL << i)))
			continue;
		oct->fn_list.setup_iq_regs(oct, i);
	}

	/* Reprogram registers for every active output queue. */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & (1ULL << i)))
			continue;
		oct->fn_list.setup_oq_regs(oct, i);
	}

	oct->fn_list.setup_device_regs(oct);

	oct->fn_list.enable_interrupt(oct->chip);

	oct->fn_list.enable_io_queues(oct);

	/* Hand the full descriptor-ring credit back to each active
	 * output queue.
	 */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & (1ULL << i)))
			continue;
		writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
	}
}
450 | ||
/* Program (or invalidate, if @valid == 0) BAR1 index register @idx to
 * map the Octeon core address @core_addr.
 */
void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
			  u64 core_addr,
			  u32 idx,
			  int valid)
{
	u64 bar1;

	if (valid == 0) {
		/* Clear bit 0 to invalidate this BAR1 index entry. */
		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
		lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
			       CN6XXX_BAR1_REG(idx, oct->pcie_port));
		/* Read back — presumably to flush the posted write. */
		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
		return;
	}

	/* Bits 17:4 of the PCI_BAR1_INDEXx stores bits 35:22 of
	 * the Core Addr
	 */
	lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
		       CN6XXX_BAR1_REG(idx, oct->pcie_port));

	/* Read back — presumably to flush the posted write. */
	bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
475 | ||
/* Write raw value @mask into BAR1 index register @idx. */
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
			       u32 idx,
			       u32 mask)
{
	lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
482 | ||
/* Read BAR1 index register @idx (low 32 bits of the 64-bit value). */
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
	return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
487 | ||
/* Derive the input queue's current read index from the hardware
 * instruction counter, accounting for 32-bit counter roll-over.
 */
u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
	u32 new_idx = readl(iq->inst_cnt_reg);

	/* The new instr cnt reg is a 32-bit counter that can roll over. We have
	 * noted the counter's initial value at init time into
	 * reset_instr_cnt
	 */
	if (iq->reset_instr_cnt < new_idx)
		new_idx -= iq->reset_instr_cnt;
	else
		/* Counter wrapped: distance from the reset value to the
		 * wrap point, plus the part counted after the wrap.
		 */
		new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;

	/* Modulo of the new index with the IQ size will give us
	 * the new index.
	 */
	new_idx %= iq->max_count;

	return new_idx;
}
509 | ||
/* Enable device interrupts: the saved interrupt mask plus DMA0_FORCE. */
void lio_cn6xxx_enable_interrupt(void *chip)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
	u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;

	/* Enable Interrupt */
	writeq(mask, cn6xxx->intr_enb_reg64);
}
518 | ||
/* Disable all device interrupts by clearing the interrupt-enable
 * register, then flush the write.
 */
void lio_cn6xxx_disable_interrupt(void *chip)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

	/* Disable Interrupts */
	writeq(0, cn6xxx->intr_enb_reg64);

	/* make sure interrupts are really disabled */
	mmiowb();
}
529 | ||
/* Read the PCIe port number this device sits on into oct->pcie_port. */
static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
{
	/* CN63xx Pass2 and newer parts implements the SLI_MAC_NUMBER register
	 * to determine the PCIE port #
	 */
	oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;

	dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}
539 | ||
/* Log a PCIe error interrupt; the raw interrupt summary is reported
 * but no recovery is attempted here.
 */
static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
	dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
		CVM_CAST64(intr64));
}
546 | ||
/* Service the per-DROQ packet-count and packet-time interrupts: record
 * which active queues have packets pending in oct->droq_intr, mask
 * further interrupts for poll-mode queues, and acknowledge the
 * interrupt status registers.  Always returns 0.
 */
static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
	struct octeon_droq *droq;
	int oq_no;
	u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
	u32 droq_cnt_enb, droq_cnt_mask;

	/* Queues whose packet-count interrupt is pending and enabled... */
	droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	droq_mask = droq_cnt_mask & droq_cnt_enb;

	/* ...plus queues whose time interrupt is pending and enabled. */
	droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	droq_mask |= (droq_time_mask & droq_int_enb);

	/* Restrict to queues this driver actually uses. */
	droq_mask &= oct->io_qmask.oq;

	oct->droq_intr = 0;

	for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
		if (!(droq_mask & (1ULL << oq_no)))
			continue;

		droq = oct->droq[oq_no];
		pkt_count = octeon_droq_check_hw_for_pkts(droq);
		if (pkt_count) {
			oct->droq_intr |= (1ULL << oq_no);
			if (droq->ops.poll_mode) {
				u32 value;
				u32 reg;

				struct octeon_cn6xxx *cn6xxx =
					(struct octeon_cn6xxx *)oct->chip;

				/* disable interrupts for this droq */
				spin_lock
				(&cn6xxx->lock_for_droq_int_enb_reg);
				reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);
				reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);

				/* Ensure that the enable register is written.
				 */
				mmiowb();

				spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
			}
		}
	}

	droq_time_mask &= oct->io_qmask.oq;
	droq_cnt_mask &= oct->io_qmask.oq;

	/* Reset the PKT_CNT/TIME_INT registers. */
	if (droq_time_mask)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);

	if (droq_cnt_mask)	/* reset PKT_CNT register:66xx */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);

	return 0;
}
615 | ||
/* Top-level interrupt handler: decode the interrupt summary register,
 * dispatch error and packet-data handling, record DMA-force status
 * bits, and acknowledge the interrupts.
 */
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 intr64;

	intr64 = readq(cn6xxx->intr_sum_reg64);

	/* If our device has interrupted, then proceed.
	 * Also check for all f's if interrupt was triggered on an error
	 * and the PCI read fails.
	 */
	if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
		return IRQ_NONE;

	oct->int_status = 0;

	if (intr64 & CN6XXX_INTR_ERR)
		lio_cn6xxx_process_pcie_error_intr(oct, intr64);

	if (intr64 & CN6XXX_INTR_PKT_DATA) {
		lio_cn6xxx_process_droq_intr_regs(oct);
		oct->int_status |= OCT_DEV_INTR_PKT_DATA;
	}

	if (intr64 & CN6XXX_INTR_DMA0_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;

	if (intr64 & CN6XXX_INTR_DMA1_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	writeq(intr64, cn6xxx->intr_sum_reg64);

	return IRQ_HANDLED;
}
652 | ||
/* Fill @reg_list with the mapped BAR0 addresses of the window
 * read/write registers, determine the PCIe port, and record the
 * interrupt summary/enable register addresses in the chip struct.
 */
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
				  void *chip,
				  struct octeon_reg_list *reg_list)
{
	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

	/* Window write address (hi/lo halves plus 64-bit view). */
	reg_list->pci_win_wr_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
	reg_list->pci_win_wr_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
	reg_list->pci_win_wr_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);

	/* Window read address. */
	reg_list->pci_win_rd_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
	reg_list->pci_win_rd_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
	reg_list->pci_win_rd_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);

	/* Window write data. */
	reg_list->pci_win_wr_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
	reg_list->pci_win_wr_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
	reg_list->pci_win_wr_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);

	/* Window read data. */
	reg_list->pci_win_rd_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
	reg_list->pci_win_rd_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
	reg_list->pci_win_rd_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);

	/* Must run before using oct->pcie_port below. */
	lio_cn6xxx_get_pcie_qlmport(oct);

	cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
	cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
	cn6xxx->intr_enb_reg64 =
		bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
}
695 | ||
/* Initialize a CN66XX device: map BARs, install the chip-specific
 * function pointers, set up register addresses, and load the device
 * configuration.  Returns 0 on success, 1 on failure (BARs unmapped
 * on the error paths).
 */
int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);

	/* Chip-specific queue setup entry points. */
	oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
	oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	cn6xxx->conf = (struct octeon_config *)
		       oct_get_config_info(oct, LIO_210SV);
	if (!cn6xxx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	/* Coprocessor clock in Hz (helper returns a MHz-scale value). */
	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	return 0;
}
747 | ||
748 | int lio_validate_cn6xxx_config_info(struct octeon_device *oct, | |
749 | struct octeon_config *conf6xxx) | |
750 | { | |
751 | /* int total_instrs = 0; */ | |
752 | ||
753 | if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) { | |
754 | dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n", | |
755 | __func__, CFG_GET_IQ_MAX_Q(conf6xxx), | |
756 | CN6XXX_MAX_INPUT_QUEUES); | |
757 | return 1; | |
758 | } | |
759 | ||
760 | if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) { | |
761 | dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n", | |
762 | __func__, CFG_GET_OQ_MAX_Q(conf6xxx), | |
763 | CN6XXX_MAX_OUTPUT_QUEUES); | |
764 | return 1; | |
765 | } | |
766 | ||
767 | if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR && | |
768 | CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) { | |
769 | dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n", | |
770 | __func__); | |
771 | return 1; | |
772 | } | |
773 | if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) || | |
774 | !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) { | |
775 | dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n", | |
776 | __func__); | |
777 | return 1; | |
778 | } | |
779 | ||
780 | if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) { | |
781 | dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n", | |
782 | __func__); | |
783 | return 1; | |
784 | } | |
785 | ||
786 | return 0; | |
787 | } |