Commit | Line | Data |
---|---|---|
67620987 AK |
1 | /* |
2 | * This file is based on code from OCTEON SDK by Cavium Networks. | |
80ff0fd3 | 3 | * |
3368c784 | 4 | * Copyright (c) 2003-2010 Cavium Networks |
80ff0fd3 DD |
5 | * |
6 | * This file is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License, Version 2, as | |
8 | * published by the Free Software Foundation. | |
67620987 AK |
9 | */ |
10 | ||
80ff0fd3 DD |
11 | #include <linux/module.h> |
12 | #include <linux/kernel.h> | |
13 | #include <linux/cache.h> | |
3368c784 | 14 | #include <linux/cpumask.h> |
80ff0fd3 | 15 | #include <linux/netdevice.h> |
80ff0fd3 DD |
16 | #include <linux/etherdevice.h> |
17 | #include <linux/ip.h> | |
18 | #include <linux/string.h> | |
19 | #include <linux/prefetch.h> | |
7a2eaf93 | 20 | #include <linux/ratelimit.h> |
3368c784 | 21 | #include <linux/smp.h> |
dc890df0 | 22 | #include <linux/interrupt.h> |
80ff0fd3 DD |
23 | #include <net/dst.h> |
24 | #ifdef CONFIG_XFRM | |
25 | #include <linux/xfrm.h> | |
26 | #include <net/xfrm.h> | |
27 | #endif /* CONFIG_XFRM */ | |
28 | ||
80ff0fd3 DD |
29 | #include <asm/octeon/octeon.h> |
30 | ||
31 | #include "ethernet-defines.h" | |
80ff0fd3 | 32 | #include "ethernet-mem.h" |
3368c784 DD |
33 | #include "ethernet-rx.h" |
34 | #include "octeon-ethernet.h" | |
80ff0fd3 DD |
35 | #include "ethernet-util.h" |
36 | ||
af866496 DD |
37 | #include <asm/octeon/cvmx-helper.h> |
38 | #include <asm/octeon/cvmx-wqe.h> | |
39 | #include <asm/octeon/cvmx-fau.h> | |
40 | #include <asm/octeon/cvmx-pow.h> | |
41 | #include <asm/octeon/cvmx-pip.h> | |
42 | #include <asm/octeon/cvmx-scratch.h> | |
80ff0fd3 | 43 | |
af866496 | 44 | #include <asm/octeon/cvmx-gmxx-defs.h> |
80ff0fd3 | 45 | |
d48f10fc AK |
/* Non-zero once cvm_oct_rx_initialize() has set up the RX groups; the
 * netpoll path checks this before touching oct_rx_group[].
 */
static atomic_t oct_rx_ready = ATOMIC_INIT(0);

/* Per-receive-group state: one IRQ line and one NAPI context per
 * POW/SSO group.  Entry i services group i (irq = OCTEON_IRQ_WORKQ0 + i,
 * assigned in cvm_oct_rx_initialize()).
 */
static struct oct_rx_group {
	int irq;			/* IRQ line for this group */
	int group;			/* POW/SSO group number */
	struct napi_struct napi;	/* NAPI context driving cvm_oct_napi_poll() */
} oct_rx_group[16];
80ff0fd3 | 53 | |
80ff0fd3 | 54 | /** |
ec977c5b | 55 | * cvm_oct_do_interrupt - interrupt handler. |
513ff863 | 56 | * @irq: Interrupt number. |
08712f9d | 57 | * @napi_id: Cookie to identify the NAPI instance. |
ec977c5b DD |
58 | * |
59 | * The interrupt occurs whenever the POW has packets in our group. | |
80ff0fd3 | 60 | * |
80ff0fd3 | 61 | */ |
08712f9d | 62 | static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id) |
80ff0fd3 | 63 | { |
3368c784 | 64 | /* Disable the IRQ and start napi_poll. */ |
513ff863 | 65 | disable_irq_nosync(irq); |
08712f9d | 66 | napi_schedule(napi_id); |
3368c784 DD |
67 | |
68 | return IRQ_HANDLED; | |
80ff0fd3 | 69 | } |
80ff0fd3 DD |
70 | |
/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns Non-zero if the packet can be dropped, zero otherwise.  When
 * non-zero is returned the work entry has already been freed via
 * cvm_oct_free_work(); when zero is returned the packet (possibly with
 * its preamble stripped in place) should continue down the RX path.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	int port;

	/* CN68XX reports the port kind (pknd); older chips report the
	 * input port number directly.
	 */
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		port = work->word0.pip.cn68xx.pknd;
	else
		port = work->word1.cn38xx.ipprt;

	/* NOTE(review): err_code values 10/5/7 are hardware error codes;
	 * presumably 10 = length error and 5/7 = alignment/FCS per the
	 * comments below — confirm against the PIP/IPD manual.
	 */
	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS.  Note these packets still get
		 * counted as frame errors.
		 */
	} else if (work->word2.snoip.err_code == 5 ||
		   work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(port);
		int index = cvmx_helper_get_interface_index_num(port);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			u8 *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			/* Skip over the run of 0x55 preamble bytes at the
			 * start of the buffer, stopping one byte short of
			 * the packet length.
			 */
			while (i < work->word1.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Port received 0xd5 preamble: drop the
				 * i preamble bytes plus the SFD byte
				 * (and 4 bytes of FCS from the length).
				 */
				work->packet_ptr.s.addr += i + 1;
				work->word1.len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/* Port received 0xd preamble: the frame is
				 * misaligned by a nibble, so shift every
				 * byte down 4 bits, pulling the high
				 * nibble from the following byte.
				 */
				work->packet_ptr.s.addr += i;
				work->word1.len -= i + 4;
				for (i = 0; i < work->word1.len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
						   port);
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		/* Any other receive error: log (rate-limited) and drop. */
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   port, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}
151 | ||
942bab48 | 152 | static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget) |
80ff0fd3 | 153 | { |
3368c784 | 154 | const int coreid = cvmx_get_core_num(); |
ec2c398e AO |
155 | u64 old_group_mask; |
156 | u64 old_scratch; | |
3368c784 DD |
157 | int rx_count = 0; |
158 | int did_work_request = 0; | |
159 | int packet_not_copied; | |
80ff0fd3 DD |
160 | |
161 | /* Prefetch cvm_oct_device since we know we need it soon */ | |
162 | prefetch(cvm_oct_device); | |
163 | ||
164 | if (USE_ASYNC_IOBDMA) { | |
165 | /* Save scratch in case userspace is using it */ | |
166 | CVMX_SYNCIOBDMA; | |
167 | old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); | |
168 | } | |
169 | ||
170 | /* Only allow work for our group (and preserve priorities) */ | |
f5cfc8db AK |
171 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { |
172 | old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); | |
173 | cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), | |
942bab48 | 174 | BIT(rx_group->group)); |
f5cfc8db AK |
175 | cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */ |
176 | } else { | |
177 | old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid)); | |
178 | cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), | |
ac05a587 | 179 | (old_group_mask & ~0xFFFFull) | |
942bab48 | 180 | BIT(rx_group->group)); |
f5cfc8db | 181 | } |
80ff0fd3 | 182 | |
3368c784 | 183 | if (USE_ASYNC_IOBDMA) { |
80ff0fd3 | 184 | cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); |
3368c784 DD |
185 | did_work_request = 1; |
186 | } | |
80ff0fd3 | 187 | |
3368c784 | 188 | while (rx_count < budget) { |
80ff0fd3 | 189 | struct sk_buff *skb = NULL; |
3368c784 | 190 | struct sk_buff **pskb = NULL; |
80ff0fd3 DD |
191 | int skb_in_hw; |
192 | cvmx_wqe_t *work; | |
f8023da8 | 193 | int port; |
80ff0fd3 | 194 | |
3368c784 | 195 | if (USE_ASYNC_IOBDMA && did_work_request) |
80ff0fd3 | 196 | work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); |
3368c784 DD |
197 | else |
198 | work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT); | |
199 | ||
80ff0fd3 | 200 | prefetch(work); |
3368c784 | 201 | did_work_request = 0; |
e8a4e572 | 202 | if (!work) { |
bcbb1396 AK |
203 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { |
204 | cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS, | |
942bab48 | 205 | BIT(rx_group->group)); |
bcbb1396 | 206 | cvmx_write_csr(CVMX_SSO_WQ_INT, |
942bab48 | 207 | BIT(rx_group->group)); |
bcbb1396 AK |
208 | } else { |
209 | union cvmx_pow_wq_int wq_int; | |
85fdebc3 | 210 | |
bcbb1396 | 211 | wq_int.u64 = 0; |
942bab48 AK |
212 | wq_int.s.iq_dis = BIT(rx_group->group); |
213 | wq_int.s.wq_int = BIT(rx_group->group); | |
bcbb1396 AK |
214 | cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64); |
215 | } | |
80ff0fd3 | 216 | break; |
3368c784 | 217 | } |
18f6970b LGL |
218 | pskb = (struct sk_buff **) |
219 | (cvm_oct_get_buffer_ptr(work->packet_ptr) - | |
f884625f | 220 | sizeof(void *)); |
3368c784 | 221 | prefetch(pskb); |
80ff0fd3 | 222 | |
3368c784 | 223 | if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) { |
f884625f LB |
224 | cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, |
225 | CVMX_POW_NO_WAIT); | |
3368c784 DD |
226 | did_work_request = 1; |
227 | } | |
da029d0c | 228 | rx_count++; |
80ff0fd3 | 229 | |
3a990f39 | 230 | skb_in_hw = work->word2.s.bufs == 1; |
80ff0fd3 | 231 | if (likely(skb_in_hw)) { |
3368c784 | 232 | skb = *pskb; |
80ff0fd3 DD |
233 | prefetch(&skb->head); |
234 | prefetch(&skb->len); | |
235 | } | |
f8023da8 JH |
236 | |
237 | if (octeon_has_feature(OCTEON_FEATURE_PKND)) | |
238 | port = work->word0.pip.cn68xx.pknd; | |
239 | else | |
240 | port = work->word1.cn38xx.ipprt; | |
241 | ||
242 | prefetch(cvm_oct_device[port]); | |
80ff0fd3 | 243 | |
80ff0fd3 DD |
244 | /* Immediately throw away all packets with receive errors */ |
245 | if (unlikely(work->word2.snoip.rcv_error)) { | |
246 | if (cvm_oct_check_rcv_error(work)) | |
247 | continue; | |
248 | } | |
249 | ||
250 | /* | |
251 | * We can only use the zero copy path if skbuffs are | |
252 | * in the FPA pool and the packet fits in a single | |
253 | * buffer. | |
254 | */ | |
255 | if (likely(skb_in_hw)) { | |
f884625f LB |
256 | skb->data = skb->head + work->packet_ptr.s.addr - |
257 | cvmx_ptr_to_phys(skb->head); | |
80ff0fd3 | 258 | prefetch(skb->data); |
f8023da8 | 259 | skb->len = work->word1.len; |
80ff0fd3 DD |
260 | skb_set_tail_pointer(skb, skb->len); |
261 | packet_not_copied = 1; | |
262 | } else { | |
80ff0fd3 DD |
263 | /* |
264 | * We have to copy the packet. First allocate | |
265 | * an skbuff for it. | |
266 | */ | |
f8023da8 | 267 | skb = dev_alloc_skb(work->word1.len); |
80ff0fd3 | 268 | if (!skb) { |
80ff0fd3 DD |
269 | cvm_oct_free_work(work); |
270 | continue; | |
271 | } | |
272 | ||
273 | /* | |
274 | * Check if we've received a packet that was | |
6568a234 | 275 | * entirely stored in the work entry. |
80ff0fd3 DD |
276 | */ |
277 | if (unlikely(work->word2.s.bufs == 0)) { | |
ec2c398e | 278 | u8 *ptr = work->packet_data; |
80ff0fd3 DD |
279 | |
280 | if (likely(!work->word2.s.not_IP)) { | |
281 | /* | |
282 | * The beginning of the packet | |
283 | * moves for IP packets. | |
284 | */ | |
285 | if (work->word2.s.is_v6) | |
286 | ptr += 2; | |
287 | else | |
288 | ptr += 6; | |
289 | } | |
f8023da8 JH |
290 | memcpy(skb_put(skb, work->word1.len), ptr, |
291 | work->word1.len); | |
80ff0fd3 DD |
292 | /* No packet buffers to free */ |
293 | } else { | |
294 | int segments = work->word2.s.bufs; | |
f884625f LB |
295 | union cvmx_buf_ptr segment_ptr = |
296 | work->packet_ptr; | |
f8023da8 | 297 | int len = work->word1.len; |
80ff0fd3 DD |
298 | |
299 | while (segments--) { | |
300 | union cvmx_buf_ptr next_ptr = | |
18f6970b LGL |
301 | *(union cvmx_buf_ptr *) |
302 | cvmx_phys_to_ptr( | |
303 | segment_ptr.s.addr - 8); | |
6568a234 | 304 | |
80ff0fd3 DD |
305 | /* |
306 | * Octeon Errata PKI-100: The segment size is | |
307 | * wrong. Until it is fixed, calculate the | |
308 | * segment size based on the packet pool | |
309 | * buffer size. When it is fixed, the | |
310 | * following line should be replaced with this | |
311 | * one: int segment_size = | |
312 | * segment_ptr.s.size; | |
313 | */ | |
f884625f LB |
314 | int segment_size = |
315 | CVMX_FPA_PACKET_POOL_SIZE - | |
316 | (segment_ptr.s.addr - | |
317 | (((segment_ptr.s.addr >> 7) - | |
318 | segment_ptr.s.back) << 7)); | |
6568a234 DD |
319 | /* |
320 | * Don't copy more than what | |
321 | * is left in the packet. | |
322 | */ | |
80ff0fd3 DD |
323 | if (segment_size > len) |
324 | segment_size = len; | |
325 | /* Copy the data into the packet */ | |
326 | memcpy(skb_put(skb, segment_size), | |
18f6970b LGL |
327 | cvmx_phys_to_ptr( |
328 | segment_ptr.s.addr), | |
80ff0fd3 | 329 | segment_size); |
80ff0fd3 DD |
330 | len -= segment_size; |
331 | segment_ptr = next_ptr; | |
332 | } | |
333 | } | |
334 | packet_not_copied = 0; | |
335 | } | |
f8023da8 JH |
336 | if (likely((port < TOTAL_NUMBER_OF_PORTS) && |
337 | cvm_oct_device[port])) { | |
338 | struct net_device *dev = cvm_oct_device[port]; | |
80ff0fd3 DD |
339 | struct octeon_ethernet *priv = netdev_priv(dev); |
340 | ||
6568a234 DD |
341 | /* |
342 | * Only accept packets for devices that are | |
343 | * currently up. | |
344 | */ | |
80ff0fd3 DD |
345 | if (likely(dev->flags & IFF_UP)) { |
346 | skb->protocol = eth_type_trans(skb, dev); | |
347 | skb->dev = dev; | |
348 | ||
f884625f LB |
349 | if (unlikely(work->word2.s.not_IP || |
350 | work->word2.s.IP_exc || | |
351 | work->word2.s.L4_error || | |
352 | !work->word2.s.tcp_or_udp)) | |
80ff0fd3 DD |
353 | skb->ip_summed = CHECKSUM_NONE; |
354 | else | |
355 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
356 | ||
357 | /* Increment RX stats for virtual ports */ | |
f8023da8 | 358 | if (port >= CVMX_PIP_NUM_INPUT_PORTS) { |
dcf24f77 AK |
359 | priv->stats.rx_packets++; |
360 | priv->stats.rx_bytes += skb->len; | |
80ff0fd3 DD |
361 | } |
362 | netif_receive_skb(skb); | |
363 | } else { | |
364 | /* | |
b4ede792 LGL |
365 | * Drop any packet received for a device that |
366 | * isn't up. | |
367 | */ | |
dcf24f77 | 368 | priv->stats.rx_dropped++; |
80ff0fd3 DD |
369 | dev_kfree_skb_irq(skb); |
370 | } | |
371 | } else { | |
372 | /* | |
373 | * Drop any packet received for a device that | |
374 | * doesn't exist. | |
375 | */ | |
7a2eaf93 | 376 | printk_ratelimited("Port %d not controlled by Linux, packet dropped\n", |
ac05a587 | 377 | port); |
80ff0fd3 DD |
378 | dev_kfree_skb_irq(skb); |
379 | } | |
380 | /* | |
381 | * Check to see if the skbuff and work share the same | |
382 | * packet buffer. | |
383 | */ | |
3a990f39 | 384 | if (likely(packet_not_copied)) { |
80ff0fd3 DD |
385 | /* |
386 | * This buffer needs to be replaced, increment | |
387 | * the number of buffers we need to free by | |
388 | * one. | |
389 | */ | |
390 | cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, | |
391 | 1); | |
392 | ||
c93b0e75 | 393 | cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1); |
80ff0fd3 DD |
394 | } else { |
395 | cvm_oct_free_work(work); | |
396 | } | |
397 | } | |
80ff0fd3 | 398 | /* Restore the original POW group mask */ |
f5cfc8db AK |
399 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { |
400 | cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask); | |
401 | cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */ | |
402 | } else { | |
403 | cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); | |
404 | } | |
405 | ||
80ff0fd3 DD |
406 | if (USE_ASYNC_IOBDMA) { |
407 | /* Restore the scratch area */ | |
408 | cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); | |
409 | } | |
3368c784 | 410 | cvm_oct_rx_refill_pool(0); |
80ff0fd3 | 411 | |
b7d7dee5 AK |
412 | return rx_count; |
413 | } | |
414 | ||
415 | /** | |
416 | * cvm_oct_napi_poll - the NAPI poll function. | |
417 | * @napi: The NAPI instance. | |
418 | * @budget: Maximum number of packets to receive. | |
419 | * | |
420 | * Returns the number of packets processed. | |
421 | */ | |
422 | static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) | |
423 | { | |
9382cfe1 AK |
424 | struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group, |
425 | napi); | |
b7d7dee5 AK |
426 | int rx_count; |
427 | ||
942bab48 | 428 | rx_count = cvm_oct_poll(rx_group, budget); |
b7d7dee5 AK |
429 | |
430 | if (rx_count < budget) { | |
3368c784 DD |
431 | /* No more work */ |
432 | napi_complete(napi); | |
9382cfe1 | 433 | enable_irq(rx_group->irq); |
80ff0fd3 | 434 | } |
3368c784 DD |
435 | return rx_count; |
436 | } | |
437 | ||
438 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
439 | /** | |
ec977c5b | 440 | * cvm_oct_poll_controller - poll for receive packets |
3368c784 DD |
441 | * device. |
442 | * | |
443 | * @dev: Device to poll. Unused | |
444 | */ | |
445 | void cvm_oct_poll_controller(struct net_device *dev) | |
446 | { | |
e971a119 AK |
447 | int i; |
448 | ||
d48f10fc AK |
449 | if (!atomic_read(&oct_rx_ready)) |
450 | return; | |
451 | ||
e971a119 AK |
452 | for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) { |
453 | ||
454 | if (!(pow_receive_groups & BIT(i))) | |
455 | continue; | |
456 | ||
457 | cvm_oct_poll(&oct_rx_group[i], 16); | |
458 | ||
459 | } | |
80ff0fd3 | 460 | } |
3368c784 | 461 | #endif |
80ff0fd3 DD |
462 | |
/**
 * cvm_oct_rx_initialize - set up receive groups, NAPI and POW interrupts.
 *
 * For every group selected in pow_receive_groups: registers a NAPI
 * instance (attached to the first allocated net_device), requests the
 * group's work-queue IRQ, programs the POW/SSO interrupt thresholds, and
 * schedules NAPI once to drain any packets already queued.  Panics if no
 * net_device exists or an IRQ cannot be acquired.  Finally marks RX as
 * ready for the netpoll path.
 */
void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

	/* NAPI needs a net_device to hang off; any allocated port works. */
	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		int ret;

		if (!(pow_receive_groups & BIT(i)))
			continue;

		netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&oct_rx_group[i].napi);

		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
		oct_rx_group[i].group = i;

		/* Register an IRQ handler to receive POW interrupts.  Note
		 * the dev_id cookie is the group's NAPI instance; teardown
		 * must free with the same cookie.
		 */
		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
				  "Ethernet", &oct_rx_group[i].napi);
		if (ret)
			panic("Could not acquire Ethernet IRQ %d\n",
			      oct_rx_group[i].irq);

		/* Keep the IRQ masked until NAPI re-enables it. */
		disable_irq_nosync(oct_rx_group[i].irq);

		/* Enable POW interrupt when our port has at least one packet */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			union cvmx_sso_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			/* Interrupt as soon as one work entry is queued. */
			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
		} else {
			union cvmx_pow_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			/* Interrupt as soon as one work entry is queued. */
			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
		}

		/* Schedule NAPI now. This will indirectly enable the
		 * interrupt.
		 */
		napi_schedule(&oct_rx_group[i].napi);
	}
	/* Signal cvm_oct_poll_controller() that polling is now safe. */
	atomic_inc(&oct_rx_ready);
}
534 | ||
535 | void cvm_oct_rx_shutdown(void) | |
536 | { | |
e971a119 AK |
537 | int i; |
538 | ||
539 | for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) { | |
540 | ||
541 | if (!(pow_receive_groups & BIT(i))) | |
542 | continue; | |
287faa5e | 543 | |
e971a119 AK |
544 | /* Disable POW interrupt */ |
545 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) | |
546 | cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0); | |
547 | else | |
548 | cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0); | |
549 | ||
550 | /* Free the interrupt handler */ | |
551 | free_irq(oct_rx_group[i].irq, cvm_oct_device); | |
287faa5e | 552 | |
e971a119 AK |
553 | netif_napi_del(&oct_rx_group[i].napi); |
554 | } | |
80ff0fd3 | 555 | } |