NET: Add Ethernet driver for Octeon MGMT devices.
[deliverable/linux.git] / drivers / staging / octeon / ethernet.c
CommitLineData
80ff0fd3
DD
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/module.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/delay.h>
33#include <linux/mii.h>
34
35#include <net/dst.h>
36
37#include <asm/octeon/octeon.h>
38
39#include "ethernet-defines.h"
a620c163 40#include "octeon-ethernet.h"
80ff0fd3
DD
41#include "ethernet-mem.h"
42#include "ethernet-rx.h"
43#include "ethernet-tx.h"
f696a108 44#include "ethernet-mdio.h"
80ff0fd3
DD
45#include "ethernet-util.h"
46#include "ethernet-proc.h"
a620c163 47
80ff0fd3
DD
48
49#include "cvmx-pip.h"
50#include "cvmx-pko.h"
51#include "cvmx-fau.h"
52#include "cvmx-ipd.h"
53#include "cvmx-helper.h"
54
f696a108 55#include "cvmx-gmxx-defs.h"
80ff0fd3
DD
56#include "cvmx-smix-defs.h"
57
58#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
59 && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
60int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
61#else
62int num_packet_buffers = 1024;
63#endif
64module_param(num_packet_buffers, int, 0444);
65MODULE_PARM_DESC(num_packet_buffers, "\n"
66 "\tNumber of packet buffers to allocate and store in the\n"
67 "\tFPA. By default, 1024 packet buffers are used unless\n"
68 "\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");
69
70int pow_receive_group = 15;
71module_param(pow_receive_group, int, 0444);
72MODULE_PARM_DESC(pow_receive_group, "\n"
73 "\tPOW group to receive packets from. All ethernet hardware\n"
74 "\twill be configured to send incomming packets to this POW\n"
75 "\tgroup. Also any other software can submit packets to this\n"
76 "\tgroup for the kernel to process.");
77
78int pow_send_group = -1;
79module_param(pow_send_group, int, 0644);
80MODULE_PARM_DESC(pow_send_group, "\n"
81 "\tPOW group to send packets to other software on. This\n"
82 "\tcontrols the creation of the virtual device pow0.\n"
83 "\talways_use_pow also depends on this value.");
84
85int always_use_pow;
86module_param(always_use_pow, int, 0444);
87MODULE_PARM_DESC(always_use_pow, "\n"
88 "\tWhen set, always send to the pow group. This will cause\n"
89 "\tpackets sent to real ethernet devices to be sent to the\n"
90 "\tPOW group instead of the hardware. Unless some other\n"
91 "\tapplication changes the config, packets will still be\n"
92 "\treceived from the low level hardware. Use this option\n"
93 "\tto allow a CVMX app to intercept all packets from the\n"
94 "\tlinux kernel. You must specify pow_send_group along with\n"
95 "\tthis option.");
96
97char pow_send_list[128] = "";
98module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
99MODULE_PARM_DESC(pow_send_list, "\n"
100 "\tComma separated list of ethernet devices that should use the\n"
101 "\tPOW for transmit instead of the actual ethernet hardware. This\n"
102 "\tis a per port version of always_use_pow. always_use_pow takes\n"
103 "\tprecedence over this list. For example, setting this to\n"
104 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
105 "\tusing the pow_send_group.");
106
107static int disable_core_queueing = 1;
108module_param(disable_core_queueing, int, 0444);
109MODULE_PARM_DESC(disable_core_queueing, "\n"
110 "\tWhen set the networking core's tx_queue_len is set to zero. This\n"
111 "\tallows packets to be sent without lock contention in the packet\n"
112 "\tscheduler resulting in some cases in improved throughput.\n");
113
13c5939e
DD
114
/*
 * The offset from mac_addr_base that should be used for the next port
 * that is configured. By convention, if any mgmt ports exist on the
 * chip, they get the first mac addresses. The ports controlled by
 * this driver are numbered sequentially following any mgmt addresses
 * that may exist.
 */
static unsigned int cvm_oct_mac_addr_offset;

/**
 * Periodic timer to check auto negotiation
 */
static struct timer_list cvm_oct_poll_timer;

/**
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

/* Defined elsewhere in the driver; taken here (trylock) to serialize
   PHY polling against other MDIO bus users. */
extern struct semaphore mdio_sem;
136
/**
 * Periodic timer tick for slow management operations
 *
 * @arg: Device to check (unused; a static index walks the ports)
 *
 * Each tick services exactly one IPD port: runs the port's PHY poll
 * callback (if any), frees skbs whose transmission the hardware has
 * completed, and refreshes the device statistics.  The timer re-arms
 * itself: HZ/50 between ports, a full second after every port has
 * been visited.
 */
static void cvm_do_timer(unsigned long arg)
{
	int32_t skb_to_free, undo;
	int queues_per_port;
	int qos;
	struct octeon_ethernet *priv;
	static int port;	/* next port to service; persists across ticks */

	if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
		/*
		 * All ports have been polled. Start the next
		 * iteration through the ports in one second.
		 */
		port = 0;
		mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
		return;
	}
	if (!cvm_oct_device[port])
		goto out;

	priv = netdev_priv(cvm_oct_device[port]);
	if (priv->poll) {
		/* skip polling if we don't get the lock */
		if (!down_trylock(&mdio_sem)) {
			priv->poll(cvm_oct_device[port]);
			up(&mdio_sem);
		}
	}

	queues_per_port = cvmx_pko_get_num_queues(port);
	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		/*
		 * Atomically claim up to MAX_SKB_TO_FREE completed skbs
		 * from the FAU counter, then give back ("undo") whatever
		 * portion of the claim was not actually available.  The
		 * counter convention (negative = completions pending)
		 * must match cvm_oct_free_tx_skbs in ethernet-tx.
		 */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		undo = skb_to_free > 0 ?
			MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
		if (undo > 0)
			cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
		skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
			MAX_SKB_TO_FREE : -skb_to_free;
		cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
	}
	/* Pull fresh hardware counters into priv->stats. */
	cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);

out:
	port++;
	/* Poll the next port in a 50th of a second.
	   This spreads the polling of ports out a little bit */
	mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
}
194
195/**
196 * Configure common hardware for all interfaces
197 */
198static __init void cvm_oct_configure_common_hw(void)
199{
200 int r;
201 /* Setup the FPA */
202 cvmx_fpa_enable();
203 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
204 num_packet_buffers);
205 cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
206 num_packet_buffers);
207 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
208 cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
209 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
210
211 if (USE_RED)
212 cvmx_helper_setup_red(num_packet_buffers / 4,
213 num_packet_buffers / 8);
214
215 /* Enable the MII interface */
216 if (!octeon_is_simulation())
217 cvmx_write_csr(CVMX_SMIX_EN(0), 1);
218
219 /* Register an IRQ hander for to receive POW interrupts */
220 r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
221 cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
222 cvm_oct_device);
223
224#if defined(CONFIG_SMP) && 0
225 if (USE_MULTICORE_RECEIVE) {
226 irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
227 cpu_online_mask);
228 }
229#endif
230}
231
232/**
233 * Free a work queue entry received in a intercept callback.
234 *
235 * @work_queue_entry:
236 * Work queue entry to free
237 * Returns Zero on success, Negative on failure.
238 */
239int cvm_oct_free_work(void *work_queue_entry)
240{
241 cvmx_wqe_t *work = work_queue_entry;
242
243 int segments = work->word2.s.bufs;
244 union cvmx_buf_ptr segment_ptr = work->packet_ptr;
245
246 while (segments--) {
247 union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
248 cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
249 if (unlikely(!segment_ptr.s.i))
250 cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
251 segment_ptr.s.pool,
252 DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
253 128));
254 segment_ptr = next_ptr;
255 }
256 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
257
258 return 0;
259}
260EXPORT_SYMBOL(cvm_oct_free_work);
261
f696a108
DD
/**
 * Get the low level ethernet statistics
 *
 * @dev: Device to get the statistics from
 * Returns Pointer to the statistics
 *
 * Accumulates the hardware PIP/PKO counters into priv->stats and
 * returns it.  Ports at or above CVMX_PIP_NUM_INPUT_PORTS (the
 * virtual POW device) have no hardware counters and return the
 * software stats unchanged.
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			/* NOTE(review): the second argument presumably
			   requests clear-on-read so the += below doesn't
			   double count -- confirm against the cvmx API. */
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}
307
308/**
309 * Change the link MTU. Unimplemented
310 *
311 * @dev: Device to change
312 * @new_mtu: The new MTU
313 *
314 * Returns Zero on success
315 */
316static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
317{
318 struct octeon_ethernet *priv = netdev_priv(dev);
319 int interface = INTERFACE(priv->port);
320 int index = INDEX(priv->port);
321#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
322 int vlan_bytes = 4;
323#else
324 int vlan_bytes = 0;
325#endif
326
327 /*
328 * Limit the MTU to make sure the ethernet packets are between
329 * 64 bytes and 65535 bytes.
330 */
331 if ((new_mtu + 14 + 4 + vlan_bytes < 64)
332 || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
333 pr_err("MTU must be between %d and %d.\n",
334 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
335 return -EINVAL;
336 }
337 dev->mtu = new_mtu;
338
339 if ((interface < 2)
340 && (cvmx_helper_interface_get_mode(interface) !=
341 CVMX_HELPER_INTERFACE_MODE_SPI)) {
342 /* Add ethernet header and FCS, and VLAN if configured. */
343 int max_packet = new_mtu + 14 + 4 + vlan_bytes;
344
345 if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
346 || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
347 /* Signal errors on packets larger than the MTU */
348 cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
349 max_packet);
350 } else {
351 /*
352 * Set the hardware to truncate packets larger
353 * than the MTU and smaller the 64 bytes.
354 */
355 union cvmx_pip_frm_len_chkx frm_len_chk;
356 frm_len_chk.u64 = 0;
357 frm_len_chk.s.minlen = 64;
358 frm_len_chk.s.maxlen = max_packet;
359 cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
360 frm_len_chk.u64);
361 }
362 /*
363 * Set the hardware to truncate packets larger than
364 * the MTU. The jabber register must be set to a
365 * multiple of 8 bytes, so round up.
366 */
367 cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
368 (max_packet + 7) & ~7u);
369 }
370 return 0;
371}
372
/**
 * Set the receive filtering (broadcast/multicast/promiscuous) state.
 *
 * Programs the GMX address-filter CSRs according to dev->flags and
 * the presence of a multicast list.  Individual multicast addresses
 * are not filtered: any multicast interest forces accept-all.
 *
 * @dev: Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	/* Only GMX-backed ports (interfaces 0/1, non-SPI) have the
	   address filter hardware touched below. */
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		/* Clear bit 0 of PRT_CFG (presumably the port enable --
		   confirm against the GMX spec) while reprogramming; the
		   saved value is written back at the end. */
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		/* The CAM is only consulted when not promiscuous. */
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}
428
/**
 * Set the hardware MAC address for a device
 *
 * @dev:  Device to change the MAC address for
 * @addr: Address structure to change it too. MAC address is addr + 2
 *        (i.e. the sa_data field of a struct sockaddr).
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	/* Record the new address in the net_device. */
	memcpy(dev->dev_addr, addr + 2, 6);

	/* Only GMX-backed ports (interfaces 0/1, non-SPI) need the
	   hardware SMAC/CAM registers reprogrammed. */
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		uint8_t *ptr = addr;
		uint64_t mac = 0;
		/* Pack the six MAC bytes into one 64-bit value,
		   first byte in the most significant position. */
		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t) (ptr[i + 2]);

		/* Clear bit 0 of PRT_CFG (presumably the port enable --
		   confirm against the GMX spec) while reprogramming; the
		   saved value is restored below. */
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		/* Station MAC plus one CAM register per address byte. */
		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[5]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[6]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[7]);
		/* Reapply the broadcast/multicast/promisc filter state. */
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
478
/**
 * Per network device initialization
 *
 * @dev: Device to initialize
 * Returns Zero on success
 *
 * Derives the device MAC from the bootloader-provided base address
 * plus a driver-wide running offset, selects POW transmit if
 * configured, then pushes the address and MTU to the hardware via
 * the device's own netdev_ops callbacks.
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sockaddr sa;
	/* Assemble the 48-bit base MAC from the bootinfo byte array. */
	u64 mac = ((u64)(octeon_bootinfo->mac_addr_base[0] & 0xff) << 40) |
		((u64)(octeon_bootinfo->mac_addr_base[1] & 0xff) << 32) |
		((u64)(octeon_bootinfo->mac_addr_base[2] & 0xff) << 24) |
		((u64)(octeon_bootinfo->mac_addr_base[3] & 0xff) << 16) |
		((u64)(octeon_bootinfo->mac_addr_base[4] & 0xff) << 8) |
		(u64)(octeon_bootinfo->mac_addr_base[5] & 0xff);

	/* Each configured port consumes the next address after any
	   mgmt-port addresses (see cvm_oct_mac_addr_offset). */
	mac += cvm_oct_mac_addr_offset;
	sa.sa_data[0] = (mac >> 40) & 0xff;
	sa.sa_data[1] = (mac >> 32) & 0xff;
	sa.sa_data[2] = (mac >> 24) & 0xff;
	sa.sa_data[3] = (mac >> 16) & 0xff;
	sa.sa_data[4] = (mac >> 8) & 0xff;
	sa.sa_data[5] = mac & 0xff;

	if (cvm_oct_mac_addr_offset >= octeon_bootinfo->mac_addr_count)
		printk(KERN_DEBUG "%s: Using MAC outside of the assigned range:"
			" %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
			sa.sa_data[0] & 0xff, sa.sa_data[1] & 0xff,
			sa.sa_data[2] & 0xff, sa.sa_data[3] & 0xff,
			sa.sa_data[4] & 0xff, sa.sa_data[5] & 0xff);
	cvm_oct_mac_addr_offset++;

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	/* Hardware checksum offload only applies to real PKO queues. */
	if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
		dev->features |= NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);

	cvm_oct_mdio_setup_device(dev);
	/* Program the derived MAC and current MTU through the same
	   callbacks userspace would use. */
	dev->netdev_ops->ndo_set_mac_address(dev, &sa);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	return 0;
}
540
/**
 * Per network device cleanup; counterpart of cvm_oct_common_init.
 *
 * @dev: Device to uninitialize (unused)
 */
void cvm_oct_common_uninit(struct net_device *dev)
{
	/* Currently nothing to do */
}
545
/* Ops for NPI and loopback ports ("npi%d"/"loop%d"); no open/stop. */
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
/* Ops for XAUI ports ("xaui%d"); link managed via open/stop. */
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_xaui_init,
	.ndo_uninit		= cvm_oct_xaui_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_xaui_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
/* Ops for SGMII ports ("eth%d"). */
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_sgmii_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_sgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
/* Ops for SPI ports ("spi%d"); no open/stop. */
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
/* Ops for RGMII/GMII ports ("eth%d"). */
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_rgmii_init,
	.ndo_uninit		= cvm_oct_rgmii_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_rgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
/* Ops for the virtual POW-only device ("pow%d"); transmits via POW. */
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
629
80ff0fd3
DD
630/**
631 * Module/ driver initialization. Creates the linux network
632 * devices.
633 *
634 * Returns Zero on success
635 */
636static int __init cvm_oct_init_module(void)
637{
638 int num_interfaces;
639 int interface;
640 int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
641 int qos;
642
643 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
644
13c5939e
DD
645 if (OCTEON_IS_MODEL(OCTEON_CN52XX))
646 cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
647 else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
648 cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
649 else
650 cvm_oct_mac_addr_offset = 0;
651
80ff0fd3
DD
652 cvm_oct_proc_initialize();
653 cvm_oct_rx_initialize();
654 cvm_oct_configure_common_hw();
655
656 cvmx_helper_initialize_packet_io_global();
657
658 /* Change the input group for all ports before input is enabled */
659 num_interfaces = cvmx_helper_get_number_of_interfaces();
660 for (interface = 0; interface < num_interfaces; interface++) {
661 int num_ports = cvmx_helper_ports_on_interface(interface);
662 int port;
663
664 for (port = cvmx_helper_get_ipd_port(interface, 0);
665 port < cvmx_helper_get_ipd_port(interface, num_ports);
666 port++) {
667 union cvmx_pip_prt_tagx pip_prt_tagx;
668 pip_prt_tagx.u64 =
669 cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
670 pip_prt_tagx.s.grp = pow_receive_group;
671 cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
672 pip_prt_tagx.u64);
673 }
674 }
675
676 cvmx_helper_ipd_and_packet_input_enable();
677
678 memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
679
680 /*
681 * Initialize the FAU used for counting packet buffers that
682 * need to be freed.
683 */
684 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
685
686 if ((pow_send_group != -1)) {
687 struct net_device *dev;
688 pr_info("\tConfiguring device for POW only access\n");
689 dev = alloc_etherdev(sizeof(struct octeon_ethernet));
690 if (dev) {
691 /* Initialize the device private structure. */
692 struct octeon_ethernet *priv = netdev_priv(dev);
693 memset(priv, 0, sizeof(struct octeon_ethernet));
694
f696a108 695 dev->netdev_ops = &cvm_oct_pow_netdev_ops;
80ff0fd3
DD
696 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
697 priv->port = CVMX_PIP_NUM_INPUT_PORTS;
698 priv->queue = -1;
699 strcpy(dev->name, "pow%d");
700 for (qos = 0; qos < 16; qos++)
701 skb_queue_head_init(&priv->tx_free_list[qos]);
702
703 if (register_netdev(dev) < 0) {
704 pr_err("Failed to register ethernet "
705 "device for POW\n");
706 kfree(dev);
707 } else {
708 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
709 pr_info("%s: POW send group %d, receive "
710 "group %d\n",
711 dev->name, pow_send_group,
712 pow_receive_group);
713 }
714 } else {
715 pr_err("Failed to allocate ethernet device "
716 "for POW\n");
717 }
718 }
719
720 num_interfaces = cvmx_helper_get_number_of_interfaces();
721 for (interface = 0; interface < num_interfaces; interface++) {
722 cvmx_helper_interface_mode_t imode =
723 cvmx_helper_interface_get_mode(interface);
724 int num_ports = cvmx_helper_ports_on_interface(interface);
725 int port;
726
727 for (port = cvmx_helper_get_ipd_port(interface, 0);
728 port < cvmx_helper_get_ipd_port(interface, num_ports);
729 port++) {
730 struct octeon_ethernet *priv;
731 struct net_device *dev =
732 alloc_etherdev(sizeof(struct octeon_ethernet));
733 if (!dev) {
734 pr_err("Failed to allocate ethernet device "
735 "for port %d\n", port);
736 continue;
737 }
738 if (disable_core_queueing)
739 dev->tx_queue_len = 0;
740
741 /* Initialize the device private structure. */
742 priv = netdev_priv(dev);
743 memset(priv, 0, sizeof(struct octeon_ethernet));
744
745 priv->imode = imode;
746 priv->port = port;
747 priv->queue = cvmx_pko_get_base_queue(priv->port);
748 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
749 for (qos = 0; qos < 16; qos++)
750 skb_queue_head_init(&priv->tx_free_list[qos]);
751 for (qos = 0; qos < cvmx_pko_get_num_queues(port);
752 qos++)
753 cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
754
755 switch (priv->imode) {
756
757 /* These types don't support ports to IPD/PKO */
758 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
759 case CVMX_HELPER_INTERFACE_MODE_PCIE:
760 case CVMX_HELPER_INTERFACE_MODE_PICMG:
761 break;
762
763 case CVMX_HELPER_INTERFACE_MODE_NPI:
f696a108 764 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
80ff0fd3
DD
765 strcpy(dev->name, "npi%d");
766 break;
767
768 case CVMX_HELPER_INTERFACE_MODE_XAUI:
f696a108 769 dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
80ff0fd3
DD
770 strcpy(dev->name, "xaui%d");
771 break;
772
773 case CVMX_HELPER_INTERFACE_MODE_LOOP:
f696a108 774 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
80ff0fd3
DD
775 strcpy(dev->name, "loop%d");
776 break;
777
778 case CVMX_HELPER_INTERFACE_MODE_SGMII:
f696a108 779 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
80ff0fd3
DD
780 strcpy(dev->name, "eth%d");
781 break;
782
783 case CVMX_HELPER_INTERFACE_MODE_SPI:
f696a108 784 dev->netdev_ops = &cvm_oct_spi_netdev_ops;
80ff0fd3
DD
785 strcpy(dev->name, "spi%d");
786 break;
787
788 case CVMX_HELPER_INTERFACE_MODE_RGMII:
789 case CVMX_HELPER_INTERFACE_MODE_GMII:
f696a108 790 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
80ff0fd3
DD
791 strcpy(dev->name, "eth%d");
792 break;
793 }
794
f696a108 795 if (!dev->netdev_ops) {
80ff0fd3
DD
796 kfree(dev);
797 } else if (register_netdev(dev) < 0) {
798 pr_err("Failed to register ethernet device "
799 "for interface %d, port %d\n",
800 interface, priv->port);
801 kfree(dev);
802 } else {
803 cvm_oct_device[priv->port] = dev;
804 fau -=
805 cvmx_pko_get_num_queues(priv->port) *
806 sizeof(uint32_t);
807 }
808 }
809 }
810
811 if (INTERRUPT_LIMIT) {
812 /*
813 * Set the POW timer rate to give an interrupt at most
814 * INTERRUPT_LIMIT times per second.
815 */
816 cvmx_write_csr(CVMX_POW_WQ_INT_PC,
817 octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
818 16 * 256) << 8);
819
820 /*
821 * Enable POW timer interrupt. It will count when
822 * there are packets available.
823 */
824 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
825 0x1ful << 24);
826 } else {
827 /* Enable POW interrupt when our port has at least one packet */
828 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
829 }
830
831 /* Enable the poll timer for checking RGMII status */
832 init_timer(&cvm_oct_poll_timer);
833 cvm_oct_poll_timer.data = 0;
834 cvm_oct_poll_timer.function = cvm_do_timer;
835 mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
836
837 return 0;
838}
839
840/**
841 * Module / driver shutdown
842 *
843 * Returns Zero on success
844 */
845static void __exit cvm_oct_cleanup_module(void)
846{
847 int port;
848
849 /* Disable POW interrupt */
850 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
851
852 cvmx_ipd_disable();
853
854 /* Free the interrupt handler */
855 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
856
857 del_timer(&cvm_oct_poll_timer);
858 cvm_oct_rx_shutdown();
859 cvmx_pko_disable();
860
861 /* Free the ethernet devices */
862 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
863 if (cvm_oct_device[port]) {
864 cvm_oct_tx_shutdown(cvm_oct_device[port]);
865 unregister_netdev(cvm_oct_device[port]);
866 kfree(cvm_oct_device[port]);
867 cvm_oct_device[port] = NULL;
868 }
869 }
870
871 cvmx_pko_shutdown();
872 cvm_oct_proc_shutdown();
873
874 cvmx_ipd_free_ptr();
875
876 /* Free the HW pools */
877 cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
878 num_packet_buffers);
879 cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
880 num_packet_buffers);
881 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
882 cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
883 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
884}
885
/* Module metadata exposed through modinfo, plus init/exit hooks. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
module_init(cvm_oct_init_module);
module_exit(cvm_oct_cleanup_module);
This page took 0.154819 seconds and 5 git commands to generate.