Staging: slicoss: changes comparisons to NULL in slicoss.c
[deliverable/linux.git] / drivers / staging / octeon / ethernet.c
CommitLineData
67620987
AK
1/*
2 * This file is based on code from OCTEON SDK by Cavium Networks.
80ff0fd3
DD
3 *
4 * Copyright (c) 2003-2007 Cavium Networks
5 *
6 * This file is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, Version 2, as
8 * published by the Free Software Foundation.
67620987
AK
9 */
10
df9244c5 11#include <linux/platform_device.h>
80ff0fd3 12#include <linux/kernel.h>
80ff0fd3
DD
13#include <linux/module.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
f6ed1b3b 16#include <linux/phy.h>
5a0e3ad6 17#include <linux/slab.h>
dc890df0 18#include <linux/interrupt.h>
df9244c5 19#include <linux/of_net.h>
80ff0fd3
DD
20
21#include <net/dst.h>
22
23#include <asm/octeon/octeon.h>
24
25#include "ethernet-defines.h"
a620c163 26#include "octeon-ethernet.h"
80ff0fd3
DD
27#include "ethernet-mem.h"
28#include "ethernet-rx.h"
29#include "ethernet-tx.h"
f696a108 30#include "ethernet-mdio.h"
80ff0fd3 31#include "ethernet-util.h"
80ff0fd3 32
af866496
DD
33#include <asm/octeon/cvmx-pip.h>
34#include <asm/octeon/cvmx-pko.h>
35#include <asm/octeon/cvmx-fau.h>
36#include <asm/octeon/cvmx-ipd.h>
37#include <asm/octeon/cvmx-helper.h>
80ff0fd3 38
af866496
DD
39#include <asm/octeon/cvmx-gmxx-defs.h>
40#include <asm/octeon/cvmx-smix-defs.h>
80ff0fd3 41
/* Number of FPA packet/WQE buffers to allocate at probe time. */
static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

/* POW group all ports are steered to; the RX IRQ handler drains it. */
int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

/* -1 disables the virtual "pow%d" device; any other value enables it. */
int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

/* When non-zero, all TX goes through pow_send_group instead of PKO. */
int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

/* Per-device version of always_use_pow; matched by substring in probe. */
char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

/* NAPI budget handed to netif_napi_add() by the RX code. */
int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
/*
 * cvm_oct_poll_queue - Workqueue for polling operations.
 *
 * Single-threaded: the refill work and every port's periodic work
 * run serialized on it.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.  Workers
 * test it before re-queueing themselves so teardown can converge.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

/* TX cleanup poll interval in core-clock cycles (set once in probe). */
u64 cvm_oct_tx_poll_interval;

/*
 * Forward declaration so the delayed work can be declared statically
 * before the worker body is defined.
 */
static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
/*
 * cvm_oct_rx_refill_worker - periodic (1 Hz) safety net that tops up
 * the FPA packet-buffer pool, then re-arms itself unless the driver
 * is shutting down.
 */
static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	/* Re-queue only while the poll queue is still alive. */
	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &cvm_oct_rx_refill_work, HZ);
}
/*
 * cvm_oct_periodic_worker - per-port 1 Hz housekeeping: run the
 * port's link-poll callback (if any) and refresh its statistics,
 * then re-arm unless the driver is stopping.
 */
static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	/* priv->poll is only set for ports without a PHY (see open). */
	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	/* Pull hardware counters into priv->stats as a side effect. */
	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &priv->port_periodic_work, HZ);
}
/*
 * cvm_oct_configure_common_hw - one-time hardware setup shared by all
 * ports: enable the FPA, seed the packet/WQE/output pools, fix IPD
 * endianness on little-endian kernels, and configure RED thresholds.
 */
static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	/* Output pool only needs filling when it is a distinct pool. */
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		/* Tell IPD to write packets and WQEs little-endian. */
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	/* RED drop thresholds: pass above n/4 free, drop below n/8. */
	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}
/**
 * cvm_oct_free_work- Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Walks the buffer chain hanging off the work entry, returning each
 * segment to its FPA pool (unless the "don't free" bit is set), then
 * frees the work entry itself back to the WQE pool.
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		/*
		 * The next-pointer lives 8 bytes before the segment's
		 * data address; read it before the buffer is freed.
		 */
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		/* The "i" bit means hardware/software owns the buffer. */
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev: Device to get the statistics from
 *
 * Reads the clear-on-read PIP/PKO hardware counters and accumulates
 * them into priv->stats, so the driver-wide counters stay monotonic
 * between calls.
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	/* Only real IPD input ports have hardware counters (not pow0). */
	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			/* Second argument of 1 clears the counters on read. */
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		priv->stats.rx_dropped += rx_status.dropped_packets;
	}

	return &priv->stats;
}
/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev: Device to change
 * @new_mtu: The new MTU
 *
 * Validates the requested MTU against the hardware frame-size limits
 * and programs the GMX/PIP maximum-frame registers accordingly.  The
 * constants 14 and 4 are the ethernet header and FCS lengths.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	/* Leave room for one 802.1Q tag when VLANs are enabled. */
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65535 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	/* SPI interfaces and interface >= 2 have no GMX frame registers. */
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller the 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev: Device to work on
 *
 * Programs the GMX address-filter control from dev->flags: broadcast
 * is always accepted, multicast accept/reject follows IFF_ALLMULTI /
 * the mc list, and IFF_PROMISC disables CAM filtering entirely.  The
 * port is disabled around the register writes and re-enabled after.
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	/* SPI interfaces and interface >= 2 have no GMX filter block. */
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		/* Disable the port while reprogramming the filter. */
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		/* Restore the original (enabled) port configuration. */
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}
/*
 * cvm_oct_set_mac_filter - push dev->dev_addr into the GMX SMAC and
 * CAM registers so hardware filtering matches the current address.
 * The port is disabled during reprogramming and re-enabled after.
 *
 * Returns zero (hardware writes cannot fail here).
 */
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	/* SPI interfaces and interface >= 2 have no GMX MAC registers. */
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		u8 *ptr = dev->dev_addr;
		u64 mac = 0;

		/* Pack the 6 address bytes big-endian into one u64. */
		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

		/* Disable the port while reprogramming the address. */
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		/* Each CAM register holds one byte of the address. */
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		/* Refresh the multicast/CAM control for the new address. */
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev: The device in question.
 * @addr: Socket address.
 *
 * Validates and stores the new address via eth_mac_addr(), then
 * reprograms the hardware filter to match it.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int ret;

	ret = eth_mac_addr(dev, addr);
	if (ret != 0)
		return ret;

	return cvm_oct_set_mac_filter(dev);
}
/**
 * cvm_oct_common_init - per network device initialization
 * @dev: Device to initialize
 *
 * Sets the MAC address (from the device tree when available, random
 * otherwise), selects POW vs hardware transmit, configures feature
 * flags, programs the hardware filter, applies the MTU, and zeroes
 * the statistics.
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		/* No DT address available: fall back to a random one. */
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	/* Hardware checksum/SG offload only works on the PKO path. */
	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	/* Re-apply the current MTU so the frame-size registers match. */
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	/* Leave the port down until ndo_open runs. */
	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}
464void cvm_oct_common_uninit(struct net_device *dev)
465{
f6ed1b3b
DD
466 struct octeon_ethernet *priv = netdev_priv(dev);
467
468 if (priv->phydev)
469 phy_disconnect(priv->phydev);
f696a108
DD
470}
471
/*
 * cvm_oct_common_open - shared ndo_open implementation.
 * @dev: device being opened
 * @link_poll: fallback link-state poll used when no PHY is attached
 *
 * Connects the PHY (if any), enables the GMX port, and establishes
 * the initial carrier state either from the PHY or from the helper
 * link-status API.  Returns zero on success or the PHY setup error.
 */
int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	/* Enable the port in GMX. */
	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	/* The simulator has no real link state to track. */
	if (octeon_is_simulation())
		return 0;

	if (priv->phydev) {
		/* A PHY is attached: take carrier state from it. */
		int r = phy_read_status(priv->phydev);

		if (r == 0 && priv->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		/* No PHY: poll the helper API, now and periodically. */
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}
/*
 * cvm_oct_link_poll - periodic link-state poll for PHY-less ports.
 *
 * Compares the cached link info with the hardware's; on change it
 * re-autonegotiates, updates the netif carrier state, and notes the
 * new carrier settings.
 */
void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	/* Nothing to do while the link state is unchanged. */
	if (link_info.u64 == priv->link_info)
		return;

	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}
/* XAUI ndo_open: common open with the generic link poller. */
static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}
/* NPI (and LOOP) ports: no open/stop, transmit straight via PKO. */
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

/* XAUI ports: common init plus XAUI-specific open. */
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

/* SGMII ports: SGMII-specific init and open. */
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

/* SPI ports: SPI-specific init/uninit, no open/stop. */
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

/* RGMII/GMII ports: common init with RGMII-specific open. */
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

/* Virtual pow%d device: transmits through the POW, no uninit/open. */
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
b186410d
NH
620static struct device_node *cvm_oct_of_get_child(
621 const struct device_node *parent, int reg_val)
df9244c5
DD
622{
623 struct device_node *node = NULL;
624 int size;
625 const __be32 *addr;
626
627 for (;;) {
628 node = of_get_next_child(parent, node);
629 if (!node)
630 break;
631 addr = of_get_property(node, "reg", &size);
632 if (addr && (be32_to_cpu(*addr) == reg_val))
633 break;
634 }
635 return node;
636}
637
4f240906 638static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
b186410d 639 int interface, int port)
df9244c5
DD
640{
641 struct device_node *ni, *np;
642
643 ni = cvm_oct_of_get_child(pip, interface);
644 if (!ni)
645 return NULL;
646
647 np = cvm_oct_of_get_child(ni, port);
648 of_node_put(ni);
649
650 return np;
651}
652
/*
 * cvm_oct_probe - platform driver probe: bring up the shared packet
 * hardware, create the polling workqueue, optionally register the
 * virtual POW device, then allocate and register one net_device per
 * physical port, and finally start the TX/RX subsystems.
 *
 * Returns zero on success or a negative errno.
 */
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (!cvm_oct_poll_queue) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	/* Register the virtual pow%d device when a send group is set. */
	if ((pow_send_group != -1)) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	/* One net_device per physical port on every interface. */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			/* Each queue gets its own 4-byte FAU counter. */
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			/* Pick the ops table and name by interface mode. */
			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				/* Unsupported mode: drop the device. */
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work,
						   HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

	return 0;
}
/*
 * cvm_oct_remove - platform driver remove: quiesce interrupts and
 * input, stop all deferred work, tear down the TX/RX subsystems,
 * unregister and free every net_device, then drain the hardware
 * pools.  Ordering matters: input must be stopped before devices
 * are freed and pools emptied.
 *
 * Returns zero.
 */
static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
	else
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	/* Stop workers from re-queueing themselves, then flush them. */
	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}
/* Device-tree match table: binds to the Octeon 3860 PIP node. */
static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe = cvm_oct_probe,
	.remove = cvm_oct_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

/* Generates module init/exit that register/unregister the driver. */
module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
This page took 0.813307 seconds and 5 git commands to generate.