Merge remote-tracking branch 'staging/staging-next'
[deliverable/linux.git] / drivers / staging / octeon / ethernet.c
CommitLineData
67620987
AK
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */
10
df9244c5 11#include <linux/platform_device.h>
80ff0fd3 12#include <linux/kernel.h>
80ff0fd3
DD
13#include <linux/module.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
f6ed1b3b 16#include <linux/phy.h>
5a0e3ad6 17#include <linux/slab.h>
dc890df0 18#include <linux/interrupt.h>
df9244c5 19#include <linux/of_net.h>
80ff0fd3
DD
20
21#include <net/dst.h>
22
23#include <asm/octeon/octeon.h>
24
25#include "ethernet-defines.h"
a620c163 26#include "octeon-ethernet.h"
80ff0fd3
DD
27#include "ethernet-mem.h"
28#include "ethernet-rx.h"
29#include "ethernet-tx.h"
f696a108 30#include "ethernet-mdio.h"
80ff0fd3 31#include "ethernet-util.h"
80ff0fd3 32
af866496
DD
33#include <asm/octeon/cvmx-pip.h>
34#include <asm/octeon/cvmx-pko.h>
35#include <asm/octeon/cvmx-fau.h>
36#include <asm/octeon/cvmx-ipd.h>
37#include <asm/octeon/cvmx-helper.h>
ef2d4f6c 38#include <asm/octeon/cvmx-asxx-defs.h>
af866496
DD
39#include <asm/octeon/cvmx-gmxx-defs.h>
40#include <asm/octeon/cvmx-smix-defs.h>
80ff0fd3 41
90419615 42static int num_packet_buffers = 1024;
80ff0fd3
DD
43module_param(num_packet_buffers, int, 0444);
44MODULE_PARM_DESC(num_packet_buffers, "\n"
45 "\tNumber of packet buffers to allocate and store in the\n"
5ff8bebb 46 "\tFPA. By default, 1024 packet buffers are used.\n");
80ff0fd3 47
e971a119 48static int pow_receive_group = 15;
80ff0fd3
DD
49module_param(pow_receive_group, int, 0444);
50MODULE_PARM_DESC(pow_receive_group, "\n"
51 "\tPOW group to receive packets from. All ethernet hardware\n"
d82603c6 52 "\twill be configured to send incoming packets to this POW\n"
80ff0fd3
DD
53 "\tgroup. Also any other software can submit packets to this\n"
54 "\tgroup for the kernel to process.");
55
5cf9b1ca
AK
56static int receive_group_order;
57module_param(receive_group_order, int, 0444);
58MODULE_PARM_DESC(receive_group_order, "\n"
59 "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
60 "\twill be configured to send incoming packets to multiple POW\n"
61 "\tgroups. pow_receive_group parameter is ignored when multiple\n"
62 "\tgroups are taken into use and groups are allocated starting\n"
63 "\tfrom 0. By default, a single group is used.\n");
64
80ff0fd3
DD
65int pow_send_group = -1;
66module_param(pow_send_group, int, 0644);
67MODULE_PARM_DESC(pow_send_group, "\n"
68 "\tPOW group to send packets to other software on. This\n"
69 "\tcontrols the creation of the virtual device pow0.\n"
70 "\talways_use_pow also depends on this value.");
71
72int always_use_pow;
73module_param(always_use_pow, int, 0444);
74MODULE_PARM_DESC(always_use_pow, "\n"
75 "\tWhen set, always send to the pow group. This will cause\n"
76 "\tpackets sent to real ethernet devices to be sent to the\n"
77 "\tPOW group instead of the hardware. Unless some other\n"
78 "\tapplication changes the config, packets will still be\n"
79 "\treceived from the low level hardware. Use this option\n"
80 "\tto allow a CVMX app to intercept all packets from the\n"
81 "\tlinux kernel. You must specify pow_send_group along with\n"
82 "\tthis option.");
83
84char pow_send_list[128] = "";
85module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
86MODULE_PARM_DESC(pow_send_list, "\n"
87 "\tComma separated list of ethernet devices that should use the\n"
88 "\tPOW for transmit instead of the actual ethernet hardware. This\n"
89 "\tis a per port version of always_use_pow. always_use_pow takes\n"
90 "\tprecedence over this list. For example, setting this to\n"
91 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
92 "\tusing the pow_send_group.");
93
3368c784
DD
94int rx_napi_weight = 32;
95module_param(rx_napi_weight, int, 0444);
96MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
13c5939e 97
e971a119
AK
98/* Mask indicating which receive groups are in use. */
99int pow_receive_groups;
f8c26486 100
d0fbf9f3 101/*
f8c26486
DD
102 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
103 *
104 * Set to one right before cvm_oct_poll_queue is destroyed.
80ff0fd3 105 */
f8c26486 106atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
80ff0fd3 107
d0fbf9f3 108/*
80ff0fd3
DD
109 * Array of every ethernet device owned by this driver indexed by
110 * the ipd input port number.
111 */
112struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
113
4898c560
DD
114u64 cvm_oct_tx_poll_interval;
115
f8c26486
DD
116static void cvm_oct_rx_refill_worker(struct work_struct *work);
117static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
118
119static void cvm_oct_rx_refill_worker(struct work_struct *work)
80ff0fd3 120{
f8c26486
DD
121 /*
122 * FPA 0 may have been drained, try to refill it if we need
123 * more than num_packet_buffers / 2, otherwise normal receive
124 * processing will refill it. If it were drained, no packets
125 * could be received so cvm_oct_napi_poll would never be
126 * invoked to do the refill.
127 */
128 cvm_oct_rx_refill_pool(num_packet_buffers / 2);
a620c163 129
f8c26486 130 if (!atomic_read(&cvm_oct_poll_queue_stopping))
6fe5efa1 131 schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
80ff0fd3
DD
132}
133
4898c560 134static void cvm_oct_periodic_worker(struct work_struct *work)
f8c26486
DD
135{
136 struct octeon_ethernet *priv = container_of(work,
137 struct octeon_ethernet,
4898c560 138 port_periodic_work.work);
a620c163 139
f6ed1b3b 140 if (priv->poll)
f8c26486 141 priv->poll(cvm_oct_device[priv->port]);
a620c163 142
b186410d
NH
143 cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
144 cvm_oct_device[priv->port]);
4898c560 145
f8c26486 146 if (!atomic_read(&cvm_oct_poll_queue_stopping))
6fe5efa1 147 schedule_delayed_work(&priv->port_periodic_work, HZ);
851ec8cd 148}
80ff0fd3 149
4f240906 150static void cvm_oct_configure_common_hw(void)
80ff0fd3 151{
80ff0fd3
DD
152 /* Setup the FPA */
153 cvmx_fpa_enable();
154 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
155 num_packet_buffers);
156 cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
157 num_packet_buffers);
158 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
159 cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
d5f9bc73 160 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
80ff0fd3 161
8a5cc923
PM
162#ifdef __LITTLE_ENDIAN
163 {
164 union cvmx_ipd_ctl_status ipd_ctl_status;
4bc8ff74 165
8a5cc923
PM
166 ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
167 ipd_ctl_status.s.pkt_lend = 1;
168 ipd_ctl_status.s.wqe_lend = 1;
169 cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
170 }
171#endif
172
cccdb277 173 cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
80ff0fd3
DD
174}
175
176/**
ec977c5b
DD
177 * cvm_oct_free_work- Free a work queue entry
178 *
179 * @work_queue_entry: Work queue entry to free
80ff0fd3 180 *
80ff0fd3
DD
181 * Returns Zero on success, Negative on failure.
182 */
183int cvm_oct_free_work(void *work_queue_entry)
184{
185 cvmx_wqe_t *work = work_queue_entry;
186
187 int segments = work->word2.s.bufs;
188 union cvmx_buf_ptr segment_ptr = work->packet_ptr;
189
190 while (segments--) {
191 union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
192 cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
193 if (unlikely(!segment_ptr.s.i))
194 cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
195 segment_ptr.s.pool,
c93b0e75 196 CVMX_FPA_PACKET_POOL_SIZE / 128);
80ff0fd3
DD
197 segment_ptr = next_ptr;
198 }
c93b0e75 199 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
80ff0fd3
DD
200
201 return 0;
202}
203EXPORT_SYMBOL(cvm_oct_free_work);
204
f696a108 205/**
ec977c5b 206 * cvm_oct_common_get_stats - get the low level ethernet statistics
f696a108 207 * @dev: Device to get the statistics from
ec977c5b 208 *
f696a108
DD
209 * Returns Pointer to the statistics
210 */
211static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
212{
213 cvmx_pip_port_status_t rx_status;
214 cvmx_pko_port_status_t tx_status;
215 struct octeon_ethernet *priv = netdev_priv(dev);
216
217 if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
218 if (octeon_is_simulation()) {
219 /* The simulator doesn't support statistics */
220 memset(&rx_status, 0, sizeof(rx_status));
221 memset(&tx_status, 0, sizeof(tx_status));
222 } else {
223 cvmx_pip_get_port_status(priv->port, 1, &rx_status);
224 cvmx_pko_get_port_status(priv->port, 1, &tx_status);
225 }
226
227 priv->stats.rx_packets += rx_status.inb_packets;
228 priv->stats.tx_packets += tx_status.packets;
229 priv->stats.rx_bytes += rx_status.inb_octets;
230 priv->stats.tx_bytes += tx_status.octets;
231 priv->stats.multicast += rx_status.multicast_packets;
232 priv->stats.rx_crc_errors += rx_status.inb_errors;
233 priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
dcf24f77 234 priv->stats.rx_dropped += rx_status.dropped_packets;
f696a108
DD
235 }
236
237 return &priv->stats;
238}
239
240/**
ec977c5b 241 * cvm_oct_common_change_mtu - change the link MTU
f696a108
DD
242 * @dev: Device to change
243 * @new_mtu: The new MTU
244 *
245 * Returns Zero on success
246 */
247static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
248{
249 struct octeon_ethernet *priv = netdev_priv(dev);
250 int interface = INTERFACE(priv->port);
aa652b1c 251#if IS_ENABLED(CONFIG_VLAN_8021Q)
f696a108
DD
252 int vlan_bytes = 4;
253#else
254 int vlan_bytes = 0;
255#endif
256
257 /*
258 * Limit the MTU to make sure the ethernet packets are between
259 * 64 bytes and 65535 bytes.
260 */
7636941e
LGL
261 if ((new_mtu + 14 + 4 + vlan_bytes < 64) ||
262 (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
f696a108
DD
263 pr_err("MTU must be between %d and %d.\n",
264 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
265 return -EINVAL;
266 }
267 dev->mtu = new_mtu;
268
7636941e
LGL
269 if ((interface < 2) &&
270 (cvmx_helper_interface_get_mode(interface) !=
f696a108 271 CVMX_HELPER_INTERFACE_MODE_SPI)) {
0ad1ed99 272 int index = INDEX(priv->port);
f696a108
DD
273 /* Add ethernet header and FCS, and VLAN if configured. */
274 int max_packet = new_mtu + 14 + 4 + vlan_bytes;
275
7636941e
LGL
276 if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
277 OCTEON_IS_MODEL(OCTEON_CN58XX)) {
f696a108
DD
278 /* Signal errors on packets larger than the MTU */
279 cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
280 max_packet);
281 } else {
282 /*
283 * Set the hardware to truncate packets larger
284 * than the MTU and smaller the 64 bytes.
285 */
286 union cvmx_pip_frm_len_chkx frm_len_chk;
39bc7513 287
f696a108
DD
288 frm_len_chk.u64 = 0;
289 frm_len_chk.s.minlen = 64;
290 frm_len_chk.s.maxlen = max_packet;
291 cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
292 frm_len_chk.u64);
293 }
294 /*
295 * Set the hardware to truncate packets larger than
296 * the MTU. The jabber register must be set to a
297 * multiple of 8 bytes, so round up.
298 */
299 cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
300 (max_packet + 7) & ~7u);
301 }
302 return 0;
303}
304
305/**
ec977c5b 306 * cvm_oct_common_set_multicast_list - set the multicast list
f696a108
DD
307 * @dev: Device to work on
308 */
309static void cvm_oct_common_set_multicast_list(struct net_device *dev)
310{
311 union cvmx_gmxx_prtx_cfg gmx_cfg;
312 struct octeon_ethernet *priv = netdev_priv(dev);
313 int interface = INTERFACE(priv->port);
f696a108 314
7636941e
LGL
315 if ((interface < 2) &&
316 (cvmx_helper_interface_get_mode(interface) !=
f696a108
DD
317 CVMX_HELPER_INTERFACE_MODE_SPI)) {
318 union cvmx_gmxx_rxx_adr_ctl control;
0ad1ed99 319 int index = INDEX(priv->port);
39bc7513 320
f696a108
DD
321 control.u64 = 0;
322 control.s.bcst = 1; /* Allow broadcast MAC addresses */
323
d5907942 324 if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
f696a108
DD
325 (dev->flags & IFF_PROMISC))
326 /* Force accept multicast packets */
327 control.s.mcst = 2;
328 else
215c47c9 329 /* Force reject multicast packets */
f696a108
DD
330 control.s.mcst = 1;
331
332 if (dev->flags & IFF_PROMISC)
333 /*
334 * Reject matches if promisc. Since CAM is
335 * shut off, should accept everything.
336 */
337 control.s.cam_mode = 0;
338 else
339 /* Filter packets based on the CAM */
340 control.s.cam_mode = 1;
341
342 gmx_cfg.u64 =
343 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
344 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
345 gmx_cfg.u64 & ~1ull);
346
347 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
348 control.u64);
349 if (dev->flags & IFF_PROMISC)
350 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
351 (index, interface), 0);
352 else
353 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
354 (index, interface), 1);
355
356 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
357 gmx_cfg.u64);
358 }
359}
360
df9244c5 361static int cvm_oct_set_mac_filter(struct net_device *dev)
f696a108
DD
362{
363 struct octeon_ethernet *priv = netdev_priv(dev);
364 union cvmx_gmxx_prtx_cfg gmx_cfg;
365 int interface = INTERFACE(priv->port);
f696a108 366
7636941e
LGL
367 if ((interface < 2) &&
368 (cvmx_helper_interface_get_mode(interface) !=
f696a108
DD
369 CVMX_HELPER_INTERFACE_MODE_SPI)) {
370 int i;
ec2c398e
AO
371 u8 *ptr = dev->dev_addr;
372 u64 mac = 0;
0ad1ed99 373 int index = INDEX(priv->port);
39bc7513 374
f696a108 375 for (i = 0; i < 6; i++)
ec2c398e 376 mac = (mac << 8) | (u64)ptr[i];
f696a108
DD
377
378 gmx_cfg.u64 =
379 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
380 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
381 gmx_cfg.u64 & ~1ull);
382
383 cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
384 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
df9244c5 385 ptr[0]);
f696a108 386 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
df9244c5 387 ptr[1]);
f696a108 388 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
df9244c5 389 ptr[2]);
f696a108 390 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
df9244c5 391 ptr[3]);
f696a108 392 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
df9244c5 393 ptr[4]);
f696a108 394 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
df9244c5 395 ptr[5]);
f696a108
DD
396 cvm_oct_common_set_multicast_list(dev);
397 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
398 gmx_cfg.u64);
399 }
400 return 0;
401}
402
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev: The device in question.
 * @addr: Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	/* Push the new address down into the hardware filter. */
	return cvm_oct_set_mac_filter(dev);
}
418
f696a108 419/**
ec977c5b 420 * cvm_oct_common_init - per network device initialization
f696a108 421 * @dev: Device to initialize
ec977c5b 422 *
f696a108
DD
423 * Returns Zero on success
424 */
425int cvm_oct_common_init(struct net_device *dev)
426{
f696a108 427 struct octeon_ethernet *priv = netdev_priv(dev);
df9244c5
DD
428 const u8 *mac = NULL;
429
430 if (priv->of_node)
431 mac = of_get_mac_address(priv->of_node);
432
4d978452 433 if (mac)
6c71ea54 434 ether_addr_copy(dev->dev_addr, mac);
15c6ff3b 435 else
df9244c5 436 eth_hw_addr_random(dev);
f696a108
DD
437
438 /*
439 * Force the interface to use the POW send if always_use_pow
440 * was specified or it is in the pow send list.
441 */
7636941e
LGL
442 if ((pow_send_group != -1) &&
443 (always_use_pow || strstr(pow_send_list, dev->name)))
f696a108
DD
444 priv->queue = -1;
445
6646baf7
AK
446 if (priv->queue != -1)
447 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
f696a108 448
f696a108
DD
449 /* We do our own locking, Linux doesn't need to */
450 dev->features |= NETIF_F_LLTX;
7ad24ea4 451 dev->ethtool_ops = &cvm_oct_ethtool_ops;
f696a108 452
df9244c5 453 cvm_oct_set_mac_filter(dev);
f696a108
DD
454 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
455
456 /*
457 * Zero out stats for port so we won't mistakenly show
458 * counters from the bootloader.
459 */
460 memset(dev->netdev_ops->ndo_get_stats(dev), 0,
461 sizeof(struct net_device_stats));
462
be76400c
AK
463 if (dev->netdev_ops->ndo_stop)
464 dev->netdev_ops->ndo_stop(dev);
465
f696a108
DD
466 return 0;
467}
468
469void cvm_oct_common_uninit(struct net_device *dev)
470{
5d99db13
PR
471 if (dev->phydev)
472 phy_disconnect(dev->phydev);
f696a108
DD
473}
474
9e3ae4f9 475int cvm_oct_common_open(struct net_device *dev,
2c265f74 476 void (*link_poll)(struct net_device *))
9e3ae4f9
AK
477{
478 union cvmx_gmxx_prtx_cfg gmx_cfg;
479 struct octeon_ethernet *priv = netdev_priv(dev);
480 int interface = INTERFACE(priv->port);
481 int index = INDEX(priv->port);
482 cvmx_helper_link_info_t link_info;
483 int rv;
484
485 rv = cvm_oct_phy_setup_device(dev);
486 if (rv)
487 return rv;
488
489 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
490 gmx_cfg.s.en = 1;
ce060d8a
AK
491 if (octeon_has_feature(OCTEON_FEATURE_PKND))
492 gmx_cfg.s.pknd = priv->port;
9e3ae4f9
AK
493 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
494
495 if (octeon_is_simulation())
496 return 0;
497
5d99db13
PR
498 if (dev->phydev) {
499 int r = phy_read_status(dev->phydev);
9e3ae4f9 500
5d99db13 501 if (r == 0 && dev->phydev->link == 0)
9e3ae4f9
AK
502 netif_carrier_off(dev);
503 cvm_oct_adjust_link(dev);
504 } else {
505 link_info = cvmx_helper_link_get(priv->port);
506 if (!link_info.s.link_up)
507 netif_carrier_off(dev);
508 priv->poll = link_poll;
2c265f74 509 link_poll(dev);
9e3ae4f9
AK
510 }
511
512 return 0;
513}
514
a8d2e817
AK
515void cvm_oct_link_poll(struct net_device *dev)
516{
517 struct octeon_ethernet *priv = netdev_priv(dev);
518 cvmx_helper_link_info_t link_info;
519
520 link_info = cvmx_helper_link_get(priv->port);
521 if (link_info.u64 == priv->link_info)
522 return;
523
60dcf58a
AK
524 if (cvmx_helper_link_set(priv->port, link_info))
525 link_info.u64 = priv->link_info;
526 else
527 priv->link_info = link_info.u64;
a8d2e817
AK
528
529 if (link_info.s.link_up) {
530 if (!netif_carrier_ok(dev))
531 netif_carrier_on(dev);
532 } else if (netif_carrier_ok(dev)) {
533 netif_carrier_off(dev);
534 }
535 cvm_oct_note_carrier(priv, link_info);
536}
537
d566e690
AK
538static int cvm_oct_xaui_open(struct net_device *dev)
539{
540 return cvm_oct_common_open(dev, cvm_oct_link_poll);
541}
542
f696a108
DD
543static const struct net_device_ops cvm_oct_npi_netdev_ops = {
544 .ndo_init = cvm_oct_common_init,
545 .ndo_uninit = cvm_oct_common_uninit,
546 .ndo_start_xmit = cvm_oct_xmit,
afc4b13d 547 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
f696a108
DD
548 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
549 .ndo_do_ioctl = cvm_oct_ioctl,
550 .ndo_change_mtu = cvm_oct_common_change_mtu,
551 .ndo_get_stats = cvm_oct_common_get_stats,
552#ifdef CONFIG_NET_POLL_CONTROLLER
553 .ndo_poll_controller = cvm_oct_poll_controller,
554#endif
555};
0e350e17 556
f696a108 557static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
41cb5786 558 .ndo_init = cvm_oct_common_init,
3c339145 559 .ndo_uninit = cvm_oct_common_uninit,
f696a108 560 .ndo_open = cvm_oct_xaui_open,
96217ebf 561 .ndo_stop = cvm_oct_common_stop,
f696a108 562 .ndo_start_xmit = cvm_oct_xmit,
afc4b13d 563 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
f696a108
DD
564 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
565 .ndo_do_ioctl = cvm_oct_ioctl,
566 .ndo_change_mtu = cvm_oct_common_change_mtu,
567 .ndo_get_stats = cvm_oct_common_get_stats,
568#ifdef CONFIG_NET_POLL_CONTROLLER
569 .ndo_poll_controller = cvm_oct_poll_controller,
570#endif
571};
0e350e17 572
f696a108
DD
573static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
574 .ndo_init = cvm_oct_sgmii_init,
3c339145 575 .ndo_uninit = cvm_oct_common_uninit,
f696a108 576 .ndo_open = cvm_oct_sgmii_open,
96217ebf 577 .ndo_stop = cvm_oct_common_stop,
f696a108 578 .ndo_start_xmit = cvm_oct_xmit,
afc4b13d 579 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
f696a108
DD
580 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
581 .ndo_do_ioctl = cvm_oct_ioctl,
582 .ndo_change_mtu = cvm_oct_common_change_mtu,
583 .ndo_get_stats = cvm_oct_common_get_stats,
584#ifdef CONFIG_NET_POLL_CONTROLLER
585 .ndo_poll_controller = cvm_oct_poll_controller,
586#endif
587};
0e350e17 588
f696a108
DD
589static const struct net_device_ops cvm_oct_spi_netdev_ops = {
590 .ndo_init = cvm_oct_spi_init,
591 .ndo_uninit = cvm_oct_spi_uninit,
592 .ndo_start_xmit = cvm_oct_xmit,
afc4b13d 593 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
f696a108
DD
594 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
595 .ndo_do_ioctl = cvm_oct_ioctl,
596 .ndo_change_mtu = cvm_oct_common_change_mtu,
597 .ndo_get_stats = cvm_oct_common_get_stats,
598#ifdef CONFIG_NET_POLL_CONTROLLER
599 .ndo_poll_controller = cvm_oct_poll_controller,
600#endif
601};
0e350e17 602
f696a108 603static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
710086db
AK
604 .ndo_init = cvm_oct_common_init,
605 .ndo_uninit = cvm_oct_common_uninit,
f696a108 606 .ndo_open = cvm_oct_rgmii_open,
96217ebf 607 .ndo_stop = cvm_oct_common_stop,
f696a108 608 .ndo_start_xmit = cvm_oct_xmit,
afc4b13d 609 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
f696a108
DD
610 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
611 .ndo_do_ioctl = cvm_oct_ioctl,
612 .ndo_change_mtu = cvm_oct_common_change_mtu,
613 .ndo_get_stats = cvm_oct_common_get_stats,
614#ifdef CONFIG_NET_POLL_CONTROLLER
615 .ndo_poll_controller = cvm_oct_poll_controller,
616#endif
617};
0e350e17 618
f696a108
DD
619static const struct net_device_ops cvm_oct_pow_netdev_ops = {
620 .ndo_init = cvm_oct_common_init,
621 .ndo_start_xmit = cvm_oct_xmit_pow,
afc4b13d 622 .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
f696a108
DD
623 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
624 .ndo_do_ioctl = cvm_oct_ioctl,
625 .ndo_change_mtu = cvm_oct_common_change_mtu,
626 .ndo_get_stats = cvm_oct_common_get_stats,
627#ifdef CONFIG_NET_POLL_CONTROLLER
628 .ndo_poll_controller = cvm_oct_poll_controller,
629#endif
630};
631
b186410d
NH
632static struct device_node *cvm_oct_of_get_child(
633 const struct device_node *parent, int reg_val)
df9244c5
DD
634{
635 struct device_node *node = NULL;
636 int size;
637 const __be32 *addr;
638
639 for (;;) {
640 node = of_get_next_child(parent, node);
641 if (!node)
642 break;
643 addr = of_get_property(node, "reg", &size);
644 if (addr && (be32_to_cpu(*addr) == reg_val))
645 break;
646 }
647 return node;
648}
649
4f240906 650static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
ac05a587 651 int interface, int port)
df9244c5
DD
652{
653 struct device_node *ni, *np;
654
655 ni = cvm_oct_of_get_child(pip, interface);
656 if (!ni)
657 return NULL;
658
659 np = cvm_oct_of_get_child(ni, port);
660 of_node_put(ni);
661
662 return np;
663}
664
ef2d4f6c
AK
665static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
666{
667 u32 delay_value;
668
669 if (!of_property_read_u32(np, "rx-delay", &delay_value))
670 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
671 if (!of_property_read_u32(np, "tx-delay", &delay_value))
672 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
673}
674
4f240906 675static int cvm_oct_probe(struct platform_device *pdev)
80ff0fd3
DD
676{
677 int num_interfaces;
678 int interface;
679 int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
680 int qos;
df9244c5 681 struct device_node *pip;
80ff0fd3 682
f6ed1b3b 683 octeon_mdiobus_force_mod_depencency();
80ff0fd3 684
df9244c5
DD
685 pip = pdev->dev.of_node;
686 if (!pip) {
687 pr_err("Error: No 'pip' in /aliases\n");
688 return -EINVAL;
689 }
13c5939e 690
f8c26486 691
80ff0fd3
DD
692 cvm_oct_configure_common_hw();
693
694 cvmx_helper_initialize_packet_io_global();
695
5cf9b1ca
AK
696 if (receive_group_order) {
697 if (receive_group_order > 4)
698 receive_group_order = 4;
699 pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
700 } else {
701 pow_receive_groups = BIT(pow_receive_group);
702 }
e971a119 703
80ff0fd3
DD
704 /* Change the input group for all ports before input is enabled */
705 num_interfaces = cvmx_helper_get_number_of_interfaces();
706 for (interface = 0; interface < num_interfaces; interface++) {
707 int num_ports = cvmx_helper_ports_on_interface(interface);
708 int port;
709
710 for (port = cvmx_helper_get_ipd_port(interface, 0);
711 port < cvmx_helper_get_ipd_port(interface, num_ports);
712 port++) {
713 union cvmx_pip_prt_tagx pip_prt_tagx;
39bc7513 714
80ff0fd3
DD
715 pip_prt_tagx.u64 =
716 cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
5cf9b1ca
AK
717
718 if (receive_group_order) {
719 int tag_mask;
720
721 /* We support only 16 groups at the moment, so
722 * always disable the two additional "hidden"
723 * tag_mask bits on CN68XX.
724 */
725 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
726 pip_prt_tagx.u64 |= 0x3ull << 44;
727
728 tag_mask = ~((1 << receive_group_order) - 1);
729 pip_prt_tagx.s.grptagbase = 0;
730 pip_prt_tagx.s.grptagmask = tag_mask;
731 pip_prt_tagx.s.grptag = 1;
732 pip_prt_tagx.s.tag_mode = 0;
733 pip_prt_tagx.s.inc_prt_flag = 1;
734 pip_prt_tagx.s.ip6_dprt_flag = 1;
735 pip_prt_tagx.s.ip4_dprt_flag = 1;
736 pip_prt_tagx.s.ip6_sprt_flag = 1;
737 pip_prt_tagx.s.ip4_sprt_flag = 1;
738 pip_prt_tagx.s.ip6_dst_flag = 1;
739 pip_prt_tagx.s.ip4_dst_flag = 1;
740 pip_prt_tagx.s.ip6_src_flag = 1;
741 pip_prt_tagx.s.ip4_src_flag = 1;
742 pip_prt_tagx.s.grp = 0;
743 } else {
744 pip_prt_tagx.s.grptag = 0;
745 pip_prt_tagx.s.grp = pow_receive_group;
746 }
747
80ff0fd3
DD
748 cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
749 pip_prt_tagx.u64);
750 }
751 }
752
753 cvmx_helper_ipd_and_packet_input_enable();
754
755 memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
756
757 /*
758 * Initialize the FAU used for counting packet buffers that
759 * need to be freed.
760 */
761 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
762
4898c560
DD
763 /* Initialize the FAU used for counting tx SKBs that need to be freed */
764 cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
765
80ff0fd3
DD
766 if ((pow_send_group != -1)) {
767 struct net_device *dev;
39bc7513 768
80ff0fd3
DD
769 dev = alloc_etherdev(sizeof(struct octeon_ethernet));
770 if (dev) {
771 /* Initialize the device private structure. */
772 struct octeon_ethernet *priv = netdev_priv(dev);
80ff0fd3 773
f696a108 774 dev->netdev_ops = &cvm_oct_pow_netdev_ops;
80ff0fd3
DD
775 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
776 priv->port = CVMX_PIP_NUM_INPUT_PORTS;
777 priv->queue = -1;
778 strcpy(dev->name, "pow%d");
779 for (qos = 0; qos < 16; qos++)
780 skb_queue_head_init(&priv->tx_free_list[qos]);
781
782 if (register_netdev(dev) < 0) {
6568a234 783 pr_err("Failed to register ethernet device for POW\n");
c4711c3a 784 free_netdev(dev);
80ff0fd3
DD
785 } else {
786 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
6568a234
DD
787 pr_info("%s: POW send group %d, receive group %d\n",
788 dev->name, pow_send_group,
789 pow_receive_group);
80ff0fd3
DD
790 }
791 } else {
6568a234 792 pr_err("Failed to allocate ethernet device for POW\n");
80ff0fd3
DD
793 }
794 }
795
796 num_interfaces = cvmx_helper_get_number_of_interfaces();
797 for (interface = 0; interface < num_interfaces; interface++) {
798 cvmx_helper_interface_mode_t imode =
799 cvmx_helper_interface_get_mode(interface);
800 int num_ports = cvmx_helper_ports_on_interface(interface);
801 int port;
df9244c5 802 int port_index;
80ff0fd3 803
b186410d
NH
804 for (port_index = 0,
805 port = cvmx_helper_get_ipd_port(interface, 0);
80ff0fd3 806 port < cvmx_helper_get_ipd_port(interface, num_ports);
df9244c5 807 port_index++, port++) {
80ff0fd3
DD
808 struct octeon_ethernet *priv;
809 struct net_device *dev =
810 alloc_etherdev(sizeof(struct octeon_ethernet));
811 if (!dev) {
99f8dbc5
EA
812 pr_err("Failed to allocate ethernet device for port %d\n",
813 port);
80ff0fd3
DD
814 continue;
815 }
80ff0fd3
DD
816
817 /* Initialize the device private structure. */
818 priv = netdev_priv(dev);
ec3a2207 819 priv->netdev = dev;
b186410d
NH
820 priv->of_node = cvm_oct_node_for_port(pip, interface,
821 port_index);
80ff0fd3 822
4898c560
DD
823 INIT_DELAYED_WORK(&priv->port_periodic_work,
824 cvm_oct_periodic_worker);
80ff0fd3
DD
825 priv->imode = imode;
826 priv->port = port;
827 priv->queue = cvmx_pko_get_base_queue(priv->port);
828 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
829 for (qos = 0; qos < 16; qos++)
830 skb_queue_head_init(&priv->tx_free_list[qos]);
831 for (qos = 0; qos < cvmx_pko_get_num_queues(port);
832 qos++)
833 cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
834
835 switch (priv->imode) {
80ff0fd3
DD
836 /* These types don't support ports to IPD/PKO */
837 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
838 case CVMX_HELPER_INTERFACE_MODE_PCIE:
839 case CVMX_HELPER_INTERFACE_MODE_PICMG:
840 break;
841
842 case CVMX_HELPER_INTERFACE_MODE_NPI:
f696a108 843 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
80ff0fd3
DD
844 strcpy(dev->name, "npi%d");
845 break;
846
847 case CVMX_HELPER_INTERFACE_MODE_XAUI:
f696a108 848 dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
80ff0fd3
DD
849 strcpy(dev->name, "xaui%d");
850 break;
851
852 case CVMX_HELPER_INTERFACE_MODE_LOOP:
f696a108 853 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
80ff0fd3
DD
854 strcpy(dev->name, "loop%d");
855 break;
856
857 case CVMX_HELPER_INTERFACE_MODE_SGMII:
f696a108 858 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
80ff0fd3
DD
859 strcpy(dev->name, "eth%d");
860 break;
861
862 case CVMX_HELPER_INTERFACE_MODE_SPI:
f696a108 863 dev->netdev_ops = &cvm_oct_spi_netdev_ops;
80ff0fd3
DD
864 strcpy(dev->name, "spi%d");
865 break;
866
867 case CVMX_HELPER_INTERFACE_MODE_RGMII:
868 case CVMX_HELPER_INTERFACE_MODE_GMII:
f696a108 869 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
80ff0fd3 870 strcpy(dev->name, "eth%d");
ef2d4f6c
AK
871 cvm_set_rgmii_delay(priv->of_node, interface,
872 port_index);
80ff0fd3
DD
873 break;
874 }
875
f696a108 876 if (!dev->netdev_ops) {
c4711c3a 877 free_netdev(dev);
80ff0fd3 878 } else if (register_netdev(dev) < 0) {
0a5fcc6b 879 pr_err("Failed to register ethernet device for interface %d, port %d\n",
ac05a587 880 interface, priv->port);
c4711c3a 881 free_netdev(dev);
80ff0fd3
DD
882 } else {
883 cvm_oct_device[priv->port] = dev;
884 fau -=
885 cvmx_pko_get_num_queues(priv->port) *
ec2c398e 886 sizeof(u32);
6fe5efa1 887 schedule_delayed_work(&priv->port_periodic_work, HZ);
80ff0fd3
DD
888 }
889 }
890 }
891
4898c560 892 cvm_oct_tx_initialize();
3368c784 893 cvm_oct_rx_initialize();
80ff0fd3 894
4898c560 895 /*
f5801a81 896 * 150 uS: about 10 1500-byte packets at 1GE.
4898c560
DD
897 */
898 cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
80ff0fd3 899
6fe5efa1 900 schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
80ff0fd3
DD
901
902 return 0;
903}
904
f7e2f350 905static int cvm_oct_remove(struct platform_device *pdev)
80ff0fd3
DD
906{
907 int port;
908
80ff0fd3
DD
909 cvmx_ipd_disable();
910
f8c26486
DD
911 atomic_inc_return(&cvm_oct_poll_queue_stopping);
912 cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
913
80ff0fd3 914 cvm_oct_rx_shutdown();
4898c560
DD
915 cvm_oct_tx_shutdown();
916
80ff0fd3
DD
917 cvmx_pko_disable();
918
919 /* Free the ethernet devices */
920 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
921 if (cvm_oct_device[port]) {
f8c26486
DD
922 struct net_device *dev = cvm_oct_device[port];
923 struct octeon_ethernet *priv = netdev_priv(dev);
39bc7513 924
4898c560 925 cancel_delayed_work_sync(&priv->port_periodic_work);
f8c26486 926
4898c560 927 cvm_oct_tx_shutdown_dev(dev);
f8c26486 928 unregister_netdev(dev);
c4711c3a 929 free_netdev(dev);
80ff0fd3
DD
930 cvm_oct_device[port] = NULL;
931 }
932 }
933
f8c26486 934
80ff0fd3 935 cvmx_pko_shutdown();
80ff0fd3
DD
936
937 cvmx_ipd_free_ptr();
938
939 /* Free the HW pools */
940 cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
941 num_packet_buffers);
942 cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
943 num_packet_buffers);
944 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
945 cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
946 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
df9244c5 947 return 0;
80ff0fd3
DD
948}
949
87794575 950static const struct of_device_id cvm_oct_match[] = {
df9244c5
DD
951 {
952 .compatible = "cavium,octeon-3860-pip",
953 },
954 {},
955};
956MODULE_DEVICE_TABLE(of, cvm_oct_match);
957
958static struct platform_driver cvm_oct_driver = {
959 .probe = cvm_oct_probe,
095d0bb5 960 .remove = cvm_oct_remove,
df9244c5 961 .driver = {
df9244c5
DD
962 .name = KBUILD_MODNAME,
963 .of_match_table = cvm_oct_match,
964 },
965};
966
967module_platform_driver(cvm_oct_driver);
968
80ff0fd3
DD
969MODULE_LICENSE("GPL");
970MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
971MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
This page took 0.955682 seconds and 5 git commands to generate.