/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"
#define EFX_MAX_MTU (9 * 1024)

/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per cpu
 * workqueue, there is nothing to be gained in making it per NIC
 */
static struct workqueue_struct *refill_workqueue;

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;
/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
 *
 * This sets the default for new devices. It can be controlled later
 * using ethtool.
 */
static int lro = true;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");
/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0644);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;
/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;
/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
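
/* Worked through (illustrative interpretation of the figure above): the
 * queue restarts at half full, i.e. 1024 / 2 = 512 descriptors; at the
 * worst case of 3 descriptors per packet that is ~170 packets, and at
 * ~1.2 usec per packet on a 10G link the backlog drains in roughly
 * 170 * 1.2 ~= 205 usec, so a 150 usec moderation interval cannot leave
 * the link idle while a stopped queue empties. */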
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if (efx->state == STATE_RUNNING)	\
			ASSERT_RTNL();			\
	} while (0)
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/
/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);
	if (rx_packets == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}
/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		struct efx_nic *efx = channel->efx;

		if (channel->used_flags & EFX_USED_BY_RX &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			unsigned old_irq_moderation = channel->irq_moderation;

			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				channel->irq_moderation =
					max_t(int,
					      channel->irq_moderation -
					      FALCON_IRQ_MOD_RESOLUTION,
					      FALCON_IRQ_MOD_RESOLUTION);
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				channel->irq_moderation =
					min(channel->irq_moderation +
					    FALCON_IRQ_MOD_RESOLUTION,
					    efx->irq_rx_moderation);
			}

			if (channel->irq_moderation != old_irq_moderation)
				falcon_set_int_moderation(channel);

			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, efx->type->evq_size);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}
static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;
	const char *type = "";
	int number;

	efx_for_each_channel(channel, efx) {
		number = channel->channel;
		if (efx->n_channels > efx->n_rx_queues) {
			if (channel->channel < efx->n_rx_queues) {
				type = "-rx";
			} else {
				type = "-tx";
				number -= efx->n_rx_queues;
			}
		}
		snprintf(channel->name, sizeof(channel->name),
			 "%s%s-%d", efx->name, type, number);
	}
}
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}
/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	if (!(channel->efx->net_dev->flags & IFF_UP))
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}
/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}
static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}
static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}
/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/
/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up)
		EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
			 efx->link_speed, efx->link_fd ? "full" : "half",
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	else
		EFX_INFO(efx, "link down\n");
}

static void efx_fini_port(struct efx_nic *efx);
/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	falcon_deconfigure_mac_wrapper(efx);

	/* Reconfigure the PHY, disabling transmit in mac level loopback. */
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
	efx->phy_op->reconfigure(efx);

	if (falcon_switch_mac(efx))
		goto fail;

	efx->mac_op->reconfigure(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);

	return;

fail:
	EFX_ERR(efx, "failed to reconfigure MAC\n");
	efx->port_enabled = false;
	efx_fini_port(efx);
}
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}
/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_phy_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		EFX_ERR(efx, "invalid MAC address %pM\n",
			efx->mac_address);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %pM\n",
			 efx->net_dev->dev_addr);
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}
static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	rc = efx->phy_op->init(efx);
	if (rc)
		return rc;
	mutex_lock(&efx->mac_lock);
	efx->phy_op->reconfigure(efx);
	rc = falcon_switch_mac(efx);
	mutex_unlock(&efx->mac_lock);
	if (rc)
		goto fail;
	efx->mac_op->reconfigure(efx);

	efx->port_initialized = true;
	efx_stats_enable(efx);
	return 0;

fail:
	efx->phy_op->fini(efx);
	return rc;
}
/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_phy_work()/efx_mac_work() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing,
 * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
 * and efx_mac_work may still be scheduled via NAPI processing until
 * efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}
static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx_stats_disable(efx);
	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}
/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev,
					       efx->type->mem_bar);
	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
			efx->type->mem_bar,
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, efx->type->mem_bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}
static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}
/* Get number of RX queues wanted. Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_var_t core_mask;
	int count;
	int cpu;

	if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
		printk(KERN_WARNING
		       "efx.c: allocation failure, irq balancing hobbled\n");
		return 1;
	}

	cpumask_clear(core_mask);
	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, core_mask)) {
			++count;
			cpumask_or(core_mask, core_mask,
				   topology_core_cpumask(cpu));
		}
	}

	free_cpumask_var(core_mask);
	return count;
}
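
/* Worked example (illustrative): on a system with two packages of four
 * cores each, topology_core_cpumask() covers a whole package, so the
 * loop above counts each package exactly once and returns 2. */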
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;
		int rx_queues;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
		wanted_ints = min(wanted_ints, max_channels);

		for (i = 0; i < wanted_ints; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
		if (rc > 0) {
			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
				" available (%d < %d).\n", rc, wanted_ints);
			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= wanted_ints);
			wanted_ints = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     wanted_ints);
		}

		if (rc == 0) {
			efx->n_rx_queues = min(rx_queues, wanted_ints);
			efx->n_channels = wanted_ints;
			for (i = 0; i < wanted_ints; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->legacy_irq = efx->pci_dev->irq;
	}
}
static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (separate_tx_channels)
			tx_queue->channel = &efx->channel[efx->n_channels - 1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}
static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}
/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/
static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}
	efx_set_channel_names(efx);

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	if (efx_dev_registered(efx))
		efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}
/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
	cancel_work_sync(&efx->phy_work);
}
/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose phy events */
	efx_stop_port(efx);

	/* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		efx_stop_queue(efx);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}
static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	efx_init_channels(efx);

	efx_start_all(efx);
}
/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
			     bool rx_adaptive)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_usecs;
	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}
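
/* Example (illustrative, assuming the driver's ethtool coalesce hooks
 * are wired up in efx_ethtool_ops): these settings can be overridden
 * per interface at runtime, e.g.
 *	ethtool -C eth0 rx-usecs 60 tx-usecs 150 adaptive-rx on */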
/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock))
		goto out_requeue;
	if (!efx->port_enabled)
		goto out_unlock;
	rc = efx->board_info.monitor(efx);
	if (rc) {
		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
			(rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		falcon_sim_phy_event(efx);
	}
	efx->phy_op->poll(efx);
	efx->mac_op->poll(efx);

out_unlock:
	mutex_unlock(&efx->mac_lock);
out_requeue:
	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}
/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}
/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
	}
	return 0;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = NULL;
	}
}
/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	efx_start_all(efx);
	return 0;
}
/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
		efx_fini_channels(efx);
		efx_init_channels(efx);
	}

	return 0;
}
void efx_stats_disable(struct efx_nic *efx)
{
	spin_lock(&efx->stats_lock);
	++efx->stats_disable_count;
	spin_unlock(&efx->stats_lock);
}

void efx_stats_enable(struct efx_nic *efx)
{
	spin_lock(&efx->stats_lock);
	--efx->stats_disable_count;
	spin_unlock(&efx->stats_lock);
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them or if MAC stats fetches are temporarily
	 * disabled; slightly stale stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (!efx->stats_disable_count) {
		efx->mac_op->update_stats(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
		" resetting channels\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	efx_init_channels(efx);

	efx_start_all(efx);
	return rc;
}
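
/* Example (illustrative): with EFX_MAX_MTU = 9 * 1024, a jumbo frame
 * size can be requested with e.g.
 *	ip link set dev eth0 mtu 9000
 * which the kernel delivers to this handler under the rtnl lock. */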
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
			new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
	bool changed = (efx->promiscuous != promiscuous);
	u32 crc;
	int bit;
	int i;

	efx->promiscuous = promiscuous;

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}

	if (!efx->port_enabled)
		/* Delay pushing settings until efx_start_port() */
		return;

	if (changed)
		queue_work(efx->workqueue, &efx->phy_work);

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}
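
/* Example (illustrative): for a group address such as 01:00:5e:00:00:01,
 * the loop above reduces ether_crc_le() of the address to the low
 * log2(EFX_MCAST_HASH_ENTRIES) bits and sets that bit in mc_hash; the
 * hardware then accepts any multicast frame whose address hashes to a
 * set bit, so the filter is approximate rather than exact. */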
static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats		= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_multicast_list = efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
};
static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
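
/* Example (path illustrative): once efx_register_netdev() has created
 * this attribute, the PHY type can be read from sysfs, e.g.
 *	cat /sys/bus/pci/devices/0000:01:00.0/phy_type */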
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	/* Clear MAC statistics */
	efx->mac_op->update_stats(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rc = register_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "could not register net dev\n");
		return rc;
	}

	rtnl_lock();
	efx_update_name(efx);
	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		EFX_ERR(efx, "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_registered:
	unregister_netdev(net_dev);
	return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, enum reset_type method,
		    struct ethtool_cmd *ecmd)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stats_disable(efx);
	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);
	mutex_lock(&efx->spi_lock);

	efx->phy_op->get_settings(efx, ecmd);

	efx_fini_channels(efx);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
}
/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method,
		 struct ethtool_cmd *ecmd, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		ok = false;
	}

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		if (ok) {
			rc = efx->phy_op->init(efx);
			if (rc)
				ok = false;
		}
		if (!ok)
			efx->port_initialized = false;
	}

	if (ok) {
		efx_init_channels(efx);

		if (efx->phy_op->set_settings(efx, ecmd))
			EFX_ERR(efx, "could not restore PHY settings\n");
	}

	mutex_unlock(&efx->spi_lock);
	mutex_unlock(&efx->mac_lock);

	if (ok) {
		efx_start_all(efx);
		efx_stats_enable(efx);
	}
	return rc;
}
/* Reset the NIC as transparently as possible. Do not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep. You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc = 0;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto out_unlock;
	}

	EFX_INFO(efx, "resetting (%d)\n", method);

	efx_reset_down(efx, method, &ecmd);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto out_disable;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		efx_reset_up(efx, method, &ecmd, false);
		rc = -EIO;
	} else {
		rc = efx_reset_up(efx, method, &ecmd, true);
	}

out_disable:
	if (rc) {
		EFX_ERR(efx, "has been disabled\n");
		efx->state = STATE_DISABLED;
		dev_close(efx->net_dev);
	} else {
		EFX_LOG(efx, "reset complete\n");
	}

out_unlock:
	rtnl_unlock();
	return rc;
}
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
	else
		EFX_LOG(efx, "scheduling reset (%d)\n", method);

	efx->reset_pending = method;

	queue_work(reset_workqueue, &efx->reset_work);
}
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}			/* end of list */
};
/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}

static struct efx_mac_operations efx_dummy_mac_operations = {
	.reconfigure	= efx_port_dummy_op_void,
	.poll		= efx_port_dummy_op_void,
	.irq		= efx_port_dummy_op_void,
};

static struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_void,
	.poll		 = efx_port_dummy_op_void,
	.fini		 = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
};

static struct efx_board efx_dummy_board_info = {
	.init		= efx_port_dummy_op_int,
	.init_leds	= efx_port_dummy_op_void,
	.set_id_led	= efx_port_dummy_op_blink,
	.monitor	= efx_port_dummy_op_int,
	.blink		= efx_port_dummy_op_blink,
	.fini		= efx_port_dummy_op_void,
};
/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	mutex_init(&efx->spi_lock);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	efx->board_info = efx_dummy_board_info;

	efx->net_dev = net_dev;
	efx->rx_checksum_enabled = true;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	efx->stats_disable_count = 1;
	mutex_init(&efx->mac_lock);
	efx->mac_op = &efx_dummy_mac_operations;
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mii.dev = net_dev;
	INIT_WORK(&efx->phy_work, efx_phy_work);
	INIT_WORK(&efx->mac_work, efx_mac_work);
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->work_pending = false;
	}
	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
		tx_queue->tso_headers_free = NULL;
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = type;

	/* Sanity-check NIC type */
	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
			    (efx->type->txd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
			    (efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->evq_size &
			    (efx->type->evq_size - 1));
	/* As close as we can get to guaranteeing that we don't overflow */
	EFX_BUG_ON_PARANOID(efx->type->evq_size <
			    (efx->type->txd_ring_mask + 1 +
			     efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		return -ENOMEM;

	return 0;
}
static void efx_fini_struct(struct efx_nic *efx)
{
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/
/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Skip everything if we never obtained a valid membase */
	if (!efx->membase)
		return;

	efx_fini_channels(efx);
	efx_fini_port(efx);

	/* Shutdown the board, then the NIC and board state */
	efx->board_info.fini(efx);
	falcon_fini_interrupt(efx);

	efx_fini_napi(efx);
	efx_remove_all(efx);
}
/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	if (efx->membase == NULL)
		goto out;

	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net_device's have been removed. */
	cancel_work_sync(&efx->reset_work);

	efx_pci_remove_main(efx);

out:
	efx_fini_io(efx);
	EFX_LOG(efx, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
}
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	rc = efx_init_napi(efx);
	if (rc)
		goto fail2;

	/* Initialise the board */
	rc = efx->board_info.init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail3;
	}

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail4;
	}

	rc = efx_init_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise port\n");
		goto fail5;
	}

	efx_init_channels(efx);

	rc = falcon_init_interrupt(efx);
	if (rc)
		goto fail6;

	return 0;

 fail6:
	efx_fini_channels(efx);
	efx_fini_port(efx);
 fail5:
 fail4:
	efx->board_info.fini(efx);
 fail3:
	efx_fini_napi(efx);
 fail2:
	efx_remove_all(efx);
 fail1:
	return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int i, rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev(sizeof(*efx));
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO);
	if (lro)
		net_dev->features |= NETIF_F_GRO;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_TSO);
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	EFX_INFO(efx, "Solarflare Communications NIC detected\n");

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	/* No serialisation is required with the reset path because
	 * we're in STATE_INIT. */
	for (i = 0; i < 5; i++) {
		rc = efx_pci_probe_main(efx);

		/* Serialise against efx_reset(). No more resets will be
		 * scheduled since efx_stop_all() has been called, and we
		 * have not and never have been registered with either
		 * the rtnetlink or driverlink layers. */
		cancel_work_sync(&efx->reset_work);

		if (rc == 0) {
			if (efx->reset_pending != RESET_TYPE_NONE) {
				/* If there was a scheduled reset during
				 * probe, the NIC is probably hosed anyway */
				efx_pci_remove_main(efx);
				rc = -EIO;
			} else {
				break;
			}
		}

		/* Retry if a recoverable reset event has been scheduled */
		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
		    (efx->reset_pending != RESET_TYPE_ALL))
			goto fail3;

		efx->reset_pending = RESET_TYPE_NONE;
	}

	if (rc) {
		EFX_ERR(efx, "Could not reset NIC\n");
		goto fail4;
	}

	/* Switch to the running state before we expose the device to
	 * the OS. This is to ensure that the initial gathering of
	 * MAC stats succeeds. */
	efx->state = STATE_RUNNING;

	efx_mtd_probe(efx); /* allowed to fail */

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail5;

	EFX_LOG(efx, "initialisation successful\n");
	return 0;

 fail5:
	efx_pci_remove_main(efx);
 fail4:
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
static struct pci_driver efx_pci_driver = {
	.name		= EFX_DRIVER_NAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
};
/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	refill_workqueue = create_workqueue("sfc_refill");
	if (!refill_workqueue) {
		rc = -ENOMEM;
		goto err_refill;
	}
	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
	destroy_workqueue(refill_workqueue);
 err_refill:
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}
static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	destroy_workqueue(refill_workqueue);
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
	      "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);