/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/crc32.h>

#include "alx.h"
#include "hw.h"
#include "reg.h"
const char alx_drv_name[] = "alx";

static bool msix = false;
module_param(msix, bool, 0);
MODULE_PARM_DESC(msix, "Enable msi-x interrupt support");
static void alx_free_txbuf(struct alx_priv *alx, int entry)
{
	struct alx_buffer *txb = &alx->txq.bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}
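/* Fill empty RX ring slots with freshly allocated, DMA-mapped skbs and
 * hand the new producer index to the hardware. Returns the number of
 * buffers that were added.
 */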
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct sk_buff *skb;
	struct alx_buffer *cur_buf;
	dma_addr_t dma;
	u16 cur, next, count = 0;

	next = cur = rxq->write_idx;
	if (++next == alx->rx_ringsz)
		next = 0;
	cur_buf = &rxq->bufs[cur];

	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

		/*
		 * When DMA RX address is set to something like
		 * 0x....fc0, it will be very likely to cause DMA
		 * RFD overflow issue.
		 *
		 * To work around it, we apply rx skb with 64 bytes
		 * longer space, and offset the address whenever
		 * 0x....fc0 is detected.
		 */
		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
		if (!skb)
			break;

		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
			skb_reserve(skb, 64);

		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
			dev_kfree_skb(skb);
			break;
		}

		/* Unfortunately, RX descriptor buffers must be 4-byte
		 * aligned, so we can't use IP alignment.
		 */
		if (WARN_ON(dma & 3)) {
			dev_kfree_skb(skb);
			break;
		}

		cur_buf->skb = skb;
		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
		dma_unmap_addr_set(cur_buf, dma, dma);
		rfd->addr = cpu_to_le64(dma);

		cur = next;
		if (++next == alx->rx_ringsz)
			next = 0;
		cur_buf = &rxq->bufs[cur];
		count++;
	}

	if (count) {
		/* flush all updates before updating hardware */
		wmb();
		rxq->write_idx = cur;
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

	return count;
}
static inline int alx_tpd_avail(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;

	if (txq->write_idx >= txq->read_idx)
		return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
	return txq->read_idx - txq->write_idx - 1;
}
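/* Reclaim TX buffers the hardware is done with, up to its consumer
 * index, and wake the queue once at least a quarter of the ring is
 * free. Returns true when the software index has caught up.
 */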
static bool alx_clean_tx_irq(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	sw_read_idx = txq->read_idx;
	hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(alx, sw_read_idx);

			if (++sw_read_idx == alx->tx_ringsz)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		netdev_completed_queue(alx->dev, total_packets, total_bytes);
	}

	if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(alx) > alx->tx_ringsz/4)
		netif_wake_queue(alx->dev);

	return sw_read_idx == hw_read_idx;
}
static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}

static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}
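/* Receive path: each returned RRD describes one packet in the RFD
 * ring. The buffer is unmapped, trimmed of its FCS, checked for
 * hardware checksum validity and handed to GRO; consumed RFDs are
 * refilled in batches of ALX_RX_ALLOC_THRESH.
 */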
static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;
	int work = 0;

	while (work < budget) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return work;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			rrd->word3 = 0;
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, alx->dev);

		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&alx->napi, skb);
		work++;

next_pkt:
		if (++rxq->read_idx == alx->rx_ringsz)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == alx->rx_ringsz)
			rxq->rrd_read_idx = 0;

		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return work;
}
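/* NAPI poll: clean both rings and re-enable interrupts only when TX is
 * fully reclaimed and RX consumed less than the budget.
 */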
static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	bool tx_complete;
	int work;

	tx_complete = alx_clean_tx_irq(alx);
	work = alx_clean_rx_irq(alx, budget);

	if (!tx_complete || work == budget)
		return budget;

	napi_complete(&alx->napi);

	/* enable interrupt */
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_mask_msix(hw, 1, false);
	} else {
		spin_lock_irqsave(&alx->irq_lock, flags);
		alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		spin_unlock_irqrestore(&alx->irq_lock, flags);
	}

	alx_write_mem32(hw, ALX_ISR, 0);

	return work;
}
static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		return true;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* suppress the PHY interrupt: its source is internal to
		 * the PHY, and the main interrupt status can only be
		 * cleared once the link-check work has cleared the
		 * PHY's internal status.
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		alx_schedule_link_check(alx);
	}

	return false;
}
static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (alx_intr_handle_misc(alx, intr))
		goto out;

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->napi);
		/* mask rx/tx interrupt, enable them when napi complete */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	}

	alx_write_mem32(hw, ALX_ISR, 0);

out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}
static irqreturn_t alx_intr_msix_ring(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, 1, true);
	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0));

	napi_schedule(&alx->napi);

	return IRQ_HANDLED;
}
static irqreturn_t alx_intr_msix_misc(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, 0, true);

	/* read interrupt status */
	intr = alx_read_mem32(hw, ALX_ISR);
	intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);

	if (alx_intr_handle_misc(alx, intr))
		return IRQ_HANDLED;

	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, intr);

	/* enable interrupt again */
	alx_mask_msix(hw, 0, false);

	return IRQ_HANDLED;
}
static irqreturn_t alx_intr_msi(int irq, void *data)
{
	struct alx_priv *alx = data;

	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
}
static irqreturn_t alx_intr_legacy(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	intr = alx_read_mem32(hw, ALX_ISR);

	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
		return IRQ_NONE;

	return alx_intr_handle(alx, intr);
}
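/* Program ring sizes and DMA base addresses into the chip. The high
 * 32 address bits are a single shared register, which is why all
 * descriptor memory is carved out of one coherent allocation.
 */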
static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;

	alx->rxq.read_idx = 0;
	alx->rxq.write_idx = 0;
	alx->rxq.rrd_read_idx = 0;
	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	alx->txq.read_idx = 0;
	alx->txq.write_idx = 0;
	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	/* load these pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}
static void alx_free_txring_buf(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < alx->tx_ringsz; i++)
		alx_free_txbuf(alx, i);

	memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	netdev_reset_queue(alx->dev);
}
static void alx_free_rxring_buf(struct alx_priv *alx)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_buffer *cur_buf;
	u16 i;

	if (!rxq->bufs)
		return;

	for (i = 0; i < alx->rx_ringsz; i++) {
		cur_buf = rxq->bufs + i;
		if (cur_buf->skb) {
			dma_unmap_single(&alx->hw.pdev->dev,
					 dma_unmap_addr(cur_buf, dma),
					 dma_unmap_len(cur_buf, size),
					 DMA_FROM_DEVICE);
			dev_kfree_skb(cur_buf->skb);
			cur_buf->skb = NULL;
			dma_unmap_len_set(cur_buf, size, 0);
			dma_unmap_addr_set(cur_buf, dma, 0);
		}
	}

	rxq->write_idx = 0;
	rxq->read_idx = 0;
	rxq->rrd_read_idx = 0;
}
static void alx_free_buffers(struct alx_priv *alx)
{
	alx_free_txring_buf(alx);
	alx_free_rxring_buf(alx);
}
static int alx_reinit_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	alx_init_ring_ptrs(alx);

	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}
static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
{
	u32 crc32, bit, reg;

	crc32 = ether_crc(ETH_ALEN, addr);
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mc_hash[reg] |= BIT(bit);
}
static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}
static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}
static int alx_set_mac_address(struct net_device *netdev, void *data)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
		netdev->addr_assign_type ^= NET_ADDR_RANDOM;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
	alx_set_macaddr(hw, hw->mac_addr);

	return 0;
}
static int alx_alloc_descriptors(struct alx_priv *alx)
{
	alx->txq.bufs = kcalloc(alx->tx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->txq.bufs)
		return -ENOMEM;

	alx->rxq.bufs = kcalloc(alx->rx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->rxq.bufs)
		goto out_free;

	/* physical tx/rx ring descriptors
	 *
	 * Allocate them as a single chunk because they must not cross a
	 * 4G boundary (hardware has a single register for high 32 bits
	 * of addresses only)
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
						alx->descmem.size,
						&alx->descmem.dma,
						GFP_KERNEL);
	if (!alx->descmem.virt)
		goto out_free;

	alx->txq.tpd = alx->descmem.virt;
	alx->txq.tpd_dma = alx->descmem.dma;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);

	alx->rxq.rrd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz);
	alx->rxq.rrd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	alx->rxq.rfd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz +
			 sizeof(struct alx_rrd) * alx->rx_ringsz);
	alx->rxq.rfd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz +
			   sizeof(struct alx_rrd) * alx->rx_ringsz;

	return 0;

out_free:
	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);
	return -ENOMEM;
}
static int alx_alloc_rings(struct alx_priv *alx)
{
	int err;

	err = alx_alloc_descriptors(alx);
	if (err)
		return err;

	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;

	netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);

	alx_reinit_rings(alx);
	return 0;
}
static void alx_free_rings(struct alx_priv *alx)
{
	netif_napi_del(&alx->napi);
	alx_free_buffers(alx);

	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);

	dma_free_coherent(&alx->hw.pdev->dev,
			  alx->descmem.size,
			  alx->descmem.virt,
			  alx->descmem.dma);
}
static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 tbl = 0;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		tbl |= 1 << ALX_MSI_MAP_TBL1_TXQ0_SHIFT;
		tbl |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
	}

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}
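/* Without multi-queue support the driver uses exactly two MSI-X
 * vectors: entry 0 for misc/link interrupts, entry 1 for TX/RX.
 */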
static bool alx_enable_msix(struct alx_priv *alx)
{
	int i, err, num_vec = 2;

	alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
				    GFP_KERNEL);
	if (!alx->msix_entries) {
		netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
		return false;
	}

	for (i = 0; i < num_vec; i++)
		alx->msix_entries[i].entry = i;

	err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
	if (err) {
		kfree(alx->msix_entries);
		netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
		return false;
	}

	alx->num_vec = num_vec;
	return true;
}
static int alx_request_msix(struct alx_priv *alx)
{
	struct net_device *netdev = alx->dev;
	int i, err, vector = 0, free_vector = 0;

	err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
			  0, netdev->name, alx);
	if (err)
		goto out_err;

	vector++;
	sprintf(alx->irq_lbl, "%s-TxRx-0", netdev->name);

	err = request_irq(alx->msix_entries[vector].vector,
			  alx_intr_msix_ring, 0, alx->irq_lbl, alx);
	if (err)
		goto out_free;

	return 0;

out_free:
	free_irq(alx->msix_entries[free_vector++].vector, alx);

	vector--;
	for (i = 0; i < vector; i++)
		free_irq(alx->msix_entries[free_vector++].vector, alx);

out_err:
	return err;
}
static void alx_init_intr(struct alx_priv *alx, bool msix)
{
	if (msix) {
		if (alx_enable_msix(alx))
			alx->flags |= ALX_FLAG_USING_MSIX;
	}

	if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
		alx->num_vec = 1;

		if (!pci_enable_msi(alx->hw.pdev))
			alx->flags |= ALX_FLAG_USING_MSI;
	}
}
static void alx_disable_advanced_intr(struct alx_priv *alx)
{
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		kfree(alx->msix_entries);
		pci_disable_msix(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSIX;
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		pci_disable_msi(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSI;
	}
}
static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX)
		/* enable all msix irqs */
		for (i = 0; i < alx->num_vec; i++)
			alx_mask_msix(hw, i, false);
}
static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		for (i = 0; i < alx->num_vec; i++) {
			alx_mask_msix(hw, i, true);
			synchronize_irq(alx->msix_entries[i].vector);
		}
	} else {
		synchronize_irq(alx->hw.pdev->irq);
	}
}
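/* Install the top-level interrupt handler. If MSI-X or MSI setup
 * fails, the driver falls back MSI-X -> MSI -> legacy INTx, programming
 * the MSI retransmission timer to match the mode that was chosen.
 */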
static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	u32 msi_ctrl;
	int err;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
		err = alx_request_msix(alx);
		if (!err)
			goto out;

		/* msix request failed, realloc resources */
		alx_disable_advanced_intr(alx);
		alx_init_intr(alx, false);
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pdev->irq, alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;

		/* fall back to legacy interrupt */
		alx->flags &= ~ALX_FLAG_USING_MSI;
		pci_disable_msi(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	else
		netdev_err(alx->dev, "IRQ registration failed!\n");

	return err;
}
static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	int i;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		/* we have only 2 vectors without multi queue support */
		for (i = 0; i < 2; i++)
			free_irq(alx->msix_entries[i].vector, alx);
	} else {
		free_irq(pdev->irq, alx);
	}

	alx_disable_advanced_intr(alx);
}
static int alx_identify_hw(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int rev = alx_hw_revision(hw);

	if (rev > ALX_REV_C0)
		return -EINVAL;

	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;

	return 0;
}
static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return err;
}
static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}
static void alx_netif_stop(struct alx_priv *alx)
{
	netif_trans_update(alx->dev);
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		napi_disable(&alx->napi);
	}
}
static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}
static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}
static void alx_activate(struct alx_priv *alx)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}
static void alx_reinit(struct alx_priv *alx)
{
	ASSERT_RTNL();

	alx_halt(alx);
	alx_activate(alx);
}
static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = ALX_MAX_FRAME_LEN(mtu);

	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
	    (max_frame > ALX_MAX_FRAME_SIZE))
		return -EINVAL;

	if (netdev->mtu == mtu)
		return 0;

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}
*alx
)
1015 netif_tx_wake_all_queues(alx
->dev
);
1016 napi_enable(&alx
->napi
);
1017 netif_carrier_on(alx
->dev
);
static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	alx_init_intr(alx, msix);

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_rings(alx);
	if (err)
		return err;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	return err;
}
static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);
	alx_free_rings(alx);
}
static const char *alx_speed_desc(struct alx_hw *hw)
{
	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
	case ADVERTISED_1000baseT_Full:
		return "1 Gbps Full";
	case ADVERTISED_100baseT_Full:
		return "100 Mbps Full";
	case ADVERTISED_100baseT_Half:
		return "100 Mbps Half";
	case ADVERTISED_10baseT_Full:
		return "10 Mbps Full";
	case ADVERTISED_10baseT_Half:
		return "10 Mbps Half";
	default:
		return "Unknown speed";
	}
}
static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	u8 old_duplex;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	old_duplex = hw->duplex;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}
static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}

static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}
static void alx_link_check(struct work_struct *work)
{
	struct alx_priv *alx;

	alx = container_of(work, struct alx_priv, link_check_wk);

	rtnl_lock();
	alx_check_link(alx);
	rtnl_unlock();
}
static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	rtnl_lock();
	alx_reinit(alx);
	rtnl_unlock();
}
static int alx_tpd_req(struct sk_buff *skb)
{
	int num;

	num = skb_shinfo(skb)->nr_frags + 1;
	/* we need one extra descriptor for LSOv2 */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		num++;

	return num;
}
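/* Program checksum-offload fields into the first TPD. The hardware
 * expects the checksum start and result offsets in 16-bit words, hence
 * the >> 1 shifts and the rejection of odd start offsets.
 */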
static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
	u8 cso, css;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	cso = skb_checksum_start_offset(skb);
	if (cso & 1)
		return -EINVAL;

	css = cso + skb->csum_offset;
	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);

	return 0;
}
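/* Prepare a TPD for TSO: seed the TCP pseudo-header checksum with a
 * zero length (the hardware inserts the per-segment length) and encode
 * the L4 header offset and MSS. IPv6 uses the LSOv2 layout, where the
 * first TPD carries only the total packet length.
 */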
static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
{
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		first->word1 |= 1 << TPD_IPV4_SHIFT;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		/* LSOv2: the first TPD only provides the packet length */
		first->adrl.l.pkt_len = skb->len;
		first->word1 |= 1 << TPD_LSO_V2_SHIFT;
	}

	first->word1 |= 1 << TPD_LSO_EN_SHIFT;
	first->word1 |= (skb_transport_offset(skb) &
			 TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
	first->word1 |= (skb_shinfo(skb)->gso_size &
			 TPD_MSS_MASK) << TPD_MSS_SHIFT;
	return 1;
}
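/* Map the skb head and all paged fragments into consecutive TPDs,
 * setting EOP on the last one. On a DMA mapping error, everything
 * written since first_idx is unwound and the ring state restored.
 */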
static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
{
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
		if (++txq->write_idx == alx->tx_ringsz)
			txq->write_idx = 0;

		tpd = &txq->tpd[txq->write_idx];
		tpd->len = first_tpd->len;
		tpd->vlan_tag = first_tpd->vlan_tag;
		tpd->word1 = first_tpd->word1;
	}

	maplen = skb_headlen(skb);
	dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&alx->hw.pdev->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == alx->tx_ringsz)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == alx->tx_ringsz)
		txq->write_idx = 0;

	return 0;

err_dma:
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(alx, f);
		if (++f == alx->tx_ringsz)
			f = 0;
	}
	return -ENOMEM;
}
static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *first;
	int tso;

	if (alx_tpd_avail(alx) < alx_tpd_req(skb)) {
		netif_stop_queue(alx->dev);
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	tso = alx_tso(skb, first);
	if (tso < 0)
		goto drop;
	else if (!tso && alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(alx, skb) < 0)
		goto drop;

	netdev_sent_queue(alx->dev, skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);

	if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
		netif_stop_queue(alx->dev);

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
static void alx_tx_timeout(struct net_device *dev)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}
static int alx_mdio_read(struct net_device *netdev,
			 int prtad, int devad, u16 addr)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u16 val;
	int err;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		err = alx_read_phy_reg(hw, addr, &val);
	else
		err = alx_read_phy_ext(hw, devad, addr, &val);

	if (err)
		return err;

	return val;
}
static int alx_mdio_write(struct net_device *netdev,
			  int prtad, int devad, u16 addr, u16 val)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		return alx_write_phy_reg(hw, addr, val);

	return alx_write_phy_ext(hw, devad, addr, val);
}
static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_intr_msix_misc(0, alx);
		alx_intr_msix_ring(0, alx);
	} else if (alx->flags & ALX_FLAG_USING_MSI)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif
*alx_get_stats64(struct net_device
*dev
,
1426 struct rtnl_link_stats64
*net_stats
)
1428 struct alx_priv
*alx
= netdev_priv(dev
);
1429 struct alx_hw_stats
*hw_stats
= &alx
->hw
.stats
;
1431 spin_lock(&alx
->stats_lock
);
1433 alx_update_hw_stats(&alx
->hw
);
1435 net_stats
->tx_bytes
= hw_stats
->tx_byte_cnt
;
1436 net_stats
->rx_bytes
= hw_stats
->rx_byte_cnt
;
1437 net_stats
->multicast
= hw_stats
->rx_mcast
;
1438 net_stats
->collisions
= hw_stats
->tx_single_col
+
1439 hw_stats
->tx_multi_col
+
1440 hw_stats
->tx_late_col
+
1441 hw_stats
->tx_abort_col
;
1443 net_stats
->rx_errors
= hw_stats
->rx_frag
+
1444 hw_stats
->rx_fcs_err
+
1445 hw_stats
->rx_len_err
+
1446 hw_stats
->rx_ov_sz
+
1447 hw_stats
->rx_ov_rrd
+
1448 hw_stats
->rx_align_err
+
1449 hw_stats
->rx_ov_rxf
;
1451 net_stats
->rx_fifo_errors
= hw_stats
->rx_ov_rxf
;
1452 net_stats
->rx_length_errors
= hw_stats
->rx_len_err
;
1453 net_stats
->rx_crc_errors
= hw_stats
->rx_fcs_err
;
1454 net_stats
->rx_frame_errors
= hw_stats
->rx_align_err
;
1455 net_stats
->rx_dropped
= hw_stats
->rx_ov_rrd
;
1457 net_stats
->tx_errors
= hw_stats
->tx_late_col
+
1458 hw_stats
->tx_abort_col
+
1459 hw_stats
->tx_underrun
+
1462 net_stats
->tx_aborted_errors
= hw_stats
->tx_abort_col
;
1463 net_stats
->tx_fifo_errors
= hw_stats
->tx_underrun
;
1464 net_stats
->tx_window_errors
= hw_stats
->tx_late_col
;
1466 net_stats
->tx_packets
= hw_stats
->tx_ok
+ net_stats
->tx_errors
;
1467 net_stats
->rx_packets
= hw_stats
->rx_ok
+ net_stats
->rx_errors
;
1469 spin_unlock(&alx
->stats_lock
);
1474 static const struct net_device_ops alx_netdev_ops
= {
1475 .ndo_open
= alx_open
,
1476 .ndo_stop
= alx_stop
,
1477 .ndo_start_xmit
= alx_start_xmit
,
1478 .ndo_get_stats64
= alx_get_stats64
,
1479 .ndo_set_rx_mode
= alx_set_rx_mode
,
1480 .ndo_validate_addr
= eth_validate_addr
,
1481 .ndo_set_mac_address
= alx_set_mac_address
,
1482 .ndo_change_mtu
= alx_change_mtu
,
1483 .ndo_do_ioctl
= alx_ioctl
,
1484 .ndo_tx_timeout
= alx_tx_timeout
,
1485 .ndo_fix_features
= alx_fix_features
,
1486 #ifdef CONFIG_NET_POLL_CONTROLLER
1487 .ndo_poll_controller
= alx_poll_controller
,
1491 static int alx_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1493 struct net_device
*netdev
;
1494 struct alx_priv
*alx
;
1496 bool phy_configured
;
1499 err
= pci_enable_device_mem(pdev
);
1503 /* The alx chip can DMA to 64-bit addresses, but it uses a single
1504 * shared register for the high 32 bits, so only a single, aligned,
1505 * 4 GB physical address range can be used for descriptors.
1507 if (!dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64))) {
1508 dev_dbg(&pdev
->dev
, "DMA to 64-BIT addresses\n");
1510 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
1512 dev_err(&pdev
->dev
, "No usable DMA config, aborting\n");
1513 goto out_pci_disable
;
1517 err
= pci_request_mem_regions(pdev
, alx_drv_name
);
1520 "pci_request_mem_regions failed\n");
1521 goto out_pci_disable
;
1524 pci_enable_pcie_error_reporting(pdev
);
1525 pci_set_master(pdev
);
1527 if (!pdev
->pm_cap
) {
1529 "Can't find power management capability, aborting\n");
1531 goto out_pci_release
;
1534 netdev
= alloc_etherdev(sizeof(*alx
));
1537 goto out_pci_release
;
1540 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
1541 alx
= netdev_priv(netdev
);
1542 spin_lock_init(&alx
->hw
.mdio_lock
);
1543 spin_lock_init(&alx
->irq_lock
);
1544 spin_lock_init(&alx
->stats_lock
);
1546 alx
->hw
.pdev
= pdev
;
1547 alx
->msg_enable
= NETIF_MSG_LINK
| NETIF_MSG_HW
| NETIF_MSG_IFUP
|
1548 NETIF_MSG_TX_ERR
| NETIF_MSG_RX_ERR
| NETIF_MSG_WOL
;
1550 pci_set_drvdata(pdev
, alx
);
1552 hw
->hw_addr
= pci_ioremap_bar(pdev
, 0);
1554 dev_err(&pdev
->dev
, "cannot map device registers\n");
1556 goto out_free_netdev
;
1559 netdev
->netdev_ops
= &alx_netdev_ops
;
1560 netdev
->ethtool_ops
= &alx_ethtool_ops
;
1561 netdev
->irq
= pdev
->irq
;
1562 netdev
->watchdog_timeo
= ALX_WATCHDOG_TIME
;
1564 if (ent
->driver_data
& ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG
)
1565 pdev
->dev_flags
|= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG
;
1567 err
= alx_init_sw(alx
);
1569 dev_err(&pdev
->dev
, "net device private data init failed\n");
1575 phy_configured
= alx_phy_configured(hw
);
1577 if (!phy_configured
)
1580 err
= alx_reset_mac(hw
);
1582 dev_err(&pdev
->dev
, "MAC Reset failed, error = %d\n", err
);
1586 /* setup link to put it in a known good starting state */
1587 if (!phy_configured
) {
1588 err
= alx_setup_speed_duplex(hw
, hw
->adv_cfg
, hw
->flowctrl
);
1591 "failed to configure PHY speed/duplex (err=%d)\n",
1597 netdev
->hw_features
= NETIF_F_SG
|
1602 if (alx_get_perm_macaddr(hw
, hw
->perm_addr
)) {
1603 dev_warn(&pdev
->dev
,
1604 "Invalid permanent address programmed, using random one\n");
1605 eth_hw_addr_random(netdev
);
1606 memcpy(hw
->perm_addr
, netdev
->dev_addr
, netdev
->addr_len
);
1609 memcpy(hw
->mac_addr
, hw
->perm_addr
, ETH_ALEN
);
1610 memcpy(netdev
->dev_addr
, hw
->mac_addr
, ETH_ALEN
);
1611 memcpy(netdev
->perm_addr
, hw
->perm_addr
, ETH_ALEN
);
1615 hw
->mdio
.dev
= netdev
;
1616 hw
->mdio
.mode_support
= MDIO_SUPPORTS_C45
|
1619 hw
->mdio
.mdio_read
= alx_mdio_read
;
1620 hw
->mdio
.mdio_write
= alx_mdio_write
;
1622 if (!alx_get_phy_info(hw
)) {
1623 dev_err(&pdev
->dev
, "failed to identify PHY\n");
1628 INIT_WORK(&alx
->link_check_wk
, alx_link_check
);
1629 INIT_WORK(&alx
->reset_wk
, alx_reset
);
1630 netif_carrier_off(netdev
);
1632 err
= register_netdev(netdev
);
1634 dev_err(&pdev
->dev
, "register netdevice failed\n");
1639 "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
1645 iounmap(hw
->hw_addr
);
1647 free_netdev(netdev
);
1649 pci_release_mem_regions(pdev
);
1651 pci_disable_device(pdev
);
static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	free_netdev(alx->dev);
}
#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_detach(alx->dev);
	__alx_stop(alx);
	return 0;
}

static int alx_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	alx_reset_phy(hw);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_attach(alx->dev);
	return __alx_open(alx, true);
}

static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS	(&alx_pm_ops)
#else
#define ALX_PM_OPS	NULL
#endif
static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return rc;
}
static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		goto out;
	}

	pci_set_master(pdev);

	alx_reset_pcie(hw);
	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;
out:
	pci_cleanup_aer_uncorrect_error_status(pdev);

	rtnl_unlock();

	return rc;
}
static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}
static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};
static const struct pci_device_id alx_pci_tbl[] = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}
};
static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};

module_pci_driver(alx_driver);
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");