2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.2"

/* queue/timing tunables */
#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

/* max USB packet size per bus speed */
#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)

/* USB IDs and non-volatile storage signatures */
#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
89 static const char lan78xx_gstrings
[][ETH_GSTRING_LEN
] = {
91 "RX Alignment Errors",
94 "RX Undersize Frame Errors",
95 "RX Oversize Frame Errors",
97 "RX Unicast Byte Count",
98 "RX Broadcast Byte Count",
99 "RX Multicast Byte Count",
101 "RX Broadcast Frames",
102 "RX Multicast Frames",
105 "RX 65 - 127 Byte Frames",
106 "RX 128 - 255 Byte Frames",
107 "RX 256 - 511 Bytes Frames",
108 "RX 512 - 1023 Byte Frames",
109 "RX 1024 - 1518 Byte Frames",
110 "RX Greater 1518 Byte Frames",
111 "EEE RX LPI Transitions",
114 "TX Excess Deferral Errors",
117 "TX Single Collisions",
118 "TX Multiple Collisions",
119 "TX Excessive Collision",
120 "TX Late Collisions",
121 "TX Unicast Byte Count",
122 "TX Broadcast Byte Count",
123 "TX Multicast Byte Count",
125 "TX Broadcast Frames",
126 "TX Multicast Frames",
129 "TX 65 - 127 Byte Frames",
130 "TX 128 - 255 Byte Frames",
131 "TX 256 - 511 Bytes Frames",
132 "TX 512 - 1023 Byte Frames",
133 "TX 1024 - 1518 Byte Frames",
134 "TX Greater 1518 Byte Frames",
135 "EEE TX LPI Transitions",
139 struct lan78xx_statstage
{
141 u32 rx_alignment_errors
;
142 u32 rx_fragment_errors
;
143 u32 rx_jabber_errors
;
144 u32 rx_undersize_frame_errors
;
145 u32 rx_oversize_frame_errors
;
146 u32 rx_dropped_frames
;
147 u32 rx_unicast_byte_count
;
148 u32 rx_broadcast_byte_count
;
149 u32 rx_multicast_byte_count
;
150 u32 rx_unicast_frames
;
151 u32 rx_broadcast_frames
;
152 u32 rx_multicast_frames
;
154 u32 rx_64_byte_frames
;
155 u32 rx_65_127_byte_frames
;
156 u32 rx_128_255_byte_frames
;
157 u32 rx_256_511_bytes_frames
;
158 u32 rx_512_1023_byte_frames
;
159 u32 rx_1024_1518_byte_frames
;
160 u32 rx_greater_1518_byte_frames
;
161 u32 eee_rx_lpi_transitions
;
164 u32 tx_excess_deferral_errors
;
165 u32 tx_carrier_errors
;
166 u32 tx_bad_byte_count
;
167 u32 tx_single_collisions
;
168 u32 tx_multiple_collisions
;
169 u32 tx_excessive_collision
;
170 u32 tx_late_collisions
;
171 u32 tx_unicast_byte_count
;
172 u32 tx_broadcast_byte_count
;
173 u32 tx_multicast_byte_count
;
174 u32 tx_unicast_frames
;
175 u32 tx_broadcast_frames
;
176 u32 tx_multicast_frames
;
178 u32 tx_64_byte_frames
;
179 u32 tx_65_127_byte_frames
;
180 u32 tx_128_255_byte_frames
;
181 u32 tx_256_511_bytes_frames
;
182 u32 tx_512_1023_byte_frames
;
183 u32 tx_1024_1518_byte_frames
;
184 u32 tx_greater_1518_byte_frames
;
185 u32 eee_tx_lpi_transitions
;
191 struct lan78xx_priv
{
192 struct lan78xx_net
*dev
;
194 u32 mchash_table
[DP_SEL_VHF_HASH_LEN
]; /* multicat hash table */
195 u32 pfilter_table
[NUM_OF_MAF
][2]; /* perfect filter table */
196 u32 vlan_table
[DP_SEL_VHF_VLAN_LEN
];
197 struct mutex dataport_mutex
; /* for dataport access */
198 spinlock_t rfe_ctl_lock
; /* for rfe register access */
199 struct work_struct set_multicast
;
200 struct work_struct set_vlan
;
214 struct skb_data
{ /* skb->cb is one of these */
216 struct lan78xx_net
*dev
;
217 enum skb_state state
;
222 struct usb_ctrlrequest req
;
223 struct lan78xx_net
*dev
;
/* bit numbers for lan78xx_net->flags, serviced by the kevent work queue */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
237 struct net_device
*net
;
238 struct usb_device
*udev
;
239 struct usb_interface
*intf
;
244 struct sk_buff_head rxq
;
245 struct sk_buff_head txq
;
246 struct sk_buff_head done
;
247 struct sk_buff_head rxq_pause
;
248 struct sk_buff_head txq_pend
;
250 struct tasklet_struct bh
;
251 struct delayed_work wq
;
253 struct usb_host_endpoint
*ep_blkin
;
254 struct usb_host_endpoint
*ep_blkout
;
255 struct usb_host_endpoint
*ep_intr
;
259 struct urb
*urb_intr
;
260 struct usb_anchor deferred
;
262 struct mutex phy_mutex
; /* for phy access */
263 unsigned pipe_in
, pipe_out
, pipe_intr
;
265 u32 hard_mtu
; /* count any extra framing */
266 size_t rx_urb_size
; /* size for rx urbs */
270 wait_queue_head_t
*wait
;
271 unsigned char suspend_count
;
274 struct timer_list delay
;
276 unsigned long data
[5];
282 struct mii_bus
*mdiobus
;
285 /* use ethtool to change the level for any given device */
286 static int msg_level
= -1;
287 module_param(msg_level
, int, 0);
288 MODULE_PARM_DESC(msg_level
, "Override default message level");
290 static int lan78xx_read_reg(struct lan78xx_net
*dev
, u32 index
, u32
*data
)
292 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
298 ret
= usb_control_msg(dev
->udev
, usb_rcvctrlpipe(dev
->udev
, 0),
299 USB_VENDOR_REQUEST_READ_REGISTER
,
300 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
301 0, index
, buf
, 4, USB_CTRL_GET_TIMEOUT
);
302 if (likely(ret
>= 0)) {
306 netdev_warn(dev
->net
,
307 "Failed to read register index 0x%08x. ret = %d",
316 static int lan78xx_write_reg(struct lan78xx_net
*dev
, u32 index
, u32 data
)
318 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
327 ret
= usb_control_msg(dev
->udev
, usb_sndctrlpipe(dev
->udev
, 0),
328 USB_VENDOR_REQUEST_WRITE_REGISTER
,
329 USB_DIR_OUT
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
330 0, index
, buf
, 4, USB_CTRL_SET_TIMEOUT
);
331 if (unlikely(ret
< 0)) {
332 netdev_warn(dev
->net
,
333 "Failed to write register index 0x%08x. ret = %d",
342 static int lan78xx_read_stats(struct lan78xx_net
*dev
,
343 struct lan78xx_statstage
*data
)
347 struct lan78xx_statstage
*stats
;
351 stats
= kmalloc(sizeof(*stats
), GFP_KERNEL
);
355 ret
= usb_control_msg(dev
->udev
,
356 usb_rcvctrlpipe(dev
->udev
, 0),
357 USB_VENDOR_REQUEST_GET_STATS
,
358 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
363 USB_CTRL_SET_TIMEOUT
);
364 if (likely(ret
>= 0)) {
367 for (i
= 0; i
< sizeof(*stats
)/sizeof(u32
); i
++) {
368 le32_to_cpus(&src
[i
]);
372 netdev_warn(dev
->net
,
373 "Failed to read stat ret = 0x%x", ret
);
381 /* Loop until the read is completed with timeout called with phy_mutex held */
382 static int lan78xx_phy_wait_not_busy(struct lan78xx_net
*dev
)
384 unsigned long start_time
= jiffies
;
389 ret
= lan78xx_read_reg(dev
, MII_ACC
, &val
);
390 if (unlikely(ret
< 0))
393 if (!(val
& MII_ACC_MII_BUSY_
))
395 } while (!time_after(jiffies
, start_time
+ HZ
));
400 static inline u32
mii_access(int id
, int index
, int read
)
404 ret
= ((u32
)id
<< MII_ACC_PHY_ADDR_SHIFT_
) & MII_ACC_PHY_ADDR_MASK_
;
405 ret
|= ((u32
)index
<< MII_ACC_MIIRINDA_SHIFT_
) & MII_ACC_MIIRINDA_MASK_
;
407 ret
|= MII_ACC_MII_READ_
;
409 ret
|= MII_ACC_MII_WRITE_
;
410 ret
|= MII_ACC_MII_BUSY_
;
415 static int lan78xx_wait_eeprom(struct lan78xx_net
*dev
)
417 unsigned long start_time
= jiffies
;
422 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
423 if (unlikely(ret
< 0))
426 if (!(val
& E2P_CMD_EPC_BUSY_
) ||
427 (val
& E2P_CMD_EPC_TIMEOUT_
))
429 usleep_range(40, 100);
430 } while (!time_after(jiffies
, start_time
+ HZ
));
432 if (val
& (E2P_CMD_EPC_TIMEOUT_
| E2P_CMD_EPC_BUSY_
)) {
433 netdev_warn(dev
->net
, "EEPROM read operation timeout");
440 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net
*dev
)
442 unsigned long start_time
= jiffies
;
447 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
448 if (unlikely(ret
< 0))
451 if (!(val
& E2P_CMD_EPC_BUSY_
))
454 usleep_range(40, 100);
455 } while (!time_after(jiffies
, start_time
+ HZ
));
457 netdev_warn(dev
->net
, "EEPROM is busy");
461 static int lan78xx_read_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
462 u32 length
, u8
*data
)
467 ret
= lan78xx_eeprom_confirm_not_busy(dev
);
471 for (i
= 0; i
< length
; i
++) {
472 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_READ_
;
473 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
474 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
475 if (unlikely(ret
< 0))
478 ret
= lan78xx_wait_eeprom(dev
);
482 ret
= lan78xx_read_reg(dev
, E2P_DATA
, &val
);
483 if (unlikely(ret
< 0))
486 data
[i
] = val
& 0xFF;
493 static int lan78xx_read_eeprom(struct lan78xx_net
*dev
, u32 offset
,
494 u32 length
, u8
*data
)
499 ret
= lan78xx_read_raw_eeprom(dev
, 0, 1, &sig
);
500 if ((ret
== 0) && (sig
== EEPROM_INDICATOR
))
501 ret
= lan78xx_read_raw_eeprom(dev
, offset
, length
, data
);
508 static int lan78xx_write_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
509 u32 length
, u8
*data
)
514 ret
= lan78xx_eeprom_confirm_not_busy(dev
);
518 /* Issue write/erase enable command */
519 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_EWEN_
;
520 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
521 if (unlikely(ret
< 0))
524 ret
= lan78xx_wait_eeprom(dev
);
528 for (i
= 0; i
< length
; i
++) {
529 /* Fill data register */
531 ret
= lan78xx_write_reg(dev
, E2P_DATA
, val
);
535 /* Send "write" command */
536 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_WRITE_
;
537 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
538 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
542 ret
= lan78xx_wait_eeprom(dev
);
552 static int lan78xx_read_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
553 u32 length
, u8
*data
)
558 unsigned long timeout
;
560 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
562 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
563 /* clear it and wait to be cleared */
564 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
566 timeout
= jiffies
+ HZ
;
569 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
570 if (time_after(jiffies
, timeout
)) {
571 netdev_warn(dev
->net
,
572 "timeout on OTP_PWR_DN");
575 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
578 for (i
= 0; i
< length
; i
++) {
579 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
580 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
581 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
582 ((offset
+ i
) & OTP_ADDR2_10_3
));
584 ret
= lan78xx_write_reg(dev
, OTP_FUNC_CMD
, OTP_FUNC_CMD_READ_
);
585 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
587 timeout
= jiffies
+ HZ
;
590 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
591 if (time_after(jiffies
, timeout
)) {
592 netdev_warn(dev
->net
,
593 "timeout on OTP_STATUS");
596 } while (buf
& OTP_STATUS_BUSY_
);
598 ret
= lan78xx_read_reg(dev
, OTP_RD_DATA
, &buf
);
600 data
[i
] = (u8
)(buf
& 0xFF);
606 static int lan78xx_write_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
607 u32 length
, u8
*data
)
612 unsigned long timeout
;
614 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
616 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
617 /* clear it and wait to be cleared */
618 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
620 timeout
= jiffies
+ HZ
;
623 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
624 if (time_after(jiffies
, timeout
)) {
625 netdev_warn(dev
->net
,
626 "timeout on OTP_PWR_DN completion");
629 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
632 /* set to BYTE program mode */
633 ret
= lan78xx_write_reg(dev
, OTP_PRGM_MODE
, OTP_PRGM_MODE_BYTE_
);
635 for (i
= 0; i
< length
; i
++) {
636 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
637 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
638 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
639 ((offset
+ i
) & OTP_ADDR2_10_3
));
640 ret
= lan78xx_write_reg(dev
, OTP_PRGM_DATA
, data
[i
]);
641 ret
= lan78xx_write_reg(dev
, OTP_TST_CMD
, OTP_TST_CMD_PRGVRFY_
);
642 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
644 timeout
= jiffies
+ HZ
;
647 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
648 if (time_after(jiffies
, timeout
)) {
649 netdev_warn(dev
->net
,
650 "Timeout on OTP_STATUS completion");
653 } while (buf
& OTP_STATUS_BUSY_
);
659 static int lan78xx_read_otp(struct lan78xx_net
*dev
, u32 offset
,
660 u32 length
, u8
*data
)
665 ret
= lan78xx_read_raw_otp(dev
, 0, 1, &sig
);
668 if (sig
== OTP_INDICATOR_1
)
670 else if (sig
== OTP_INDICATOR_2
)
674 ret
= lan78xx_read_raw_otp(dev
, offset
, length
, data
);
680 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net
*dev
)
684 for (i
= 0; i
< 100; i
++) {
687 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
688 if (unlikely(ret
< 0))
691 if (dp_sel
& DP_SEL_DPRDY_
)
694 usleep_range(40, 100);
697 netdev_warn(dev
->net
, "lan78xx_dataport_wait_not_busy timed out");
702 static int lan78xx_dataport_write(struct lan78xx_net
*dev
, u32 ram_select
,
703 u32 addr
, u32 length
, u32
*buf
)
705 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
709 if (usb_autopm_get_interface(dev
->intf
) < 0)
712 mutex_lock(&pdata
->dataport_mutex
);
714 ret
= lan78xx_dataport_wait_not_busy(dev
);
718 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
720 dp_sel
&= ~DP_SEL_RSEL_MASK_
;
721 dp_sel
|= ram_select
;
722 ret
= lan78xx_write_reg(dev
, DP_SEL
, dp_sel
);
724 for (i
= 0; i
< length
; i
++) {
725 ret
= lan78xx_write_reg(dev
, DP_ADDR
, addr
+ i
);
727 ret
= lan78xx_write_reg(dev
, DP_DATA
, buf
[i
]);
729 ret
= lan78xx_write_reg(dev
, DP_CMD
, DP_CMD_WRITE_
);
731 ret
= lan78xx_dataport_wait_not_busy(dev
);
737 mutex_unlock(&pdata
->dataport_mutex
);
738 usb_autopm_put_interface(dev
->intf
);
743 static void lan78xx_set_addr_filter(struct lan78xx_priv
*pdata
,
744 int index
, u8 addr
[ETH_ALEN
])
748 if ((pdata
) && (index
> 0) && (index
< NUM_OF_MAF
)) {
750 temp
= addr
[2] | (temp
<< 8);
751 temp
= addr
[1] | (temp
<< 8);
752 temp
= addr
[0] | (temp
<< 8);
753 pdata
->pfilter_table
[index
][1] = temp
;
755 temp
= addr
[4] | (temp
<< 8);
756 temp
|= MAF_HI_VALID_
| MAF_HI_TYPE_DST_
;
757 pdata
->pfilter_table
[index
][0] = temp
;
761 /* returns hash bit number for given MAC address */
762 static inline u32
lan78xx_hash(char addr
[ETH_ALEN
])
764 return (ether_crc(ETH_ALEN
, addr
) >> 23) & 0x1ff;
767 static void lan78xx_deferred_multicast_write(struct work_struct
*param
)
769 struct lan78xx_priv
*pdata
=
770 container_of(param
, struct lan78xx_priv
, set_multicast
);
771 struct lan78xx_net
*dev
= pdata
->dev
;
775 netif_dbg(dev
, drv
, dev
->net
, "deferred multicast write 0x%08x\n",
778 lan78xx_dataport_write(dev
, DP_SEL_RSEL_VLAN_DA_
, DP_SEL_VHF_VLAN_LEN
,
779 DP_SEL_VHF_HASH_LEN
, pdata
->mchash_table
);
781 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
782 ret
= lan78xx_write_reg(dev
, MAF_HI(i
), 0);
783 ret
= lan78xx_write_reg(dev
, MAF_LO(i
),
784 pdata
->pfilter_table
[i
][1]);
785 ret
= lan78xx_write_reg(dev
, MAF_HI(i
),
786 pdata
->pfilter_table
[i
][0]);
789 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
792 static void lan78xx_set_multicast(struct net_device
*netdev
)
794 struct lan78xx_net
*dev
= netdev_priv(netdev
);
795 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
799 spin_lock_irqsave(&pdata
->rfe_ctl_lock
, flags
);
801 pdata
->rfe_ctl
&= ~(RFE_CTL_UCAST_EN_
| RFE_CTL_MCAST_EN_
|
802 RFE_CTL_DA_PERFECT_
| RFE_CTL_MCAST_HASH_
);
804 for (i
= 0; i
< DP_SEL_VHF_HASH_LEN
; i
++)
805 pdata
->mchash_table
[i
] = 0;
806 /* pfilter_table[0] has own HW address */
807 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
808 pdata
->pfilter_table
[i
][0] =
809 pdata
->pfilter_table
[i
][1] = 0;
812 pdata
->rfe_ctl
|= RFE_CTL_BCAST_EN_
;
814 if (dev
->net
->flags
& IFF_PROMISC
) {
815 netif_dbg(dev
, drv
, dev
->net
, "promiscuous mode enabled");
816 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
| RFE_CTL_UCAST_EN_
;
818 if (dev
->net
->flags
& IFF_ALLMULTI
) {
819 netif_dbg(dev
, drv
, dev
->net
,
820 "receive all multicast enabled");
821 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
;
825 if (netdev_mc_count(dev
->net
)) {
826 struct netdev_hw_addr
*ha
;
829 netif_dbg(dev
, drv
, dev
->net
, "receive multicast hash filter");
831 pdata
->rfe_ctl
|= RFE_CTL_DA_PERFECT_
;
834 netdev_for_each_mc_addr(ha
, netdev
) {
835 /* set first 32 into Perfect Filter */
837 lan78xx_set_addr_filter(pdata
, i
, ha
->addr
);
839 u32 bitnum
= lan78xx_hash(ha
->addr
);
841 pdata
->mchash_table
[bitnum
/ 32] |=
842 (1 << (bitnum
% 32));
843 pdata
->rfe_ctl
|= RFE_CTL_MCAST_HASH_
;
849 spin_unlock_irqrestore(&pdata
->rfe_ctl_lock
, flags
);
851 /* defer register writes to a sleepable context */
852 schedule_work(&pdata
->set_multicast
);
855 static int lan78xx_update_flowcontrol(struct lan78xx_net
*dev
, u8 duplex
,
856 u16 lcladv
, u16 rmtadv
)
858 u32 flow
= 0, fct_flow
= 0;
861 u8 cap
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
863 if (cap
& FLOW_CTRL_TX
)
864 flow
= (FLOW_CR_TX_FCEN_
| 0xFFFF);
866 if (cap
& FLOW_CTRL_RX
)
867 flow
|= FLOW_CR_RX_FCEN_
;
869 if (dev
->udev
->speed
== USB_SPEED_SUPER
)
871 else if (dev
->udev
->speed
== USB_SPEED_HIGH
)
874 netif_dbg(dev
, link
, dev
->net
, "rx pause %s, tx pause %s",
875 (cap
& FLOW_CTRL_RX
? "enabled" : "disabled"),
876 (cap
& FLOW_CTRL_TX
? "enabled" : "disabled"));
878 ret
= lan78xx_write_reg(dev
, FCT_FLOW
, fct_flow
);
880 /* threshold value should be set before enabling flow */
881 ret
= lan78xx_write_reg(dev
, FLOW
, flow
);
886 static int lan78xx_link_reset(struct lan78xx_net
*dev
)
888 struct phy_device
*phydev
= dev
->net
->phydev
;
889 struct ethtool_cmd ecmd
= { .cmd
= ETHTOOL_GSET
};
893 /* clear PHY interrupt status */
894 ret
= phy_read(phydev
, LAN88XX_INT_STS
);
895 if (unlikely(ret
< 0))
898 /* clear LAN78xx interrupt status */
899 ret
= lan78xx_write_reg(dev
, INT_STS
, INT_STS_PHY_INT_
);
900 if (unlikely(ret
< 0))
903 phy_read_status(phydev
);
905 if (!phydev
->link
&& dev
->link_on
) {
906 dev
->link_on
= false;
909 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
910 if (unlikely(ret
< 0))
913 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
914 if (unlikely(ret
< 0))
917 phy_mac_interrupt(phydev
, 0);
918 } else if (phydev
->link
&& !dev
->link_on
) {
921 phy_ethtool_gset(phydev
, &ecmd
);
923 ret
= phy_read(phydev
, LAN88XX_INT_STS
);
925 if (dev
->udev
->speed
== USB_SPEED_SUPER
) {
926 if (ethtool_cmd_speed(&ecmd
) == 1000) {
928 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
929 buf
&= ~USB_CFG1_DEV_U2_INIT_EN_
;
930 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
932 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
933 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
934 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
937 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
938 buf
|= USB_CFG1_DEV_U2_INIT_EN_
;
939 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
940 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
944 ladv
= phy_read(phydev
, MII_ADVERTISE
);
948 radv
= phy_read(phydev
, MII_LPA
);
952 netif_dbg(dev
, link
, dev
->net
,
953 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
954 ethtool_cmd_speed(&ecmd
), ecmd
.duplex
, ladv
, radv
);
956 ret
= lan78xx_update_flowcontrol(dev
, ecmd
.duplex
, ladv
, radv
);
957 phy_mac_interrupt(phydev
, 1);
963 /* some work can't be done in tasklets, so we use keventd
965 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
966 * but tasklet_schedule() doesn't. hope the failure is rare.
968 void lan78xx_defer_kevent(struct lan78xx_net
*dev
, int work
)
970 set_bit(work
, &dev
->flags
);
971 if (!schedule_delayed_work(&dev
->wq
, 0))
972 netdev_err(dev
->net
, "kevent %d may have been dropped\n", work
);
975 static void lan78xx_status(struct lan78xx_net
*dev
, struct urb
*urb
)
979 if (urb
->actual_length
!= 4) {
980 netdev_warn(dev
->net
,
981 "unexpected urb length %d", urb
->actual_length
);
985 memcpy(&intdata
, urb
->transfer_buffer
, 4);
986 le32_to_cpus(&intdata
);
988 if (intdata
& INT_ENP_PHY_INT
) {
989 netif_dbg(dev
, link
, dev
->net
, "PHY INTR: 0x%08x\n", intdata
);
990 lan78xx_defer_kevent(dev
, EVENT_LINK_RESET
);
992 netdev_warn(dev
->net
,
993 "unexpected interrupt: 0x%08x\n", intdata
);
996 static int lan78xx_ethtool_get_eeprom_len(struct net_device
*netdev
)
998 return MAX_EEPROM_SIZE
;
1001 static int lan78xx_ethtool_get_eeprom(struct net_device
*netdev
,
1002 struct ethtool_eeprom
*ee
, u8
*data
)
1004 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1006 ee
->magic
= LAN78XX_EEPROM_MAGIC
;
1008 return lan78xx_read_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1011 static int lan78xx_ethtool_set_eeprom(struct net_device
*netdev
,
1012 struct ethtool_eeprom
*ee
, u8
*data
)
1014 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1016 /* Allow entire eeprom update only */
1017 if ((ee
->magic
== LAN78XX_EEPROM_MAGIC
) &&
1018 (ee
->offset
== 0) &&
1020 (data
[0] == EEPROM_INDICATOR
))
1021 return lan78xx_write_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1022 else if ((ee
->magic
== LAN78XX_OTP_MAGIC
) &&
1023 (ee
->offset
== 0) &&
1025 (data
[0] == OTP_INDICATOR_1
))
1026 return lan78xx_write_raw_otp(dev
, ee
->offset
, ee
->len
, data
);
1031 static void lan78xx_get_strings(struct net_device
*netdev
, u32 stringset
,
1034 if (stringset
== ETH_SS_STATS
)
1035 memcpy(data
, lan78xx_gstrings
, sizeof(lan78xx_gstrings
));
1038 static int lan78xx_get_sset_count(struct net_device
*netdev
, int sset
)
1040 if (sset
== ETH_SS_STATS
)
1041 return ARRAY_SIZE(lan78xx_gstrings
);
1046 static void lan78xx_get_stats(struct net_device
*netdev
,
1047 struct ethtool_stats
*stats
, u64
*data
)
1049 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1050 struct lan78xx_statstage lan78xx_stat
;
1054 if (usb_autopm_get_interface(dev
->intf
) < 0)
1057 if (lan78xx_read_stats(dev
, &lan78xx_stat
) > 0) {
1058 p
= (u32
*)&lan78xx_stat
;
1059 for (i
= 0; i
< (sizeof(lan78xx_stat
) / (sizeof(u32
))); i
++)
1063 usb_autopm_put_interface(dev
->intf
);
1066 static void lan78xx_get_wol(struct net_device
*netdev
,
1067 struct ethtool_wolinfo
*wol
)
1069 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1072 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1074 if (usb_autopm_get_interface(dev
->intf
) < 0)
1077 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
1078 if (unlikely(ret
< 0)) {
1082 if (buf
& USB_CFG_RMT_WKP_
) {
1083 wol
->supported
= WAKE_ALL
;
1084 wol
->wolopts
= pdata
->wol
;
1091 usb_autopm_put_interface(dev
->intf
);
1094 static int lan78xx_set_wol(struct net_device
*netdev
,
1095 struct ethtool_wolinfo
*wol
)
1097 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1098 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1101 ret
= usb_autopm_get_interface(dev
->intf
);
1106 if (wol
->wolopts
& WAKE_UCAST
)
1107 pdata
->wol
|= WAKE_UCAST
;
1108 if (wol
->wolopts
& WAKE_MCAST
)
1109 pdata
->wol
|= WAKE_MCAST
;
1110 if (wol
->wolopts
& WAKE_BCAST
)
1111 pdata
->wol
|= WAKE_BCAST
;
1112 if (wol
->wolopts
& WAKE_MAGIC
)
1113 pdata
->wol
|= WAKE_MAGIC
;
1114 if (wol
->wolopts
& WAKE_PHY
)
1115 pdata
->wol
|= WAKE_PHY
;
1116 if (wol
->wolopts
& WAKE_ARP
)
1117 pdata
->wol
|= WAKE_ARP
;
1119 device_set_wakeup_enable(&dev
->udev
->dev
, (bool)wol
->wolopts
);
1121 phy_ethtool_set_wol(netdev
->phydev
, wol
);
1123 usb_autopm_put_interface(dev
->intf
);
1128 static int lan78xx_get_eee(struct net_device
*net
, struct ethtool_eee
*edata
)
1130 struct lan78xx_net
*dev
= netdev_priv(net
);
1131 struct phy_device
*phydev
= net
->phydev
;
1135 ret
= usb_autopm_get_interface(dev
->intf
);
1139 ret
= phy_ethtool_get_eee(phydev
, edata
);
1143 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1144 if (buf
& MAC_CR_EEE_EN_
) {
1145 edata
->eee_enabled
= true;
1146 edata
->eee_active
= !!(edata
->advertised
&
1147 edata
->lp_advertised
);
1148 edata
->tx_lpi_enabled
= true;
1149 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1150 ret
= lan78xx_read_reg(dev
, EEE_TX_LPI_REQ_DLY
, &buf
);
1151 edata
->tx_lpi_timer
= buf
;
1153 edata
->eee_enabled
= false;
1154 edata
->eee_active
= false;
1155 edata
->tx_lpi_enabled
= false;
1156 edata
->tx_lpi_timer
= 0;
1161 usb_autopm_put_interface(dev
->intf
);
1166 static int lan78xx_set_eee(struct net_device
*net
, struct ethtool_eee
*edata
)
1168 struct lan78xx_net
*dev
= netdev_priv(net
);
1172 ret
= usb_autopm_get_interface(dev
->intf
);
1176 if (edata
->eee_enabled
) {
1177 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1178 buf
|= MAC_CR_EEE_EN_
;
1179 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1181 phy_ethtool_set_eee(net
->phydev
, edata
);
1183 buf
= (u32
)edata
->tx_lpi_timer
;
1184 ret
= lan78xx_write_reg(dev
, EEE_TX_LPI_REQ_DLY
, buf
);
1186 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1187 buf
&= ~MAC_CR_EEE_EN_
;
1188 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1191 usb_autopm_put_interface(dev
->intf
);
1196 static u32
lan78xx_get_link(struct net_device
*net
)
1198 phy_read_status(net
->phydev
);
1200 return net
->phydev
->link
;
1203 int lan78xx_nway_reset(struct net_device
*net
)
1205 return phy_start_aneg(net
->phydev
);
1208 static void lan78xx_get_drvinfo(struct net_device
*net
,
1209 struct ethtool_drvinfo
*info
)
1211 struct lan78xx_net
*dev
= netdev_priv(net
);
1213 strncpy(info
->driver
, DRIVER_NAME
, sizeof(info
->driver
));
1214 strncpy(info
->version
, DRIVER_VERSION
, sizeof(info
->version
));
1215 usb_make_path(dev
->udev
, info
->bus_info
, sizeof(info
->bus_info
));
1218 static u32
lan78xx_get_msglevel(struct net_device
*net
)
1220 struct lan78xx_net
*dev
= netdev_priv(net
);
1222 return dev
->msg_enable
;
1225 static void lan78xx_set_msglevel(struct net_device
*net
, u32 level
)
1227 struct lan78xx_net
*dev
= netdev_priv(net
);
1229 dev
->msg_enable
= level
;
1232 static int lan78xx_get_mdix_status(struct net_device
*net
)
1234 struct phy_device
*phydev
= net
->phydev
;
1237 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
, LAN88XX_EXT_PAGE_SPACE_1
);
1238 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1239 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
, LAN88XX_EXT_PAGE_SPACE_0
);
1244 static void lan78xx_set_mdix_status(struct net_device
*net
, __u8 mdix_ctrl
)
1246 struct lan78xx_net
*dev
= netdev_priv(net
);
1247 struct phy_device
*phydev
= net
->phydev
;
1250 if (mdix_ctrl
== ETH_TP_MDI
) {
1251 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1252 LAN88XX_EXT_PAGE_SPACE_1
);
1253 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1254 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1255 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1256 buf
| LAN88XX_EXT_MODE_CTRL_MDI_
);
1257 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1258 LAN88XX_EXT_PAGE_SPACE_0
);
1259 } else if (mdix_ctrl
== ETH_TP_MDI_X
) {
1260 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1261 LAN88XX_EXT_PAGE_SPACE_1
);
1262 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1263 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1264 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1265 buf
| LAN88XX_EXT_MODE_CTRL_MDI_X_
);
1266 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1267 LAN88XX_EXT_PAGE_SPACE_0
);
1268 } else if (mdix_ctrl
== ETH_TP_MDI_AUTO
) {
1269 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1270 LAN88XX_EXT_PAGE_SPACE_1
);
1271 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1272 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1273 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1274 buf
| LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_
);
1275 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1276 LAN88XX_EXT_PAGE_SPACE_0
);
1278 dev
->mdix_ctrl
= mdix_ctrl
;
1281 static int lan78xx_get_settings(struct net_device
*net
, struct ethtool_cmd
*cmd
)
1283 struct lan78xx_net
*dev
= netdev_priv(net
);
1284 struct phy_device
*phydev
= net
->phydev
;
1288 ret
= usb_autopm_get_interface(dev
->intf
);
1292 ret
= phy_ethtool_gset(phydev
, cmd
);
1294 buf
= lan78xx_get_mdix_status(net
);
1296 buf
&= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1297 if (buf
== LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_
) {
1298 cmd
->eth_tp_mdix
= ETH_TP_MDI_AUTO
;
1299 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI_AUTO
;
1300 } else if (buf
== LAN88XX_EXT_MODE_CTRL_MDI_
) {
1301 cmd
->eth_tp_mdix
= ETH_TP_MDI
;
1302 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI
;
1303 } else if (buf
== LAN88XX_EXT_MODE_CTRL_MDI_X_
) {
1304 cmd
->eth_tp_mdix
= ETH_TP_MDI_X
;
1305 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI_X
;
1308 usb_autopm_put_interface(dev
->intf
);
1313 static int lan78xx_set_settings(struct net_device
*net
, struct ethtool_cmd
*cmd
)
1315 struct lan78xx_net
*dev
= netdev_priv(net
);
1316 struct phy_device
*phydev
= net
->phydev
;
1320 ret
= usb_autopm_get_interface(dev
->intf
);
1324 if (dev
->mdix_ctrl
!= cmd
->eth_tp_mdix_ctrl
) {
1325 lan78xx_set_mdix_status(net
, cmd
->eth_tp_mdix_ctrl
);
1328 /* change speed & duplex */
1329 ret
= phy_ethtool_sset(phydev
, cmd
);
1331 if (!cmd
->autoneg
) {
1332 /* force link down */
1333 temp
= phy_read(phydev
, MII_BMCR
);
1334 phy_write(phydev
, MII_BMCR
, temp
| BMCR_LOOPBACK
);
1336 phy_write(phydev
, MII_BMCR
, temp
);
1339 usb_autopm_put_interface(dev
->intf
);
1344 static const struct ethtool_ops lan78xx_ethtool_ops
= {
1345 .get_link
= lan78xx_get_link
,
1346 .nway_reset
= lan78xx_nway_reset
,
1347 .get_drvinfo
= lan78xx_get_drvinfo
,
1348 .get_msglevel
= lan78xx_get_msglevel
,
1349 .set_msglevel
= lan78xx_set_msglevel
,
1350 .get_settings
= lan78xx_get_settings
,
1351 .set_settings
= lan78xx_set_settings
,
1352 .get_eeprom_len
= lan78xx_ethtool_get_eeprom_len
,
1353 .get_eeprom
= lan78xx_ethtool_get_eeprom
,
1354 .set_eeprom
= lan78xx_ethtool_set_eeprom
,
1355 .get_ethtool_stats
= lan78xx_get_stats
,
1356 .get_sset_count
= lan78xx_get_sset_count
,
1357 .get_strings
= lan78xx_get_strings
,
1358 .get_wol
= lan78xx_get_wol
,
1359 .set_wol
= lan78xx_set_wol
,
1360 .get_eee
= lan78xx_get_eee
,
1361 .set_eee
= lan78xx_set_eee
,
1364 static int lan78xx_ioctl(struct net_device
*netdev
, struct ifreq
*rq
, int cmd
)
1366 if (!netif_running(netdev
))
1369 return phy_mii_ioctl(netdev
->phydev
, rq
, cmd
);
/* Establish the device MAC address with a three-stage fallback:
 * 1) use the address already in RX_ADDRL/RX_ADDRH if valid,
 * 2) otherwise read it from EEPROM/OTP,
 * 3) otherwise generate a random address.
 * The final address is written back to the MAC registers, installed in the
 * perfect-filter slot 0 (MAF_LO/MAF_HI), and copied into net->dev_addr.
 * NOTE(review): extraction dropped lines (declaration of `addr`/`ret`,
 * continuation of the EEPROM/OTP conditions, braces) — verify upstream.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
	u32 addr_lo, addr_hi;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* unpack the little-endian register pair into a byte array */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		/* reading mac address from EEPROM or OTP */
		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
			if (is_valid_ether_addr(addr)) {
				/* eeprom values are valid so use them */
				netif_dbg(dev, ifup, dev->net,
					  "MAC address read from EEPROM");
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");

		/* repack the chosen address for the MAC registers */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

		/* generate random MAC */
		random_ether_addr(addr);
		netif_dbg(dev, ifup, dev->net,
			  "MAC address set to random addr");

	/* program perfect-filter slot 0 with the address and mark it valid */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
/* MDIO read and write wrappers for phylib */

/* Read one PHY register over the chip's MII interface.  Serialised by
 * dev->phy_mutex; takes an autopm reference so the device is awake for the
 * register accesses.  Returns the 16-bit register value (in `ret`).
 * NOTE(review): extraction dropped local declarations, error-check branches
 * and the done/exit labels — verify against upstream.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
	struct lan78xx_net *dev = bus->priv;

	ret = usb_autopm_get_interface(dev->intf);

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	/* wait for the transaction to complete, then fetch the data */
	ret = lan78xx_phy_wait_not_busy(dev);

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
/* Write one PHY register over the chip's MII interface.  Mirror of
 * lan78xx_mdiobus_read(): autopm reference + phy_mutex serialisation, write
 * the data register first, then trigger the MII transaction via MII_ACC.
 * NOTE(review): extraction dropped the trailing `u16 val` parameter line,
 * error checks and exit labels — verify against upstream.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
	struct lan78xx_net *dev = bus->priv;

	ret = usb_autopm_get_interface(dev->intf);

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);

	/* data must be staged in MII_DATA before the access is kicked off */
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);

	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
/* Allocate and register the driver's MDIO bus, naming it after the USB
 * bus/device number so multiple adapters get distinct ids.  For the chip id
 * matched in the switch, only the internal PHY (address 1) is scanned.
 * NOTE(review): extraction dropped the switch case labels, error-path
 * returns and the exit/free label structure — verify against upstream.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* unique bus id derived from USB topology, e.g. "usb-001:004" */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);

	ret = mdiobus_register(dev->mdiobus);
		netdev_err(dev->net, "can't register MDIO bus\n");

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);

	/* error path: free the bus allocated above */
	mdiobus_free(dev->mdiobus);
/* Tear down the MDIO bus created by lan78xx_mdio_init(): unregister from the
 * MDIO core, then release the allocation.
 */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
/* phylib link-change callback passed to phy_connect_direct().
 * NOTE(review): body not visible in this extraction; presumably empty because
 * link changes are handled via the driver's own interrupt/EVENT_LINK_RESET
 * deferred work — confirm against upstream.
 */
static void lan78xx_link_status_change(struct net_device *net)
/* Find the PHY on our MDIO bus, connect it to the net_device, and configure
 * its advertised feature set.  PHY interrupts are routed through the device's
 * own interrupt endpoint, so phylib is told to ignore the PHY IRQ.
 * NOTE(review): extraction dropped error-check branches, the second argument
 * of the failing netdev_err(), and return statements — verify upstream.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
	struct phy_device *phydev = dev->net->phydev;

	phydev = phy_find_first(dev->mdiobus);
		netdev_err(dev->net, "no PHY found\n");

	/* Enable PHY interrupts.
	 * We handle our own interrupt
	 */
	/* read INT_STS first to clear any stale interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	ret = phy_write(phydev, LAN88XX_INT_MASK,
			LAN88XX_INT_MASK_MDINTPIN_EN_ |
			LAN88XX_INT_MASK_LINK_CHANGE_);

	/* interrupts are delivered via the USB interrupt endpoint, not a
	 * dedicated PHY IRQ line
	 */
	phydev->irq = PHY_IGNORE_INTERRUPT;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 PHY_INTERFACE_MODE_GMII);
		netdev_err(dev->net, "can't attach PHY to %s\n",

	/* set to AUTOMDIX */
	lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;
	phydev->supported |= (SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_1000baseT_Full |
			      SUPPORTED_Pause | SUPPORTED_Asym_Pause);
	genphy_config_aneg(phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
/* Program the MAC's maximum RX frame length.  The receiver must be disabled
 * while MAC_RX_MAX_SIZE is changed, so: remember whether RX was enabled,
 * disable it, update the size field, then re-enable if it was on.
 * NOTE(review): extraction dropped local declarations, the `if (rxenabled)`
 * guards around the disable/re-enable writes, and the return — verify.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

		/* temporarily stop the receiver before resizing */
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* restore the receiver if it had been running */
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink every URB queued on `q` (txq or rxq).  For each skb
 * not already in unlink_start state, the queue lock is dropped around
 * usb_unlink_urb() because the completion handler (which takes the same lock
 * via defer_bh) can run synchronously.  Returns the number of unlinks issued.
 * NOTE(review): extraction dropped locals (urb, ret, count), the
 * usb_get_urb()/usb_put_urb() reference handling described by the comment,
 * and loop-control statements — verify against upstream.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;

		/* find the first entry not yet being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)

		entry->state = unlink_start;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);

		spin_lock_irqsave(&q->lock, flags);
	spin_unlock_irqrestore(&q->lock, flags);
/* ndo_change_mtu: validate the new MTU, program the MAC's max frame length,
 * and grow rx_urb_size when it was tracking hard_mtu.  If URBs must grow
 * while the interface is running, in-flight RX URBs are unlinked and the
 * bottom half rescheduled so new, larger URBs get submitted.
 * NOTE(review): extraction dropped the error returns for the two validation
 * checks and the final `return 0` — verify against upstream.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	/* rx_urb_size only follows hard_mtu when it wasn't pinned to a
	 * burst-cap value at reset time
	 */
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
/* ndo_set_mac_address: reject changes while the interface is running or when
 * the address is invalid, then store the new address in dev_addr and program
 * it into RX_ADDRL/RX_ADDRH.
 * NOTE(review): extraction dropped the error return for the running check and
 * the final `return 0` — verify against upstream.
 */
int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;

	if (netif_running(netdev))

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* pack the byte array into the little-endian register pair */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* Enable or disable Rx checksum offload engine */
/* ndo_set_features: translate NETIF_F_RXCSUM and NETIF_F_HW_VLAN_CTAG_RX into
 * RFE_CTL bits.  The cached pdata->rfe_ctl is updated under rfe_ctl_lock and
 * then written to the device outside the spinlock (register writes sleep).
 * NOTE(review): extraction dropped the `else` keywords and the final return —
 * verify against upstream.
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* Work handler that flushes the cached VLAN filter table to the device's
 * dataport.  Runs in process context because the dataport write sleeps;
 * scheduled from the (atomic) vid add/kill ndo callbacks.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
/* ndo_vlan_rx_add_vid: set the bit for `vid` in the 128x32-bit VLAN filter
 * bitmap (dword = vid>>5, bit = vid&0x1F) and defer the hardware update.
 * NOTE(review): extraction dropped the `vid_bit_index` declaration and the
 * final `return 0` — verify against upstream.
 */
static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);
/* ndo_vlan_rx_kill_vid: clear the bit for `vid` in the VLAN filter bitmap —
 * exact mirror of lan78xx_vlan_rx_add_vid() — and defer the hardware update.
 * NOTE(review): extraction dropped the `vid_bit_index` declaration and the
 * final `return 0` — verify against upstream.
 */
static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);
/* Initialise USB3 Latency Tolerance Messaging (LTM) registers.  If LTM is
 * enabled in USB_CFG1, a 24-byte LTM configuration block is loaded from
 * EEPROM (preferred) or OTP at offset 0x3F into regs[0..5]; otherwise the
 * registers are programmed with the zeroed defaults.
 * NOTE(review): extraction dropped the `temp` declaration, the raw-read
 * argument lists and the else/default branches — verify against upstream.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			/* 24 == expected LTM block length in bytes */
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Full device initialisation: lite-reset the chip, restore the MAC address,
 * size the bulk-in bursts per USB speed, set FIFO sizes, program the RX
 * filter engine, reset the PHY, and finally enable the MAC/FCT TX and RX
 * paths.  Called from open() and bind().
 * NOTE(review): extraction dropped many lines throughout (declarations,
 * timeout returns, several register-manipulation statements, speed-branch
 * qlen assignments, final return) — verify against upstream.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;

	/* issue a "lite reset" and poll until the chip clears the bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* burst cap and URB sizing depend on the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY and poll until both reset-done and ready */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable MAC and FIFO-controller TX paths */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable MAC and FIFO-controller RX paths */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
/* ndo_open: wake the device, re-run full chip init, bring up the PHY, submit
 * the interrupt URB used for link-change notification, then start the TX
 * queue and kick a deferred link reset to pick up the current link state.
 * NOTE(review): extraction dropped the error checks/goto labels between steps
 * and the final return — verify against upstream.
 */
static int lan78xx_open(struct net_device *net)
	struct lan78xx_net *dev = netdev_priv(net);

	ret = usb_autopm_get_interface(dev->intf);

	ret = lan78xx_reset(dev);

	ret = lan78xx_phy_init(dev);

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force the first link-state evaluation through the kevent path */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

	usb_autopm_put_interface(dev->intf);
2024 static void lan78xx_terminate_urbs(struct lan78xx_net
*dev
)
2026 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup
);
2027 DECLARE_WAITQUEUE(wait
, current
);
2030 /* ensure there are no more active urbs */
2031 add_wait_queue(&unlink_wakeup
, &wait
);
2032 set_current_state(TASK_UNINTERRUPTIBLE
);
2033 dev
->wait
= &unlink_wakeup
;
2034 temp
= unlink_urbs(dev
, &dev
->txq
) + unlink_urbs(dev
, &dev
->rxq
);
2036 /* maybe wait for deletions to finish. */
2037 while (!skb_queue_empty(&dev
->rxq
) &&
2038 !skb_queue_empty(&dev
->txq
) &&
2039 !skb_queue_empty(&dev
->done
)) {
2040 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS
));
2041 set_current_state(TASK_UNINTERRUPTIBLE
);
2042 netif_dbg(dev
, ifdown
, dev
->net
,
2043 "waited for %d urb completions\n", temp
);
2045 set_current_state(TASK_RUNNING
);
2047 remove_wait_queue(&unlink_wakeup
, &wait
);
/* ndo_stop: detach the PHY, stop the queue, unlink and drain all URBs, kill
 * the interrupt URB, purge paused RX skbs, and quiesce deferred work before
 * dropping the autopm reference taken in open().
 * NOTE(review): extraction dropped a few lines (event-flag manipulation
 * before cancel_delayed_work_sync, final return) — verify upstream.
 */
int lan78xx_stop(struct net_device *net)
	struct lan78xx_net *dev = netdev_priv(net);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);
/* Thin wrapper over skb_linearize(): ensure the skb's data is contiguous so
 * it can be handed to usb_fill_bulk_urb() as a single buffer.
 */
static int lan78xx_linearize(struct sk_buff *skb)
	return skb_linearize(skb);
/* Prepend the two 32-bit TX command words (TX_CMD_A/TX_CMD_B) to an outgoing
 * skb: length + FCS request, checksum-offload flags, LSO/MSS for GSO skbs,
 * and the VLAN tag insertion request.  Reallocates headroom if the skb does
 * not have TX_OVERHEAD bytes available.  Consumes `skb`; returns the prepared
 * skb (possibly a copy) or — per the dropped error paths — NULL.
 * NOTE(review): extraction dropped the skb2 null-check/reassignment, the
 * skb_push() calls that make room for each command word, and returns.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
	u32 tx_cmd_a, tx_cmd_b;

	if (skb_headroom(skb) < TX_OVERHEAD) {
		struct sk_buff *skb2;

		/* not enough headroom for the command words: copy with room */
		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
		dev_kfree_skb_any(skb);

	if (lan78xx_linearize(skb) < 0)

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	if (skb_is_gso(skb)) {
		/* hardware requires a minimum MSS */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;

	/* command words are little-endian on the wire; B precedes A push-wise */
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);
/* Move a completed skb from its active queue (`list`) to dev->done and mark
 * its new state, scheduling the bottom half when done transitions from empty.
 * Locking is an intentional irq-flag handoff: irqsave on list->lock, plain
 * unlock of list->lock, plain lock of done.lock, then irqrestore on
 * done.lock — interrupts stay disabled across both critical sections.
 * Returns the skb's previous state (used by rx_complete to detect unlink).
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only the first entry needs to kick the tasklet; it drains the rest */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);
/* Bulk-out URB completion handler (interrupt context).  Updates TX stats,
 * classifies errors (endpoint halt → defer EVENT_TX_HALT; PM/flow errors →
 * stop the queue), releases the async autopm reference taken at submit time,
 * and hands the skb to defer_bh() for cleanup in the bottom half.
 * NOTE(review): extraction dropped the case labels/default of the error
 * switch and several break statements — verify against upstream.
 */
static void tx_complete(struct urb *urb)
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
		dev->net->stats.tx_errors++;

		switch (urb->status) {
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);

		/* software-driven interface shutdown */

			netif_stop_queue(dev->net);

			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
/* Append `newsk` to `list` and record its lifecycle state in the skb_data
 * control block.  Caller must hold the list's lock (__skb_queue_tail is the
 * unlocked variant).
 */
static void lan78xx_queue_skb(struct sk_buff_head *list,
			      struct sk_buff *newsk, enum skb_state state)
	struct skb_data *entry = (struct skb_data *)newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
/* ndo_start_xmit: timestamp, prepend TX command words via lan78xx_tx_prep(),
 * queue the prepared skb on txq_pend for the bottom half to batch into bulk
 * URBs, applying simple backpressure (stop queue past 10 pending).  Failed
 * preps are counted as errors/drops.  Always returns NETDEV_TX_OK — the skb
 * is consumed either way.
 * NOTE(review): extraction dropped the `if (skb2) { ... } else { ... }`
 * branch structure around the queue/error paths — verify upstream.
 */
netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	skb_tx_timestamp(skb);
	skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);

		skb_queue_tail(&dev->txq_pend, skb2);

		/* crude backpressure threshold on the pending queue */
		if (skb_queue_len(&dev->txq_pend) > 10)
			netif_stop_queue(net);

		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
/* Scan the interface's altsettings for one bulk-IN, one bulk-OUT and one
 * interrupt-IN endpoint; record the derived pipes in dev->pipe_in/pipe_out
 * and stash the status endpoint in dev->ep_intr.
 * NOTE(review): extraction dropped loop-local declarations (ep, intr),
 * several case bodies/continue statements and the returns — verify upstream.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {

		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only interrupt-IN endpoints qualify */
				if (!usb_endpoint_dir_in(&e->desc))
			case USB_ENDPOINT_XFER_BULK:
				if (usb_endpoint_dir_in(&e->desc)) {
				else if (intr && !status)

	if (!alt || !in || !out)

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;
/* Probe-time binding: discover endpoints, allocate the driver-private pdata
 * (stored as an opaque pointer in dev->data[0]), initialise its locks, work
 * items and VLAN table, choose default offload features, run the full chip
 * init, and register the MDIO bus.  Defaults WoL to magic packet.
 * NOTE(review): extraction dropped error checks/returns, the `int i` loop
 * declaration and the allocation-failure return — verify against upstream.
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
	struct lan78xx_priv *pdata = NULL;

	ret = lan78xx_get_endpoints(dev, intf);

	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	/* build the default feature set from the compile-time knobs */
	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE)
		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;

	dev->net->hw_features = dev->net->features;

	/* Init all registers */
	ret = lan78xx_reset(dev);

	lan78xx_mdio_init(dev);

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;
/* Disconnect-time teardown: unregister the MDIO bus and free the pdata
 * allocated in lan78xx_bind().
 * NOTE(review): extraction dropped the `if (pdata)` guard / kfree lines that
 * presumably follow the netif_dbg — verify against upstream.
 */
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	lan78xx_remove_mdio(dev);

	netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Apply hardware RX checksum results to an skb: if RXCSUM is disabled or the
 * hardware flagged the checksum as not computed (RX_CMD_A_ICSM_), mark
 * CHECKSUM_NONE; otherwise install the 16-bit checksum from rx_cmd_b and mark
 * CHECKSUM_COMPLETE.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
		skb->ip_summed = CHECKSUM_NONE;
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
/* Deliver one fully-extracted frame to the network stack.  While RX is
 * paused (EVENT_RX_PAUSED) frames are parked on rxq_pause instead.  Updates
 * RX stats, clears the skb control block, honours deferred RX timestamping,
 * then hands the skb to netif_rx().
 * NOTE(review): extraction dropped the `int status` declaration and early
 * returns — verify against upstream.
 */
void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);

	skb->protocol = eth_type_trans(skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* cb must be clean before the stack reuses it */
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
/* Parse one bulk-in buffer that may contain several frames.  Each frame is
 * preceded by rx_cmd_a (length/error flags), rx_cmd_b (checksum) and the
 * 16-bit rx_cmd_c; frames are 4-byte aligned after RXW_PADDING.  The final
 * frame reuses the URB skb; earlier frames are skb_clone()d and pointed at
 * their payload.  4 bytes of FCS are trimmed before delivery.
 * NOTE(review): extraction dropped error-stat updates in the RED branch,
 * `packet` assignment, return statements and some braces — verify upstream.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
	if (skb->len < dev->net->hard_header_len)

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* peel off the three little-endian command words */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");

			/* point the clone at this frame's payload only */
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
			skb_pull(skb, align_count);
/* Bottom-half helper: run the frame parser on a completed RX buffer.  On
 * parse failure count an rx_error, log, and requeue the skb on dev->done
 * (state handling around the queueing was dropped by the extraction).
 * NOTE(review): control flow between the branches is partially missing —
 * verify against upstream.
 */
static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
	if (!lan78xx_rx(dev, skb)) {
		dev->net->stats.rx_errors++;

		lan78xx_skb_return(dev, skb);

	netif_dbg(dev, rx_err, dev->net, "drop\n");
	dev->net->stats.rx_errors++;

	skb_queue_tail(&dev->done, skb);
2490 static void rx_complete(struct urb
*urb
);
/* Allocate an rx skb, attach it to `urb`, and submit on the bulk-in pipe —
 * but only while the device is present, running, and neither halted nor
 * asleep.  On success the skb enters rxq in rx_start state; -EPIPE defers a
 * halt-clear, -ENODEV detaches the netif, other errors reschedule the bh.
 * On any failure the skb is freed and the error returned.
 * NOTE(review): extraction dropped the `int ret` declaration, the !skb
 * allocation-failure branch, entry field assignments, several case labels
 * and the final return — verify against upstream.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;

	skb = netdev_alloc_skb_ip_align(dev->net, size);

	entry = (struct skb_data *)skb->cb;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);

			lan78xx_queue_skb(&dev->rxq, skb, rx_start);

			/* endpoint stall: clear from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);

			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);

			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);

		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");

	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

		dev_kfree_skb_any(skb);
/* Bulk-in URB completion handler (interrupt context).  Classifies the URB
 * status (runt frames, stalls → EVENT_RX_HALT, unlink/shutdown, overruns,
 * transient errors), moves the skb to dev->done via defer_bh(), and — unless
 * the URB was being unlinked or RX is halted — resubmits the same URB.
 * NOTE(review): extraction dropped the state assignments per branch, case
 * labels, breaks/returns and the resubmit's else path — verify upstream.
 */
static void rx_complete(struct urb *urb)
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);

	switch (urb_status) {
		/* success: sanity-check minimum frame size */
		if (skb->len < dev->net->hard_header_len) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		usb_mark_last_busy(dev->udev);

		/* stall: defer a halt-clear to process context */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);

	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);

		dev->net->stats.rx_errors++;

	/* data overrun ... flush fifo? */
		dev->net->stats.rx_over_errors++;

		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    state != unlink_start) {
		rx_submit(dev, urb, GFP_ATOMIC);

	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* TX bottom half: coalesce pending skbs from txq_pend into one bulk buffer
 * (each sub-frame padded to a u32 boundary, GSO skbs sent alone, total capped
 * at MAX_SINGLE_PACKET_SIZE), then allocate a URB and submit it on the
 * bulk-out pipe under txq.lock with an async autopm reference.  Transmission
 * is deferred to the anchor queue while EVENT_DEV_ASLEEP is set; -EPIPE stops
 * the queue and defers EVENT_TX_HALT; other failures drop and count.
 * NOTE(review): extraction dropped many lines (declarations of count/pos/
 * length/ret, the single-GSO-skb fast path, allocation-failure branches,
 * entry->urb/dev assignments, URB_ZERO_PACKET length adjustment, gotos and
 * labels) — verify against upstream before relying on details.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int skb_totallen, pkt_cnt;

	/* first pass: decide how many pending skbs fit in one buffer */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			/* handle previous packets first */
			skb2 = skb_dequeue(tqp);
		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);

	skb_put(skb, skb_totallen);

	/* second pass: concatenate the selected skbs, u32-aligned */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);

	length = skb_totallen;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
		netif_dbg(dev, tx_err, dev->net, "no urb\n");

	entry = (struct skb_data *)skb->cb;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
		spin_unlock_irqrestore(&dev->txq.lock, flags);

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;

	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");

	ret = usb_submit_urb(urb, GFP_ATOMIC);
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);

		/* -EPIPE: endpoint stalled */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);

		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);

	spin_unlock_irqrestore(&dev->txq.lock, flags);

		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
		dev->net->stats.tx_dropped++;
		dev_kfree_skb_any(skb);

	netif_dbg(dev, tx_queued, dev->net,
		  "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* RX bottom half: top up the in-flight RX URB pool (at most 10 allocations
 * per pass) until rx_qlen URBs are outstanding; reschedule itself if still
 * short, and wake the TX queue when txq has drained below tx_qlen.
 * NOTE(review): extraction dropped the urb/i declarations, the !urb
 * allocation check and the -ENOLINK early return — verify upstream.
 */
static void lan78xx_rx_bh(struct lan78xx_net *dev)
	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
		for (i = 0; i < 10; i++) {
			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
			urb = usb_alloc_urb(0, GFP_ATOMIC);
				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)

		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
			tasklet_schedule(&dev->bh);
	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
		netif_wake_queue(dev->net);
/* Main tasklet: drain dev->done — processing rx_done skbs through
 * rx_process() and freeing URBs for tx_done/rx_cleanup entries — then, if
 * the device is up, run the TX bottom half when packets are pending and the
 * RX bottom half unless a throttle timer or RX halt is active.
 * NOTE(review): extraction dropped the case labels of the state switch,
 * dev_kfree_skb calls, and the lan78xx_tx_bh/lan78xx_rx_bh call statements
 * inside the final conditionals — verify against upstream.
 */
static void lan78xx_bh(unsigned long param)
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
			/* rx_done: parse and deliver, then clean up */
			entry->state = rx_cleanup;
			rx_process(dev, skb);

			usb_free_urb(entry->urb);

			usb_free_urb(entry->urb);

			netdev_dbg(dev->net, "skb state %d\n", entry->state);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		if (!skb_queue_empty(&dev->txq_pend))

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
2805 static void lan78xx_delayedwork(struct work_struct
*work
)
2808 struct lan78xx_net
*dev
;
2810 dev
= container_of(work
, struct lan78xx_net
, wq
.work
);
2812 if (test_bit(EVENT_TX_HALT
, &dev
->flags
)) {
2813 unlink_urbs(dev
, &dev
->txq
);
2814 status
= usb_autopm_get_interface(dev
->intf
);
2817 status
= usb_clear_halt(dev
->udev
, dev
->pipe_out
);
2818 usb_autopm_put_interface(dev
->intf
);
2821 status
!= -ESHUTDOWN
) {
2822 if (netif_msg_tx_err(dev
))
2824 netdev_err(dev
->net
,
2825 "can't clear tx halt, status %d\n",
2828 clear_bit(EVENT_TX_HALT
, &dev
->flags
);
2829 if (status
!= -ESHUTDOWN
)
2830 netif_wake_queue(dev
->net
);
2833 if (test_bit(EVENT_RX_HALT
, &dev
->flags
)) {
2834 unlink_urbs(dev
, &dev
->rxq
);
2835 status
= usb_autopm_get_interface(dev
->intf
);
2838 status
= usb_clear_halt(dev
->udev
, dev
->pipe_in
);
2839 usb_autopm_put_interface(dev
->intf
);
2842 status
!= -ESHUTDOWN
) {
2843 if (netif_msg_rx_err(dev
))
2845 netdev_err(dev
->net
,
2846 "can't clear rx halt, status %d\n",
2849 clear_bit(EVENT_RX_HALT
, &dev
->flags
);
2850 tasklet_schedule(&dev
->bh
);
2854 if (test_bit(EVENT_LINK_RESET
, &dev
->flags
)) {
2857 clear_bit(EVENT_LINK_RESET
, &dev
->flags
);
2858 status
= usb_autopm_get_interface(dev
->intf
);
2861 if (lan78xx_link_reset(dev
) < 0) {
2862 usb_autopm_put_interface(dev
->intf
);
2864 netdev_info(dev
->net
, "link reset failed (%d)\n",
2867 usb_autopm_put_interface(dev
->intf
);
2872 static void intr_complete(struct urb
*urb
)
2874 struct lan78xx_net
*dev
= urb
->context
;
2875 int status
= urb
->status
;
2880 lan78xx_status(dev
, urb
);
2883 /* software-driven interface shutdown */
2884 case -ENOENT
: /* urb killed */
2885 case -ESHUTDOWN
: /* hardware gone */
2886 netif_dbg(dev
, ifdown
, dev
->net
,
2887 "intr shutdown, code %d\n", status
);
2890 /* NOTE: not throttling like RX/TX, since this endpoint
2891 * already polls infrequently
2894 netdev_dbg(dev
->net
, "intr status %d\n", status
);
2898 if (!netif_running(dev
->net
))
2901 memset(urb
->transfer_buffer
, 0, urb
->transfer_buffer_length
);
2902 status
= usb_submit_urb(urb
, GFP_ATOMIC
);
2904 netif_err(dev
, timer
, dev
->net
,
2905 "intr resubmit --> %d\n", status
);
2908 static void lan78xx_disconnect(struct usb_interface
*intf
)
2910 struct lan78xx_net
*dev
;
2911 struct usb_device
*udev
;
2912 struct net_device
*net
;
2914 dev
= usb_get_intfdata(intf
);
2915 usb_set_intfdata(intf
, NULL
);
2919 udev
= interface_to_usbdev(intf
);
2922 unregister_netdev(net
);
2924 cancel_delayed_work_sync(&dev
->wq
);
2926 usb_scuttle_anchored_urbs(&dev
->deferred
);
2928 lan78xx_unbind(dev
, intf
);
2930 usb_kill_urb(dev
->urb_intr
);
2931 usb_free_urb(dev
->urb_intr
);
2937 void lan78xx_tx_timeout(struct net_device
*net
)
2939 struct lan78xx_net
*dev
= netdev_priv(net
);
2941 unlink_urbs(dev
, &dev
->txq
);
2942 tasklet_schedule(&dev
->bh
);
2945 static const struct net_device_ops lan78xx_netdev_ops
= {
2946 .ndo_open
= lan78xx_open
,
2947 .ndo_stop
= lan78xx_stop
,
2948 .ndo_start_xmit
= lan78xx_start_xmit
,
2949 .ndo_tx_timeout
= lan78xx_tx_timeout
,
2950 .ndo_change_mtu
= lan78xx_change_mtu
,
2951 .ndo_set_mac_address
= lan78xx_set_mac_addr
,
2952 .ndo_validate_addr
= eth_validate_addr
,
2953 .ndo_do_ioctl
= lan78xx_ioctl
,
2954 .ndo_set_rx_mode
= lan78xx_set_multicast
,
2955 .ndo_set_features
= lan78xx_set_features
,
2956 .ndo_vlan_rx_add_vid
= lan78xx_vlan_rx_add_vid
,
2957 .ndo_vlan_rx_kill_vid
= lan78xx_vlan_rx_kill_vid
,
2960 static int lan78xx_probe(struct usb_interface
*intf
,
2961 const struct usb_device_id
*id
)
2963 struct lan78xx_net
*dev
;
2964 struct net_device
*netdev
;
2965 struct usb_device
*udev
;
2971 udev
= interface_to_usbdev(intf
);
2972 udev
= usb_get_dev(udev
);
2975 netdev
= alloc_etherdev(sizeof(struct lan78xx_net
));
2977 dev_err(&intf
->dev
, "Error: OOM\n");
2981 /* netdev_printk() needs this */
2982 SET_NETDEV_DEV(netdev
, &intf
->dev
);
2984 dev
= netdev_priv(netdev
);
2988 dev
->msg_enable
= netif_msg_init(msg_level
, NETIF_MSG_DRV
2989 | NETIF_MSG_PROBE
| NETIF_MSG_LINK
);
2991 skb_queue_head_init(&dev
->rxq
);
2992 skb_queue_head_init(&dev
->txq
);
2993 skb_queue_head_init(&dev
->done
);
2994 skb_queue_head_init(&dev
->rxq_pause
);
2995 skb_queue_head_init(&dev
->txq_pend
);
2996 mutex_init(&dev
->phy_mutex
);
2998 tasklet_init(&dev
->bh
, lan78xx_bh
, (unsigned long)dev
);
2999 INIT_DELAYED_WORK(&dev
->wq
, lan78xx_delayedwork
);
3000 init_usb_anchor(&dev
->deferred
);
3002 netdev
->netdev_ops
= &lan78xx_netdev_ops
;
3003 netdev
->watchdog_timeo
= TX_TIMEOUT_JIFFIES
;
3004 netdev
->ethtool_ops
= &lan78xx_ethtool_ops
;
3006 ret
= lan78xx_bind(dev
, intf
);
3009 strcpy(netdev
->name
, "eth%d");
3011 if (netdev
->mtu
> (dev
->hard_mtu
- netdev
->hard_header_len
))
3012 netdev
->mtu
= dev
->hard_mtu
- netdev
->hard_header_len
;
3014 dev
->ep_blkin
= (intf
->cur_altsetting
)->endpoint
+ 0;
3015 dev
->ep_blkout
= (intf
->cur_altsetting
)->endpoint
+ 1;
3016 dev
->ep_intr
= (intf
->cur_altsetting
)->endpoint
+ 2;
3018 dev
->pipe_in
= usb_rcvbulkpipe(udev
, BULK_IN_PIPE
);
3019 dev
->pipe_out
= usb_sndbulkpipe(udev
, BULK_OUT_PIPE
);
3021 dev
->pipe_intr
= usb_rcvintpipe(dev
->udev
,
3022 dev
->ep_intr
->desc
.bEndpointAddress
&
3023 USB_ENDPOINT_NUMBER_MASK
);
3024 period
= dev
->ep_intr
->desc
.bInterval
;
3026 maxp
= usb_maxpacket(dev
->udev
, dev
->pipe_intr
, 0);
3027 buf
= kmalloc(maxp
, GFP_KERNEL
);
3029 dev
->urb_intr
= usb_alloc_urb(0, GFP_KERNEL
);
3030 if (!dev
->urb_intr
) {
3034 usb_fill_int_urb(dev
->urb_intr
, dev
->udev
,
3035 dev
->pipe_intr
, buf
, maxp
,
3036 intr_complete
, dev
, period
);
3040 dev
->maxpacket
= usb_maxpacket(dev
->udev
, dev
->pipe_out
, 1);
3042 /* driver requires remote-wakeup capability during autosuspend. */
3043 intf
->needs_remote_wakeup
= 1;
3045 ret
= register_netdev(netdev
);
3047 netif_err(dev
, probe
, netdev
, "couldn't register the device\n");
3051 usb_set_intfdata(intf
, dev
);
3053 ret
= device_set_wakeup_enable(&udev
->dev
, true);
3055 /* Default delay of 2sec has more overhead than advantage.
3056 * Set to 10sec as default.
3058 pm_runtime_set_autosuspend_delay(&udev
->dev
,
3059 DEFAULT_AUTOSUSPEND_DELAY
);
3064 lan78xx_unbind(dev
, intf
);
3066 free_netdev(netdev
);
3073 static u16
lan78xx_wakeframe_crc16(const u8
*buf
, int len
)
3075 const u16 crc16poly
= 0x8005;
3081 for (i
= 0; i
< len
; i
++) {
3083 for (bit
= 0; bit
< 8; bit
++) {
3087 if (msb
^ (u16
)(data
& 1)) {
3089 crc
|= (u16
)0x0001U
;
3098 static int lan78xx_set_suspend(struct lan78xx_net
*dev
, u32 wol
)
3106 const u8 ipv4_multicast
[3] = { 0x01, 0x00, 0x5E };
3107 const u8 ipv6_multicast
[3] = { 0x33, 0x33 };
3108 const u8 arp_type
[2] = { 0x08, 0x06 };
3110 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3111 buf
&= ~MAC_TX_TXEN_
;
3112 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3113 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3114 buf
&= ~MAC_RX_RXEN_
;
3115 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3117 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3118 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3119 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3124 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &temp_pmt_ctl
);
3125 temp_pmt_ctl
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3126 temp_pmt_ctl
|= PMT_CTL_RES_CLR_WKP_STS_
;
3128 for (mask_index
= 0; mask_index
< NUM_OF_WUF_CFG
; mask_index
++)
3129 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
), 0);
3132 if (wol
& WAKE_PHY
) {
3133 temp_pmt_ctl
|= PMT_CTL_PHY_WAKE_EN_
;
3135 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3136 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3137 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3139 if (wol
& WAKE_MAGIC
) {
3140 temp_wucsr
|= WUCSR_MPEN_
;
3142 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3143 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3144 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_3_
;
3146 if (wol
& WAKE_BCAST
) {
3147 temp_wucsr
|= WUCSR_BCST_EN_
;
3149 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3150 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3151 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3153 if (wol
& WAKE_MCAST
) {
3154 temp_wucsr
|= WUCSR_WAKE_EN_
;
3156 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3157 crc
= lan78xx_wakeframe_crc16(ipv4_multicast
, 3);
3158 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3160 WUF_CFGX_TYPE_MCAST_
|
3161 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3162 (crc
& WUF_CFGX_CRC16_MASK_
));
3164 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 7);
3165 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3166 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3167 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3170 /* for IPv6 Multicast */
3171 crc
= lan78xx_wakeframe_crc16(ipv6_multicast
, 2);
3172 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3174 WUF_CFGX_TYPE_MCAST_
|
3175 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3176 (crc
& WUF_CFGX_CRC16_MASK_
));
3178 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 3);
3179 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3180 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3181 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3184 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3185 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3186 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3188 if (wol
& WAKE_UCAST
) {
3189 temp_wucsr
|= WUCSR_PFDA_EN_
;
3191 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3192 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3193 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3195 if (wol
& WAKE_ARP
) {
3196 temp_wucsr
|= WUCSR_WAKE_EN_
;
3198 /* set WUF_CFG & WUF_MASK
3199 * for packettype (offset 12,13) = ARP (0x0806)
3201 crc
= lan78xx_wakeframe_crc16(arp_type
, 2);
3202 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3204 WUF_CFGX_TYPE_ALL_
|
3205 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3206 (crc
& WUF_CFGX_CRC16_MASK_
));
3208 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 0x3000);
3209 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3210 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3211 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3214 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3215 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3216 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3219 ret
= lan78xx_write_reg(dev
, WUCSR
, temp_wucsr
);
3221 /* when multiple WOL bits are set */
3222 if (hweight_long((unsigned long)wol
) > 1) {
3223 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3224 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3225 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3227 ret
= lan78xx_write_reg(dev
, PMT_CTL
, temp_pmt_ctl
);
3230 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3231 buf
|= PMT_CTL_WUPS_MASK_
;
3232 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3234 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3235 buf
|= MAC_RX_RXEN_
;
3236 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3241 int lan78xx_suspend(struct usb_interface
*intf
, pm_message_t message
)
3243 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
3244 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
3249 event
= message
.event
;
3251 if (!dev
->suspend_count
++) {
3252 spin_lock_irq(&dev
->txq
.lock
);
3253 /* don't autosuspend while transmitting */
3254 if ((skb_queue_len(&dev
->txq
) ||
3255 skb_queue_len(&dev
->txq_pend
)) &&
3256 PMSG_IS_AUTO(message
)) {
3257 spin_unlock_irq(&dev
->txq
.lock
);
3261 set_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
3262 spin_unlock_irq(&dev
->txq
.lock
);
3266 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3267 buf
&= ~MAC_TX_TXEN_
;
3268 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3269 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3270 buf
&= ~MAC_RX_RXEN_
;
3271 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3273 /* empty out the rx and queues */
3274 netif_device_detach(dev
->net
);
3275 lan78xx_terminate_urbs(dev
);
3276 usb_kill_urb(dev
->urb_intr
);
3279 netif_device_attach(dev
->net
);
3282 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
3283 if (PMSG_IS_AUTO(message
)) {
3284 /* auto suspend (selective suspend) */
3285 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3286 buf
&= ~MAC_TX_TXEN_
;
3287 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3288 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3289 buf
&= ~MAC_RX_RXEN_
;
3290 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3292 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3293 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3294 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3296 /* set goodframe wakeup */
3297 ret
= lan78xx_read_reg(dev
, WUCSR
, &buf
);
3299 buf
|= WUCSR_RFE_WAKE_EN_
;
3300 buf
|= WUCSR_STORE_WAKE_
;
3302 ret
= lan78xx_write_reg(dev
, WUCSR
, buf
);
3304 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3306 buf
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3307 buf
|= PMT_CTL_RES_CLR_WKP_STS_
;
3309 buf
|= PMT_CTL_PHY_WAKE_EN_
;
3310 buf
|= PMT_CTL_WOL_EN_
;
3311 buf
&= ~PMT_CTL_SUS_MODE_MASK_
;
3312 buf
|= PMT_CTL_SUS_MODE_3_
;
3314 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3316 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3318 buf
|= PMT_CTL_WUPS_MASK_
;
3320 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3322 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3323 buf
|= MAC_RX_RXEN_
;
3324 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3326 lan78xx_set_suspend(dev
, pdata
->wol
);
3335 int lan78xx_resume(struct usb_interface
*intf
)
3337 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
3338 struct sk_buff
*skb
;
3343 if (!--dev
->suspend_count
) {
3344 /* resume interrupt URBs */
3345 if (dev
->urb_intr
&& test_bit(EVENT_DEV_OPEN
, &dev
->flags
))
3346 usb_submit_urb(dev
->urb_intr
, GFP_NOIO
);
3348 spin_lock_irq(&dev
->txq
.lock
);
3349 while ((res
= usb_get_from_anchor(&dev
->deferred
))) {
3350 skb
= (struct sk_buff
*)res
->context
;
3351 ret
= usb_submit_urb(res
, GFP_ATOMIC
);
3353 dev_kfree_skb_any(skb
);
3355 usb_autopm_put_interface_async(dev
->intf
);
3357 dev
->net
->trans_start
= jiffies
;
3358 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
3362 clear_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
3363 spin_unlock_irq(&dev
->txq
.lock
);
3365 if (test_bit(EVENT_DEV_OPEN
, &dev
->flags
)) {
3366 if (!(skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
))
3367 netif_start_queue(dev
->net
);
3368 tasklet_schedule(&dev
->bh
);
3372 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3373 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3374 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3376 ret
= lan78xx_write_reg(dev
, WUCSR2
, WUCSR2_NS_RCD_
|
3378 WUCSR2_IPV6_TCPSYN_RCD_
|
3379 WUCSR2_IPV4_TCPSYN_RCD_
);
3381 ret
= lan78xx_write_reg(dev
, WUCSR
, WUCSR_EEE_TX_WAKE_
|
3382 WUCSR_EEE_RX_WAKE_
|
3384 WUCSR_RFE_WAKE_FR_
|
3389 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3390 buf
|= MAC_TX_TXEN_
;
3391 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
/* lan78xx_reset_resume - resume after the device was reset (registers
 * lost): reinitialize the chip and PHY, then run the normal resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3407 static const struct usb_device_id products
[] = {
3409 /* LAN7800 USB Gigabit Ethernet Device */
3410 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7800_USB_PRODUCT_ID
),
3413 /* LAN7850 USB Gigabit Ethernet Device */
3414 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7850_USB_PRODUCT_ID
),
3418 MODULE_DEVICE_TABLE(usb
, products
);
3420 static struct usb_driver lan78xx_driver
= {
3421 .name
= DRIVER_NAME
,
3422 .id_table
= products
,
3423 .probe
= lan78xx_probe
,
3424 .disconnect
= lan78xx_disconnect
,
3425 .suspend
= lan78xx_suspend
,
3426 .resume
= lan78xx_resume
,
3427 .reset_resume
= lan78xx_reset_resume
,
3428 .supports_autosuspend
= 1,
3429 .disable_hub_initiated_lpm
= 1,
3432 module_usb_driver(lan78xx_driver
);
3434 MODULE_AUTHOR(DRIVER_AUTHOR
);
3435 MODULE_DESCRIPTION(DRIVER_DESC
);
3436 MODULE_LICENSE("GPL");