2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
36 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME "lan78xx"
39 #define DRIVER_VERSION "1.0.3"
41 #define TX_TIMEOUT_JIFFIES (5 * HZ)
42 #define THROTTLE_JIFFIES (HZ / 8)
43 #define UNLINK_TIMEOUT_MS 3
45 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
47 #define SS_USB_PKT_SIZE (1024)
48 #define HS_USB_PKT_SIZE (512)
49 #define FS_USB_PKT_SIZE (64)
51 #define MAX_RX_FIFO_SIZE (12 * 1024)
52 #define MAX_TX_FIFO_SIZE (12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY (0x0800)
55 #define MAX_SINGLE_PACKET_SIZE (9000)
56 #define DEFAULT_TX_CSUM_ENABLE (true)
57 #define DEFAULT_RX_CSUM_ENABLE (true)
58 #define DEFAULT_TSO_CSUM_ENABLE (true)
59 #define DEFAULT_VLAN_FILTER_ENABLE (true)
60 #define TX_OVERHEAD (8)
63 #define LAN78XX_USB_VENDOR_ID (0x0424)
64 #define LAN7800_USB_PRODUCT_ID (0x7800)
65 #define LAN7850_USB_PRODUCT_ID (0x7850)
66 #define LAN78XX_EEPROM_MAGIC (0x78A5)
67 #define LAN78XX_OTP_MAGIC (0x78F3)
72 #define EEPROM_INDICATOR (0xA5)
73 #define EEPROM_MAC_OFFSET (0x01)
74 #define MAX_EEPROM_SIZE 512
75 #define OTP_INDICATOR_1 (0xF3)
76 #define OTP_INDICATOR_2 (0xF7)
78 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
79 WAKE_MCAST | WAKE_BCAST | \
80 WAKE_ARP | WAKE_MAGIC)
82 /* USB related defines */
83 #define BULK_IN_PIPE 1
84 #define BULK_OUT_PIPE 2
86 /* default autosuspend delay (mSec)*/
87 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
89 static const char lan78xx_gstrings
[][ETH_GSTRING_LEN
] = {
91 "RX Alignment Errors",
94 "RX Undersize Frame Errors",
95 "RX Oversize Frame Errors",
97 "RX Unicast Byte Count",
98 "RX Broadcast Byte Count",
99 "RX Multicast Byte Count",
101 "RX Broadcast Frames",
102 "RX Multicast Frames",
105 "RX 65 - 127 Byte Frames",
106 "RX 128 - 255 Byte Frames",
107 "RX 256 - 511 Bytes Frames",
108 "RX 512 - 1023 Byte Frames",
109 "RX 1024 - 1518 Byte Frames",
110 "RX Greater 1518 Byte Frames",
111 "EEE RX LPI Transitions",
114 "TX Excess Deferral Errors",
117 "TX Single Collisions",
118 "TX Multiple Collisions",
119 "TX Excessive Collision",
120 "TX Late Collisions",
121 "TX Unicast Byte Count",
122 "TX Broadcast Byte Count",
123 "TX Multicast Byte Count",
125 "TX Broadcast Frames",
126 "TX Multicast Frames",
129 "TX 65 - 127 Byte Frames",
130 "TX 128 - 255 Byte Frames",
131 "TX 256 - 511 Bytes Frames",
132 "TX 512 - 1023 Byte Frames",
133 "TX 1024 - 1518 Byte Frames",
134 "TX Greater 1518 Byte Frames",
135 "EEE TX LPI Transitions",
139 struct lan78xx_statstage
{
141 u32 rx_alignment_errors
;
142 u32 rx_fragment_errors
;
143 u32 rx_jabber_errors
;
144 u32 rx_undersize_frame_errors
;
145 u32 rx_oversize_frame_errors
;
146 u32 rx_dropped_frames
;
147 u32 rx_unicast_byte_count
;
148 u32 rx_broadcast_byte_count
;
149 u32 rx_multicast_byte_count
;
150 u32 rx_unicast_frames
;
151 u32 rx_broadcast_frames
;
152 u32 rx_multicast_frames
;
154 u32 rx_64_byte_frames
;
155 u32 rx_65_127_byte_frames
;
156 u32 rx_128_255_byte_frames
;
157 u32 rx_256_511_bytes_frames
;
158 u32 rx_512_1023_byte_frames
;
159 u32 rx_1024_1518_byte_frames
;
160 u32 rx_greater_1518_byte_frames
;
161 u32 eee_rx_lpi_transitions
;
164 u32 tx_excess_deferral_errors
;
165 u32 tx_carrier_errors
;
166 u32 tx_bad_byte_count
;
167 u32 tx_single_collisions
;
168 u32 tx_multiple_collisions
;
169 u32 tx_excessive_collision
;
170 u32 tx_late_collisions
;
171 u32 tx_unicast_byte_count
;
172 u32 tx_broadcast_byte_count
;
173 u32 tx_multicast_byte_count
;
174 u32 tx_unicast_frames
;
175 u32 tx_broadcast_frames
;
176 u32 tx_multicast_frames
;
178 u32 tx_64_byte_frames
;
179 u32 tx_65_127_byte_frames
;
180 u32 tx_128_255_byte_frames
;
181 u32 tx_256_511_bytes_frames
;
182 u32 tx_512_1023_byte_frames
;
183 u32 tx_1024_1518_byte_frames
;
184 u32 tx_greater_1518_byte_frames
;
185 u32 eee_tx_lpi_transitions
;
191 struct lan78xx_priv
{
192 struct lan78xx_net
*dev
;
194 u32 mchash_table
[DP_SEL_VHF_HASH_LEN
]; /* multicat hash table */
195 u32 pfilter_table
[NUM_OF_MAF
][2]; /* perfect filter table */
196 u32 vlan_table
[DP_SEL_VHF_VLAN_LEN
];
197 struct mutex dataport_mutex
; /* for dataport access */
198 spinlock_t rfe_ctl_lock
; /* for rfe register access */
199 struct work_struct set_multicast
;
200 struct work_struct set_vlan
;
214 struct skb_data
{ /* skb->cb is one of these */
216 struct lan78xx_net
*dev
;
217 enum skb_state state
;
222 struct usb_ctrlrequest req
;
223 struct lan78xx_net
*dev
;
226 #define EVENT_TX_HALT 0
227 #define EVENT_RX_HALT 1
228 #define EVENT_RX_MEMORY 2
229 #define EVENT_STS_SPLIT 3
230 #define EVENT_LINK_RESET 4
231 #define EVENT_RX_PAUSED 5
232 #define EVENT_DEV_WAKING 6
233 #define EVENT_DEV_ASLEEP 7
234 #define EVENT_DEV_OPEN 8
237 struct net_device
*net
;
238 struct usb_device
*udev
;
239 struct usb_interface
*intf
;
244 struct sk_buff_head rxq
;
245 struct sk_buff_head txq
;
246 struct sk_buff_head done
;
247 struct sk_buff_head rxq_pause
;
248 struct sk_buff_head txq_pend
;
250 struct tasklet_struct bh
;
251 struct delayed_work wq
;
253 struct usb_host_endpoint
*ep_blkin
;
254 struct usb_host_endpoint
*ep_blkout
;
255 struct usb_host_endpoint
*ep_intr
;
259 struct urb
*urb_intr
;
260 struct usb_anchor deferred
;
262 struct mutex phy_mutex
; /* for phy access */
263 unsigned pipe_in
, pipe_out
, pipe_intr
;
265 u32 hard_mtu
; /* count any extra framing */
266 size_t rx_urb_size
; /* size for rx urbs */
270 wait_queue_head_t
*wait
;
271 unsigned char suspend_count
;
274 struct timer_list delay
;
276 unsigned long data
[5];
283 struct mii_bus
*mdiobus
;
286 u8 fc_request_control
;
/* use ethtool to change the level for any given device */
/* NOTE(review): -1 presumably means "use the subsystem default";
 * confirm against where msg_level is consumed (netif_msg_init?).
 */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
294 static int lan78xx_read_reg(struct lan78xx_net
*dev
, u32 index
, u32
*data
)
296 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
302 ret
= usb_control_msg(dev
->udev
, usb_rcvctrlpipe(dev
->udev
, 0),
303 USB_VENDOR_REQUEST_READ_REGISTER
,
304 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
305 0, index
, buf
, 4, USB_CTRL_GET_TIMEOUT
);
306 if (likely(ret
>= 0)) {
310 netdev_warn(dev
->net
,
311 "Failed to read register index 0x%08x. ret = %d",
320 static int lan78xx_write_reg(struct lan78xx_net
*dev
, u32 index
, u32 data
)
322 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
331 ret
= usb_control_msg(dev
->udev
, usb_sndctrlpipe(dev
->udev
, 0),
332 USB_VENDOR_REQUEST_WRITE_REGISTER
,
333 USB_DIR_OUT
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
334 0, index
, buf
, 4, USB_CTRL_SET_TIMEOUT
);
335 if (unlikely(ret
< 0)) {
336 netdev_warn(dev
->net
,
337 "Failed to write register index 0x%08x. ret = %d",
346 static int lan78xx_read_stats(struct lan78xx_net
*dev
,
347 struct lan78xx_statstage
*data
)
351 struct lan78xx_statstage
*stats
;
355 stats
= kmalloc(sizeof(*stats
), GFP_KERNEL
);
359 ret
= usb_control_msg(dev
->udev
,
360 usb_rcvctrlpipe(dev
->udev
, 0),
361 USB_VENDOR_REQUEST_GET_STATS
,
362 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
367 USB_CTRL_SET_TIMEOUT
);
368 if (likely(ret
>= 0)) {
371 for (i
= 0; i
< sizeof(*stats
)/sizeof(u32
); i
++) {
372 le32_to_cpus(&src
[i
]);
376 netdev_warn(dev
->net
,
377 "Failed to read stat ret = 0x%x", ret
);
385 /* Loop until the read is completed with timeout called with phy_mutex held */
386 static int lan78xx_phy_wait_not_busy(struct lan78xx_net
*dev
)
388 unsigned long start_time
= jiffies
;
393 ret
= lan78xx_read_reg(dev
, MII_ACC
, &val
);
394 if (unlikely(ret
< 0))
397 if (!(val
& MII_ACC_MII_BUSY_
))
399 } while (!time_after(jiffies
, start_time
+ HZ
));
404 static inline u32
mii_access(int id
, int index
, int read
)
408 ret
= ((u32
)id
<< MII_ACC_PHY_ADDR_SHIFT_
) & MII_ACC_PHY_ADDR_MASK_
;
409 ret
|= ((u32
)index
<< MII_ACC_MIIRINDA_SHIFT_
) & MII_ACC_MIIRINDA_MASK_
;
411 ret
|= MII_ACC_MII_READ_
;
413 ret
|= MII_ACC_MII_WRITE_
;
414 ret
|= MII_ACC_MII_BUSY_
;
419 static int lan78xx_wait_eeprom(struct lan78xx_net
*dev
)
421 unsigned long start_time
= jiffies
;
426 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
427 if (unlikely(ret
< 0))
430 if (!(val
& E2P_CMD_EPC_BUSY_
) ||
431 (val
& E2P_CMD_EPC_TIMEOUT_
))
433 usleep_range(40, 100);
434 } while (!time_after(jiffies
, start_time
+ HZ
));
436 if (val
& (E2P_CMD_EPC_TIMEOUT_
| E2P_CMD_EPC_BUSY_
)) {
437 netdev_warn(dev
->net
, "EEPROM read operation timeout");
444 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net
*dev
)
446 unsigned long start_time
= jiffies
;
451 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
452 if (unlikely(ret
< 0))
455 if (!(val
& E2P_CMD_EPC_BUSY_
))
458 usleep_range(40, 100);
459 } while (!time_after(jiffies
, start_time
+ HZ
));
461 netdev_warn(dev
->net
, "EEPROM is busy");
465 static int lan78xx_read_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
466 u32 length
, u8
*data
)
473 /* depends on chip, some EEPROM pins are muxed with LED function.
474 * disable & restore LED function to access EEPROM.
476 ret
= lan78xx_read_reg(dev
, HW_CFG
, &val
);
478 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
) {
479 val
&= ~(HW_CFG_LED1_EN_
| HW_CFG_LED0_EN_
);
480 ret
= lan78xx_write_reg(dev
, HW_CFG
, val
);
483 retval
= lan78xx_eeprom_confirm_not_busy(dev
);
487 for (i
= 0; i
< length
; i
++) {
488 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_READ_
;
489 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
490 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
491 if (unlikely(ret
< 0)) {
496 retval
= lan78xx_wait_eeprom(dev
);
500 ret
= lan78xx_read_reg(dev
, E2P_DATA
, &val
);
501 if (unlikely(ret
< 0)) {
506 data
[i
] = val
& 0xFF;
512 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
)
513 ret
= lan78xx_write_reg(dev
, HW_CFG
, saved
);
518 static int lan78xx_read_eeprom(struct lan78xx_net
*dev
, u32 offset
,
519 u32 length
, u8
*data
)
524 ret
= lan78xx_read_raw_eeprom(dev
, 0, 1, &sig
);
525 if ((ret
== 0) && (sig
== EEPROM_INDICATOR
))
526 ret
= lan78xx_read_raw_eeprom(dev
, offset
, length
, data
);
533 static int lan78xx_write_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
534 u32 length
, u8
*data
)
541 /* depends on chip, some EEPROM pins are muxed with LED function.
542 * disable & restore LED function to access EEPROM.
544 ret
= lan78xx_read_reg(dev
, HW_CFG
, &val
);
546 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
) {
547 val
&= ~(HW_CFG_LED1_EN_
| HW_CFG_LED0_EN_
);
548 ret
= lan78xx_write_reg(dev
, HW_CFG
, val
);
551 retval
= lan78xx_eeprom_confirm_not_busy(dev
);
555 /* Issue write/erase enable command */
556 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_EWEN_
;
557 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
558 if (unlikely(ret
< 0)) {
563 retval
= lan78xx_wait_eeprom(dev
);
567 for (i
= 0; i
< length
; i
++) {
568 /* Fill data register */
570 ret
= lan78xx_write_reg(dev
, E2P_DATA
, val
);
576 /* Send "write" command */
577 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_WRITE_
;
578 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
579 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
585 retval
= lan78xx_wait_eeprom(dev
);
594 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
)
595 ret
= lan78xx_write_reg(dev
, HW_CFG
, saved
);
600 static int lan78xx_read_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
601 u32 length
, u8
*data
)
606 unsigned long timeout
;
608 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
610 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
611 /* clear it and wait to be cleared */
612 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
614 timeout
= jiffies
+ HZ
;
617 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
618 if (time_after(jiffies
, timeout
)) {
619 netdev_warn(dev
->net
,
620 "timeout on OTP_PWR_DN");
623 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
626 for (i
= 0; i
< length
; i
++) {
627 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
628 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
629 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
630 ((offset
+ i
) & OTP_ADDR2_10_3
));
632 ret
= lan78xx_write_reg(dev
, OTP_FUNC_CMD
, OTP_FUNC_CMD_READ_
);
633 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
635 timeout
= jiffies
+ HZ
;
638 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
639 if (time_after(jiffies
, timeout
)) {
640 netdev_warn(dev
->net
,
641 "timeout on OTP_STATUS");
644 } while (buf
& OTP_STATUS_BUSY_
);
646 ret
= lan78xx_read_reg(dev
, OTP_RD_DATA
, &buf
);
648 data
[i
] = (u8
)(buf
& 0xFF);
654 static int lan78xx_write_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
655 u32 length
, u8
*data
)
660 unsigned long timeout
;
662 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
664 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
665 /* clear it and wait to be cleared */
666 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
668 timeout
= jiffies
+ HZ
;
671 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
672 if (time_after(jiffies
, timeout
)) {
673 netdev_warn(dev
->net
,
674 "timeout on OTP_PWR_DN completion");
677 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
680 /* set to BYTE program mode */
681 ret
= lan78xx_write_reg(dev
, OTP_PRGM_MODE
, OTP_PRGM_MODE_BYTE_
);
683 for (i
= 0; i
< length
; i
++) {
684 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
685 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
686 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
687 ((offset
+ i
) & OTP_ADDR2_10_3
));
688 ret
= lan78xx_write_reg(dev
, OTP_PRGM_DATA
, data
[i
]);
689 ret
= lan78xx_write_reg(dev
, OTP_TST_CMD
, OTP_TST_CMD_PRGVRFY_
);
690 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
692 timeout
= jiffies
+ HZ
;
695 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
696 if (time_after(jiffies
, timeout
)) {
697 netdev_warn(dev
->net
,
698 "Timeout on OTP_STATUS completion");
701 } while (buf
& OTP_STATUS_BUSY_
);
707 static int lan78xx_read_otp(struct lan78xx_net
*dev
, u32 offset
,
708 u32 length
, u8
*data
)
713 ret
= lan78xx_read_raw_otp(dev
, 0, 1, &sig
);
716 if (sig
== OTP_INDICATOR_1
)
718 else if (sig
== OTP_INDICATOR_2
)
722 ret
= lan78xx_read_raw_otp(dev
, offset
, length
, data
);
728 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net
*dev
)
732 for (i
= 0; i
< 100; i
++) {
735 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
736 if (unlikely(ret
< 0))
739 if (dp_sel
& DP_SEL_DPRDY_
)
742 usleep_range(40, 100);
745 netdev_warn(dev
->net
, "lan78xx_dataport_wait_not_busy timed out");
750 static int lan78xx_dataport_write(struct lan78xx_net
*dev
, u32 ram_select
,
751 u32 addr
, u32 length
, u32
*buf
)
753 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
757 if (usb_autopm_get_interface(dev
->intf
) < 0)
760 mutex_lock(&pdata
->dataport_mutex
);
762 ret
= lan78xx_dataport_wait_not_busy(dev
);
766 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
768 dp_sel
&= ~DP_SEL_RSEL_MASK_
;
769 dp_sel
|= ram_select
;
770 ret
= lan78xx_write_reg(dev
, DP_SEL
, dp_sel
);
772 for (i
= 0; i
< length
; i
++) {
773 ret
= lan78xx_write_reg(dev
, DP_ADDR
, addr
+ i
);
775 ret
= lan78xx_write_reg(dev
, DP_DATA
, buf
[i
]);
777 ret
= lan78xx_write_reg(dev
, DP_CMD
, DP_CMD_WRITE_
);
779 ret
= lan78xx_dataport_wait_not_busy(dev
);
785 mutex_unlock(&pdata
->dataport_mutex
);
786 usb_autopm_put_interface(dev
->intf
);
791 static void lan78xx_set_addr_filter(struct lan78xx_priv
*pdata
,
792 int index
, u8 addr
[ETH_ALEN
])
796 if ((pdata
) && (index
> 0) && (index
< NUM_OF_MAF
)) {
798 temp
= addr
[2] | (temp
<< 8);
799 temp
= addr
[1] | (temp
<< 8);
800 temp
= addr
[0] | (temp
<< 8);
801 pdata
->pfilter_table
[index
][1] = temp
;
803 temp
= addr
[4] | (temp
<< 8);
804 temp
|= MAF_HI_VALID_
| MAF_HI_TYPE_DST_
;
805 pdata
->pfilter_table
[index
][0] = temp
;
809 /* returns hash bit number for given MAC address */
810 static inline u32
lan78xx_hash(char addr
[ETH_ALEN
])
812 return (ether_crc(ETH_ALEN
, addr
) >> 23) & 0x1ff;
815 static void lan78xx_deferred_multicast_write(struct work_struct
*param
)
817 struct lan78xx_priv
*pdata
=
818 container_of(param
, struct lan78xx_priv
, set_multicast
);
819 struct lan78xx_net
*dev
= pdata
->dev
;
823 netif_dbg(dev
, drv
, dev
->net
, "deferred multicast write 0x%08x\n",
826 lan78xx_dataport_write(dev
, DP_SEL_RSEL_VLAN_DA_
, DP_SEL_VHF_VLAN_LEN
,
827 DP_SEL_VHF_HASH_LEN
, pdata
->mchash_table
);
829 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
830 ret
= lan78xx_write_reg(dev
, MAF_HI(i
), 0);
831 ret
= lan78xx_write_reg(dev
, MAF_LO(i
),
832 pdata
->pfilter_table
[i
][1]);
833 ret
= lan78xx_write_reg(dev
, MAF_HI(i
),
834 pdata
->pfilter_table
[i
][0]);
837 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
840 static void lan78xx_set_multicast(struct net_device
*netdev
)
842 struct lan78xx_net
*dev
= netdev_priv(netdev
);
843 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
847 spin_lock_irqsave(&pdata
->rfe_ctl_lock
, flags
);
849 pdata
->rfe_ctl
&= ~(RFE_CTL_UCAST_EN_
| RFE_CTL_MCAST_EN_
|
850 RFE_CTL_DA_PERFECT_
| RFE_CTL_MCAST_HASH_
);
852 for (i
= 0; i
< DP_SEL_VHF_HASH_LEN
; i
++)
853 pdata
->mchash_table
[i
] = 0;
854 /* pfilter_table[0] has own HW address */
855 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
856 pdata
->pfilter_table
[i
][0] =
857 pdata
->pfilter_table
[i
][1] = 0;
860 pdata
->rfe_ctl
|= RFE_CTL_BCAST_EN_
;
862 if (dev
->net
->flags
& IFF_PROMISC
) {
863 netif_dbg(dev
, drv
, dev
->net
, "promiscuous mode enabled");
864 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
| RFE_CTL_UCAST_EN_
;
866 if (dev
->net
->flags
& IFF_ALLMULTI
) {
867 netif_dbg(dev
, drv
, dev
->net
,
868 "receive all multicast enabled");
869 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
;
873 if (netdev_mc_count(dev
->net
)) {
874 struct netdev_hw_addr
*ha
;
877 netif_dbg(dev
, drv
, dev
->net
, "receive multicast hash filter");
879 pdata
->rfe_ctl
|= RFE_CTL_DA_PERFECT_
;
882 netdev_for_each_mc_addr(ha
, netdev
) {
883 /* set first 32 into Perfect Filter */
885 lan78xx_set_addr_filter(pdata
, i
, ha
->addr
);
887 u32 bitnum
= lan78xx_hash(ha
->addr
);
889 pdata
->mchash_table
[bitnum
/ 32] |=
890 (1 << (bitnum
% 32));
891 pdata
->rfe_ctl
|= RFE_CTL_MCAST_HASH_
;
897 spin_unlock_irqrestore(&pdata
->rfe_ctl_lock
, flags
);
899 /* defer register writes to a sleepable context */
900 schedule_work(&pdata
->set_multicast
);
903 static int lan78xx_update_flowcontrol(struct lan78xx_net
*dev
, u8 duplex
,
904 u16 lcladv
, u16 rmtadv
)
906 u32 flow
= 0, fct_flow
= 0;
911 cap
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
913 cap
= dev
->fc_request_control
;
915 if (cap
& FLOW_CTRL_TX
)
916 flow
|= (FLOW_CR_TX_FCEN_
| 0xFFFF);
918 if (cap
& FLOW_CTRL_RX
)
919 flow
|= FLOW_CR_RX_FCEN_
;
921 if (dev
->udev
->speed
== USB_SPEED_SUPER
)
923 else if (dev
->udev
->speed
== USB_SPEED_HIGH
)
926 netif_dbg(dev
, link
, dev
->net
, "rx pause %s, tx pause %s",
927 (cap
& FLOW_CTRL_RX
? "enabled" : "disabled"),
928 (cap
& FLOW_CTRL_TX
? "enabled" : "disabled"));
930 ret
= lan78xx_write_reg(dev
, FCT_FLOW
, fct_flow
);
932 /* threshold value should be set before enabling flow */
933 ret
= lan78xx_write_reg(dev
, FLOW
, flow
);
938 static int lan78xx_link_reset(struct lan78xx_net
*dev
)
940 struct phy_device
*phydev
= dev
->net
->phydev
;
941 struct ethtool_cmd ecmd
= { .cmd
= ETHTOOL_GSET
};
945 /* clear PHY interrupt status */
946 ret
= phy_read(phydev
, LAN88XX_INT_STS
);
947 if (unlikely(ret
< 0))
950 /* clear LAN78xx interrupt status */
951 ret
= lan78xx_write_reg(dev
, INT_STS
, INT_STS_PHY_INT_
);
952 if (unlikely(ret
< 0))
955 phy_read_status(phydev
);
957 if (!phydev
->link
&& dev
->link_on
) {
958 dev
->link_on
= false;
961 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
962 if (unlikely(ret
< 0))
965 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
966 if (unlikely(ret
< 0))
969 phy_mac_interrupt(phydev
, 0);
970 } else if (phydev
->link
&& !dev
->link_on
) {
973 phy_ethtool_gset(phydev
, &ecmd
);
975 ret
= phy_read(phydev
, LAN88XX_INT_STS
);
977 if (dev
->udev
->speed
== USB_SPEED_SUPER
) {
978 if (ethtool_cmd_speed(&ecmd
) == 1000) {
980 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
981 buf
&= ~USB_CFG1_DEV_U2_INIT_EN_
;
982 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
984 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
985 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
986 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
989 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
990 buf
|= USB_CFG1_DEV_U2_INIT_EN_
;
991 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
992 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
996 ladv
= phy_read(phydev
, MII_ADVERTISE
);
1000 radv
= phy_read(phydev
, MII_LPA
);
1004 netif_dbg(dev
, link
, dev
->net
,
1005 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1006 ethtool_cmd_speed(&ecmd
), ecmd
.duplex
, ladv
, radv
);
1008 ret
= lan78xx_update_flowcontrol(dev
, ecmd
.duplex
, ladv
, radv
);
1009 phy_mac_interrupt(phydev
, 1);
1015 /* some work can't be done in tasklets, so we use keventd
1017 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1018 * but tasklet_schedule() doesn't. hope the failure is rare.
1020 void lan78xx_defer_kevent(struct lan78xx_net
*dev
, int work
)
1022 set_bit(work
, &dev
->flags
);
1023 if (!schedule_delayed_work(&dev
->wq
, 0))
1024 netdev_err(dev
->net
, "kevent %d may have been dropped\n", work
);
1027 static void lan78xx_status(struct lan78xx_net
*dev
, struct urb
*urb
)
1031 if (urb
->actual_length
!= 4) {
1032 netdev_warn(dev
->net
,
1033 "unexpected urb length %d", urb
->actual_length
);
1037 memcpy(&intdata
, urb
->transfer_buffer
, 4);
1038 le32_to_cpus(&intdata
);
1040 if (intdata
& INT_ENP_PHY_INT
) {
1041 netif_dbg(dev
, link
, dev
->net
, "PHY INTR: 0x%08x\n", intdata
);
1042 lan78xx_defer_kevent(dev
, EVENT_LINK_RESET
);
1044 netdev_warn(dev
->net
,
1045 "unexpected interrupt: 0x%08x\n", intdata
);
1048 static int lan78xx_ethtool_get_eeprom_len(struct net_device
*netdev
)
1050 return MAX_EEPROM_SIZE
;
1053 static int lan78xx_ethtool_get_eeprom(struct net_device
*netdev
,
1054 struct ethtool_eeprom
*ee
, u8
*data
)
1056 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1058 ee
->magic
= LAN78XX_EEPROM_MAGIC
;
1060 return lan78xx_read_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1063 static int lan78xx_ethtool_set_eeprom(struct net_device
*netdev
,
1064 struct ethtool_eeprom
*ee
, u8
*data
)
1066 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1068 /* Allow entire eeprom update only */
1069 if ((ee
->magic
== LAN78XX_EEPROM_MAGIC
) &&
1070 (ee
->offset
== 0) &&
1072 (data
[0] == EEPROM_INDICATOR
))
1073 return lan78xx_write_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1074 else if ((ee
->magic
== LAN78XX_OTP_MAGIC
) &&
1075 (ee
->offset
== 0) &&
1077 (data
[0] == OTP_INDICATOR_1
))
1078 return lan78xx_write_raw_otp(dev
, ee
->offset
, ee
->len
, data
);
1083 static void lan78xx_get_strings(struct net_device
*netdev
, u32 stringset
,
1086 if (stringset
== ETH_SS_STATS
)
1087 memcpy(data
, lan78xx_gstrings
, sizeof(lan78xx_gstrings
));
1090 static int lan78xx_get_sset_count(struct net_device
*netdev
, int sset
)
1092 if (sset
== ETH_SS_STATS
)
1093 return ARRAY_SIZE(lan78xx_gstrings
);
1098 static void lan78xx_get_stats(struct net_device
*netdev
,
1099 struct ethtool_stats
*stats
, u64
*data
)
1101 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1102 struct lan78xx_statstage lan78xx_stat
;
1106 if (usb_autopm_get_interface(dev
->intf
) < 0)
1109 if (lan78xx_read_stats(dev
, &lan78xx_stat
) > 0) {
1110 p
= (u32
*)&lan78xx_stat
;
1111 for (i
= 0; i
< (sizeof(lan78xx_stat
) / (sizeof(u32
))); i
++)
1115 usb_autopm_put_interface(dev
->intf
);
1118 static void lan78xx_get_wol(struct net_device
*netdev
,
1119 struct ethtool_wolinfo
*wol
)
1121 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1124 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1126 if (usb_autopm_get_interface(dev
->intf
) < 0)
1129 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
1130 if (unlikely(ret
< 0)) {
1134 if (buf
& USB_CFG_RMT_WKP_
) {
1135 wol
->supported
= WAKE_ALL
;
1136 wol
->wolopts
= pdata
->wol
;
1143 usb_autopm_put_interface(dev
->intf
);
1146 static int lan78xx_set_wol(struct net_device
*netdev
,
1147 struct ethtool_wolinfo
*wol
)
1149 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1150 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1153 ret
= usb_autopm_get_interface(dev
->intf
);
1158 if (wol
->wolopts
& WAKE_UCAST
)
1159 pdata
->wol
|= WAKE_UCAST
;
1160 if (wol
->wolopts
& WAKE_MCAST
)
1161 pdata
->wol
|= WAKE_MCAST
;
1162 if (wol
->wolopts
& WAKE_BCAST
)
1163 pdata
->wol
|= WAKE_BCAST
;
1164 if (wol
->wolopts
& WAKE_MAGIC
)
1165 pdata
->wol
|= WAKE_MAGIC
;
1166 if (wol
->wolopts
& WAKE_PHY
)
1167 pdata
->wol
|= WAKE_PHY
;
1168 if (wol
->wolopts
& WAKE_ARP
)
1169 pdata
->wol
|= WAKE_ARP
;
1171 device_set_wakeup_enable(&dev
->udev
->dev
, (bool)wol
->wolopts
);
1173 phy_ethtool_set_wol(netdev
->phydev
, wol
);
1175 usb_autopm_put_interface(dev
->intf
);
1180 static int lan78xx_get_eee(struct net_device
*net
, struct ethtool_eee
*edata
)
1182 struct lan78xx_net
*dev
= netdev_priv(net
);
1183 struct phy_device
*phydev
= net
->phydev
;
1187 ret
= usb_autopm_get_interface(dev
->intf
);
1191 ret
= phy_ethtool_get_eee(phydev
, edata
);
1195 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1196 if (buf
& MAC_CR_EEE_EN_
) {
1197 edata
->eee_enabled
= true;
1198 edata
->eee_active
= !!(edata
->advertised
&
1199 edata
->lp_advertised
);
1200 edata
->tx_lpi_enabled
= true;
1201 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1202 ret
= lan78xx_read_reg(dev
, EEE_TX_LPI_REQ_DLY
, &buf
);
1203 edata
->tx_lpi_timer
= buf
;
1205 edata
->eee_enabled
= false;
1206 edata
->eee_active
= false;
1207 edata
->tx_lpi_enabled
= false;
1208 edata
->tx_lpi_timer
= 0;
1213 usb_autopm_put_interface(dev
->intf
);
1218 static int lan78xx_set_eee(struct net_device
*net
, struct ethtool_eee
*edata
)
1220 struct lan78xx_net
*dev
= netdev_priv(net
);
1224 ret
= usb_autopm_get_interface(dev
->intf
);
1228 if (edata
->eee_enabled
) {
1229 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1230 buf
|= MAC_CR_EEE_EN_
;
1231 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1233 phy_ethtool_set_eee(net
->phydev
, edata
);
1235 buf
= (u32
)edata
->tx_lpi_timer
;
1236 ret
= lan78xx_write_reg(dev
, EEE_TX_LPI_REQ_DLY
, buf
);
1238 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1239 buf
&= ~MAC_CR_EEE_EN_
;
1240 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1243 usb_autopm_put_interface(dev
->intf
);
1248 static u32
lan78xx_get_link(struct net_device
*net
)
1250 phy_read_status(net
->phydev
);
1252 return net
->phydev
->link
;
1255 int lan78xx_nway_reset(struct net_device
*net
)
1257 return phy_start_aneg(net
->phydev
);
1260 static void lan78xx_get_drvinfo(struct net_device
*net
,
1261 struct ethtool_drvinfo
*info
)
1263 struct lan78xx_net
*dev
= netdev_priv(net
);
1265 strncpy(info
->driver
, DRIVER_NAME
, sizeof(info
->driver
));
1266 strncpy(info
->version
, DRIVER_VERSION
, sizeof(info
->version
));
1267 usb_make_path(dev
->udev
, info
->bus_info
, sizeof(info
->bus_info
));
1270 static u32
lan78xx_get_msglevel(struct net_device
*net
)
1272 struct lan78xx_net
*dev
= netdev_priv(net
);
1274 return dev
->msg_enable
;
1277 static void lan78xx_set_msglevel(struct net_device
*net
, u32 level
)
1279 struct lan78xx_net
*dev
= netdev_priv(net
);
1281 dev
->msg_enable
= level
;
1284 static int lan78xx_get_mdix_status(struct net_device
*net
)
1286 struct phy_device
*phydev
= net
->phydev
;
1289 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
, LAN88XX_EXT_PAGE_SPACE_1
);
1290 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1291 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
, LAN88XX_EXT_PAGE_SPACE_0
);
1296 static void lan78xx_set_mdix_status(struct net_device
*net
, __u8 mdix_ctrl
)
1298 struct lan78xx_net
*dev
= netdev_priv(net
);
1299 struct phy_device
*phydev
= net
->phydev
;
1302 if (mdix_ctrl
== ETH_TP_MDI
) {
1303 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1304 LAN88XX_EXT_PAGE_SPACE_1
);
1305 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1306 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1307 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1308 buf
| LAN88XX_EXT_MODE_CTRL_MDI_
);
1309 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1310 LAN88XX_EXT_PAGE_SPACE_0
);
1311 } else if (mdix_ctrl
== ETH_TP_MDI_X
) {
1312 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1313 LAN88XX_EXT_PAGE_SPACE_1
);
1314 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1315 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1316 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1317 buf
| LAN88XX_EXT_MODE_CTRL_MDI_X_
);
1318 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1319 LAN88XX_EXT_PAGE_SPACE_0
);
1320 } else if (mdix_ctrl
== ETH_TP_MDI_AUTO
) {
1321 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1322 LAN88XX_EXT_PAGE_SPACE_1
);
1323 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1324 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1325 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1326 buf
| LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_
);
1327 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1328 LAN88XX_EXT_PAGE_SPACE_0
);
1330 dev
->mdix_ctrl
= mdix_ctrl
;
1333 static int lan78xx_get_settings(struct net_device
*net
, struct ethtool_cmd
*cmd
)
1335 struct lan78xx_net
*dev
= netdev_priv(net
);
1336 struct phy_device
*phydev
= net
->phydev
;
1340 ret
= usb_autopm_get_interface(dev
->intf
);
1344 ret
= phy_ethtool_gset(phydev
, cmd
);
1346 buf
= lan78xx_get_mdix_status(net
);
1348 buf
&= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1349 if (buf
== LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_
) {
1350 cmd
->eth_tp_mdix
= ETH_TP_MDI_AUTO
;
1351 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI_AUTO
;
1352 } else if (buf
== LAN88XX_EXT_MODE_CTRL_MDI_
) {
1353 cmd
->eth_tp_mdix
= ETH_TP_MDI
;
1354 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI
;
1355 } else if (buf
== LAN88XX_EXT_MODE_CTRL_MDI_X_
) {
1356 cmd
->eth_tp_mdix
= ETH_TP_MDI_X
;
1357 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI_X
;
1360 usb_autopm_put_interface(dev
->intf
);
1365 static int lan78xx_set_settings(struct net_device
*net
, struct ethtool_cmd
*cmd
)
1367 struct lan78xx_net
*dev
= netdev_priv(net
);
1368 struct phy_device
*phydev
= net
->phydev
;
1372 ret
= usb_autopm_get_interface(dev
->intf
);
1376 if (dev
->mdix_ctrl
!= cmd
->eth_tp_mdix_ctrl
) {
1377 lan78xx_set_mdix_status(net
, cmd
->eth_tp_mdix_ctrl
);
1380 /* change speed & duplex */
1381 ret
= phy_ethtool_sset(phydev
, cmd
);
1383 if (!cmd
->autoneg
) {
1384 /* force link down */
1385 temp
= phy_read(phydev
, MII_BMCR
);
1386 phy_write(phydev
, MII_BMCR
, temp
| BMCR_LOOPBACK
);
1388 phy_write(phydev
, MII_BMCR
, temp
);
1391 usb_autopm_put_interface(dev
->intf
);
1396 static void lan78xx_get_pause(struct net_device
*net
,
1397 struct ethtool_pauseparam
*pause
)
1399 struct lan78xx_net
*dev
= netdev_priv(net
);
1400 struct phy_device
*phydev
= net
->phydev
;
1401 struct ethtool_cmd ecmd
= { .cmd
= ETHTOOL_GSET
};
1403 phy_ethtool_gset(phydev
, &ecmd
);
1405 pause
->autoneg
= dev
->fc_autoneg
;
1407 if (dev
->fc_request_control
& FLOW_CTRL_TX
)
1408 pause
->tx_pause
= 1;
1410 if (dev
->fc_request_control
& FLOW_CTRL_RX
)
1411 pause
->rx_pause
= 1;
1414 static int lan78xx_set_pause(struct net_device
*net
,
1415 struct ethtool_pauseparam
*pause
)
1417 struct lan78xx_net
*dev
= netdev_priv(net
);
1418 struct phy_device
*phydev
= net
->phydev
;
1419 struct ethtool_cmd ecmd
= { .cmd
= ETHTOOL_GSET
};
1422 phy_ethtool_gset(phydev
, &ecmd
);
1424 if (pause
->autoneg
&& !ecmd
.autoneg
) {
1429 dev
->fc_request_control
= 0;
1430 if (pause
->rx_pause
)
1431 dev
->fc_request_control
|= FLOW_CTRL_RX
;
1433 if (pause
->tx_pause
)
1434 dev
->fc_request_control
|= FLOW_CTRL_TX
;
1439 ecmd
.advertising
&= ~(ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
1440 mii_adv
= (u32
)mii_advertise_flowctrl(dev
->fc_request_control
);
1441 ecmd
.advertising
|= mii_adv_to_ethtool_adv_t(mii_adv
);
1442 phy_ethtool_sset(phydev
, &ecmd
);
1445 dev
->fc_autoneg
= pause
->autoneg
;
1452 static const struct ethtool_ops lan78xx_ethtool_ops
= {
1453 .get_link
= lan78xx_get_link
,
1454 .nway_reset
= lan78xx_nway_reset
,
1455 .get_drvinfo
= lan78xx_get_drvinfo
,
1456 .get_msglevel
= lan78xx_get_msglevel
,
1457 .set_msglevel
= lan78xx_set_msglevel
,
1458 .get_settings
= lan78xx_get_settings
,
1459 .set_settings
= lan78xx_set_settings
,
1460 .get_eeprom_len
= lan78xx_ethtool_get_eeprom_len
,
1461 .get_eeprom
= lan78xx_ethtool_get_eeprom
,
1462 .set_eeprom
= lan78xx_ethtool_set_eeprom
,
1463 .get_ethtool_stats
= lan78xx_get_stats
,
1464 .get_sset_count
= lan78xx_get_sset_count
,
1465 .get_strings
= lan78xx_get_strings
,
1466 .get_wol
= lan78xx_get_wol
,
1467 .set_wol
= lan78xx_set_wol
,
1468 .get_eee
= lan78xx_get_eee
,
1469 .set_eee
= lan78xx_set_eee
,
1470 .get_pauseparam
= lan78xx_get_pause
,
1471 .set_pauseparam
= lan78xx_set_pause
,
1474 static int lan78xx_ioctl(struct net_device
*netdev
, struct ifreq
*rq
, int cmd
)
1476 if (!netif_running(netdev
))
1479 return phy_mii_ioctl(netdev
->phydev
, rq
, cmd
);
1482 static void lan78xx_init_mac_address(struct lan78xx_net
*dev
)
1484 u32 addr_lo
, addr_hi
;
1488 ret
= lan78xx_read_reg(dev
, RX_ADDRL
, &addr_lo
);
1489 ret
= lan78xx_read_reg(dev
, RX_ADDRH
, &addr_hi
);
1491 addr
[0] = addr_lo
& 0xFF;
1492 addr
[1] = (addr_lo
>> 8) & 0xFF;
1493 addr
[2] = (addr_lo
>> 16) & 0xFF;
1494 addr
[3] = (addr_lo
>> 24) & 0xFF;
1495 addr
[4] = addr_hi
& 0xFF;
1496 addr
[5] = (addr_hi
>> 8) & 0xFF;
1498 if (!is_valid_ether_addr(addr
)) {
1499 /* reading mac address from EEPROM or OTP */
1500 if ((lan78xx_read_eeprom(dev
, EEPROM_MAC_OFFSET
, ETH_ALEN
,
1502 (lan78xx_read_otp(dev
, EEPROM_MAC_OFFSET
, ETH_ALEN
,
1504 if (is_valid_ether_addr(addr
)) {
1505 /* eeprom values are valid so use them */
1506 netif_dbg(dev
, ifup
, dev
->net
,
1507 "MAC address read from EEPROM");
1509 /* generate random MAC */
1510 random_ether_addr(addr
);
1511 netif_dbg(dev
, ifup
, dev
->net
,
1512 "MAC address set to random addr");
1515 addr_lo
= addr
[0] | (addr
[1] << 8) |
1516 (addr
[2] << 16) | (addr
[3] << 24);
1517 addr_hi
= addr
[4] | (addr
[5] << 8);
1519 ret
= lan78xx_write_reg(dev
, RX_ADDRL
, addr_lo
);
1520 ret
= lan78xx_write_reg(dev
, RX_ADDRH
, addr_hi
);
1522 /* generate random MAC */
1523 random_ether_addr(addr
);
1524 netif_dbg(dev
, ifup
, dev
->net
,
1525 "MAC address set to random addr");
1529 ret
= lan78xx_write_reg(dev
, MAF_LO(0), addr_lo
);
1530 ret
= lan78xx_write_reg(dev
, MAF_HI(0), addr_hi
| MAF_HI_VALID_
);
1532 ether_addr_copy(dev
->net
->dev_addr
, addr
);
1535 /* MDIO read and write wrappers for phylib */
1536 static int lan78xx_mdiobus_read(struct mii_bus
*bus
, int phy_id
, int idx
)
1538 struct lan78xx_net
*dev
= bus
->priv
;
1542 ret
= usb_autopm_get_interface(dev
->intf
);
1546 mutex_lock(&dev
->phy_mutex
);
1548 /* confirm MII not busy */
1549 ret
= lan78xx_phy_wait_not_busy(dev
);
1553 /* set the address, index & direction (read from PHY) */
1554 addr
= mii_access(phy_id
, idx
, MII_READ
);
1555 ret
= lan78xx_write_reg(dev
, MII_ACC
, addr
);
1557 ret
= lan78xx_phy_wait_not_busy(dev
);
1561 ret
= lan78xx_read_reg(dev
, MII_DATA
, &val
);
1563 ret
= (int)(val
& 0xFFFF);
1566 mutex_unlock(&dev
->phy_mutex
);
1567 usb_autopm_put_interface(dev
->intf
);
1571 static int lan78xx_mdiobus_write(struct mii_bus
*bus
, int phy_id
, int idx
,
1574 struct lan78xx_net
*dev
= bus
->priv
;
1578 ret
= usb_autopm_get_interface(dev
->intf
);
1582 mutex_lock(&dev
->phy_mutex
);
1584 /* confirm MII not busy */
1585 ret
= lan78xx_phy_wait_not_busy(dev
);
1590 ret
= lan78xx_write_reg(dev
, MII_DATA
, val
);
1592 /* set the address, index & direction (write to PHY) */
1593 addr
= mii_access(phy_id
, idx
, MII_WRITE
);
1594 ret
= lan78xx_write_reg(dev
, MII_ACC
, addr
);
1596 ret
= lan78xx_phy_wait_not_busy(dev
);
1601 mutex_unlock(&dev
->phy_mutex
);
1602 usb_autopm_put_interface(dev
->intf
);
1606 static int lan78xx_mdio_init(struct lan78xx_net
*dev
)
1610 dev
->mdiobus
= mdiobus_alloc();
1611 if (!dev
->mdiobus
) {
1612 netdev_err(dev
->net
, "can't allocate MDIO bus\n");
1616 dev
->mdiobus
->priv
= (void *)dev
;
1617 dev
->mdiobus
->read
= lan78xx_mdiobus_read
;
1618 dev
->mdiobus
->write
= lan78xx_mdiobus_write
;
1619 dev
->mdiobus
->name
= "lan78xx-mdiobus";
1621 snprintf(dev
->mdiobus
->id
, MII_BUS_ID_SIZE
, "usb-%03d:%03d",
1622 dev
->udev
->bus
->busnum
, dev
->udev
->devnum
);
1624 switch (dev
->chipid
) {
1625 case ID_REV_CHIP_ID_7800_
:
1626 case ID_REV_CHIP_ID_7850_
:
1627 /* set to internal PHY id */
1628 dev
->mdiobus
->phy_mask
= ~(1 << 1);
1632 ret
= mdiobus_register(dev
->mdiobus
);
1634 netdev_err(dev
->net
, "can't register MDIO bus\n");
1638 netdev_dbg(dev
->net
, "registered mdiobus bus %s\n", dev
->mdiobus
->id
);
1641 mdiobus_free(dev
->mdiobus
);
1645 static void lan78xx_remove_mdio(struct lan78xx_net
*dev
)
1647 mdiobus_unregister(dev
->mdiobus
);
1648 mdiobus_free(dev
->mdiobus
);
static void lan78xx_link_status_change(struct net_device *net)
{
	/* Link changes are driven by the device interrupt endpoint and
	 * the EVENT_LINK_RESET deferred work, not by this phylib
	 * callback.
	 * NOTE(review): body reconstructed from a corrupted source view
	 * as empty — confirm against the original file.
	 */
}
1656 static int lan78xx_phy_init(struct lan78xx_net
*dev
)
1660 struct phy_device
*phydev
= dev
->net
->phydev
;
1662 phydev
= phy_find_first(dev
->mdiobus
);
1664 netdev_err(dev
->net
, "no PHY found\n");
1668 /* Enable PHY interrupts.
1669 * We handle our own interrupt
1671 ret
= phy_read(phydev
, LAN88XX_INT_STS
);
1672 ret
= phy_write(phydev
, LAN88XX_INT_MASK
,
1673 LAN88XX_INT_MASK_MDINTPIN_EN_
|
1674 LAN88XX_INT_MASK_LINK_CHANGE_
);
1676 phydev
->irq
= PHY_IGNORE_INTERRUPT
;
1678 ret
= phy_connect_direct(dev
->net
, phydev
,
1679 lan78xx_link_status_change
,
1680 PHY_INTERFACE_MODE_GMII
);
1682 netdev_err(dev
->net
, "can't attach PHY to %s\n",
1687 /* set to AUTOMDIX */
1688 lan78xx_set_mdix_status(dev
->net
, ETH_TP_MDI_AUTO
);
1690 /* MAC doesn't support 1000T Half */
1691 phydev
->supported
&= ~SUPPORTED_1000baseT_Half
;
1693 /* support both flow controls */
1694 dev
->fc_request_control
= (FLOW_CTRL_RX
| FLOW_CTRL_TX
);
1695 phydev
->advertising
&= ~(ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
1696 mii_adv
= (u32
)mii_advertise_flowctrl(dev
->fc_request_control
);
1697 phydev
->advertising
|= mii_adv_to_ethtool_adv_t(mii_adv
);
1699 genphy_config_aneg(phydev
);
1701 dev
->fc_autoneg
= phydev
->autoneg
;
1705 netif_dbg(dev
, ifup
, dev
->net
, "phy initialised successfully");
1710 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net
*dev
, int size
)
1716 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
1718 rxenabled
= ((buf
& MAC_RX_RXEN_
) != 0);
1721 buf
&= ~MAC_RX_RXEN_
;
1722 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
1725 /* add 4 to size for FCS */
1726 buf
&= ~MAC_RX_MAX_SIZE_MASK_
;
1727 buf
|= (((size
+ 4) << MAC_RX_MAX_SIZE_SHIFT_
) & MAC_RX_MAX_SIZE_MASK_
);
1729 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
1732 buf
|= MAC_RX_RXEN_
;
1733 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
1739 static int unlink_urbs(struct lan78xx_net
*dev
, struct sk_buff_head
*q
)
1741 struct sk_buff
*skb
;
1742 unsigned long flags
;
1745 spin_lock_irqsave(&q
->lock
, flags
);
1746 while (!skb_queue_empty(q
)) {
1747 struct skb_data
*entry
;
1751 skb_queue_walk(q
, skb
) {
1752 entry
= (struct skb_data
*)skb
->cb
;
1753 if (entry
->state
!= unlink_start
)
1758 entry
->state
= unlink_start
;
1761 /* Get reference count of the URB to avoid it to be
1762 * freed during usb_unlink_urb, which may trigger
1763 * use-after-free problem inside usb_unlink_urb since
1764 * usb_unlink_urb is always racing with .complete
1765 * handler(include defer_bh).
1768 spin_unlock_irqrestore(&q
->lock
, flags
);
1769 /* during some PM-driven resume scenarios,
1770 * these (async) unlinks complete immediately
1772 ret
= usb_unlink_urb(urb
);
1773 if (ret
!= -EINPROGRESS
&& ret
!= 0)
1774 netdev_dbg(dev
->net
, "unlink urb err, %d\n", ret
);
1778 spin_lock_irqsave(&q
->lock
, flags
);
1780 spin_unlock_irqrestore(&q
->lock
, flags
);
1784 static int lan78xx_change_mtu(struct net_device
*netdev
, int new_mtu
)
1786 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1787 int ll_mtu
= new_mtu
+ netdev
->hard_header_len
;
1788 int old_hard_mtu
= dev
->hard_mtu
;
1789 int old_rx_urb_size
= dev
->rx_urb_size
;
1792 if (new_mtu
> MAX_SINGLE_PACKET_SIZE
)
1797 /* no second zero-length packet read wanted after mtu-sized packets */
1798 if ((ll_mtu
% dev
->maxpacket
) == 0)
1801 ret
= lan78xx_set_rx_max_frame_length(dev
, new_mtu
+ ETH_HLEN
);
1803 netdev
->mtu
= new_mtu
;
1805 dev
->hard_mtu
= netdev
->mtu
+ netdev
->hard_header_len
;
1806 if (dev
->rx_urb_size
== old_hard_mtu
) {
1807 dev
->rx_urb_size
= dev
->hard_mtu
;
1808 if (dev
->rx_urb_size
> old_rx_urb_size
) {
1809 if (netif_running(dev
->net
)) {
1810 unlink_urbs(dev
, &dev
->rxq
);
1811 tasklet_schedule(&dev
->bh
);
1819 int lan78xx_set_mac_addr(struct net_device
*netdev
, void *p
)
1821 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1822 struct sockaddr
*addr
= p
;
1823 u32 addr_lo
, addr_hi
;
1826 if (netif_running(netdev
))
1829 if (!is_valid_ether_addr(addr
->sa_data
))
1830 return -EADDRNOTAVAIL
;
1832 ether_addr_copy(netdev
->dev_addr
, addr
->sa_data
);
1834 addr_lo
= netdev
->dev_addr
[0] |
1835 netdev
->dev_addr
[1] << 8 |
1836 netdev
->dev_addr
[2] << 16 |
1837 netdev
->dev_addr
[3] << 24;
1838 addr_hi
= netdev
->dev_addr
[4] |
1839 netdev
->dev_addr
[5] << 8;
1841 ret
= lan78xx_write_reg(dev
, RX_ADDRL
, addr_lo
);
1842 ret
= lan78xx_write_reg(dev
, RX_ADDRH
, addr_hi
);
1847 /* Enable or disable Rx checksum offload engine */
1848 static int lan78xx_set_features(struct net_device
*netdev
,
1849 netdev_features_t features
)
1851 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1852 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1853 unsigned long flags
;
1856 spin_lock_irqsave(&pdata
->rfe_ctl_lock
, flags
);
1858 if (features
& NETIF_F_RXCSUM
) {
1859 pdata
->rfe_ctl
|= RFE_CTL_TCPUDP_COE_
| RFE_CTL_IP_COE_
;
1860 pdata
->rfe_ctl
|= RFE_CTL_ICMP_COE_
| RFE_CTL_IGMP_COE_
;
1862 pdata
->rfe_ctl
&= ~(RFE_CTL_TCPUDP_COE_
| RFE_CTL_IP_COE_
);
1863 pdata
->rfe_ctl
&= ~(RFE_CTL_ICMP_COE_
| RFE_CTL_IGMP_COE_
);
1866 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1867 pdata
->rfe_ctl
|= RFE_CTL_VLAN_FILTER_
;
1869 pdata
->rfe_ctl
&= ~RFE_CTL_VLAN_FILTER_
;
1871 spin_unlock_irqrestore(&pdata
->rfe_ctl_lock
, flags
);
1873 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
1878 static void lan78xx_deferred_vlan_write(struct work_struct
*param
)
1880 struct lan78xx_priv
*pdata
=
1881 container_of(param
, struct lan78xx_priv
, set_vlan
);
1882 struct lan78xx_net
*dev
= pdata
->dev
;
1884 lan78xx_dataport_write(dev
, DP_SEL_RSEL_VLAN_DA_
, 0,
1885 DP_SEL_VHF_VLAN_LEN
, pdata
->vlan_table
);
1888 static int lan78xx_vlan_rx_add_vid(struct net_device
*netdev
,
1889 __be16 proto
, u16 vid
)
1891 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1892 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1894 u16 vid_dword_index
;
1896 vid_dword_index
= (vid
>> 5) & 0x7F;
1897 vid_bit_index
= vid
& 0x1F;
1899 pdata
->vlan_table
[vid_dword_index
] |= (1 << vid_bit_index
);
1901 /* defer register writes to a sleepable context */
1902 schedule_work(&pdata
->set_vlan
);
1907 static int lan78xx_vlan_rx_kill_vid(struct net_device
*netdev
,
1908 __be16 proto
, u16 vid
)
1910 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1911 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1913 u16 vid_dword_index
;
1915 vid_dword_index
= (vid
>> 5) & 0x7F;
1916 vid_bit_index
= vid
& 0x1F;
1918 pdata
->vlan_table
[vid_dword_index
] &= ~(1 << vid_bit_index
);
1920 /* defer register writes to a sleepable context */
1921 schedule_work(&pdata
->set_vlan
);
1926 static void lan78xx_init_ltm(struct lan78xx_net
*dev
)
1930 u32 regs
[6] = { 0 };
1932 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
1933 if (buf
& USB_CFG1_LTM_ENABLE_
) {
1935 /* Get values from EEPROM first */
1936 if (lan78xx_read_eeprom(dev
, 0x3F, 2, temp
) == 0) {
1937 if (temp
[0] == 24) {
1938 ret
= lan78xx_read_raw_eeprom(dev
,
1945 } else if (lan78xx_read_otp(dev
, 0x3F, 2, temp
) == 0) {
1946 if (temp
[0] == 24) {
1947 ret
= lan78xx_read_raw_otp(dev
,
1957 lan78xx_write_reg(dev
, LTM_BELT_IDLE0
, regs
[0]);
1958 lan78xx_write_reg(dev
, LTM_BELT_IDLE1
, regs
[1]);
1959 lan78xx_write_reg(dev
, LTM_BELT_ACT0
, regs
[2]);
1960 lan78xx_write_reg(dev
, LTM_BELT_ACT1
, regs
[3]);
1961 lan78xx_write_reg(dev
, LTM_INACTIVE0
, regs
[4]);
1962 lan78xx_write_reg(dev
, LTM_INACTIVE1
, regs
[5]);
1965 static int lan78xx_reset(struct lan78xx_net
*dev
)
1967 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1970 unsigned long timeout
;
1972 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
1973 buf
|= HW_CFG_LRST_
;
1974 ret
= lan78xx_write_reg(dev
, HW_CFG
, buf
);
1976 timeout
= jiffies
+ HZ
;
1979 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
1980 if (time_after(jiffies
, timeout
)) {
1981 netdev_warn(dev
->net
,
1982 "timeout on completion of LiteReset");
1985 } while (buf
& HW_CFG_LRST_
);
1987 lan78xx_init_mac_address(dev
);
1989 /* save DEVID for later usage */
1990 ret
= lan78xx_read_reg(dev
, ID_REV
, &buf
);
1991 dev
->chipid
= (buf
& ID_REV_CHIP_ID_MASK_
) >> 16;
1992 dev
->chiprev
= buf
& ID_REV_CHIP_REV_MASK_
;
1994 /* Respond to the IN token with a NAK */
1995 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
1996 buf
|= USB_CFG_BIR_
;
1997 ret
= lan78xx_write_reg(dev
, USB_CFG0
, buf
);
2000 lan78xx_init_ltm(dev
);
2002 dev
->net
->hard_header_len
+= TX_OVERHEAD
;
2003 dev
->hard_mtu
= dev
->net
->mtu
+ dev
->net
->hard_header_len
;
2005 if (dev
->udev
->speed
== USB_SPEED_SUPER
) {
2006 buf
= DEFAULT_BURST_CAP_SIZE
/ SS_USB_PKT_SIZE
;
2007 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
2010 } else if (dev
->udev
->speed
== USB_SPEED_HIGH
) {
2011 buf
= DEFAULT_BURST_CAP_SIZE
/ HS_USB_PKT_SIZE
;
2012 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
2013 dev
->rx_qlen
= RX_MAX_QUEUE_MEMORY
/ dev
->rx_urb_size
;
2014 dev
->tx_qlen
= RX_MAX_QUEUE_MEMORY
/ dev
->hard_mtu
;
2016 buf
= DEFAULT_BURST_CAP_SIZE
/ FS_USB_PKT_SIZE
;
2017 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
2021 ret
= lan78xx_write_reg(dev
, BURST_CAP
, buf
);
2022 ret
= lan78xx_write_reg(dev
, BULK_IN_DLY
, DEFAULT_BULK_IN_DELAY
);
2024 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
2026 ret
= lan78xx_write_reg(dev
, HW_CFG
, buf
);
2028 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
2029 buf
|= USB_CFG_BCE_
;
2030 ret
= lan78xx_write_reg(dev
, USB_CFG0
, buf
);
2032 /* set FIFO sizes */
2033 buf
= (MAX_RX_FIFO_SIZE
- 512) / 512;
2034 ret
= lan78xx_write_reg(dev
, FCT_RX_FIFO_END
, buf
);
2036 buf
= (MAX_TX_FIFO_SIZE
- 512) / 512;
2037 ret
= lan78xx_write_reg(dev
, FCT_TX_FIFO_END
, buf
);
2039 ret
= lan78xx_write_reg(dev
, INT_STS
, INT_STS_CLEAR_ALL_
);
2040 ret
= lan78xx_write_reg(dev
, FLOW
, 0);
2041 ret
= lan78xx_write_reg(dev
, FCT_FLOW
, 0);
2043 /* Don't need rfe_ctl_lock during initialisation */
2044 ret
= lan78xx_read_reg(dev
, RFE_CTL
, &pdata
->rfe_ctl
);
2045 pdata
->rfe_ctl
|= RFE_CTL_BCAST_EN_
| RFE_CTL_DA_PERFECT_
;
2046 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
2048 /* Enable or disable checksum offload engines */
2049 lan78xx_set_features(dev
->net
, dev
->net
->features
);
2051 lan78xx_set_multicast(dev
->net
);
2054 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
2055 buf
|= PMT_CTL_PHY_RST_
;
2056 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
2058 timeout
= jiffies
+ HZ
;
2061 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
2062 if (time_after(jiffies
, timeout
)) {
2063 netdev_warn(dev
->net
, "timeout waiting for PHY Reset");
2066 } while ((buf
& PMT_CTL_PHY_RST_
) || !(buf
& PMT_CTL_READY_
));
2068 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
2069 buf
|= MAC_CR_AUTO_DUPLEX_
| MAC_CR_AUTO_SPEED_
;
2070 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
2072 /* enable PHY interrupts */
2073 ret
= lan78xx_read_reg(dev
, INT_EP_CTL
, &buf
);
2074 buf
|= INT_ENP_PHY_INT
;
2075 ret
= lan78xx_write_reg(dev
, INT_EP_CTL
, buf
);
2077 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
2078 buf
|= MAC_TX_TXEN_
;
2079 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
2081 ret
= lan78xx_read_reg(dev
, FCT_TX_CTL
, &buf
);
2082 buf
|= FCT_TX_CTL_EN_
;
2083 ret
= lan78xx_write_reg(dev
, FCT_TX_CTL
, buf
);
2085 ret
= lan78xx_set_rx_max_frame_length(dev
, dev
->net
->mtu
+ ETH_HLEN
);
2087 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
2088 buf
|= MAC_RX_RXEN_
;
2089 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
2091 ret
= lan78xx_read_reg(dev
, FCT_RX_CTL
, &buf
);
2092 buf
|= FCT_RX_CTL_EN_
;
2093 ret
= lan78xx_write_reg(dev
, FCT_RX_CTL
, buf
);
2098 static int lan78xx_open(struct net_device
*net
)
2100 struct lan78xx_net
*dev
= netdev_priv(net
);
2103 ret
= usb_autopm_get_interface(dev
->intf
);
2107 ret
= lan78xx_reset(dev
);
2111 ret
= lan78xx_phy_init(dev
);
2115 /* for Link Check */
2116 if (dev
->urb_intr
) {
2117 ret
= usb_submit_urb(dev
->urb_intr
, GFP_KERNEL
);
2119 netif_err(dev
, ifup
, dev
->net
,
2120 "intr submit %d\n", ret
);
2125 set_bit(EVENT_DEV_OPEN
, &dev
->flags
);
2127 netif_start_queue(net
);
2129 dev
->link_on
= false;
2131 lan78xx_defer_kevent(dev
, EVENT_LINK_RESET
);
2133 usb_autopm_put_interface(dev
->intf
);
2139 static void lan78xx_terminate_urbs(struct lan78xx_net
*dev
)
2141 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup
);
2142 DECLARE_WAITQUEUE(wait
, current
);
2145 /* ensure there are no more active urbs */
2146 add_wait_queue(&unlink_wakeup
, &wait
);
2147 set_current_state(TASK_UNINTERRUPTIBLE
);
2148 dev
->wait
= &unlink_wakeup
;
2149 temp
= unlink_urbs(dev
, &dev
->txq
) + unlink_urbs(dev
, &dev
->rxq
);
2151 /* maybe wait for deletions to finish. */
2152 while (!skb_queue_empty(&dev
->rxq
) &&
2153 !skb_queue_empty(&dev
->txq
) &&
2154 !skb_queue_empty(&dev
->done
)) {
2155 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS
));
2156 set_current_state(TASK_UNINTERRUPTIBLE
);
2157 netif_dbg(dev
, ifdown
, dev
->net
,
2158 "waited for %d urb completions\n", temp
);
2160 set_current_state(TASK_RUNNING
);
2162 remove_wait_queue(&unlink_wakeup
, &wait
);
2165 int lan78xx_stop(struct net_device
*net
)
2167 struct lan78xx_net
*dev
= netdev_priv(net
);
2169 phy_stop(net
->phydev
);
2170 phy_disconnect(net
->phydev
);
2173 clear_bit(EVENT_DEV_OPEN
, &dev
->flags
);
2174 netif_stop_queue(net
);
2176 netif_info(dev
, ifdown
, dev
->net
,
2177 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2178 net
->stats
.rx_packets
, net
->stats
.tx_packets
,
2179 net
->stats
.rx_errors
, net
->stats
.tx_errors
);
2181 lan78xx_terminate_urbs(dev
);
2183 usb_kill_urb(dev
->urb_intr
);
2185 skb_queue_purge(&dev
->rxq_pause
);
2187 /* deferred work (task, timer, softirq) must also stop.
2188 * can't flush_scheduled_work() until we drop rtnl (later),
2189 * else workers could deadlock; so make workers a NOP.
2192 cancel_delayed_work_sync(&dev
->wq
);
2193 tasklet_kill(&dev
->bh
);
2195 usb_autopm_put_interface(dev
->intf
);
/* Flatten any paged skb data into the linear buffer; the TX path
 * submits a single contiguous bulk transfer.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2205 static struct sk_buff
*lan78xx_tx_prep(struct lan78xx_net
*dev
,
2206 struct sk_buff
*skb
, gfp_t flags
)
2208 u32 tx_cmd_a
, tx_cmd_b
;
2210 if (skb_headroom(skb
) < TX_OVERHEAD
) {
2211 struct sk_buff
*skb2
;
2213 skb2
= skb_copy_expand(skb
, TX_OVERHEAD
, 0, flags
);
2214 dev_kfree_skb_any(skb
);
2220 if (lan78xx_linearize(skb
) < 0)
2223 tx_cmd_a
= (u32
)(skb
->len
& TX_CMD_A_LEN_MASK_
) | TX_CMD_A_FCS_
;
2225 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2226 tx_cmd_a
|= TX_CMD_A_IPE_
| TX_CMD_A_TPE_
;
2229 if (skb_is_gso(skb
)) {
2230 u16 mss
= max(skb_shinfo(skb
)->gso_size
, TX_CMD_B_MSS_MIN_
);
2232 tx_cmd_b
= (mss
<< TX_CMD_B_MSS_SHIFT_
) & TX_CMD_B_MSS_MASK_
;
2234 tx_cmd_a
|= TX_CMD_A_LSO_
;
2237 if (skb_vlan_tag_present(skb
)) {
2238 tx_cmd_a
|= TX_CMD_A_IVTG_
;
2239 tx_cmd_b
|= skb_vlan_tag_get(skb
) & TX_CMD_B_VTAG_MASK_
;
2243 cpu_to_le32s(&tx_cmd_b
);
2244 memcpy(skb
->data
, &tx_cmd_b
, 4);
2247 cpu_to_le32s(&tx_cmd_a
);
2248 memcpy(skb
->data
, &tx_cmd_a
, 4);
2253 static enum skb_state
defer_bh(struct lan78xx_net
*dev
, struct sk_buff
*skb
,
2254 struct sk_buff_head
*list
, enum skb_state state
)
2256 unsigned long flags
;
2257 enum skb_state old_state
;
2258 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2260 spin_lock_irqsave(&list
->lock
, flags
);
2261 old_state
= entry
->state
;
2262 entry
->state
= state
;
2264 __skb_unlink(skb
, list
);
2265 spin_unlock(&list
->lock
);
2266 spin_lock(&dev
->done
.lock
);
2268 __skb_queue_tail(&dev
->done
, skb
);
2269 if (skb_queue_len(&dev
->done
) == 1)
2270 tasklet_schedule(&dev
->bh
);
2271 spin_unlock_irqrestore(&dev
->done
.lock
, flags
);
2276 static void tx_complete(struct urb
*urb
)
2278 struct sk_buff
*skb
= (struct sk_buff
*)urb
->context
;
2279 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2280 struct lan78xx_net
*dev
= entry
->dev
;
2282 if (urb
->status
== 0) {
2283 dev
->net
->stats
.tx_packets
++;
2284 dev
->net
->stats
.tx_bytes
+= entry
->length
;
2286 dev
->net
->stats
.tx_errors
++;
2288 switch (urb
->status
) {
2290 lan78xx_defer_kevent(dev
, EVENT_TX_HALT
);
2293 /* software-driven interface shutdown */
2301 netif_stop_queue(dev
->net
);
2304 netif_dbg(dev
, tx_err
, dev
->net
,
2305 "tx err %d\n", entry
->urb
->status
);
2310 usb_autopm_put_interface_async(dev
->intf
);
2312 defer_bh(dev
, skb
, &dev
->txq
, tx_done
);
2315 static void lan78xx_queue_skb(struct sk_buff_head
*list
,
2316 struct sk_buff
*newsk
, enum skb_state state
)
2318 struct skb_data
*entry
= (struct skb_data
*)newsk
->cb
;
2320 __skb_queue_tail(list
, newsk
);
2321 entry
->state
= state
;
2324 netdev_tx_t
lan78xx_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
2326 struct lan78xx_net
*dev
= netdev_priv(net
);
2327 struct sk_buff
*skb2
= NULL
;
2330 skb_tx_timestamp(skb
);
2331 skb2
= lan78xx_tx_prep(dev
, skb
, GFP_ATOMIC
);
2335 skb_queue_tail(&dev
->txq_pend
, skb2
);
2337 /* throttle TX patch at slower than SUPER SPEED USB */
2338 if ((dev
->udev
->speed
< USB_SPEED_SUPER
) &&
2339 (skb_queue_len(&dev
->txq_pend
) > 10))
2340 netif_stop_queue(net
);
2342 netif_dbg(dev
, tx_err
, dev
->net
,
2343 "lan78xx_tx_prep return NULL\n");
2344 dev
->net
->stats
.tx_errors
++;
2345 dev
->net
->stats
.tx_dropped
++;
2348 tasklet_schedule(&dev
->bh
);
2350 return NETDEV_TX_OK
;
2353 int lan78xx_get_endpoints(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2356 struct usb_host_interface
*alt
= NULL
;
2357 struct usb_host_endpoint
*in
= NULL
, *out
= NULL
;
2358 struct usb_host_endpoint
*status
= NULL
;
2360 for (tmp
= 0; tmp
< intf
->num_altsetting
; tmp
++) {
2366 alt
= intf
->altsetting
+ tmp
;
2368 for (ep
= 0; ep
< alt
->desc
.bNumEndpoints
; ep
++) {
2369 struct usb_host_endpoint
*e
;
2372 e
= alt
->endpoint
+ ep
;
2373 switch (e
->desc
.bmAttributes
) {
2374 case USB_ENDPOINT_XFER_INT
:
2375 if (!usb_endpoint_dir_in(&e
->desc
))
2379 case USB_ENDPOINT_XFER_BULK
:
2384 if (usb_endpoint_dir_in(&e
->desc
)) {
2387 else if (intr
&& !status
)
2397 if (!alt
|| !in
|| !out
)
2400 dev
->pipe_in
= usb_rcvbulkpipe(dev
->udev
,
2401 in
->desc
.bEndpointAddress
&
2402 USB_ENDPOINT_NUMBER_MASK
);
2403 dev
->pipe_out
= usb_sndbulkpipe(dev
->udev
,
2404 out
->desc
.bEndpointAddress
&
2405 USB_ENDPOINT_NUMBER_MASK
);
2406 dev
->ep_intr
= status
;
2411 static int lan78xx_bind(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2413 struct lan78xx_priv
*pdata
= NULL
;
2417 ret
= lan78xx_get_endpoints(dev
, intf
);
2419 dev
->data
[0] = (unsigned long)kzalloc(sizeof(*pdata
), GFP_KERNEL
);
2421 pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2423 netdev_warn(dev
->net
, "Unable to allocate lan78xx_priv");
2429 spin_lock_init(&pdata
->rfe_ctl_lock
);
2430 mutex_init(&pdata
->dataport_mutex
);
2432 INIT_WORK(&pdata
->set_multicast
, lan78xx_deferred_multicast_write
);
2434 for (i
= 0; i
< DP_SEL_VHF_VLAN_LEN
; i
++)
2435 pdata
->vlan_table
[i
] = 0;
2437 INIT_WORK(&pdata
->set_vlan
, lan78xx_deferred_vlan_write
);
2439 dev
->net
->features
= 0;
2441 if (DEFAULT_TX_CSUM_ENABLE
)
2442 dev
->net
->features
|= NETIF_F_HW_CSUM
;
2444 if (DEFAULT_RX_CSUM_ENABLE
)
2445 dev
->net
->features
|= NETIF_F_RXCSUM
;
2447 if (DEFAULT_TSO_CSUM_ENABLE
)
2448 dev
->net
->features
|= NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_SG
;
2450 dev
->net
->hw_features
= dev
->net
->features
;
2452 /* Init all registers */
2453 ret
= lan78xx_reset(dev
);
2455 lan78xx_mdio_init(dev
);
2457 dev
->net
->flags
|= IFF_MULTICAST
;
2459 pdata
->wol
= WAKE_MAGIC
;
2464 static void lan78xx_unbind(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2466 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2468 lan78xx_remove_mdio(dev
);
2471 netif_dbg(dev
, ifdown
, dev
->net
, "free pdata");
2478 static void lan78xx_rx_csum_offload(struct lan78xx_net
*dev
,
2479 struct sk_buff
*skb
,
2480 u32 rx_cmd_a
, u32 rx_cmd_b
)
2482 if (!(dev
->net
->features
& NETIF_F_RXCSUM
) ||
2483 unlikely(rx_cmd_a
& RX_CMD_A_ICSM_
)) {
2484 skb
->ip_summed
= CHECKSUM_NONE
;
2486 skb
->csum
= ntohs((u16
)(rx_cmd_b
>> RX_CMD_B_CSUM_SHIFT_
));
2487 skb
->ip_summed
= CHECKSUM_COMPLETE
;
2491 void lan78xx_skb_return(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2495 if (test_bit(EVENT_RX_PAUSED
, &dev
->flags
)) {
2496 skb_queue_tail(&dev
->rxq_pause
, skb
);
2500 skb
->protocol
= eth_type_trans(skb
, dev
->net
);
2501 dev
->net
->stats
.rx_packets
++;
2502 dev
->net
->stats
.rx_bytes
+= skb
->len
;
2504 netif_dbg(dev
, rx_status
, dev
->net
, "< rx, len %zu, type 0x%x\n",
2505 skb
->len
+ sizeof(struct ethhdr
), skb
->protocol
);
2506 memset(skb
->cb
, 0, sizeof(struct skb_data
));
2508 if (skb_defer_rx_timestamp(skb
))
2511 status
= netif_rx(skb
);
2512 if (status
!= NET_RX_SUCCESS
)
2513 netif_dbg(dev
, rx_err
, dev
->net
,
2514 "netif_rx status %d\n", status
);
2517 static int lan78xx_rx(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2519 if (skb
->len
< dev
->net
->hard_header_len
)
2522 while (skb
->len
> 0) {
2523 u32 rx_cmd_a
, rx_cmd_b
, align_count
, size
;
2525 struct sk_buff
*skb2
;
2526 unsigned char *packet
;
2528 memcpy(&rx_cmd_a
, skb
->data
, sizeof(rx_cmd_a
));
2529 le32_to_cpus(&rx_cmd_a
);
2530 skb_pull(skb
, sizeof(rx_cmd_a
));
2532 memcpy(&rx_cmd_b
, skb
->data
, sizeof(rx_cmd_b
));
2533 le32_to_cpus(&rx_cmd_b
);
2534 skb_pull(skb
, sizeof(rx_cmd_b
));
2536 memcpy(&rx_cmd_c
, skb
->data
, sizeof(rx_cmd_c
));
2537 le16_to_cpus(&rx_cmd_c
);
2538 skb_pull(skb
, sizeof(rx_cmd_c
));
2542 /* get the packet length */
2543 size
= (rx_cmd_a
& RX_CMD_A_LEN_MASK_
);
2544 align_count
= (4 - ((size
+ RXW_PADDING
) % 4)) % 4;
2546 if (unlikely(rx_cmd_a
& RX_CMD_A_RED_
)) {
2547 netif_dbg(dev
, rx_err
, dev
->net
,
2548 "Error rx_cmd_a=0x%08x", rx_cmd_a
);
2550 /* last frame in this batch */
2551 if (skb
->len
== size
) {
2552 lan78xx_rx_csum_offload(dev
, skb
,
2553 rx_cmd_a
, rx_cmd_b
);
2555 skb_trim(skb
, skb
->len
- 4); /* remove fcs */
2556 skb
->truesize
= size
+ sizeof(struct sk_buff
);
2561 skb2
= skb_clone(skb
, GFP_ATOMIC
);
2562 if (unlikely(!skb2
)) {
2563 netdev_warn(dev
->net
, "Error allocating skb");
2568 skb2
->data
= packet
;
2569 skb_set_tail_pointer(skb2
, size
);
2571 lan78xx_rx_csum_offload(dev
, skb2
, rx_cmd_a
, rx_cmd_b
);
2573 skb_trim(skb2
, skb2
->len
- 4); /* remove fcs */
2574 skb2
->truesize
= size
+ sizeof(struct sk_buff
);
2576 lan78xx_skb_return(dev
, skb2
);
2579 skb_pull(skb
, size
);
2581 /* padding bytes before the next frame starts */
2583 skb_pull(skb
, align_count
);
2589 static inline void rx_process(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2591 if (!lan78xx_rx(dev
, skb
)) {
2592 dev
->net
->stats
.rx_errors
++;
2597 lan78xx_skb_return(dev
, skb
);
2601 netif_dbg(dev
, rx_err
, dev
->net
, "drop\n");
2602 dev
->net
->stats
.rx_errors
++;
2604 skb_queue_tail(&dev
->done
, skb
);
2607 static void rx_complete(struct urb
*urb
);
2609 static int rx_submit(struct lan78xx_net
*dev
, struct urb
*urb
, gfp_t flags
)
2611 struct sk_buff
*skb
;
2612 struct skb_data
*entry
;
2613 unsigned long lockflags
;
2614 size_t size
= dev
->rx_urb_size
;
2617 skb
= netdev_alloc_skb_ip_align(dev
->net
, size
);
2623 entry
= (struct skb_data
*)skb
->cb
;
2628 usb_fill_bulk_urb(urb
, dev
->udev
, dev
->pipe_in
,
2629 skb
->data
, size
, rx_complete
, skb
);
2631 spin_lock_irqsave(&dev
->rxq
.lock
, lockflags
);
2633 if (netif_device_present(dev
->net
) &&
2634 netif_running(dev
->net
) &&
2635 !test_bit(EVENT_RX_HALT
, &dev
->flags
) &&
2636 !test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
2637 ret
= usb_submit_urb(urb
, GFP_ATOMIC
);
2640 lan78xx_queue_skb(&dev
->rxq
, skb
, rx_start
);
2643 lan78xx_defer_kevent(dev
, EVENT_RX_HALT
);
2646 netif_dbg(dev
, ifdown
, dev
->net
, "device gone\n");
2647 netif_device_detach(dev
->net
);
2653 netif_dbg(dev
, rx_err
, dev
->net
,
2654 "rx submit, %d\n", ret
);
2655 tasklet_schedule(&dev
->bh
);
2658 netif_dbg(dev
, ifdown
, dev
->net
, "rx: stopped\n");
2661 spin_unlock_irqrestore(&dev
->rxq
.lock
, lockflags
);
2663 dev_kfree_skb_any(skb
);
2669 static void rx_complete(struct urb
*urb
)
2671 struct sk_buff
*skb
= (struct sk_buff
*)urb
->context
;
2672 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2673 struct lan78xx_net
*dev
= entry
->dev
;
2674 int urb_status
= urb
->status
;
2675 enum skb_state state
;
2677 skb_put(skb
, urb
->actual_length
);
2681 switch (urb_status
) {
2683 if (skb
->len
< dev
->net
->hard_header_len
) {
2685 dev
->net
->stats
.rx_errors
++;
2686 dev
->net
->stats
.rx_length_errors
++;
2687 netif_dbg(dev
, rx_err
, dev
->net
,
2688 "rx length %d\n", skb
->len
);
2690 usb_mark_last_busy(dev
->udev
);
2693 dev
->net
->stats
.rx_errors
++;
2694 lan78xx_defer_kevent(dev
, EVENT_RX_HALT
);
2696 case -ECONNRESET
: /* async unlink */
2697 case -ESHUTDOWN
: /* hardware gone */
2698 netif_dbg(dev
, ifdown
, dev
->net
,
2699 "rx shutdown, code %d\n", urb_status
);
2707 dev
->net
->stats
.rx_errors
++;
2713 /* data overrun ... flush fifo? */
2715 dev
->net
->stats
.rx_over_errors
++;
2720 dev
->net
->stats
.rx_errors
++;
2721 netif_dbg(dev
, rx_err
, dev
->net
, "rx status %d\n", urb_status
);
2725 state
= defer_bh(dev
, skb
, &dev
->rxq
, state
);
2728 if (netif_running(dev
->net
) &&
2729 !test_bit(EVENT_RX_HALT
, &dev
->flags
) &&
2730 state
!= unlink_start
) {
2731 rx_submit(dev
, urb
, GFP_ATOMIC
);
2736 netif_dbg(dev
, rx_err
, dev
->net
, "no read resubmitted\n");
/* lan78xx_tx_bh - transmit bottom half.
 *
 * Drains dev->txq_pend: coalesces multiple small pending skbs into one
 * contiguous skb (each packet padded to a 4-byte boundary), wraps it in
 * a bulk-OUT URB and submits it.  If the device is autosuspended the
 * URB is parked on dev->deferred and replayed from resume.
 *
 * NOTE(review): source lines are elided in this extract (braces, break
 * and goto statements, some declarations such as count/pos/length/ret);
 * comments describe only the visible code.
 */
2739 static void lan78xx_tx_bh(struct lan78xx_net
*dev
)
2742 struct urb
*urb
= NULL
;
2743 struct skb_data
*entry
;
2744 unsigned long flags
;
2745 struct sk_buff_head
*tqp
= &dev
->txq_pend
;
2746 struct sk_buff
*skb
, *skb2
;
2749 int skb_totallen
, pkt_cnt
;
/* first pass: walk the pending queue to decide how many packets can be
 * merged into one aggregate buffer
 */
2753 for (skb
= tqp
->next
; pkt_cnt
< tqp
->qlen
; skb
= skb
->next
) {
/* GSO skbs are not aggregated with others */
2754 if (skb_is_gso(skb
)) {
2756 /* handle previous packets first */
2760 skb2
= skb_dequeue(tqp
);
/* stop merging once the aggregate would exceed one max-size frame */
2764 if ((skb_totallen
+ skb
->len
) > MAX_SINGLE_PACKET_SIZE
)
/* each packet's start is aligned to a 32-bit boundary */
2766 skb_totallen
= skb
->len
+ roundup(skb_totallen
, sizeof(u32
));
2770 /* copy to a single skb */
2771 skb
= alloc_skb(skb_totallen
, GFP_ATOMIC
);
2775 skb_put(skb
, skb_totallen
);
/* second pass: dequeue the counted packets and copy them in at their
 * 4-byte-aligned offsets, freeing each source skb
 */
2777 for (count
= pos
= 0; count
< pkt_cnt
; count
++) {
2778 skb2
= skb_dequeue(tqp
);
2780 memcpy(skb
->data
+ pos
, skb2
->data
, skb2
->len
);
2781 pos
+= roundup(skb2
->len
, sizeof(u32
));
2782 dev_kfree_skb(skb2
);
2786 length
= skb_totallen
;
2789 urb
= usb_alloc_urb(0, GFP_ATOMIC
);
2791 netif_dbg(dev
, tx_err
, dev
->net
, "no urb\n");
2795 entry
= (struct skb_data
*)skb
->cb
;
2798 entry
->length
= length
;
/* take the txq lock; TX requires the interface resumed, so grab an
 * async PM reference before submitting
 */
2800 spin_lock_irqsave(&dev
->txq
.lock
, flags
);
2801 ret
= usb_autopm_get_interface_async(dev
->intf
);
2803 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
2807 usb_fill_bulk_urb(urb
, dev
->udev
, dev
->pipe_out
,
2808 skb
->data
, skb
->len
, tx_complete
, skb
);
/* exact multiple of the endpoint packet size needs an explicit ZLP so
 * the device sees end-of-transfer
 */
2810 if (length
% dev
->maxpacket
== 0) {
2811 /* send USB_ZERO_PACKET */
2812 urb
->transfer_flags
|= URB_ZERO_PACKET
;
2816 /* if this triggers the device is still a sleep */
2817 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
2818 /* transmission will be done in resume */
2819 usb_anchor_urb(urb
, &dev
->deferred
);
2820 /* no use to process more packets */
2821 netif_stop_queue(dev
->net
);
2823 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
2824 netdev_dbg(dev
->net
, "Delaying transmission for resumption\n");
2829 ret
= usb_submit_urb(urb
, GFP_ATOMIC
);
/* success: stamp the watchdog, track the in-flight skb, and throttle
 * the stack when the hardware queue is full
 * (switch on ret — case labels elided in this extract)
 */
2832 dev
->net
->trans_start
= jiffies
;
2833 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
2834 if (skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
)
2835 netif_stop_queue(dev
->net
);
/* endpoint stalled: stop the queue and let keventd clear the halt */
2838 netif_stop_queue(dev
->net
);
2839 lan78xx_defer_kevent(dev
, EVENT_TX_HALT
);
2840 usb_autopm_put_interface_async(dev
->intf
);
/* other submit failures: drop the PM reference and log */
2843 usb_autopm_put_interface_async(dev
->intf
);
2844 netif_dbg(dev
, tx_err
, dev
->net
,
2845 "tx: submit urb err %d\n", ret
);
2849 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
/* error epilogue: count the drop and free the aggregate skb */
2852 netif_dbg(dev
, tx_err
, dev
->net
, "drop, code %d\n", ret
);
2854 dev
->net
->stats
.tx_dropped
++;
2856 dev_kfree_skb_any(skb
);
2859 netif_dbg(dev
, tx_queued
, dev
->net
,
2860 "> tx, len %d, type 0x%x\n", length
, skb
->protocol
);
/* lan78xx_rx_bh - receive bottom half.
 *
 * Tops up the pool of in-flight bulk-IN URBs (up to 10 per invocation)
 * while dev->rxq is below rx_qlen, reschedules the tasklet if the pool
 * is still short, and wakes the TX queue when there is room again.
 *
 * NOTE(review): braces, break statements and the declarations of
 * i/urb are elided in this extract.
 */
2863 static void lan78xx_rx_bh(struct lan78xx_net
*dev
)
2868 if (skb_queue_len(&dev
->rxq
) < dev
->rx_qlen
) {
/* submit at most 10 new read URBs per pass to bound tasklet work */
2869 for (i
= 0; i
< 10; i
++) {
2870 if (skb_queue_len(&dev
->rxq
) >= dev
->rx_qlen
)
2872 urb
= usb_alloc_urb(0, GFP_ATOMIC
);
/* -ENOLINK means the device is going away; stop refilling */
2874 if (rx_submit(dev
, urb
, GFP_ATOMIC
) == -ENOLINK
)
/* still short of rx_qlen: run the bottom half again */
2878 if (skb_queue_len(&dev
->rxq
) < dev
->rx_qlen
)
2879 tasklet_schedule(&dev
->bh
);
/* TX ring has drained below its limit: let the stack send again */
2881 if (skb_queue_len(&dev
->txq
) < dev
->tx_qlen
)
2882 netif_wake_queue(dev
->net
);
/* lan78xx_bh - main tasklet body.
 *
 * Drains dev->done: for each completed skb, dispatch on its skb_data
 * state (process received frames, free tx/rx cleanup URBs), then, if
 * the interface is present and running, kick the TX bottom half for
 * pending packets and the RX refill path.
 *
 * NOTE(review): case labels (rx_done/tx_done/rx_cleanup), braces and
 * the calls gated by the trailing conditions are elided in this
 * extract; comments describe only the visible code.
 */
2885 static void lan78xx_bh(unsigned long param
)
/* tasklet data is the lan78xx_net pointer cast to unsigned long */
2887 struct lan78xx_net
*dev
= (struct lan78xx_net
*)param
;
2888 struct sk_buff
*skb
;
2889 struct skb_data
*entry
;
2891 while ((skb
= skb_dequeue(&dev
->done
))) {
2892 entry
= (struct skb_data
*)(skb
->cb
);
2893 switch (entry
->state
) {
/* received frame: mark for cleanup and hand to rx_process() */
2895 entry
->state
= rx_cleanup
;
2896 rx_process(dev
, skb
);
/* completed TX: release its URB (skb free happens elsewhere) */
2899 usb_free_urb(entry
->urb
);
/* rx cleanup: release the URB of a torn-down read */
2903 usb_free_urb(entry
->urb
);
/* unexpected state: log it (default branch) */
2907 netdev_dbg(dev
->net
, "skb state %d\n", entry
->state
);
2912 if (netif_device_present(dev
->net
) && netif_running(dev
->net
)) {
/* packets are waiting to be sent: run the TX bottom half */
2913 if (!skb_queue_empty(&dev
->txq_pend
))
/* no throttle timer armed and RX not halted: refill read URBs */
2916 if (!timer_pending(&dev
->delay
) &&
2917 !test_bit(EVENT_RX_HALT
, &dev
->flags
))
/* lan78xx_delayedwork - keventd worker for deferred, sleepable work.
 *
 * Handles the events queued by lan78xx_defer_kevent(): clears TX and
 * RX endpoint halts (which require usb_clear_halt(), a blocking call
 * not possible from completion context) and services link resets.
 *
 * NOTE(review): braces and parts of the status checks (the conditions
 * preceding "status != -ESHUTDOWN") are elided in this extract.
 */
2922 static void lan78xx_delayedwork(struct work_struct
*work
)
2925 struct lan78xx_net
*dev
;
2927 dev
= container_of(work
, struct lan78xx_net
, wq
.work
);
/* --- TX halt recovery --- */
2929 if (test_bit(EVENT_TX_HALT
, &dev
->flags
)) {
2930 unlink_urbs(dev
, &dev
->txq
);
/* need the device resumed before touching the endpoint */
2931 status
= usb_autopm_get_interface(dev
->intf
);
2934 status
= usb_clear_halt(dev
->udev
, dev
->pipe_out
);
2935 usb_autopm_put_interface(dev
->intf
);
2938 status
!= -ESHUTDOWN
) {
2939 if (netif_msg_tx_err(dev
))
2941 netdev_err(dev
->net
,
2942 "can't clear tx halt, status %d\n",
/* halt cleared (or device gone): resume transmitting */
2945 clear_bit(EVENT_TX_HALT
, &dev
->flags
);
2946 if (status
!= -ESHUTDOWN
)
2947 netif_wake_queue(dev
->net
);
/* --- RX halt recovery (mirror of the TX path, on pipe_in) --- */
2950 if (test_bit(EVENT_RX_HALT
, &dev
->flags
)) {
2951 unlink_urbs(dev
, &dev
->rxq
);
2952 status
= usb_autopm_get_interface(dev
->intf
);
2955 status
= usb_clear_halt(dev
->udev
, dev
->pipe_in
);
2956 usb_autopm_put_interface(dev
->intf
);
2959 status
!= -ESHUTDOWN
) {
2960 if (netif_msg_rx_err(dev
))
2962 netdev_err(dev
->net
,
2963 "can't clear rx halt, status %d\n",
2966 clear_bit(EVENT_RX_HALT
, &dev
->flags
);
/* restart RX URB submission via the tasklet */
2967 tasklet_schedule(&dev
->bh
);
/* --- link reset --- */
2971 if (test_bit(EVENT_LINK_RESET
, &dev
->flags
)) {
2974 clear_bit(EVENT_LINK_RESET
, &dev
->flags
);
2975 status
= usb_autopm_get_interface(dev
->intf
);
2978 if (lan78xx_link_reset(dev
) < 0) {
2979 usb_autopm_put_interface(dev
->intf
);
2981 netdev_info(dev
->net
, "link reset failed (%d)\n",
2984 usb_autopm_put_interface(dev
->intf
);
/* intr_complete - completion handler for the interrupt-IN (status) URB.
 *
 * On success hands the status buffer to lan78xx_status(); on shutdown
 * codes just logs; otherwise logs the error.  The URB is then zeroed
 * and resubmitted as long as the interface is running.
 *
 * NOTE(review): the "case 0:" label, default label, braces and return
 * statements are elided in this extract.
 */
2989 static void intr_complete(struct urb
*urb
)
2991 struct lan78xx_net
*dev
= urb
->context
;
2992 int status
= urb
->status
;
/* success: parse the device status words */
2997 lan78xx_status(dev
, urb
);
3000 /* software-driven interface shutdown */
3001 case -ENOENT
: /* urb killed */
3002 case -ESHUTDOWN
: /* hardware gone */
3003 netif_dbg(dev
, ifdown
, dev
->net
,
3004 "intr shutdown, code %d\n", status
);
3007 /* NOTE: not throttling like RX/TX, since this endpoint
3008 * already polls infrequently
3011 netdev_dbg(dev
->net
, "intr status %d\n", status
);
/* don't resubmit once the interface is down */
3015 if (!netif_running(dev
->net
))
/* clear stale status data before the next poll */
3018 memset(urb
->transfer_buffer
, 0, urb
->transfer_buffer_length
);
3019 status
= usb_submit_urb(urb
, GFP_ATOMIC
);
3021 netif_err(dev
, timer
, dev
->net
,
3022 "intr resubmit --> %d\n", status
);
/* lan78xx_disconnect - USB disconnect callback; tears the device down.
 *
 * Detaches interface data, unregisters the netdev, cancels deferred
 * work, discards any parked (deferred) URBs, unbinds, and finally
 * kills and frees the interrupt URB.
 *
 * NOTE(review): braces, the NULL check on dev and the assignment of
 * 'net' are elided in this extract.
 */
3025 static void lan78xx_disconnect(struct usb_interface
*intf
)
3027 struct lan78xx_net
*dev
;
3028 struct usb_device
*udev
;
3029 struct net_device
*net
;
3031 dev
= usb_get_intfdata(intf
);
/* break the intf -> dev association before tearing down */
3032 usb_set_intfdata(intf
, NULL
);
3036 udev
= interface_to_usbdev(intf
);
3039 unregister_netdev(net
);
/* make sure lan78xx_delayedwork() is not (and will not be) running */
3041 cancel_delayed_work_sync(&dev
->wq
);
/* drop URBs that were anchored while the device was asleep */
3043 usb_scuttle_anchored_urbs(&dev
->deferred
);
3045 lan78xx_unbind(dev
, intf
);
3047 usb_kill_urb(dev
->urb_intr
);
3048 usb_free_urb(dev
->urb_intr
);
/* lan78xx_tx_timeout - netdev watchdog callback (.ndo_tx_timeout).
 *
 * Invoked when the stack sees no TX completions within
 * watchdog_timeo; unlinks all in-flight TX URBs and reschedules the
 * tasklet so transmission can restart.
 */
3054 void lan78xx_tx_timeout(struct net_device
*net
)
3056 struct lan78xx_net
*dev
= netdev_priv(net
);
3058 unlink_urbs(dev
, &dev
->txq
);
3059 tasklet_schedule(&dev
->bh
);
/* Netdevice operations table wiring the driver's open/stop/xmit,
 * timeout, MTU, MAC address, ioctl, rx-mode, offload-feature and VLAN
 * filter handlers into the networking core.
 */
3062 static const struct net_device_ops lan78xx_netdev_ops
= {
3063 .ndo_open
= lan78xx_open
,
3064 .ndo_stop
= lan78xx_stop
,
3065 .ndo_start_xmit
= lan78xx_start_xmit
,
3066 .ndo_tx_timeout
= lan78xx_tx_timeout
,
3067 .ndo_change_mtu
= lan78xx_change_mtu
,
3068 .ndo_set_mac_address
= lan78xx_set_mac_addr
,
3069 .ndo_validate_addr
= eth_validate_addr
,
3070 .ndo_do_ioctl
= lan78xx_ioctl
,
3071 .ndo_set_rx_mode
= lan78xx_set_multicast
,
3072 .ndo_set_features
= lan78xx_set_features
,
3073 .ndo_vlan_rx_add_vid
= lan78xx_vlan_rx_add_vid
,
3074 .ndo_vlan_rx_kill_vid
= lan78xx_vlan_rx_kill_vid
,
/* lan78xx_probe - USB probe: allocate and register the network device.
 *
 * Allocates the etherdev, initializes queues/locks/tasklet/workqueue,
 * binds the hardware, resolves the three endpoints (bulk-in, bulk-out,
 * interrupt-in), sets up the interrupt URB, registers the netdev and
 * enables wakeup + a 10 s autosuspend delay.
 *
 * NOTE(review): error-path labels/gotos, several declarations (ret,
 * period, maxp, buf) and some NULL checks are elided in this extract;
 * comments describe only the visible code.
 */
3077 static int lan78xx_probe(struct usb_interface
*intf
,
3078 const struct usb_device_id
*id
)
3080 struct lan78xx_net
*dev
;
3081 struct net_device
*netdev
;
3082 struct usb_device
*udev
;
3088 udev
= interface_to_usbdev(intf
);
/* hold a reference on the usb_device for the driver's lifetime */
3089 udev
= usb_get_dev(udev
);
3092 netdev
= alloc_etherdev(sizeof(struct lan78xx_net
));
3094 dev_err(&intf
->dev
, "Error: OOM\n");
3098 /* netdev_printk() needs this */
3099 SET_NETDEV_DEV(netdev
, &intf
->dev
);
3101 dev
= netdev_priv(netdev
);
3105 dev
->msg_enable
= netif_msg_init(msg_level
, NETIF_MSG_DRV
3106 | NETIF_MSG_PROBE
| NETIF_MSG_LINK
);
/* per-device skb queues used by the rx/tx bottom halves */
3108 skb_queue_head_init(&dev
->rxq
);
3109 skb_queue_head_init(&dev
->txq
);
3110 skb_queue_head_init(&dev
->done
);
3111 skb_queue_head_init(&dev
->rxq_pause
);
3112 skb_queue_head_init(&dev
->txq_pend
);
3113 mutex_init(&dev
->phy_mutex
);
3115 tasklet_init(&dev
->bh
, lan78xx_bh
, (unsigned long)dev
);
3116 INIT_DELAYED_WORK(&dev
->wq
, lan78xx_delayedwork
);
/* anchor for URBs parked while the device is autosuspended */
3117 init_usb_anchor(&dev
->deferred
);
3119 netdev
->netdev_ops
= &lan78xx_netdev_ops
;
3120 netdev
->watchdog_timeo
= TX_TIMEOUT_JIFFIES
;
3121 netdev
->ethtool_ops
= &lan78xx_ethtool_ops
;
3123 ret
= lan78xx_bind(dev
, intf
);
3126 strcpy(netdev
->name
, "eth%d");
/* clamp MTU so frame + header fits the device's hard_mtu */
3128 if (netdev
->mtu
> (dev
->hard_mtu
- netdev
->hard_header_len
))
3129 netdev
->mtu
= dev
->hard_mtu
- netdev
->hard_header_len
;
/* endpoint layout is fixed: 0 = bulk-in, 1 = bulk-out, 2 = interrupt */
3131 dev
->ep_blkin
= (intf
->cur_altsetting
)->endpoint
+ 0;
3132 dev
->ep_blkout
= (intf
->cur_altsetting
)->endpoint
+ 1;
3133 dev
->ep_intr
= (intf
->cur_altsetting
)->endpoint
+ 2;
3135 dev
->pipe_in
= usb_rcvbulkpipe(udev
, BULK_IN_PIPE
);
3136 dev
->pipe_out
= usb_sndbulkpipe(udev
, BULK_OUT_PIPE
);
3138 dev
->pipe_intr
= usb_rcvintpipe(dev
->udev
,
3139 dev
->ep_intr
->desc
.bEndpointAddress
&
3140 USB_ENDPOINT_NUMBER_MASK
);
3141 period
= dev
->ep_intr
->desc
.bInterval
;
/* interrupt URB buffer sized to the endpoint's max packet */
3143 maxp
= usb_maxpacket(dev
->udev
, dev
->pipe_intr
, 0);
3144 buf
= kmalloc(maxp
, GFP_KERNEL
);
3146 dev
->urb_intr
= usb_alloc_urb(0, GFP_KERNEL
);
3147 if (!dev
->urb_intr
) {
3151 usb_fill_int_urb(dev
->urb_intr
, dev
->udev
,
3152 dev
->pipe_intr
, buf
, maxp
,
3153 intr_complete
, dev
, period
);
3157 dev
->maxpacket
= usb_maxpacket(dev
->udev
, dev
->pipe_out
, 1);
3159 /* driver requires remote-wakeup capability during autosuspend. */
3160 intf
->needs_remote_wakeup
= 1;
3162 ret
= register_netdev(netdev
);
3164 netif_err(dev
, probe
, netdev
, "couldn't register the device\n");
3168 usb_set_intfdata(intf
, dev
);
3170 ret
= device_set_wakeup_enable(&udev
->dev
, true);
3172 /* Default delay of 2sec has more overhead than advantage.
3173 * Set to 10sec as default.
3175 pm_runtime_set_autosuspend_delay(&udev
->dev
,
3176 DEFAULT_AUTOSUSPEND_DELAY
);
/* error unwind (labels elided): unbind, then free the netdev */
3181 lan78xx_unbind(dev
, intf
);
3183 free_netdev(netdev
);
/* lan78xx_wakeframe_crc16 - CRC-16 over a wakeup-frame pattern.
 *
 * Bitwise CRC-16 with polynomial 0x8005, used to program the hardware
 * wakeup-frame filters (WUF_CFG) with a pattern checksum.
 *
 * NOTE(review): the loop bodies are mostly elided in this extract
 * (shift steps, the data fetch per byte, the final return); only the
 * visible skeleton is commented.
 */
3190 static u16
lan78xx_wakeframe_crc16(const u8
*buf
, int len
)
3192 const u16 crc16poly
= 0x8005;
/* outer loop: one pass per input byte */
3198 for (i
= 0; i
< len
; i
++) {
/* inner loop: process the byte LSB-first, one bit per iteration */
3200 for (bit
= 0; bit
< 8; bit
++) {
/* feedback step: when the CRC MSB differs from the input bit... */
3204 if (msb
^ (u16
)(data
& 1)) {
/* ...inject the low-order feedback bit */
3206 crc
|= (u16
)0x0001U
;
/* lan78xx_set_suspend - program Wake-on-LAN before system suspend.
 *
 * Disables the MAC TX/RX paths, clears all wake status/filters, then
 * configures WUCSR wake enables, wakeup-frame (WUF) filters and the
 * PMT_CTL suspend mode according to the requested 'wol' bitmask
 * (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP), and finally re-enables the
 * receiver so wake frames can be seen.
 *
 * NOTE(review): declarations (ret, buf, crc, temp_wucsr, temp_pmt_ctl,
 * mask_index), braces and mask_index increments are elided in this
 * extract; comments describe only the visible code.
 */
3215 static int lan78xx_set_suspend(struct lan78xx_net
*dev
, u32 wol
)
/* pattern prefixes matched by the wakeup-frame filters */
3223 const u8 ipv4_multicast
[3] = { 0x01, 0x00, 0x5E };
3224 const u8 ipv6_multicast
[3] = { 0x33, 0x33 };
3225 const u8 arp_type
[2] = { 0x08, 0x06 };
/* quiesce the MAC: stop transmitter and receiver */
3227 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3228 buf
&= ~MAC_TX_TXEN_
;
3229 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3230 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3231 buf
&= ~MAC_RX_RXEN_
;
3232 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
/* clear previous wake configuration and latched wake sources */
3234 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3235 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3236 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3241 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &temp_pmt_ctl
);
3242 temp_pmt_ctl
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3243 temp_pmt_ctl
|= PMT_CTL_RES_CLR_WKP_STS_
;
/* wipe every wakeup-frame filter slot before reprogramming */
3245 for (mask_index
= 0; mask_index
< NUM_OF_WUF_CFG
; mask_index
++)
3246 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
), 0);
/* PHY (link-change) wake: suspend mode 0 */
3249 if (wol
& WAKE_PHY
) {
3250 temp_pmt_ctl
|= PMT_CTL_PHY_WAKE_EN_
;
3252 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3253 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3254 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
/* magic packet wake: deepest visible mode (suspend mode 3) */
3256 if (wol
& WAKE_MAGIC
) {
3257 temp_wucsr
|= WUCSR_MPEN_
;
3259 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3260 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3261 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_3_
;
/* broadcast wake */
3263 if (wol
& WAKE_BCAST
) {
3264 temp_wucsr
|= WUCSR_BCST_EN_
;
3266 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3267 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3268 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
/* multicast wake: program WUF filters matching the IPv4 (01:00:5E)
 * and IPv6 (33:33) multicast MAC prefixes at frame offset 0
 */
3270 if (wol
& WAKE_MCAST
) {
3271 temp_wucsr
|= WUCSR_WAKE_EN_
;
3273 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3274 crc
= lan78xx_wakeframe_crc16(ipv4_multicast
, 3);
3275 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3277 WUF_CFGX_TYPE_MCAST_
|
3278 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3279 (crc
& WUF_CFGX_CRC16_MASK_
));
/* mask 0x7 = compare the first three bytes of the frame */
3281 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 7);
3282 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3283 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3284 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3287 /* for IPv6 Multicast */
3288 crc
= lan78xx_wakeframe_crc16(ipv6_multicast
, 2);
3289 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3291 WUF_CFGX_TYPE_MCAST_
|
3292 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3293 (crc
& WUF_CFGX_CRC16_MASK_
));
/* mask 0x3 = compare the first two bytes of the frame */
3295 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 3);
3296 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3297 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3298 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3301 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3302 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3303 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
/* unicast (perfect DA) wake */
3305 if (wol
& WAKE_UCAST
) {
3306 temp_wucsr
|= WUCSR_PFDA_EN_
;
3308 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3309 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3310 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
/* ARP wake: filter on EtherType 0x0806 at offset 12/13 */
3312 if (wol
& WAKE_ARP
) {
3313 temp_wucsr
|= WUCSR_WAKE_EN_
;
3315 /* set WUF_CFG & WUF_MASK
3316 * for packettype (offset 12,13) = ARP (0x0806)
3318 crc
= lan78xx_wakeframe_crc16(arp_type
, 2);
3319 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3321 WUF_CFGX_TYPE_ALL_
|
3322 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3323 (crc
& WUF_CFGX_CRC16_MASK_
));
/* mask 0x3000 selects byte offsets 12 and 13 (the EtherType) */
3325 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 0x3000);
3326 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3327 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3328 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3331 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3332 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3333 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
/* commit accumulated wake enables */
3336 ret
= lan78xx_write_reg(dev
, WUCSR
, temp_wucsr
);
3338 /* when multiple WOL bits are set */
3339 if (hweight_long((unsigned long)wol
) > 1) {
3340 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3341 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3342 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3344 ret
= lan78xx_write_reg(dev
, PMT_CTL
, temp_pmt_ctl
);
/* write-1-to-clear any stale wake-up status bits */
3347 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3348 buf
|= PMT_CTL_WUPS_MASK_
;
3349 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
/* re-enable the receiver so the device can observe wake frames */
3351 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3352 buf
|= MAC_RX_RXEN_
;
3353 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
/* lan78xx_suspend - USB suspend callback (system sleep and autosuspend).
 *
 * On the first suspend (suspend_count 0 -> 1): refuse autosuspend while
 * TX is in flight, mark the device asleep, stop the MAC, detach the
 * netif and kill all URBs.  Then, for autosuspend, arm "good frame" /
 * PHY wake in suspend mode 3; for system sleep, delegate the full WoL
 * setup to lan78xx_set_suspend().
 *
 * NOTE(review): declarations (ret, buf, event), braces, return paths
 * and some else branches are elided in this extract.
 */
3358 int lan78xx_suspend(struct usb_interface
*intf
, pm_message_t message
)
3360 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
/* driver-private area lives in dev->data[0] */
3361 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
3366 event
= message
.event
;
/* first suspender does the actual quiesce work */
3368 if (!dev
->suspend_count
++) {
3369 spin_lock_irq(&dev
->txq
.lock
);
3370 /* don't autosuspend while transmitting */
3371 if ((skb_queue_len(&dev
->txq
) ||
3372 skb_queue_len(&dev
->txq_pend
)) &&
3373 PMSG_IS_AUTO(message
)) {
3374 spin_unlock_irq(&dev
->txq
.lock
);
/* from here on, lan78xx_tx_bh() parks new TX URBs on dev->deferred */
3378 set_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
3379 spin_unlock_irq(&dev
->txq
.lock
);
/* stop the MAC transmitter and receiver */
3383 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3384 buf
&= ~MAC_TX_TXEN_
;
3385 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3386 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3387 buf
&= ~MAC_RX_RXEN_
;
3388 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3390 /* empty out the rx and queues */
3391 netif_device_detach(dev
->net
);
3392 lan78xx_terminate_urbs(dev
);
3393 usb_kill_urb(dev
->urb_intr
);
/* (branch elided) reattach so the stack sees the device again */
3396 netif_device_attach(dev
->net
);
3399 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
3400 if (PMSG_IS_AUTO(message
)) {
3401 /* auto suspend (selective suspend) */
3402 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3403 buf
&= ~MAC_TX_TXEN_
;
3404 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3405 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3406 buf
&= ~MAC_RX_RXEN_
;
3407 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
/* clear old wake config and latched wake sources */
3409 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3410 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3411 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3413 /* set goodframe wakeup */
3414 ret
= lan78xx_read_reg(dev
, WUCSR
, &buf
);
3416 buf
|= WUCSR_RFE_WAKE_EN_
;
3417 buf
|= WUCSR_STORE_WAKE_
;
3419 ret
= lan78xx_write_reg(dev
, WUCSR
, buf
);
3421 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3423 buf
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3424 buf
|= PMT_CTL_RES_CLR_WKP_STS_
;
/* wake on PHY events too, suspend mode 3 for autosuspend */
3426 buf
|= PMT_CTL_PHY_WAKE_EN_
;
3427 buf
|= PMT_CTL_WOL_EN_
;
3428 buf
&= ~PMT_CTL_SUS_MODE_MASK_
;
3429 buf
|= PMT_CTL_SUS_MODE_3_
;
3431 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
/* write-1-to-clear stale wake-up status */
3433 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3435 buf
|= PMT_CTL_WUPS_MASK_
;
3437 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
/* leave RX enabled so wake frames are received while asleep */
3439 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3440 buf
|= MAC_RX_RXEN_
;
3441 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
/* system sleep: program user-selected Wake-on-LAN instead */
3443 lan78xx_set_suspend(dev
, pdata
->wol
);
/* lan78xx_resume - USB resume callback.
 *
 * On the last resume (suspend_count -> 0): restart the interrupt URB,
 * replay every TX URB parked on dev->deferred while asleep, clear the
 * asleep flag and restart the TX queue/tasklet.  Afterwards re-arm
 * WUCSR/WUCSR2 wake sources and re-enable the MAC transmitter.
 *
 * NOTE(review): declarations (ret, res, buf), braces, the submit-error
 * branch and some WUCSR flag lines are elided in this extract.
 */
3452 int lan78xx_resume(struct usb_interface
*intf
)
3454 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
3455 struct sk_buff
*skb
;
3460 if (!--dev
->suspend_count
) {
3461 /* resume interrupt URBs */
3462 if (dev
->urb_intr
&& test_bit(EVENT_DEV_OPEN
, &dev
->flags
))
3463 usb_submit_urb(dev
->urb_intr
, GFP_NOIO
);
/* replay TX URBs that lan78xx_tx_bh() deferred while asleep */
3465 spin_lock_irq(&dev
->txq
.lock
);
3466 while ((res
= usb_get_from_anchor(&dev
->deferred
))) {
3467 skb
= (struct sk_buff
*)res
->context
;
3468 ret
= usb_submit_urb(res
, GFP_ATOMIC
);
/* submit failed: drop the skb and release its PM reference */
3470 dev_kfree_skb_any(skb
);
3472 usb_autopm_put_interface_async(dev
->intf
);
/* submitted: stamp the watchdog and track the in-flight skb */
3474 dev
->net
->trans_start
= jiffies
;
3475 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
3479 clear_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
3480 spin_unlock_irq(&dev
->txq
.lock
);
3482 if (test_bit(EVENT_DEV_OPEN
, &dev
->flags
)) {
/* restart the stack's queue unless the TX ring is already full */
3483 if (!(skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
))
3484 netif_start_queue(dev
->net
);
3485 tasklet_schedule(&dev
->bh
);
/* clear wake config and latched sources left from suspend */
3489 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3490 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3491 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
/* re-arm the always-on wake sources (some flag lines elided) */
3493 ret
= lan78xx_write_reg(dev
, WUCSR2
, WUCSR2_NS_RCD_
|
3495 WUCSR2_IPV6_TCPSYN_RCD_
|
3496 WUCSR2_IPV4_TCPSYN_RCD_
);
3498 ret
= lan78xx_write_reg(dev
, WUCSR
, WUCSR_EEE_TX_WAKE_
|
3499 WUCSR_EEE_RX_WAKE_
|
3501 WUCSR_RFE_WAKE_FR_
|
/* re-enable the MAC transmitter */
3506 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3507 buf
|= MAC_TX_TXEN_
;
3508 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
/* lan78xx_reset_resume - resume after the device was reset.
 *
 * Re-initializes the PHY, then runs the normal resume path.
 *
 * NOTE(review): lines between the declaration and lan78xx_phy_init()
 * are elided in this extract (likely additional re-init); only the
 * visible calls are documented.
 */
3513 int lan78xx_reset_resume(struct usb_interface
*intf
)
3515 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
3519 lan78xx_phy_init(dev
);
3521 return lan78xx_resume(intf
);
/* USB ID table: the LAN7800 and LAN7850 devices this driver binds to.
 * Exported for hotplug/module autoloading via MODULE_DEVICE_TABLE.
 */
3524 static const struct usb_device_id products
[] = {
3526 /* LAN7800 USB Gigabit Ethernet Device */
3527 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7800_USB_PRODUCT_ID
),
3530 /* LAN7850 USB Gigabit Ethernet Device */
3531 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7850_USB_PRODUCT_ID
),
3535 MODULE_DEVICE_TABLE(usb
, products
);
/* USB driver registration: probe/disconnect plus the full power
 * management callback set; autosuspend is supported and hub-initiated
 * LPM is disabled.  module_usb_driver() generates module init/exit.
 */
3537 static struct usb_driver lan78xx_driver
= {
3538 .name
= DRIVER_NAME
,
3539 .id_table
= products
,
3540 .probe
= lan78xx_probe
,
3541 .disconnect
= lan78xx_disconnect
,
3542 .suspend
= lan78xx_suspend
,
3543 .resume
= lan78xx_resume
,
3544 .reset_resume
= lan78xx_reset_resume
,
3545 .supports_autosuspend
= 1,
3546 .disable_hub_initiated_lpm
= 1,
3549 module_usb_driver(lan78xx_driver
);
/* Module metadata (author/description strings defined at top of file). */
3551 MODULE_AUTHOR(DRIVER_AUTHOR
);
3552 MODULE_DESCRIPTION(DRIVER_DESC
);
3553 MODULE_LICENSE("GPL");