lan78xx: change to use updated phy-ignore-interrupts
[deliverable/linux.git] / drivers / net / usb / lan78xx.c
CommitLineData
55d7de9d
WH
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
55d7de9d
WH
22#include <linux/usb.h>
23#include <linux/crc32.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <linux/if_vlan.h>
27#include <linux/uaccess.h>
28#include <linux/list.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/mdio.h>
32#include <net/ip6_checksum.h>
bdfba55e 33#include <linux/microchipphy.h>
55d7de9d
WH
34#include "lan78xx.h"
35
36#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
37#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38#define DRIVER_NAME "lan78xx"
e4953910 39#define DRIVER_VERSION "1.0.2"
55d7de9d
WH
40
41#define TX_TIMEOUT_JIFFIES (5 * HZ)
42#define THROTTLE_JIFFIES (HZ / 8)
43#define UNLINK_TIMEOUT_MS 3
44
45#define RX_MAX_QUEUE_MEMORY (60 * 1518)
46
47#define SS_USB_PKT_SIZE (1024)
48#define HS_USB_PKT_SIZE (512)
49#define FS_USB_PKT_SIZE (64)
50
51#define MAX_RX_FIFO_SIZE (12 * 1024)
52#define MAX_TX_FIFO_SIZE (12 * 1024)
53#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
54#define DEFAULT_BULK_IN_DELAY (0x0800)
55#define MAX_SINGLE_PACKET_SIZE (9000)
56#define DEFAULT_TX_CSUM_ENABLE (true)
57#define DEFAULT_RX_CSUM_ENABLE (true)
58#define DEFAULT_TSO_CSUM_ENABLE (true)
59#define DEFAULT_VLAN_FILTER_ENABLE (true)
55d7de9d
WH
60#define TX_OVERHEAD (8)
61#define RXW_PADDING 2
62
63#define LAN78XX_USB_VENDOR_ID (0x0424)
64#define LAN7800_USB_PRODUCT_ID (0x7800)
65#define LAN7850_USB_PRODUCT_ID (0x7850)
66#define LAN78XX_EEPROM_MAGIC (0x78A5)
67#define LAN78XX_OTP_MAGIC (0x78F3)
68
69#define MII_READ 1
70#define MII_WRITE 0
71
72#define EEPROM_INDICATOR (0xA5)
73#define EEPROM_MAC_OFFSET (0x01)
74#define MAX_EEPROM_SIZE 512
75#define OTP_INDICATOR_1 (0xF3)
76#define OTP_INDICATOR_2 (0xF7)
77
78#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
79 WAKE_MCAST | WAKE_BCAST | \
80 WAKE_ARP | WAKE_MAGIC)
81
82/* USB related defines */
83#define BULK_IN_PIPE 1
84#define BULK_OUT_PIPE 2
85
86/* default autosuspend delay (mSec)*/
87#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
88
/* ethtool statistics names; order MUST match the field order of
 * struct lan78xx_statstage, since lan78xx_get_stats() copies that
 * struct out as a flat u32 array.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
138
/* Hardware statistics block as returned by USB_VENDOR_REQUEST_GET_STATS
 * (little-endian on the wire).  Field order MUST match lan78xx_gstrings;
 * both are walked as flat u32 arrays.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
188
189struct lan78xx_net;
190
/* Driver-private filtering state, reached via dev->data[0].
 * Shadow copies of the RFE/MAF/VHF hardware tables are kept here and
 * flushed to the chip from the deferred work items (register writes
 * need a sleepable context).
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;			/* shadow of RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;			/* enabled WAKE_* option bits */
};
203
/* Lifecycle state of an skb as it moves through the URB queues
 * (stored in struct skb_data inside skb->cb).
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
213
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb's transfer */
	struct lan78xx_net *dev;
	enum skb_state state;	/* where the skb is in its tx/rx lifecycle */
	size_t length;		/* payload length for accounting */
};
220
/* Context handed to asynchronous control-transfer completions. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
225
226#define EVENT_TX_HALT 0
227#define EVENT_RX_HALT 1
228#define EVENT_RX_MEMORY 2
229#define EVENT_STS_SPLIT 3
230#define EVENT_LINK_RESET 4
231#define EVENT_RX_PAUSED 5
232#define EVENT_DEV_WAKING 6
233#define EVENT_DEV_ASLEEP 7
234#define EVENT_DEV_OPEN 8
235
/* Per-device state for one LAN78xx USB network adapter. */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;		/* points at struct lan78xx_priv */

	int rx_qlen;			/* rx URB queue depth */
	int tx_qlen;			/* tx URB queue depth */
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;	/* completed URBs awaiting bh */
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;	/* completion bottom half */
	struct delayed_work wq;		/* kevent work (see EVENT_* bits) */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;			/* netif_msg_* verbosity mask */

	struct urb *urb_intr;		/* interrupt (status) endpoint URB */
	struct usb_anchor deferred;	/* URBs deferred while suspended */

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;	/* count any extra framing */
	size_t rx_urb_size;	/* size for rx urbs */

	unsigned long flags;		/* EVENT_* bits for deferred work */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;	/* throttled-rx resume timer */

	unsigned long data[5];		/* data[0]: struct lan78xx_priv * */

	int link_on;			/* cached link state for link_reset */
	u8 mdix_ctrl;			/* cached ETH_TP_MDI* setting */

	u32 devid;			/* chip ID/revision from ID_REV */
	struct mii_bus *mdiobus;
};
284
/* use ethtool to change the level for any given device */
/* -1 keeps the per-device default set at bind time */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
289
290static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
291{
292 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
293 int ret;
294
55d7de9d
WH
295 if (!buf)
296 return -ENOMEM;
297
298 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
299 USB_VENDOR_REQUEST_READ_REGISTER,
300 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
301 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
302 if (likely(ret >= 0)) {
303 le32_to_cpus(buf);
304 *data = *buf;
305 } else {
306 netdev_warn(dev->net,
307 "Failed to read register index 0x%08x. ret = %d",
308 index, ret);
309 }
310
311 kfree(buf);
312
313 return ret;
314}
315
316static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
317{
318 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
319 int ret;
320
55d7de9d
WH
321 if (!buf)
322 return -ENOMEM;
323
324 *buf = data;
325 cpu_to_le32s(buf);
326
327 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
328 USB_VENDOR_REQUEST_WRITE_REGISTER,
329 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
330 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
331 if (unlikely(ret < 0)) {
332 netdev_warn(dev->net,
333 "Failed to write register index 0x%08x. ret = %d",
334 index, ret);
335 }
336
337 kfree(buf);
338
339 return ret;
340}
341
342static int lan78xx_read_stats(struct lan78xx_net *dev,
343 struct lan78xx_statstage *data)
344{
345 int ret = 0;
346 int i;
347 struct lan78xx_statstage *stats;
348 u32 *src;
349 u32 *dst;
350
55d7de9d
WH
351 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
352 if (!stats)
353 return -ENOMEM;
354
355 ret = usb_control_msg(dev->udev,
356 usb_rcvctrlpipe(dev->udev, 0),
357 USB_VENDOR_REQUEST_GET_STATS,
358 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
359 0,
360 0,
361 (void *)stats,
362 sizeof(*stats),
363 USB_CTRL_SET_TIMEOUT);
364 if (likely(ret >= 0)) {
365 src = (u32 *)stats;
366 dst = (u32 *)data;
367 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
368 le32_to_cpus(&src[i]);
369 dst[i] = src[i];
370 }
371 } else {
372 netdev_warn(dev->net,
373 "Failed to read stat ret = 0x%x", ret);
374 }
375
376 kfree(stats);
377
378 return ret;
379}
380
381/* Loop until the read is completed with timeout called with phy_mutex held */
382static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
383{
384 unsigned long start_time = jiffies;
385 u32 val;
386 int ret;
387
388 do {
389 ret = lan78xx_read_reg(dev, MII_ACC, &val);
390 if (unlikely(ret < 0))
391 return -EIO;
392
393 if (!(val & MII_ACC_MII_BUSY_))
394 return 0;
395 } while (!time_after(jiffies, start_time + HZ));
396
397 return -EIO;
398}
399
400static inline u32 mii_access(int id, int index, int read)
401{
402 u32 ret;
403
404 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
405 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
406 if (read)
407 ret |= MII_ACC_MII_READ_;
408 else
409 ret |= MII_ACC_MII_WRITE_;
410 ret |= MII_ACC_MII_BUSY_;
411
412 return ret;
413}
414
55d7de9d
WH
415static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
416{
417 unsigned long start_time = jiffies;
418 u32 val;
419 int ret;
420
421 do {
422 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
423 if (unlikely(ret < 0))
424 return -EIO;
425
426 if (!(val & E2P_CMD_EPC_BUSY_) ||
427 (val & E2P_CMD_EPC_TIMEOUT_))
428 break;
429 usleep_range(40, 100);
430 } while (!time_after(jiffies, start_time + HZ));
431
432 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
433 netdev_warn(dev->net, "EEPROM read operation timeout");
434 return -EIO;
435 }
436
437 return 0;
438}
439
440static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
441{
442 unsigned long start_time = jiffies;
443 u32 val;
444 int ret;
445
446 do {
447 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
448 if (unlikely(ret < 0))
449 return -EIO;
450
451 if (!(val & E2P_CMD_EPC_BUSY_))
452 return 0;
453
454 usleep_range(40, 100);
455 } while (!time_after(jiffies, start_time + HZ));
456
457 netdev_warn(dev->net, "EEPROM is busy");
458 return -EIO;
459}
460
461static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
462 u32 length, u8 *data)
463{
464 u32 val;
465 int i, ret;
466
55d7de9d
WH
467 ret = lan78xx_eeprom_confirm_not_busy(dev);
468 if (ret)
469 return ret;
470
471 for (i = 0; i < length; i++) {
472 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
473 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
474 ret = lan78xx_write_reg(dev, E2P_CMD, val);
475 if (unlikely(ret < 0))
476 return -EIO;
477
478 ret = lan78xx_wait_eeprom(dev);
479 if (ret < 0)
480 return ret;
481
482 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
483 if (unlikely(ret < 0))
484 return -EIO;
485
486 data[i] = val & 0xFF;
487 offset++;
488 }
489
490 return 0;
491}
492
493static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
494 u32 length, u8 *data)
495{
496 u8 sig;
497 int ret;
498
499 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
500 if ((ret == 0) && (sig == EEPROM_INDICATOR))
501 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
502 else
503 ret = -EINVAL;
504
505 return ret;
506}
507
508static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
509 u32 length, u8 *data)
510{
511 u32 val;
512 int i, ret;
513
55d7de9d
WH
514 ret = lan78xx_eeprom_confirm_not_busy(dev);
515 if (ret)
516 return ret;
517
518 /* Issue write/erase enable command */
519 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
520 ret = lan78xx_write_reg(dev, E2P_CMD, val);
521 if (unlikely(ret < 0))
522 return -EIO;
523
524 ret = lan78xx_wait_eeprom(dev);
525 if (ret < 0)
526 return ret;
527
528 for (i = 0; i < length; i++) {
529 /* Fill data register */
530 val = data[i];
531 ret = lan78xx_write_reg(dev, E2P_DATA, val);
532 if (ret < 0)
533 return ret;
534
535 /* Send "write" command */
536 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
537 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
538 ret = lan78xx_write_reg(dev, E2P_CMD, val);
539 if (ret < 0)
540 return ret;
541
542 ret = lan78xx_wait_eeprom(dev);
543 if (ret < 0)
544 return ret;
545
546 offset++;
547 }
548
549 return 0;
550}
551
552static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
553 u32 length, u8 *data)
554{
555 int i;
556 int ret;
557 u32 buf;
558 unsigned long timeout;
559
560 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
561
562 if (buf & OTP_PWR_DN_PWRDN_N_) {
563 /* clear it and wait to be cleared */
564 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
565
566 timeout = jiffies + HZ;
567 do {
568 usleep_range(1, 10);
569 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
570 if (time_after(jiffies, timeout)) {
571 netdev_warn(dev->net,
572 "timeout on OTP_PWR_DN");
573 return -EIO;
574 }
575 } while (buf & OTP_PWR_DN_PWRDN_N_);
576 }
577
578 for (i = 0; i < length; i++) {
579 ret = lan78xx_write_reg(dev, OTP_ADDR1,
580 ((offset + i) >> 8) & OTP_ADDR1_15_11);
581 ret = lan78xx_write_reg(dev, OTP_ADDR2,
582 ((offset + i) & OTP_ADDR2_10_3));
583
584 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
585 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
586
587 timeout = jiffies + HZ;
588 do {
589 udelay(1);
590 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
591 if (time_after(jiffies, timeout)) {
592 netdev_warn(dev->net,
593 "timeout on OTP_STATUS");
594 return -EIO;
595 }
596 } while (buf & OTP_STATUS_BUSY_);
597
598 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
599
600 data[i] = (u8)(buf & 0xFF);
601 }
602
603 return 0;
604}
605
9fb6066d
WH
606static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
607 u32 length, u8 *data)
608{
609 int i;
610 int ret;
611 u32 buf;
612 unsigned long timeout;
613
614 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
615
616 if (buf & OTP_PWR_DN_PWRDN_N_) {
617 /* clear it and wait to be cleared */
618 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
619
620 timeout = jiffies + HZ;
621 do {
622 udelay(1);
623 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
624 if (time_after(jiffies, timeout)) {
625 netdev_warn(dev->net,
626 "timeout on OTP_PWR_DN completion");
627 return -EIO;
628 }
629 } while (buf & OTP_PWR_DN_PWRDN_N_);
630 }
631
632 /* set to BYTE program mode */
633 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
634
635 for (i = 0; i < length; i++) {
636 ret = lan78xx_write_reg(dev, OTP_ADDR1,
637 ((offset + i) >> 8) & OTP_ADDR1_15_11);
638 ret = lan78xx_write_reg(dev, OTP_ADDR2,
639 ((offset + i) & OTP_ADDR2_10_3));
640 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
641 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
642 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
643
644 timeout = jiffies + HZ;
645 do {
646 udelay(1);
647 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
648 if (time_after(jiffies, timeout)) {
649 netdev_warn(dev->net,
650 "Timeout on OTP_STATUS completion");
651 return -EIO;
652 }
653 } while (buf & OTP_STATUS_BUSY_);
654 }
655
656 return 0;
657}
658
55d7de9d
WH
659static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
660 u32 length, u8 *data)
661{
662 u8 sig;
663 int ret;
664
665 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
666
667 if (ret == 0) {
668 if (sig == OTP_INDICATOR_1)
669 offset = offset;
670 else if (sig == OTP_INDICATOR_2)
671 offset += 0x100;
672 else
673 ret = -EINVAL;
674 ret = lan78xx_read_raw_otp(dev, offset, length, data);
675 }
676
677 return ret;
678}
679
680static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
681{
682 int i, ret;
683
684 for (i = 0; i < 100; i++) {
685 u32 dp_sel;
686
687 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
688 if (unlikely(ret < 0))
689 return -EIO;
690
691 if (dp_sel & DP_SEL_DPRDY_)
692 return 0;
693
694 usleep_range(40, 100);
695 }
696
697 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
698
699 return -EIO;
700}
701
702static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
703 u32 addr, u32 length, u32 *buf)
704{
705 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
706 u32 dp_sel;
707 int i, ret;
708
709 if (usb_autopm_get_interface(dev->intf) < 0)
710 return 0;
711
712 mutex_lock(&pdata->dataport_mutex);
713
714 ret = lan78xx_dataport_wait_not_busy(dev);
715 if (ret < 0)
716 goto done;
717
718 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
719
720 dp_sel &= ~DP_SEL_RSEL_MASK_;
721 dp_sel |= ram_select;
722 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
723
724 for (i = 0; i < length; i++) {
725 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
726
727 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
728
729 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
730
731 ret = lan78xx_dataport_wait_not_busy(dev);
732 if (ret < 0)
733 goto done;
734 }
735
736done:
737 mutex_unlock(&pdata->dataport_mutex);
738 usb_autopm_put_interface(dev->intf);
739
740 return ret;
741}
742
743static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
744 int index, u8 addr[ETH_ALEN])
745{
746 u32 temp;
747
748 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
749 temp = addr[3];
750 temp = addr[2] | (temp << 8);
751 temp = addr[1] | (temp << 8);
752 temp = addr[0] | (temp << 8);
753 pdata->pfilter_table[index][1] = temp;
754 temp = addr[5];
755 temp = addr[4] | (temp << 8);
756 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
757 pdata->pfilter_table[index][0] = temp;
758 }
759}
760
761/* returns hash bit number for given MAC address */
762static inline u32 lan78xx_hash(char addr[ETH_ALEN])
763{
764 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
765}
766
767static void lan78xx_deferred_multicast_write(struct work_struct *param)
768{
769 struct lan78xx_priv *pdata =
770 container_of(param, struct lan78xx_priv, set_multicast);
771 struct lan78xx_net *dev = pdata->dev;
772 int i;
773 int ret;
774
775 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
776 pdata->rfe_ctl);
777
778 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
779 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
780
781 for (i = 1; i < NUM_OF_MAF; i++) {
782 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
783 ret = lan78xx_write_reg(dev, MAF_LO(i),
784 pdata->pfilter_table[i][1]);
785 ret = lan78xx_write_reg(dev, MAF_HI(i),
786 pdata->pfilter_table[i][0]);
787 }
788
789 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
790}
791
792static void lan78xx_set_multicast(struct net_device *netdev)
793{
794 struct lan78xx_net *dev = netdev_priv(netdev);
795 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
796 unsigned long flags;
797 int i;
798
799 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
800
801 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
802 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
803
804 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
805 pdata->mchash_table[i] = 0;
806 /* pfilter_table[0] has own HW address */
807 for (i = 1; i < NUM_OF_MAF; i++) {
808 pdata->pfilter_table[i][0] =
809 pdata->pfilter_table[i][1] = 0;
810 }
811
812 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
813
814 if (dev->net->flags & IFF_PROMISC) {
815 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
816 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
817 } else {
818 if (dev->net->flags & IFF_ALLMULTI) {
819 netif_dbg(dev, drv, dev->net,
820 "receive all multicast enabled");
821 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
822 }
823 }
824
825 if (netdev_mc_count(dev->net)) {
826 struct netdev_hw_addr *ha;
827 int i;
828
829 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
830
831 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
832
833 i = 1;
834 netdev_for_each_mc_addr(ha, netdev) {
835 /* set first 32 into Perfect Filter */
836 if (i < 33) {
837 lan78xx_set_addr_filter(pdata, i, ha->addr);
838 } else {
839 u32 bitnum = lan78xx_hash(ha->addr);
840
841 pdata->mchash_table[bitnum / 32] |=
842 (1 << (bitnum % 32));
843 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
844 }
845 i++;
846 }
847 }
848
849 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
850
851 /* defer register writes to a sleepable context */
852 schedule_work(&pdata->set_multicast);
853}
854
855static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
856 u16 lcladv, u16 rmtadv)
857{
858 u32 flow = 0, fct_flow = 0;
859 int ret;
860
861 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
862
863 if (cap & FLOW_CTRL_TX)
864 flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
865
866 if (cap & FLOW_CTRL_RX)
867 flow |= FLOW_CR_RX_FCEN_;
868
869 if (dev->udev->speed == USB_SPEED_SUPER)
870 fct_flow = 0x817;
871 else if (dev->udev->speed == USB_SPEED_HIGH)
872 fct_flow = 0x211;
873
874 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
875 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
876 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
877
878 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
879
880 /* threshold value should be set before enabling flow */
881 ret = lan78xx_write_reg(dev, FLOW, flow);
882
883 return 0;
884}
885
/* Handle a PHY interrupt: acknowledge it, re-read link state, and on a
 * link transition either reset the MAC (link down) or program USB LPM
 * and MAC flow control to match the negotiated link (link up).
 * Called from the kevent work queue (EVENT_LINK_RESET).
 * Returns a lan78xx_read/write_reg result; negative on failure.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status (read-to-clear) */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down since last time */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* tell phylib the MAC saw link-down */
		phy_mac_interrupt(phydev, 0);
	} else if (phydev->link && !dev->link_on) {
		/* link came up since last time */
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		/* ack any PHY interrupt raised meanwhile */
		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			/* U1/U2 LPM policy depends on Ethernet speed */
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		/* program pause frames from the negotiated abilities */
		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		/* tell phylib the MAC saw link-up */
		phy_mac_interrupt(phydev, 1);
	}

	return ret;
}
962
963/* some work can't be done in tasklets, so we use keventd
964 *
965 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
966 * but tasklet_schedule() doesn't. hope the failure is rare.
967 */
968void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
969{
970 set_bit(work, &dev->flags);
971 if (!schedule_delayed_work(&dev->wq, 0))
972 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
973}
974
975static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
976{
977 u32 intdata;
978
979 if (urb->actual_length != 4) {
980 netdev_warn(dev->net,
981 "unexpected urb length %d", urb->actual_length);
982 return;
983 }
984
985 memcpy(&intdata, urb->transfer_buffer, 4);
986 le32_to_cpus(&intdata);
987
988 if (intdata & INT_ENP_PHY_INT) {
989 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
990 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
991 } else
992 netdev_warn(dev->net,
993 "unexpected interrupt: 0x%08x\n", intdata);
994}
995
996static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
997{
998 return MAX_EEPROM_SIZE;
999}
1000
1001static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1002 struct ethtool_eeprom *ee, u8 *data)
1003{
1004 struct lan78xx_net *dev = netdev_priv(netdev);
1005
1006 ee->magic = LAN78XX_EEPROM_MAGIC;
1007
1008 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1009}
1010
1011static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1012 struct ethtool_eeprom *ee, u8 *data)
1013{
1014 struct lan78xx_net *dev = netdev_priv(netdev);
1015
1016 /* Allow entire eeprom update only */
1017 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1018 (ee->offset == 0) &&
1019 (ee->len == 512) &&
1020 (data[0] == EEPROM_INDICATOR))
1021 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1022 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1023 (ee->offset == 0) &&
1024 (ee->len == 512) &&
1025 (data[0] == OTP_INDICATOR_1))
9fb6066d 1026 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
55d7de9d
WH
1027
1028 return -EINVAL;
1029}
1030
1031static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1032 u8 *data)
1033{
1034 if (stringset == ETH_SS_STATS)
1035 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1036}
1037
1038static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1039{
1040 if (sset == ETH_SS_STATS)
1041 return ARRAY_SIZE(lan78xx_gstrings);
1042 else
1043 return -EOPNOTSUPP;
1044}
1045
1046static void lan78xx_get_stats(struct net_device *netdev,
1047 struct ethtool_stats *stats, u64 *data)
1048{
1049 struct lan78xx_net *dev = netdev_priv(netdev);
1050 struct lan78xx_statstage lan78xx_stat;
1051 u32 *p;
1052 int i;
1053
1054 if (usb_autopm_get_interface(dev->intf) < 0)
1055 return;
1056
1057 if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1058 p = (u32 *)&lan78xx_stat;
1059 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1060 data[i] = p[i];
1061 }
1062
1063 usb_autopm_put_interface(dev->intf);
1064}
1065
1066static void lan78xx_get_wol(struct net_device *netdev,
1067 struct ethtool_wolinfo *wol)
1068{
1069 struct lan78xx_net *dev = netdev_priv(netdev);
1070 int ret;
1071 u32 buf;
1072 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1073
1074 if (usb_autopm_get_interface(dev->intf) < 0)
1075 return;
1076
1077 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1078 if (unlikely(ret < 0)) {
1079 wol->supported = 0;
1080 wol->wolopts = 0;
1081 } else {
1082 if (buf & USB_CFG_RMT_WKP_) {
1083 wol->supported = WAKE_ALL;
1084 wol->wolopts = pdata->wol;
1085 } else {
1086 wol->supported = 0;
1087 wol->wolopts = 0;
1088 }
1089 }
1090
1091 usb_autopm_put_interface(dev->intf);
1092}
1093
1094static int lan78xx_set_wol(struct net_device *netdev,
1095 struct ethtool_wolinfo *wol)
1096{
1097 struct lan78xx_net *dev = netdev_priv(netdev);
1098 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1099 int ret;
1100
1101 ret = usb_autopm_get_interface(dev->intf);
1102 if (ret < 0)
1103 return ret;
1104
1105 pdata->wol = 0;
1106 if (wol->wolopts & WAKE_UCAST)
1107 pdata->wol |= WAKE_UCAST;
1108 if (wol->wolopts & WAKE_MCAST)
1109 pdata->wol |= WAKE_MCAST;
1110 if (wol->wolopts & WAKE_BCAST)
1111 pdata->wol |= WAKE_BCAST;
1112 if (wol->wolopts & WAKE_MAGIC)
1113 pdata->wol |= WAKE_MAGIC;
1114 if (wol->wolopts & WAKE_PHY)
1115 pdata->wol |= WAKE_PHY;
1116 if (wol->wolopts & WAKE_ARP)
1117 pdata->wol |= WAKE_ARP;
1118
1119 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1120
ce85e13a
WH
1121 phy_ethtool_set_wol(netdev->phydev, wol);
1122
55d7de9d
WH
1123 usb_autopm_put_interface(dev->intf);
1124
1125 return ret;
1126}
1127
1128static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1129{
1130 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1131 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1132 int ret;
1133 u32 buf;
55d7de9d
WH
1134
1135 ret = usb_autopm_get_interface(dev->intf);
1136 if (ret < 0)
1137 return ret;
1138
ce85e13a
WH
1139 ret = phy_ethtool_get_eee(phydev, edata);
1140 if (ret < 0)
1141 goto exit;
1142
55d7de9d
WH
1143 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1144 if (buf & MAC_CR_EEE_EN_) {
55d7de9d 1145 edata->eee_enabled = true;
ce85e13a
WH
1146 edata->eee_active = !!(edata->advertised &
1147 edata->lp_advertised);
55d7de9d
WH
1148 edata->tx_lpi_enabled = true;
1149 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1150 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1151 edata->tx_lpi_timer = buf;
1152 } else {
55d7de9d
WH
1153 edata->eee_enabled = false;
1154 edata->eee_active = false;
55d7de9d
WH
1155 edata->tx_lpi_enabled = false;
1156 edata->tx_lpi_timer = 0;
1157 }
1158
ce85e13a
WH
1159 ret = 0;
1160exit:
55d7de9d
WH
1161 usb_autopm_put_interface(dev->intf);
1162
ce85e13a 1163 return ret;
55d7de9d
WH
1164}
1165
/* ethtool set_eee: enable/disable Energy Efficient Ethernet.
 *
 * Enabling sets MAC_CR_EEE_EN_, pushes the advertisement to the PHY and
 * programs the TX LPI request delay (same microsecond unit as
 * edata->tx_lpi_timer). Disabling only clears the MAC enable bit.
 *
 * NOTE(review): intermediate register-access errors in 'ret' are
 * discarded and the function always returns 0 — TODO confirm whether
 * errors should propagate to ethtool.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1195
1196static u32 lan78xx_get_link(struct net_device *net)
1197{
ce85e13a 1198 phy_read_status(net->phydev);
55d7de9d 1199
ce85e13a 1200 return net->phydev->link;
55d7de9d
WH
1201}
1202
1203int lan78xx_nway_reset(struct net_device *net)
1204{
ce85e13a 1205 return phy_start_aneg(net->phydev);
55d7de9d
WH
1206}
1207
1208static void lan78xx_get_drvinfo(struct net_device *net,
1209 struct ethtool_drvinfo *info)
1210{
1211 struct lan78xx_net *dev = netdev_priv(net);
1212
1213 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1214 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1215 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1216}
1217
1218static u32 lan78xx_get_msglevel(struct net_device *net)
1219{
1220 struct lan78xx_net *dev = netdev_priv(net);
1221
1222 return dev->msg_enable;
1223}
1224
1225static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1226{
1227 struct lan78xx_net *dev = netdev_priv(net);
1228
1229 dev->msg_enable = level;
1230}
1231
758c5c11
WH
/* Read the LAN88xx extended mode-control register (MDI-X state).
 *
 * The register lives in extended page space 1, so the access is
 * bracketed by page-select writes; page 0 is restored before return.
 */
static int lan78xx_get_mdix_status(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int buf;

	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

	return buf;
}
1243
1244static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1245{
1246 struct lan78xx_net *dev = netdev_priv(net);
1247 struct phy_device *phydev = net->phydev;
1248 int buf;
1249
1250 if (mdix_ctrl == ETH_TP_MDI) {
1251 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1252 LAN88XX_EXT_PAGE_SPACE_1);
1253 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1254 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1255 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1256 buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1257 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1258 LAN88XX_EXT_PAGE_SPACE_0);
1259 } else if (mdix_ctrl == ETH_TP_MDI_X) {
1260 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1261 LAN88XX_EXT_PAGE_SPACE_1);
1262 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1263 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1264 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1265 buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1266 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1267 LAN88XX_EXT_PAGE_SPACE_0);
1268 } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1269 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1270 LAN88XX_EXT_PAGE_SPACE_1);
1271 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1272 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1273 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1274 buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1275 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1276 LAN88XX_EXT_PAGE_SPACE_0);
1277 }
1278 dev->mdix_ctrl = mdix_ctrl;
1279}
1280
55d7de9d
WH
/* ethtool get_settings: report link settings from the PHY plus the
 * MDI-X status read from the LAN88xx extended register.
 */
static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	int buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* speed/duplex/autoneg from phylib */
	ret = phy_ethtool_gset(phydev, cmd);

	buf = lan78xx_get_mdix_status(net);

	/* translate hardware MDI-X bits into ethtool encoding */
	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
		cmd->eth_tp_mdix = ETH_TP_MDI;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_X;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1312
/* ethtool set_settings: apply MDI-X control and speed/duplex/autoneg.
 *
 * When autoneg is off, the link is bounced by briefly setting
 * BMCR_LOOPBACK so the partner sees a link-down/up and the forced mode
 * takes effect.
 */
static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* only touch the PHY page registers if the mode actually changes */
	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
		lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
	}

	/* change speed & duplex */
	ret = phy_ethtool_sset(phydev, cmd);

	if (!cmd->autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1343
/* ethtool operations table; get/set_settings additionally handle the
 * LAN88xx MDI-X control that phylib does not cover.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= lan78xx_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_settings	= lan78xx_get_settings,
	.set_settings	= lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
};
1363
1364static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1365{
55d7de9d
WH
1366 if (!netif_running(netdev))
1367 return -EINVAL;
1368
ce85e13a 1369 return phy_mii_ioctl(netdev->phydev, rq, cmd);
55d7de9d
WH
1370}
1371
1372static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1373{
1374 u32 addr_lo, addr_hi;
1375 int ret;
1376 u8 addr[6];
1377
1378 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1379 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1380
1381 addr[0] = addr_lo & 0xFF;
1382 addr[1] = (addr_lo >> 8) & 0xFF;
1383 addr[2] = (addr_lo >> 16) & 0xFF;
1384 addr[3] = (addr_lo >> 24) & 0xFF;
1385 addr[4] = addr_hi & 0xFF;
1386 addr[5] = (addr_hi >> 8) & 0xFF;
1387
1388 if (!is_valid_ether_addr(addr)) {
1389 /* reading mac address from EEPROM or OTP */
1390 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1391 addr) == 0) ||
1392 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1393 addr) == 0)) {
1394 if (is_valid_ether_addr(addr)) {
1395 /* eeprom values are valid so use them */
1396 netif_dbg(dev, ifup, dev->net,
1397 "MAC address read from EEPROM");
1398 } else {
1399 /* generate random MAC */
1400 random_ether_addr(addr);
1401 netif_dbg(dev, ifup, dev->net,
1402 "MAC address set to random addr");
1403 }
1404
1405 addr_lo = addr[0] | (addr[1] << 8) |
1406 (addr[2] << 16) | (addr[3] << 24);
1407 addr_hi = addr[4] | (addr[5] << 8);
1408
1409 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1410 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1411 } else {
1412 /* generate random MAC */
1413 random_ether_addr(addr);
1414 netif_dbg(dev, ifup, dev->net,
1415 "MAC address set to random addr");
1416 }
1417 }
1418
1419 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1420 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1421
1422 ether_addr_copy(dev->net->dev_addr, addr);
1423}
1424
ce85e13a
WH
/* MDIO read and write wrappers for phylib */

/* mii_bus read op: read one PHY register through the MII_ACC/MII_DATA
 * USB registers. Serialised by dev->phy_mutex; holds a USB autopm
 * reference for the duration.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	/* the read completes once the busy bit clears */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* return the 16-bit register value (or a negative errno above) */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
1460
1461static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1462 u16 regval)
1463{
1464 struct lan78xx_net *dev = bus->priv;
1465 u32 val, addr;
1466 int ret;
1467
1468 ret = usb_autopm_get_interface(dev->intf);
1469 if (ret < 0)
1470 return ret;
1471
1472 mutex_lock(&dev->phy_mutex);
1473
1474 /* confirm MII not busy */
1475 ret = lan78xx_phy_wait_not_busy(dev);
1476 if (ret < 0)
1477 goto done;
1478
1479 val = (u32)regval;
1480 ret = lan78xx_write_reg(dev, MII_DATA, val);
1481
1482 /* set the address, index & direction (write to PHY) */
1483 addr = mii_access(phy_id, idx, MII_WRITE);
1484 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1485
1486 ret = lan78xx_phy_wait_not_busy(dev);
1487 if (ret < 0)
1488 goto done;
1489
1490done:
1491 mutex_unlock(&dev->phy_mutex);
1492 usb_autopm_put_interface(dev->intf);
1493 return 0;
1494}
1495
/* Allocate and register the MDIO bus that fronts the chip's internal
 * MII access registers. Returns 0 on success or a negative errno; the
 * bus is freed again on registration failure.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* unique bus id derived from the USB topology */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
	case 0x78000000:
	case 0x78500000:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1534
/* Unregister and free the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1540
/* phylib link-change callback. Intentionally empty: the driver tracks
 * link state itself via the device's interrupt endpoint (see the
 * EVENT_LINK_RESET handling), so phylib notification needs no action.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}
1545
1546static int lan78xx_phy_init(struct lan78xx_net *dev)
1547{
ce85e13a
WH
1548 int ret;
1549 struct phy_device *phydev = dev->net->phydev;
55d7de9d 1550
ce85e13a
WH
1551 phydev = phy_find_first(dev->mdiobus);
1552 if (!phydev) {
1553 netdev_err(dev->net, "no PHY found\n");
1554 return -EIO;
1555 }
55d7de9d 1556
e4953910
WH
1557 /* Enable PHY interrupts.
1558 * We handle our own interrupt
1559 */
1560 ret = phy_read(phydev, LAN88XX_INT_STS);
1561 ret = phy_write(phydev, LAN88XX_INT_MASK,
1562 LAN88XX_INT_MASK_MDINTPIN_EN_ |
1563 LAN88XX_INT_MASK_LINK_CHANGE_);
1564
1565 phydev->irq = PHY_IGNORE_INTERRUPT;
1566
ce85e13a
WH
1567 ret = phy_connect_direct(dev->net, phydev,
1568 lan78xx_link_status_change,
1569 PHY_INTERFACE_MODE_GMII);
1570 if (ret) {
1571 netdev_err(dev->net, "can't attach PHY to %s\n",
1572 dev->mdiobus->id);
1573 return -EIO;
1574 }
55d7de9d
WH
1575
1576 /* set to AUTOMDIX */
758c5c11 1577 lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
55d7de9d 1578
ce85e13a
WH
1579 /* MAC doesn't support 1000T Half */
1580 phydev->supported &= ~SUPPORTED_1000baseT_Half;
1581 phydev->supported |= (SUPPORTED_10baseT_Half |
1582 SUPPORTED_10baseT_Full |
1583 SUPPORTED_100baseT_Half |
1584 SUPPORTED_100baseT_Full |
1585 SUPPORTED_1000baseT_Full |
1586 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1587 genphy_config_aneg(phydev);
1588
ce85e13a 1589 phy_start(phydev);
55d7de9d
WH
1590
1591 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1592
1593 return 0;
1594}
1595
/* Program the maximum RX frame length into MAC_RX.
 *
 * The receiver is temporarily disabled while the size field changes,
 * then re-enabled if it was running.
 *
 * NOTE(review): register access errors in 'ret' are discarded and the
 * function always returns 0 — TODO confirm whether callers should see
 * failures.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
1624
/* Asynchronously unlink every not-yet-unlinked URB on queue q.
 *
 * Returns the number of URBs whose unlink was issued. The queue lock is
 * dropped around each usb_unlink_urb() call (it may complete inline),
 * so the walk restarts from the head each iteration, using the
 * unlink_start state marker to skip entries already processed.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the first entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1669
/* ndo_change_mtu: validate and apply a new MTU.
 *
 * Rejects sizes above MAX_SINGLE_PACKET_SIZE and link-level sizes that
 * are an exact multiple of the endpoint max packet (which would force a
 * zero-length packet after each frame). If the URB buffer size grows,
 * in-flight RX URBs are unlinked so they get resubmitted larger.
 *
 * NOTE(review): the error from lan78xx_set_rx_max_frame_length() is
 * ignored — TODO confirm this is intentional.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	if (new_mtu <= 0)
		return -EINVAL;
	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
1704
/* ndo_set_mac_address: program a new station address.
 *
 * Refused while the interface is running (-EBUSY) or when the address
 * is invalid (-EADDRNOTAVAIL). Writes the address into RX_ADDRL/H; the
 * MAF perfect-filter entry is refreshed on the next lan78xx_reset().
 */
int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* little-endian packing: byte 0 in the low bits of RX_ADDRL */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	return 0;
}
1732
/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	/* rfe_ctl is shared with the multicast/VLAN paths; update the
	 * cached value under the lock, write the register after dropping
	 * it (USB register writes may sleep).
	 */
	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
1763
/* Workqueue handler: flush the cached VLAN filter table to the device.
 * Runs in process context because the dataport write may sleep.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
1773
1774static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1775 __be16 proto, u16 vid)
1776{
1777 struct lan78xx_net *dev = netdev_priv(netdev);
1778 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1779 u16 vid_bit_index;
1780 u16 vid_dword_index;
1781
1782 vid_dword_index = (vid >> 5) & 0x7F;
1783 vid_bit_index = vid & 0x1F;
1784
1785 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1786
1787 /* defer register writes to a sleepable context */
1788 schedule_work(&pdata->set_vlan);
1789
1790 return 0;
1791}
1792
1793static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1794 __be16 proto, u16 vid)
1795{
1796 struct lan78xx_net *dev = netdev_priv(netdev);
1797 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1798 u16 vid_bit_index;
1799 u16 vid_dword_index;
1800
1801 vid_dword_index = (vid >> 5) & 0x7F;
1802 vid_bit_index = vid & 0x1F;
1803
1804 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1805
1806 /* defer register writes to a sleepable context */
1807 schedule_work(&pdata->set_vlan);
1808
1809 return 0;
1810}
1811
/* Program the USB Latency Tolerance Messaging (LTM) registers.
 *
 * If LTM is enabled in USB_CFG1, values are loaded from EEPROM (falling
 * back to OTP); otherwise all six registers are written as zero.
 *
 * NOTE(review): the return of the USB_CFG1 read is ignored — if it
 * fails, 'buf' is used uninitialized. TODO confirm and guard.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
1850
/* Full device initialisation: lite reset, MAC address, USB/FIFO/flow
 * configuration, RFE setup, PHY reset, then TX/RX enable. The register
 * sequence is order-sensitive (hardware bring-up); each phase is noted
 * below. Returns 0 on success, -EIO on reset timeouts.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* issue a "lite reset" and poll until the bit self-clears */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->devid = buf;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* burst cap and queue depths depend on the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear stale interrupts, disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable TX path: MAC transmitter then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable RX path: MAC receiver then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
1982
/* ndo_open: reset the device, bring up the PHY, start the interrupt
 * URB for link-change notification, and open the TX queue. Link bring-up
 * itself is deferred to the EVENT_LINK_RESET kevent.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	/* drop the autopm reference on both success and failure paths */
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2023
/* Unlink all TX/RX URBs and wait for their completions to drain.
 * dev->wait is set so completion handlers wake us; cleared on exit.
 *
 * NOTE(review): the wait loop continues only while ALL three queues
 * (rxq, txq, done) are non-empty — '&&' rather than '||' means the loop
 * exits as soon as any one queue drains. TODO confirm this matches the
 * intended "wait until everything is done" semantics.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2049
/* ndo_stop: disconnect the PHY, stop the queue, drain all URBs and
 * quiesce deferred work.
 *
 * NOTE(review): the trailing usb_autopm_put_interface() has no matching
 * get in this function — presumably it balances a reference taken when
 * the device was opened/linked; TODO verify the autopm pairing.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* detach phylib before tearing down the data path */
	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2084
/* Flatten a possibly-paged skb into a single linear buffer so the TX
 * command words and payload can go out in one bulk URB.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2089
2090static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2091 struct sk_buff *skb, gfp_t flags)
2092{
2093 u32 tx_cmd_a, tx_cmd_b;
2094
2095 if (skb_headroom(skb) < TX_OVERHEAD) {
2096 struct sk_buff *skb2;
2097
2098 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2099 dev_kfree_skb_any(skb);
2100 skb = skb2;
2101 if (!skb)
2102 return NULL;
2103 }
2104
2105 if (lan78xx_linearize(skb) < 0)
2106 return NULL;
2107
2108 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2109
2110 if (skb->ip_summed == CHECKSUM_PARTIAL)
2111 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2112
2113 tx_cmd_b = 0;
2114 if (skb_is_gso(skb)) {
2115 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2116
2117 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2118
2119 tx_cmd_a |= TX_CMD_A_LSO_;
2120 }
2121
2122 if (skb_vlan_tag_present(skb)) {
2123 tx_cmd_a |= TX_CMD_A_IVTG_;
2124 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2125 }
2126
2127 skb_push(skb, 4);
2128 cpu_to_le32s(&tx_cmd_b);
2129 memcpy(skb->data, &tx_cmd_b, 4);
2130
2131 skb_push(skb, 4);
2132 cpu_to_le32s(&tx_cmd_a);
2133 memcpy(skb->data, &tx_cmd_a, 4);
2134
2135 return skb;
2136}
2137
/* Move skb from its active queue to dev->done, updating its state, and
 * kick the bottom-half tasklet when done transitions from empty.
 *
 * Locking: the source list lock is taken with irqsave and released with
 * plain spin_unlock, then the done-list lock is taken and released with
 * irqrestore — interrupts stay disabled across the whole hand-off, and
 * the two locks are never held simultaneously.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* schedule the tasklet only on the empty->non-empty transition */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2160
/* URB completion handler for TX bulk transfers (may run in interrupt
 * context). Updates counters, maps USB errors to recovery actions, and
 * hands the skb to the bottom half via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: defer a halt-clear to kevent */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* transient bus problems: stop feeding the device */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* presumably balances a get taken when the URB was submitted —
	 * the submit path is outside this view
	 */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2199
2200static void lan78xx_queue_skb(struct sk_buff_head *list,
2201 struct sk_buff *newsk, enum skb_state state)
2202{
2203 struct skb_data *entry = (struct skb_data *)newsk->cb;
2204
2205 __skb_queue_tail(list, newsk);
2206 entry->state = state;
2207}
2208
/* ndo_start_xmit: prepend TX command words and queue the frame on the
 * pending list; the bottom-half tasklet submits the actual URBs. The
 * queue is throttled once more than 10 frames are pending. Always
 * returns NETDEV_TX_OK (failed preps are counted as drops).
 */
netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	if (skb) {
		skb_tx_timestamp(skb);
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* simple backpressure: pause the stack past 10 pending */
		if (skb_queue_len(&dev->txq_pend) > 10)
			netif_stop_queue(net);
	} else {
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
2235
/* Scan the interface's altsettings for the bulk-in, bulk-out and
 * (optional) interrupt-in endpoints; store the resulting pipes in dev.
 * Returns 0 on success, -EINVAL if no altsetting has both bulk pipes.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN-direction interrupt eps count */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			/* remember the first endpoint of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2293
2294static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2295{
2296 struct lan78xx_priv *pdata = NULL;
2297 int ret;
2298 int i;
2299
2300 ret = lan78xx_get_endpoints(dev, intf);
2301
2302 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2303
2304 pdata = (struct lan78xx_priv *)(dev->data[0]);
2305 if (!pdata) {
2306 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2307 return -ENOMEM;
2308 }
2309
2310 pdata->dev = dev;
2311
2312 spin_lock_init(&pdata->rfe_ctl_lock);
2313 mutex_init(&pdata->dataport_mutex);
2314
2315 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2316
2317 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2318 pdata->vlan_table[i] = 0;
2319
2320 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2321
2322 dev->net->features = 0;
2323
2324 if (DEFAULT_TX_CSUM_ENABLE)
2325 dev->net->features |= NETIF_F_HW_CSUM;
2326
2327 if (DEFAULT_RX_CSUM_ENABLE)
2328 dev->net->features |= NETIF_F_RXCSUM;
2329
2330 if (DEFAULT_TSO_CSUM_ENABLE)
2331 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2332
2333 dev->net->hw_features = dev->net->features;
2334
2335 /* Init all registers */
2336 ret = lan78xx_reset(dev);
2337
ce85e13a
WH
2338 lan78xx_mdio_init(dev);
2339
55d7de9d
WH
2340 dev->net->flags |= IFF_MULTICAST;
2341
2342 pdata->wol = WAKE_MAGIC;
2343
2344 return 0;
2345}
2346
2347static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2348{
2349 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2350
ce85e13a
WH
2351 lan78xx_remove_mdio(dev);
2352
55d7de9d
WH
2353 if (pdata) {
2354 netif_dbg(dev, ifdown, dev->net, "free pdata");
2355 kfree(pdata);
2356 pdata = NULL;
2357 dev->data[0] = 0;
2358 }
2359}
2360
2361static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2362 struct sk_buff *skb,
2363 u32 rx_cmd_a, u32 rx_cmd_b)
2364{
2365 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2366 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2367 skb->ip_summed = CHECKSUM_NONE;
2368 } else {
2369 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2370 skb->ip_summed = CHECKSUM_COMPLETE;
2371 }
2372}
2373
2374void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2375{
2376 int status;
2377
2378 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2379 skb_queue_tail(&dev->rxq_pause, skb);
2380 return;
2381 }
2382
2383 skb->protocol = eth_type_trans(skb, dev->net);
2384 dev->net->stats.rx_packets++;
2385 dev->net->stats.rx_bytes += skb->len;
2386
2387 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2388 skb->len + sizeof(struct ethhdr), skb->protocol);
2389 memset(skb->cb, 0, sizeof(struct skb_data));
2390
2391 if (skb_defer_rx_timestamp(skb))
2392 return;
2393
2394 status = netif_rx(skb);
2395 if (status != NET_RX_SUCCESS)
2396 netif_dbg(dev, rx_err, dev->net,
2397 "netif_rx status %d\n", status);
2398}
2399
/* Parse a bulk-in buffer that may contain several concatenated frames.
 * Each frame is preceded by three little-endian command words (rx_cmd_a/b/c)
 * and padded so the next header starts 4-byte aligned (after RXW_PADDING).
 *
 * Returns 1 on success; the last frame in the batch is left in-place in
 * @skb (caller delivers it), earlier frames are cloned and delivered here.
 * Returns 0 on a malformed buffer or clone failure (caller counts an error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three command words off the front of the buffer;
		 * memcpy avoids unaligned-access problems
		 */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive-error flag set: skip this frame's payload */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* not the last frame: clone the header-owning skb and
			 * point the clone at this frame's payload
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2471
2472static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2473{
2474 if (!lan78xx_rx(dev, skb)) {
2475 dev->net->stats.rx_errors++;
2476 goto done;
2477 }
2478
2479 if (skb->len) {
2480 lan78xx_skb_return(dev, skb);
2481 return;
2482 }
2483
2484 netif_dbg(dev, rx_err, dev->net, "drop\n");
2485 dev->net->stats.rx_errors++;
2486done:
2487 skb_queue_tail(&dev->done, skb);
2488}
2489
2490static void rx_complete(struct urb *urb);
2491
/* Allocate a receive skb, attach it to @urb and submit the urb on the
 * bulk-in pipe.  On any failure both the skb and the urb are freed here;
 * on success ownership passes to the USB core until rx_complete() runs.
 *
 * Returns 0 on success or a negative errno (-ENOLINK when the interface
 * is down/halted/asleep, so the caller stops refilling).
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* per-skb bookkeeping lives in skb->cb */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock also serializes against suspend/halt state changes */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* transient failure: let the tasklet retry */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		/* submission failed: we still own skb and urb */
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2551
/* Bulk-in completion handler (interrupt context).  Classifies the urb
 * status, hands the skb to the bottom half via defer_bh(), and tries to
 * resubmit the urb for the next transfer.  Setting the local 'urb' to NULL
 * transfers urb ownership back to the skb entry for later cleanup.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt transfer: count it and discard */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* stall: schedule a clear-halt from process context */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* do not resubmit; bottom half frees the urb */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		/* low-level bus errors: give up on this urb */
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		/* resubmit unless the interface stopped or an unlink races */
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2621
/* Transmit bottom half: drain txq_pend by either (a) sending a GSO skb on
 * its own urb, or (b) coalescing as many small frames as fit into one
 * MAX_SINGLE_PACKET_SIZE buffer (each frame padded to a 4-byte boundary),
 * then submitting a single bulk-out urb.  While the device is autosuspended
 * the urb is anchored on dev->deferred and submitted at resume time.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* first pass: walk the pending queue (without dequeuing) to decide
	 * how many frames to batch and how large the merged buffer must be
	 */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO frames travel alone on their own urb */
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each merged frame starts on a 4-byte boundary */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	/* hold a PM reference for the duration of the transfer */
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle the stack while the in-flight queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2745
2746static void lan78xx_rx_bh(struct lan78xx_net *dev)
2747{
2748 struct urb *urb;
2749 int i;
2750
2751 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2752 for (i = 0; i < 10; i++) {
2753 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2754 break;
2755 urb = usb_alloc_urb(0, GFP_ATOMIC);
2756 if (urb)
2757 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2758 return;
2759 }
2760
2761 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2762 tasklet_schedule(&dev->bh);
2763 }
2764 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2765 netif_wake_queue(dev->net);
2766}
2767
/* Tasklet bottom half: drain the done queue (completed RX/TX skbs) and,
 * while the interface is up, restart TX batching and RX refill.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* deliver; rx_process re-queues on error */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: stop processing entirely */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		/* refill RX unless throttled or a halt recovery is pending */
		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
2804
/* Deferred (process-context) event handler: clears TX/RX endpoint stalls
 * and performs link resets that cannot be done from interrupt context.
 *
 * NOTE: the "goto fail_pipe/fail_halt" targets deliberately jump *into*
 * the conditional error-print statements below them (a usbnet idiom), so
 * an autopm failure reuses the same error message path.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			/* NOTE(review): 'ret' is always 0 here, so this
			 * message never shows the real failure code
			 */
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
}
2871
2872static void intr_complete(struct urb *urb)
2873{
2874 struct lan78xx_net *dev = urb->context;
2875 int status = urb->status;
2876
2877 switch (status) {
2878 /* success */
2879 case 0:
2880 lan78xx_status(dev, urb);
2881 break;
2882
2883 /* software-driven interface shutdown */
2884 case -ENOENT: /* urb killed */
2885 case -ESHUTDOWN: /* hardware gone */
2886 netif_dbg(dev, ifdown, dev->net,
2887 "intr shutdown, code %d\n", status);
2888 return;
2889
2890 /* NOTE: not throttling like RX/TX, since this endpoint
2891 * already polls infrequently
2892 */
2893 default:
2894 netdev_dbg(dev->net, "intr status %d\n", status);
2895 break;
2896 }
2897
2898 if (!netif_running(dev->net))
2899 return;
2900
2901 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2902 status = usb_submit_urb(urb, GFP_ATOMIC);
2903 if (status != 0)
2904 netif_err(dev, timer, dev->net,
2905 "intr resubmit --> %d\n", status);
2906}
2907
2908static void lan78xx_disconnect(struct usb_interface *intf)
2909{
2910 struct lan78xx_net *dev;
2911 struct usb_device *udev;
2912 struct net_device *net;
2913
2914 dev = usb_get_intfdata(intf);
2915 usb_set_intfdata(intf, NULL);
2916 if (!dev)
2917 return;
2918
2919 udev = interface_to_usbdev(intf);
2920
2921 net = dev->net;
2922 unregister_netdev(net);
2923
2924 cancel_delayed_work_sync(&dev->wq);
2925
2926 usb_scuttle_anchored_urbs(&dev->deferred);
2927
2928 lan78xx_unbind(dev, intf);
2929
2930 usb_kill_urb(dev->urb_intr);
2931 usb_free_urb(dev->urb_intr);
2932
2933 free_netdev(net);
2934 usb_put_dev(udev);
2935}
2936
/* ndo_tx_timeout handler: unlink all in-flight TX urbs and let the
 * bottom half rebuild the transmit pipeline.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
2944
/* net_device operations exported by this driver */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
2959
2960static int lan78xx_probe(struct usb_interface *intf,
2961 const struct usb_device_id *id)
2962{
2963 struct lan78xx_net *dev;
2964 struct net_device *netdev;
2965 struct usb_device *udev;
2966 int ret;
2967 unsigned maxp;
2968 unsigned period;
2969 u8 *buf = NULL;
2970
2971 udev = interface_to_usbdev(intf);
2972 udev = usb_get_dev(udev);
2973
2974 ret = -ENOMEM;
2975 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
2976 if (!netdev) {
2977 dev_err(&intf->dev, "Error: OOM\n");
2978 goto out1;
2979 }
2980
2981 /* netdev_printk() needs this */
2982 SET_NETDEV_DEV(netdev, &intf->dev);
2983
2984 dev = netdev_priv(netdev);
2985 dev->udev = udev;
2986 dev->intf = intf;
2987 dev->net = netdev;
2988 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
2989 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
2990
2991 skb_queue_head_init(&dev->rxq);
2992 skb_queue_head_init(&dev->txq);
2993 skb_queue_head_init(&dev->done);
2994 skb_queue_head_init(&dev->rxq_pause);
2995 skb_queue_head_init(&dev->txq_pend);
2996 mutex_init(&dev->phy_mutex);
2997
2998 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
2999 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3000 init_usb_anchor(&dev->deferred);
3001
3002 netdev->netdev_ops = &lan78xx_netdev_ops;
3003 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3004 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3005
3006 ret = lan78xx_bind(dev, intf);
3007 if (ret < 0)
3008 goto out2;
3009 strcpy(netdev->name, "eth%d");
3010
3011 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3012 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3013
3014 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3015 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3016 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3017
3018 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3019 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3020
3021 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3022 dev->ep_intr->desc.bEndpointAddress &
3023 USB_ENDPOINT_NUMBER_MASK);
3024 period = dev->ep_intr->desc.bInterval;
3025
3026 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3027 buf = kmalloc(maxp, GFP_KERNEL);
3028 if (buf) {
3029 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3030 if (!dev->urb_intr) {
3031 kfree(buf);
3032 goto out3;
3033 } else {
3034 usb_fill_int_urb(dev->urb_intr, dev->udev,
3035 dev->pipe_intr, buf, maxp,
3036 intr_complete, dev, period);
3037 }
3038 }
3039
3040 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3041
3042 /* driver requires remote-wakeup capability during autosuspend. */
3043 intf->needs_remote_wakeup = 1;
3044
3045 ret = register_netdev(netdev);
3046 if (ret != 0) {
3047 netif_err(dev, probe, netdev, "couldn't register the device\n");
3048 goto out2;
3049 }
3050
3051 usb_set_intfdata(intf, dev);
3052
3053 ret = device_set_wakeup_enable(&udev->dev, true);
3054
3055 /* Default delay of 2sec has more overhead than advantage.
3056 * Set to 10sec as default.
3057 */
3058 pm_runtime_set_autosuspend_delay(&udev->dev,
3059 DEFAULT_AUTOSUSPEND_DELAY);
3060
3061 return 0;
3062
55d7de9d
WH
3063out3:
3064 lan78xx_unbind(dev, intf);
3065out2:
3066 free_netdev(netdev);
3067out1:
3068 usb_put_dev(udev);
3069
3070 return ret;
3071}
3072
3073static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3074{
3075 const u16 crc16poly = 0x8005;
3076 int i;
3077 u16 bit, crc, msb;
3078 u8 data;
3079
3080 crc = 0xFFFF;
3081 for (i = 0; i < len; i++) {
3082 data = *buf++;
3083 for (bit = 0; bit < 8; bit++) {
3084 msb = crc >> 15;
3085 crc <<= 1;
3086
3087 if (msb ^ (u16)(data & 1)) {
3088 crc ^= crc16poly;
3089 crc |= (u16)0x0001U;
3090 }
3091 data >>= 1;
3092 }
3093 }
3094
3095 return crc;
3096}
3097
/* Program the device's Wake-on-LAN machinery for a (system) suspend.
 *
 * Disables TX/RX, clears stale wake status, then builds WUCSR and PMT_CTL
 * values from the requested @wol bitmap (WAKE_PHY/MAGIC/BCAST/MCAST/
 * UCAST/ARP), installing CRC16-based wake-frame filters for the multicast
 * and ARP cases.  RX is re-enabled at the end so wake frames can be seen.
 * Always returns 0 (individual register I/O results are not checked).
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* pattern prefixes matched by the wake-frame filters */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake control and any latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start with all wake-frame filter slots disabled */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		/* magic packet wake uses the deepest suspend mode */
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: compare the first three bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: compare the first two bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000: compare bytes 12-13 (EtherType field) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		/* fall back to the lightest suspend mode */
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so wake frames can reach the wake logic */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3240
/* USB suspend callback (both autosuspend and system suspend).
 *
 * On the first suspend (suspend_count 0 -> 1): refuse autosuspend while TX
 * work is pending, otherwise mark the device asleep, stop the MAC, and
 * tear down all in-flight URBs.  Then program either lightweight
 * "good frame + PHY event" wake (autosuspend) or the user's Wake-on-LAN
 * configuration (system suspend).
 *
 * Returns 0 on success or -EBUSY when autosuspend is refused.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	/* NOTE(review): 'event' is captured but never used afterwards */
	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear wake control and latched wake sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* clear any stale wake-up status bits */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* RX stays on so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: honor the configured WoL modes */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3334
/* USB resume callback.  When the last nested suspend unwinds
 * (suspend_count reaches 0): restart the interrupt urb, submit every TX
 * urb that was deferred while asleep, clear the ASLEEP flag, and restart
 * the queues.  Finally clear/ack all wake sources and re-enable the MAC
 * transmitter.  Always returns 0.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* flush TX urbs deferred by lan78xx_tx_bh during sleep */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop the frame and release its PM ref */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disable wake logic and clear latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* write-one-to-clear the received-wake status bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3395
/* reset_resume callback: the device may have lost its register state, so
 * reinitialize the hardware and PHY before running the normal resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);
	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3406
/* USB IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
3419
/* USB driver glue: autosuspend is supported (see lan78xx_suspend/resume),
 * and hub-initiated LPM is disabled for this device.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
This page took 0.185764 seconds and 5 git commands to generate.