usbnet: fix failure handling in usbnet_probe
drivers/net/usb/usbnet.c
1 /*
2 * USB Network driver infrastructure
3 * Copyright (C) 2000-2005 by David Brownell
4 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21 /*
22 * This is a generic "USB networking" framework that works with several
23 * kinds of full and high speed networking devices: host-to-host cables,
24 * smart usb peripherals, and actual Ethernet adapters.
25 *
26 * These devices usually differ in terms of control protocols (if they
27 * even have one!) and sometimes they define new framing to wrap or batch
28 * Ethernet packets. Otherwise, they talk to USB pretty much the same,
29 * so interface (un)binding, endpoint I/O queues, fault handling, and other
30 * issues can usefully be addressed by this framework.
31 */
32
33 // #define DEBUG // error path messages, extra info
34 // #define VERBOSE // more; success messages
35
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/ctype.h>
41 #include <linux/ethtool.h>
42 #include <linux/workqueue.h>
43 #include <linux/mii.h>
44 #include <linux/usb.h>
45 #include <linux/usb/usbnet.h>
46 #include <linux/slab.h>
47 #include <linux/kernel.h>
48 #include <linux/pm_runtime.h>
49
50 #define DRIVER_VERSION "22-Aug-2005"
51
52
53 /*-------------------------------------------------------------------------*/
54
55 /*
56 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
57 * Several dozen bytes of IPv4 data can fit in two such transactions.
58 * One maximum size Ethernet packet takes twenty four of them.
59 * For high speed, each frame comfortably fits almost 36 max size
60 * Ethernet packets (so queues should be bigger).
61 *
62 * REVISIT qlens should be members of 'struct usbnet'; the goal is to
63 * let the USB host controller be busy for 5msec or more before an irq
64 * is required, under load. Jumbograms change the equation.
65 */
66 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
67 #define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
68 (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
69 #define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
70 (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
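/* Worked example: RX_MAX_QUEUE_MEMORY is 60 * 1518 = 91080 bytes, so a
 * high speed device with the default 1518 byte rx_urb_size keeps up to
 * 91080 / 1518 = 60 rx urbs in flight, while full speed devices always use
 * a fixed queue depth of 4.  TX_QLEN is computed the same way from hard_mtu.
 */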
71
72 // reawaken network queue this soon after stopping; else watchdog barks
73 #define TX_TIMEOUT_JIFFIES (5*HZ)
74
75 // throttle rx/tx briefly after some faults, so khubd might disconnect()
76 // us (it polls at HZ/4 usually) before we report too many false errors.
77 #define THROTTLE_JIFFIES (HZ/8)
78
79 // between wakeups
80 #define UNLINK_TIMEOUT_MS 3
81
82 /*-------------------------------------------------------------------------*/
83
84 // randomly generated ethernet address
85 static u8 node_id [ETH_ALEN];
86
87 static const char driver_name [] = "usbnet";
88
89 /* use ethtool to change the level for any given device */
90 static int msg_level = -1;
91 module_param (msg_level, int, 0);
92 MODULE_PARM_DESC (msg_level, "Override default message level");
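/* Example (assumed usage): the parameter is a bit count, not a mask, as
 * interpreted by netif_msg_init(): msg_level=0 silences all netif_msg_*()
 * output, msg_level=14 enables the lowest fourteen message classes
 * ((1 << 14) - 1), and anything outside 0..31 (including the default -1)
 * falls back to NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK, the
 * default passed in from usbnet_probe().
 */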
93
94 /*-------------------------------------------------------------------------*/
95
96 /* handles CDC Ethernet and many other network "bulk data" interfaces */
97 int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
98 {
99 int tmp;
100 struct usb_host_interface *alt = NULL;
101 struct usb_host_endpoint *in = NULL, *out = NULL;
102 struct usb_host_endpoint *status = NULL;
103
104 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
105 unsigned ep;
106
107 in = out = status = NULL;
108 alt = intf->altsetting + tmp;
109
110 /* take the first altsetting with in-bulk + out-bulk;
111 * remember any status endpoint, just in case;
112 * ignore other endpoints and altsettings.
113 */
114 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
115 struct usb_host_endpoint *e;
116 int intr = 0;
117
118 e = alt->endpoint + ep;
119 switch (e->desc.bmAttributes) {
120 case USB_ENDPOINT_XFER_INT:
121 if (!usb_endpoint_dir_in(&e->desc))
122 continue;
123 intr = 1;
124 /* FALLTHROUGH */
125 case USB_ENDPOINT_XFER_BULK:
126 break;
127 default:
128 continue;
129 }
130 if (usb_endpoint_dir_in(&e->desc)) {
131 if (!intr && !in)
132 in = e;
133 else if (intr && !status)
134 status = e;
135 } else {
136 if (!out)
137 out = e;
138 }
139 }
140 if (in && out)
141 break;
142 }
143 if (!alt || !in || !out)
144 return -EINVAL;
145
146 if (alt->desc.bAlternateSetting != 0 ||
147 !(dev->driver_info->flags & FLAG_NO_SETINT)) {
148 tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
149 alt->desc.bAlternateSetting);
150 if (tmp < 0)
151 return tmp;
152 }
153
154 dev->in = usb_rcvbulkpipe (dev->udev,
155 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
156 dev->out = usb_sndbulkpipe (dev->udev,
157 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
158 dev->status = status;
159 return 0;
160 }
161 EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
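/* Illustrative only (hypothetical minidriver, not part of this file): a
 * bind() routine for a plain bulk-pipe device can simply delegate endpoint
 * discovery to the helper above, e.g.
 *
 *	static int example_bind(struct usbnet *dev, struct usb_interface *intf)
 *	{
 *		return usbnet_get_endpoints(dev, intf);
 *	}
 */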
162
163 int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
164 {
165 int tmp, i;
166 unsigned char buf [13];
167
168 tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
169 if (tmp != 12) {
170 dev_dbg(&dev->udev->dev,
171 "bad MAC string %d fetch, %d\n", iMACAddress, tmp);
172 if (tmp >= 0)
173 tmp = -EINVAL;
174 return tmp;
175 }
176 for (i = tmp = 0; i < 6; i++, tmp += 2)
177 dev->net->dev_addr [i] =
178 (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
179 return 0;
180 }
181 EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
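/* Example: the iMACAddress string descriptor is expected to hold exactly
 * twelve ASCII hex digits, so "00A0C914C829" becomes dev_addr
 * 00:a0:c9:14:c8:29; a fetch of any other length is rejected with -EINVAL.
 */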
182
183 static void intr_complete (struct urb *urb);
184
185 static int init_status (struct usbnet *dev, struct usb_interface *intf)
186 {
187 char *buf = NULL;
188 unsigned pipe = 0;
189 unsigned maxp;
190 unsigned period;
191
192 if (!dev->driver_info->status)
193 return 0;
194
195 pipe = usb_rcvintpipe (dev->udev,
196 dev->status->desc.bEndpointAddress
197 & USB_ENDPOINT_NUMBER_MASK);
198 maxp = usb_maxpacket (dev->udev, pipe, 0);
199
200 /* avoid 1 msec chatter: min 8 msec poll rate */
201 period = max ((int) dev->status->desc.bInterval,
202 (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
203
204 buf = kmalloc (maxp, GFP_KERNEL);
205 if (buf) {
206 dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
207 if (!dev->interrupt) {
208 kfree (buf);
209 return -ENOMEM;
210 } else {
211 usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
212 buf, maxp, intr_complete, dev, period);
213 dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
214 dev_dbg(&intf->dev,
215 "status ep%din, %d bytes period %d\n",
216 usb_pipeendpoint(pipe), maxp, period);
217 }
218 }
219 return 0;
220 }
221
222 /* Passes this packet up the stack, updating its accounting.
223 * Some link protocols batch packets, so their rx_fixup paths
224 * can return clones as well as just modify the original skb.
225 */
226 void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
227 {
228 int status;
229
230 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
231 skb_queue_tail(&dev->rxq_pause, skb);
232 return;
233 }
234
235 skb->protocol = eth_type_trans (skb, dev->net);
236 dev->net->stats.rx_packets++;
237 dev->net->stats.rx_bytes += skb->len;
238
239 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
240 skb->len + sizeof (struct ethhdr), skb->protocol);
241 memset (skb->cb, 0, sizeof (struct skb_data));
242
243 if (skb_defer_rx_timestamp(skb))
244 return;
245
246 status = netif_rx (skb);
247 if (status != NET_RX_SUCCESS)
248 netif_dbg(dev, rx_err, dev->net,
249 "netif_rx status %d\n", status);
250 }
251 EXPORT_SYMBOL_GPL(usbnet_skb_return);
252
253 \f
254 /*-------------------------------------------------------------------------
255 *
256 * Network Device Driver (peer link to "Host Device", from USB host)
257 *
258 *-------------------------------------------------------------------------*/
259
260 int usbnet_change_mtu (struct net_device *net, int new_mtu)
261 {
262 struct usbnet *dev = netdev_priv(net);
263 int ll_mtu = new_mtu + net->hard_header_len;
264 int old_hard_mtu = dev->hard_mtu;
265 int old_rx_urb_size = dev->rx_urb_size;
266
267 if (new_mtu <= 0)
268 return -EINVAL;
269 // no second zero-length packet read wanted after mtu-sized packets
270 if ((ll_mtu % dev->maxpacket) == 0)
271 return -EDOM;
272 net->mtu = new_mtu;
273
274 dev->hard_mtu = net->mtu + net->hard_header_len;
275 if (dev->rx_urb_size == old_hard_mtu) {
276 dev->rx_urb_size = dev->hard_mtu;
277 if (dev->rx_urb_size > old_rx_urb_size)
278 usbnet_unlink_rx_urbs(dev);
279 }
280
281 return 0;
282 }
283 EXPORT_SYMBOL_GPL(usbnet_change_mtu);
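/* Worked example of the -EDOM check: on a high speed link the bulk
 * maxpacket is 512 bytes and hard_header_len is 14, so requesting an MTU
 * of 498 would make every full-sized frame an exact multiple of the USB
 * packet size (498 + 14 = 512) and is refused, because the trailing zero
 * length packet that framing would require confuses some hardware.
 */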
284
285 /*-------------------------------------------------------------------------*/
286
287 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
288 * completion callbacks. 2.5 should have fixed those bugs...
289 */
290
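/* Move one completed skb from its I/O queue (rxq or txq) onto dev->done and
 * kick the bottom half.  The tasklet is scheduled only when the done queue
 * goes from empty to non-empty, since a single usbnet_bh() run drains it.
 */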
291 static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
292 {
293 unsigned long flags;
294
295 spin_lock_irqsave(&list->lock, flags);
296 __skb_unlink(skb, list);
297 spin_unlock(&list->lock);
298 spin_lock(&dev->done.lock);
299 __skb_queue_tail(&dev->done, skb);
300 if (dev->done.qlen == 1)
301 tasklet_schedule(&dev->bh);
302 spin_unlock_irqrestore(&dev->done.lock, flags);
303 }
304
305 /* some work can't be done in tasklets, so we use keventd
306 *
307 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
308 * but tasklet_schedule() doesn't. hope the failure is rare.
309 */
310 void usbnet_defer_kevent (struct usbnet *dev, int work)
311 {
312 set_bit (work, &dev->flags);
313 if (!schedule_work (&dev->kevent))
314 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
315 else
316 netdev_dbg(dev->net, "kevent %d scheduled\n", work);
317 }
318 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
319
320 /*-------------------------------------------------------------------------*/
321
322 static void rx_complete (struct urb *urb);
323
324 static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
325 {
326 struct sk_buff *skb;
327 struct skb_data *entry;
328 int retval = 0;
329 unsigned long lockflags;
330 size_t size = dev->rx_urb_size;
331
332 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
333 if (!skb) {
334 netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
335 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
336 usb_free_urb (urb);
337 return -ENOMEM;
338 }
339
340 entry = (struct skb_data *) skb->cb;
341 entry->urb = urb;
342 entry->dev = dev;
343 entry->state = rx_start;
344 entry->length = 0;
345
346 usb_fill_bulk_urb (urb, dev->udev, dev->in,
347 skb->data, size, rx_complete, skb);
348
349 spin_lock_irqsave (&dev->rxq.lock, lockflags);
350
351 if (netif_running (dev->net) &&
352 netif_device_present (dev->net) &&
353 !test_bit (EVENT_RX_HALT, &dev->flags) &&
354 !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
355 switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
356 case -EPIPE:
357 usbnet_defer_kevent (dev, EVENT_RX_HALT);
358 break;
359 case -ENOMEM:
360 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
361 break;
362 case -ENODEV:
363 netif_dbg(dev, ifdown, dev->net, "device gone\n");
364 netif_device_detach (dev->net);
365 break;
366 case -EHOSTUNREACH:
367 retval = -ENOLINK;
368 break;
369 default:
370 netif_dbg(dev, rx_err, dev->net,
371 "rx submit, %d\n", retval);
372 tasklet_schedule (&dev->bh);
373 break;
374 case 0:
375 __skb_queue_tail (&dev->rxq, skb);
376 }
377 } else {
378 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
379 retval = -ENOLINK;
380 }
381 spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
382 if (retval) {
383 dev_kfree_skb_any (skb);
384 usb_free_urb (urb);
385 }
386 return retval;
387 }
388
389
390 /*-------------------------------------------------------------------------*/
391
392 static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
393 {
394 if (dev->driver_info->rx_fixup &&
395 !dev->driver_info->rx_fixup (dev, skb)) {
396 /* With RX_ASSEMBLE, rx_fixup() must update counters */
397 if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
398 dev->net->stats.rx_errors++;
399 goto done;
400 }
401 // else network stack removes extra byte if we forced a short packet
402
403 if (skb->len) {
404 /* all data was already cloned from skb inside the driver */
405 if (dev->driver_info->flags & FLAG_MULTI_PACKET)
406 dev_kfree_skb_any(skb);
407 else
408 usbnet_skb_return(dev, skb);
409 return;
410 }
411
412 netif_dbg(dev, rx_err, dev->net, "drop\n");
413 dev->net->stats.rx_errors++;
414 done:
415 skb_queue_tail(&dev->done, skb);
416 }
417
418 /*-------------------------------------------------------------------------*/
419
420 static void rx_complete (struct urb *urb)
421 {
422 struct sk_buff *skb = (struct sk_buff *) urb->context;
423 struct skb_data *entry = (struct skb_data *) skb->cb;
424 struct usbnet *dev = entry->dev;
425 int urb_status = urb->status;
426
427 skb_put (skb, urb->actual_length);
428 entry->state = rx_done;
429 entry->urb = NULL;
430
431 switch (urb_status) {
432 /* success */
433 case 0:
434 if (skb->len < dev->net->hard_header_len) {
435 entry->state = rx_cleanup;
436 dev->net->stats.rx_errors++;
437 dev->net->stats.rx_length_errors++;
438 netif_dbg(dev, rx_err, dev->net,
439 "rx length %d\n", skb->len);
440 }
441 break;
442
443 /* stalls need manual reset. this is rare ... except that
444 * when going through USB 2.0 TTs, unplug appears this way.
445 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
446 * storm, recovering as needed.
447 */
448 case -EPIPE:
449 dev->net->stats.rx_errors++;
450 usbnet_defer_kevent (dev, EVENT_RX_HALT);
451 // FALLTHROUGH
452
453 /* software-driven interface shutdown */
454 case -ECONNRESET: /* async unlink */
455 case -ESHUTDOWN: /* hardware gone */
456 netif_dbg(dev, ifdown, dev->net,
457 "rx shutdown, code %d\n", urb_status);
458 goto block;
459
460 /* we get controller i/o faults during khubd disconnect() delays.
461 * throttle down resubmits, to avoid log floods; just temporarily,
462 * so we still recover when the fault isn't a khubd delay.
463 */
464 case -EPROTO:
465 case -ETIME:
466 case -EILSEQ:
467 dev->net->stats.rx_errors++;
468 if (!timer_pending (&dev->delay)) {
469 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
470 netif_dbg(dev, link, dev->net,
471 "rx throttle %d\n", urb_status);
472 }
473 block:
474 entry->state = rx_cleanup;
475 entry->urb = urb;
476 urb = NULL;
477 break;
478
479 /* data overrun ... flush fifo? */
480 case -EOVERFLOW:
481 dev->net->stats.rx_over_errors++;
482 // FALLTHROUGH
483
484 default:
485 entry->state = rx_cleanup;
486 dev->net->stats.rx_errors++;
487 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
488 break;
489 }
490
491 defer_bh(dev, skb, &dev->rxq);
492
493 if (urb) {
494 if (netif_running (dev->net) &&
495 !test_bit (EVENT_RX_HALT, &dev->flags)) {
496 rx_submit (dev, urb, GFP_ATOMIC);
497 usb_mark_last_busy(dev->udev);
498 return;
499 }
500 usb_free_urb (urb);
501 }
502 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
503 }
504
505 static void intr_complete (struct urb *urb)
506 {
507 struct usbnet *dev = urb->context;
508 int status = urb->status;
509
510 switch (status) {
511 /* success */
512 case 0:
513 dev->driver_info->status(dev, urb);
514 break;
515
516 /* software-driven interface shutdown */
517 case -ENOENT: /* urb killed */
518 case -ESHUTDOWN: /* hardware gone */
519 netif_dbg(dev, ifdown, dev->net,
520 "intr shutdown, code %d\n", status);
521 return;
522
523 /* NOTE: not throttling like RX/TX, since this endpoint
524 * already polls infrequently
525 */
526 default:
527 netdev_dbg(dev->net, "intr status %d\n", status);
528 break;
529 }
530
531 if (!netif_running (dev->net))
532 return;
533
534 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
535 status = usb_submit_urb (urb, GFP_ATOMIC);
536 if (status != 0)
537 netif_err(dev, timer, dev->net,
538 "intr resubmit --> %d\n", status);
539 }
540
541 /*-------------------------------------------------------------------------*/
542 void usbnet_pause_rx(struct usbnet *dev)
543 {
544 set_bit(EVENT_RX_PAUSED, &dev->flags);
545
546 netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
547 }
548 EXPORT_SYMBOL_GPL(usbnet_pause_rx);
549
550 void usbnet_resume_rx(struct usbnet *dev)
551 {
552 struct sk_buff *skb;
553 int num = 0;
554
555 clear_bit(EVENT_RX_PAUSED, &dev->flags);
556
557 while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
558 usbnet_skb_return(dev, skb);
559 num++;
560 }
561
562 tasklet_schedule(&dev->bh);
563
564 netif_dbg(dev, rx_status, dev->net,
565 "paused rx queue disabled, %d skbs requeued\n", num);
566 }
567 EXPORT_SYMBOL_GPL(usbnet_resume_rx);
568
569 void usbnet_purge_paused_rxq(struct usbnet *dev)
570 {
571 skb_queue_purge(&dev->rxq_pause);
572 }
573 EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
574
575 /*-------------------------------------------------------------------------*/
576
577 // unlink pending rx/tx; completion handlers do all other cleanup
578
579 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
580 {
581 unsigned long flags;
582 struct sk_buff *skb, *skbnext;
583 int count = 0;
584
585 spin_lock_irqsave (&q->lock, flags);
586 skb_queue_walk_safe(q, skb, skbnext) {
587 struct skb_data *entry;
588 struct urb *urb;
589 int retval;
590
591 entry = (struct skb_data *) skb->cb;
592 urb = entry->urb;
593
594 /*
595 * Get reference count of the URB to avoid it to be
596 * freed during usb_unlink_urb, which may trigger
597 * use-after-free problem inside usb_unlink_urb since
598 * usb_unlink_urb is always racing with .complete
599 * handler(include defer_bh).
600 */
601 usb_get_urb(urb);
602 spin_unlock_irqrestore(&q->lock, flags);
603 // during some PM-driven resume scenarios,
604 // these (async) unlinks complete immediately
605 retval = usb_unlink_urb (urb);
606 if (retval != -EINPROGRESS && retval != 0)
607 netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
608 else
609 count++;
610 usb_put_urb(urb);
611 spin_lock_irqsave(&q->lock, flags);
612 }
613 spin_unlock_irqrestore (&q->lock, flags);
614 return count;
615 }
616
617 // Flush all pending rx urbs
618 // minidrivers may need to do this when the MTU changes
619
620 void usbnet_unlink_rx_urbs(struct usbnet *dev)
621 {
622 if (netif_running(dev->net)) {
623 (void) unlink_urbs (dev, &dev->rxq);
624 tasklet_schedule(&dev->bh);
625 }
626 }
627 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
628
629 /*-------------------------------------------------------------------------*/
630
631 // precondition: never called in_interrupt
632 static void usbnet_terminate_urbs(struct usbnet *dev)
633 {
634 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
635 DECLARE_WAITQUEUE(wait, current);
636 int temp;
637
638 /* ensure there are no more active urbs */
639 add_wait_queue(&unlink_wakeup, &wait);
640 set_current_state(TASK_UNINTERRUPTIBLE);
641 dev->wait = &unlink_wakeup;
642 temp = unlink_urbs(dev, &dev->txq) +
643 unlink_urbs(dev, &dev->rxq);
644
645 /* maybe wait for deletions to finish. */
646 while (!skb_queue_empty(&dev->rxq)
647 && !skb_queue_empty(&dev->txq)
648 && !skb_queue_empty(&dev->done)) {
649 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
650 set_current_state(TASK_UNINTERRUPTIBLE);
651 netif_dbg(dev, ifdown, dev->net,
652 "waited for %d urb completions\n", temp);
653 }
654 set_current_state(TASK_RUNNING);
655 dev->wait = NULL;
656 remove_wait_queue(&unlink_wakeup, &wait);
657 }
658
659 int usbnet_stop (struct net_device *net)
660 {
661 struct usbnet *dev = netdev_priv(net);
662 struct driver_info *info = dev->driver_info;
663 int retval;
664
665 clear_bit(EVENT_DEV_OPEN, &dev->flags);
666 netif_stop_queue (net);
667
668 netif_info(dev, ifdown, dev->net,
669 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
670 net->stats.rx_packets, net->stats.tx_packets,
671 net->stats.rx_errors, net->stats.tx_errors);
672
673 /* allow minidriver to stop correctly (wireless devices to turn off
674 * radio etc) */
675 if (info->stop) {
676 retval = info->stop(dev);
677 if (retval < 0)
678 netif_info(dev, ifdown, dev->net,
679 "stop fail (%d) usbnet usb-%s-%s, %s\n",
680 retval,
681 dev->udev->bus->bus_name, dev->udev->devpath,
682 info->description);
683 }
684
685 if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
686 usbnet_terminate_urbs(dev);
687
688 usb_kill_urb(dev->interrupt);
689
690 usbnet_purge_paused_rxq(dev);
691
692 /* deferred work (task, timer, softirq) must also stop.
693 * can't flush_scheduled_work() until we drop rtnl (later),
694 * else workers could deadlock; so make workers a NOP.
695 */
696 dev->flags = 0;
697 del_timer_sync (&dev->delay);
698 tasklet_kill (&dev->bh);
699 if (info->manage_power)
700 info->manage_power(dev, 0);
701 else
702 usb_autopm_put_interface(dev->intf);
703
704 return 0;
705 }
706 EXPORT_SYMBOL_GPL(usbnet_stop);
707
708 /*-------------------------------------------------------------------------*/
709
710 // posts reads, and enables write queuing
711
712 // precondition: never called in_interrupt
713
714 int usbnet_open (struct net_device *net)
715 {
716 struct usbnet *dev = netdev_priv(net);
717 int retval;
718 struct driver_info *info = dev->driver_info;
719
720 if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
721 netif_info(dev, ifup, dev->net,
722 "resumption fail (%d) usbnet usb-%s-%s, %s\n",
723 retval,
724 dev->udev->bus->bus_name,
725 dev->udev->devpath,
726 info->description);
727 goto done_nopm;
728 }
729
730 // put into "known safe" state
731 if (info->reset && (retval = info->reset (dev)) < 0) {
732 netif_info(dev, ifup, dev->net,
733 "open reset fail (%d) usbnet usb-%s-%s, %s\n",
734 retval,
735 dev->udev->bus->bus_name,
736 dev->udev->devpath,
737 info->description);
738 goto done;
739 }
740
741 // insist peer be connected
742 if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
743 netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
744 goto done;
745 }
746
747 /* start any status interrupt transfer */
748 if (dev->interrupt) {
749 retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
750 if (retval < 0) {
751 netif_err(dev, ifup, dev->net,
752 "intr submit %d\n", retval);
753 goto done;
754 }
755 }
756
757 set_bit(EVENT_DEV_OPEN, &dev->flags);
758 netif_start_queue (net);
759 netif_info(dev, ifup, dev->net,
760 "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
761 (int)RX_QLEN(dev), (int)TX_QLEN(dev),
762 dev->net->mtu,
763 (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
764 (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
765 (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
766 (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
767 (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
768 "simple");
769
770 // delay posting reads until we're fully open
771 tasklet_schedule (&dev->bh);
772 if (info->manage_power) {
773 retval = info->manage_power(dev, 1);
774 if (retval < 0)
775 goto done;
776 usb_autopm_put_interface(dev->intf);
777 }
778 return retval;
779
780 done:
781 usb_autopm_put_interface(dev->intf);
782 done_nopm:
783 return retval;
784 }
785 EXPORT_SYMBOL_GPL(usbnet_open);
786
787 /*-------------------------------------------------------------------------*/
788
789 /* ethtool methods; minidrivers may need to add some more, but
790 * they'll probably want to use this base set.
791 */
792
793 int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
794 {
795 struct usbnet *dev = netdev_priv(net);
796
797 if (!dev->mii.mdio_read)
798 return -EOPNOTSUPP;
799
800 return mii_ethtool_gset(&dev->mii, cmd);
801 }
802 EXPORT_SYMBOL_GPL(usbnet_get_settings);
803
804 int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
805 {
806 struct usbnet *dev = netdev_priv(net);
807 int retval;
808
809 if (!dev->mii.mdio_write)
810 return -EOPNOTSUPP;
811
812 retval = mii_ethtool_sset(&dev->mii, cmd);
813
814 /* link speed/duplex might have changed */
815 if (dev->driver_info->link_reset)
816 dev->driver_info->link_reset(dev);
817
818 return retval;
819
820 }
821 EXPORT_SYMBOL_GPL(usbnet_set_settings);
822
823 u32 usbnet_get_link (struct net_device *net)
824 {
825 struct usbnet *dev = netdev_priv(net);
826
827 /* If a check_connect is defined, return its result */
828 if (dev->driver_info->check_connect)
829 return dev->driver_info->check_connect (dev) == 0;
830
831 /* if the device has mii operations, use those */
832 if (dev->mii.mdio_read)
833 return mii_link_ok(&dev->mii);
834
835 /* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
836 return ethtool_op_get_link(net);
837 }
838 EXPORT_SYMBOL_GPL(usbnet_get_link);
839
840 int usbnet_nway_reset(struct net_device *net)
841 {
842 struct usbnet *dev = netdev_priv(net);
843
844 if (!dev->mii.mdio_write)
845 return -EOPNOTSUPP;
846
847 return mii_nway_restart(&dev->mii);
848 }
849 EXPORT_SYMBOL_GPL(usbnet_nway_reset);
850
851 void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
852 {
853 struct usbnet *dev = netdev_priv(net);
854
855 strncpy (info->driver, dev->driver_name, sizeof info->driver);
856 strncpy (info->version, DRIVER_VERSION, sizeof info->version);
857 strncpy (info->fw_version, dev->driver_info->description,
858 sizeof info->fw_version);
859 usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
860 }
861 EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
862
863 u32 usbnet_get_msglevel (struct net_device *net)
864 {
865 struct usbnet *dev = netdev_priv(net);
866
867 return dev->msg_enable;
868 }
869 EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
870
871 void usbnet_set_msglevel (struct net_device *net, u32 level)
872 {
873 struct usbnet *dev = netdev_priv(net);
874
875 dev->msg_enable = level;
876 }
877 EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
878
879 /* drivers may override default ethtool_ops in their bind() routine */
880 static const struct ethtool_ops usbnet_ethtool_ops = {
881 .get_settings = usbnet_get_settings,
882 .set_settings = usbnet_set_settings,
883 .get_link = usbnet_get_link,
884 .nway_reset = usbnet_nway_reset,
885 .get_drvinfo = usbnet_get_drvinfo,
886 .get_msglevel = usbnet_get_msglevel,
887 .set_msglevel = usbnet_set_msglevel,
888 };
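/* Illustrative only: a minidriver that needs extra ethtool support installs
 * its own table from bind(), e.g.
 *
 *	dev->net->ethtool_ops = &example_ethtool_ops;
 *
 * where example_ethtool_ops is hypothetical and would typically point the
 * entries it does not customize back at the usbnet_* helpers above.
 */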
889
890 /*-------------------------------------------------------------------------*/
891
892 /* work that cannot be done in interrupt context uses keventd.
893 *
894 * NOTE: with 2.5 we could do more of this using completion callbacks,
895 * especially now that control transfers can be queued.
896 */
897 static void
898 kevent (struct work_struct *work)
899 {
900 struct usbnet *dev =
901 container_of(work, struct usbnet, kevent);
902 int status;
903
904 /* usb_clear_halt() needs a thread context */
905 if (test_bit (EVENT_TX_HALT, &dev->flags)) {
906 unlink_urbs (dev, &dev->txq);
907 status = usb_autopm_get_interface(dev->intf);
908 if (status < 0)
909 goto fail_pipe;
910 status = usb_clear_halt (dev->udev, dev->out);
911 usb_autopm_put_interface(dev->intf);
912 if (status < 0 &&
913 status != -EPIPE &&
914 status != -ESHUTDOWN) {
915 if (netif_msg_tx_err (dev))
916 fail_pipe:
917 netdev_err(dev->net, "can't clear tx halt, status %d\n",
918 status);
919 } else {
920 clear_bit (EVENT_TX_HALT, &dev->flags);
921 if (status != -ESHUTDOWN)
922 netif_wake_queue (dev->net);
923 }
924 }
925 if (test_bit (EVENT_RX_HALT, &dev->flags)) {
926 unlink_urbs (dev, &dev->rxq);
927 status = usb_autopm_get_interface(dev->intf);
928 if (status < 0)
929 goto fail_halt;
930 status = usb_clear_halt (dev->udev, dev->in);
931 usb_autopm_put_interface(dev->intf);
932 if (status < 0 &&
933 status != -EPIPE &&
934 status != -ESHUTDOWN) {
935 if (netif_msg_rx_err (dev))
936 fail_halt:
937 netdev_err(dev->net, "can't clear rx halt, status %d\n",
938 status);
939 } else {
940 clear_bit (EVENT_RX_HALT, &dev->flags);
941 tasklet_schedule (&dev->bh);
942 }
943 }
944
945 /* tasklet could resubmit itself forever if memory is tight */
946 if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
947 struct urb *urb = NULL;
948 int resched = 1;
949
950 if (netif_running (dev->net))
951 urb = usb_alloc_urb (0, GFP_KERNEL);
952 else
953 clear_bit (EVENT_RX_MEMORY, &dev->flags);
954 if (urb != NULL) {
955 clear_bit (EVENT_RX_MEMORY, &dev->flags);
956 status = usb_autopm_get_interface(dev->intf);
957 if (status < 0) {
958 usb_free_urb(urb);
959 goto fail_lowmem;
960 }
961 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
962 resched = 0;
963 usb_autopm_put_interface(dev->intf);
964 fail_lowmem:
965 if (resched)
966 tasklet_schedule (&dev->bh);
967 }
968 }
969
970 if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
971 struct driver_info *info = dev->driver_info;
972 int retval = 0;
973
974 clear_bit (EVENT_LINK_RESET, &dev->flags);
975 status = usb_autopm_get_interface(dev->intf);
976 if (status < 0)
977 goto skip_reset;
978 if (info->link_reset && (retval = info->link_reset(dev)) < 0) {
979 usb_autopm_put_interface(dev->intf);
980 skip_reset:
981 netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
982 retval,
983 dev->udev->bus->bus_name,
984 dev->udev->devpath,
985 info->description);
986 } else {
987 usb_autopm_put_interface(dev->intf);
988 }
989 }
990
991 if (dev->flags)
992 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
993 }
994
995 /*-------------------------------------------------------------------------*/
996
997 static void tx_complete (struct urb *urb)
998 {
999 struct sk_buff *skb = (struct sk_buff *) urb->context;
1000 struct skb_data *entry = (struct skb_data *) skb->cb;
1001 struct usbnet *dev = entry->dev;
1002
1003 if (urb->status == 0) {
1004 if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
1005 dev->net->stats.tx_packets++;
1006 dev->net->stats.tx_bytes += entry->length;
1007 } else {
1008 dev->net->stats.tx_errors++;
1009
1010 switch (urb->status) {
1011 case -EPIPE:
1012 usbnet_defer_kevent (dev, EVENT_TX_HALT);
1013 break;
1014
1015 /* software-driven interface shutdown */
1016 case -ECONNRESET: // async unlink
1017 case -ESHUTDOWN: // hardware gone
1018 break;
1019
1020 // like rx, tx gets controller i/o faults during khubd delays
1021 // and so it uses the same throttling mechanism.
1022 case -EPROTO:
1023 case -ETIME:
1024 case -EILSEQ:
1025 usb_mark_last_busy(dev->udev);
1026 if (!timer_pending (&dev->delay)) {
1027 mod_timer (&dev->delay,
1028 jiffies + THROTTLE_JIFFIES);
1029 netif_dbg(dev, link, dev->net,
1030 "tx throttle %d\n", urb->status);
1031 }
1032 netif_stop_queue (dev->net);
1033 break;
1034 default:
1035 netif_dbg(dev, tx_err, dev->net,
1036 "tx err %d\n", entry->urb->status);
1037 break;
1038 }
1039 }
1040
1041 usb_autopm_put_interface_async(dev->intf);
1042 entry->state = tx_done;
1043 defer_bh(dev, skb, &dev->txq);
1044 }
1045
1046 /*-------------------------------------------------------------------------*/
1047
1048 void usbnet_tx_timeout (struct net_device *net)
1049 {
1050 struct usbnet *dev = netdev_priv(net);
1051
1052 unlink_urbs (dev, &dev->txq);
1053 tasklet_schedule (&dev->bh);
1054
1055 // FIXME: device recovery -- reset?
1056 }
1057 EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
1058
1059 /*-------------------------------------------------------------------------*/
1060
1061 netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1062 struct net_device *net)
1063 {
1064 struct usbnet *dev = netdev_priv(net);
1065 int length;
1066 struct urb *urb = NULL;
1067 struct skb_data *entry;
1068 struct driver_info *info = dev->driver_info;
1069 unsigned long flags;
1070 int retval;
1071
1072 if (skb)
1073 skb_tx_timestamp(skb);
1074
1075 // some devices want funky USB-level framing, for
1076 // win32 driver (usually) and/or hardware quirks
1077 if (info->tx_fixup) {
1078 skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
1079 if (!skb) {
1080 if (netif_msg_tx_err(dev)) {
1081 netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
1082 goto drop;
1083 } else {
1084 /* cdc_ncm collected packet; waits for more */
1085 goto not_drop;
1086 }
1087 }
1088 }
1089 length = skb->len;
1090
1091 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
1092 netif_dbg(dev, tx_err, dev->net, "no urb\n");
1093 goto drop;
1094 }
1095
1096 entry = (struct skb_data *) skb->cb;
1097 entry->urb = urb;
1098 entry->dev = dev;
1099 entry->state = tx_start;
1100 entry->length = length;
1101
1102 usb_fill_bulk_urb (urb, dev->udev, dev->out,
1103 skb->data, skb->len, tx_complete, skb);
1104
1105 /* don't assume the hardware handles USB_ZERO_PACKET
1106 * NOTE: strictly conforming cdc-ether devices should expect
1107 * the ZLP here, but ignore the one-byte packet.
1108 * NOTE2: CDC NCM specification is different from CDC ECM when
1109 * handling ZLP/short packets, so cdc_ncm driver will make short
1110 * packet itself if needed.
1111 */
1112 if (length % dev->maxpacket == 0) {
1113 if (!(info->flags & FLAG_SEND_ZLP)) {
1114 if (!(info->flags & FLAG_MULTI_PACKET)) {
1115 urb->transfer_buffer_length++;
1116 if (skb_tailroom(skb)) {
1117 skb->data[skb->len] = 0;
1118 __skb_put(skb, 1);
1119 }
1120 }
1121 } else
1122 urb->transfer_flags |= URB_ZERO_PACKET;
1123 }
1124
1125 spin_lock_irqsave(&dev->txq.lock, flags);
1126 retval = usb_autopm_get_interface_async(dev->intf);
1127 if (retval < 0) {
1128 spin_unlock_irqrestore(&dev->txq.lock, flags);
1129 goto drop;
1130 }
1131
1132 #ifdef CONFIG_PM
1133 /* if this triggers the device is still asleep */
1134 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
1135 /* transmission will be done in resume */
1136 usb_anchor_urb(urb, &dev->deferred);
1137 /* no use to process more packets */
1138 netif_stop_queue(net);
1139 spin_unlock_irqrestore(&dev->txq.lock, flags);
1140 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
1141 goto deferred;
1142 }
1143 #endif
1144
1145 switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
1146 case -EPIPE:
1147 netif_stop_queue (net);
1148 usbnet_defer_kevent (dev, EVENT_TX_HALT);
1149 usb_autopm_put_interface_async(dev->intf);
1150 break;
1151 default:
1152 usb_autopm_put_interface_async(dev->intf);
1153 netif_dbg(dev, tx_err, dev->net,
1154 "tx: submit urb err %d\n", retval);
1155 break;
1156 case 0:
1157 net->trans_start = jiffies;
1158 __skb_queue_tail (&dev->txq, skb);
1159 if (dev->txq.qlen >= TX_QLEN (dev))
1160 netif_stop_queue (net);
1161 }
1162 spin_unlock_irqrestore (&dev->txq.lock, flags);
1163
1164 if (retval) {
1165 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
1166 drop:
1167 dev->net->stats.tx_dropped++;
1168 not_drop:
1169 if (skb)
1170 dev_kfree_skb_any (skb);
1171 usb_free_urb (urb);
1172 } else
1173 netif_dbg(dev, tx_queued, dev->net,
1174 "> tx, len %d, type 0x%x\n", length, skb->protocol);
1175 #ifdef CONFIG_PM
1176 deferred:
1177 #endif
1178 return NETDEV_TX_OK;
1179 }
1180 EXPORT_SYMBOL_GPL(usbnet_start_xmit);
1181
1182 /*-------------------------------------------------------------------------*/
1183
1184 // tasklet (work deferred from completions, in_irq) or timer
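// Drains dev->done: frees buffers for completed transmits and unlinked urbs,
// and pushes received packets up the stack via rx_process().  Afterwards it
// either wakes anyone waiting in usbnet_terminate_urbs(), or tops the rx
// queue back up and re-enables the tx queue when there is room.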
1185
1186 static void usbnet_bh (unsigned long param)
1187 {
1188 struct usbnet *dev = (struct usbnet *) param;
1189 struct sk_buff *skb;
1190 struct skb_data *entry;
1191
1192 while ((skb = skb_dequeue (&dev->done))) {
1193 entry = (struct skb_data *) skb->cb;
1194 switch (entry->state) {
1195 case rx_done:
1196 entry->state = rx_cleanup;
1197 rx_process (dev, skb);
1198 continue;
1199 case tx_done:
1200 case rx_cleanup:
1201 usb_free_urb (entry->urb);
1202 dev_kfree_skb (skb);
1203 continue;
1204 default:
1205 netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
1206 }
1207 }
1208
1209 // waiting for all pending urbs to complete?
1210 if (dev->wait) {
1211 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
1212 wake_up (dev->wait);
1213 }
1214
1215 // or are we maybe short a few urbs?
1216 } else if (netif_running (dev->net) &&
1217 netif_device_present (dev->net) &&
1218 !timer_pending (&dev->delay) &&
1219 !test_bit (EVENT_RX_HALT, &dev->flags)) {
1220 int temp = dev->rxq.qlen;
1221 int qlen = RX_QLEN (dev);
1222
1223 if (temp < qlen) {
1224 struct urb *urb;
1225 int i;
1226
1227 // don't refill the queue all at once
1228 for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
1229 urb = usb_alloc_urb (0, GFP_ATOMIC);
1230 if (urb != NULL) {
1231 if (rx_submit (dev, urb, GFP_ATOMIC) ==
1232 -ENOLINK)
1233 return;
1234 }
1235 }
1236 if (temp != dev->rxq.qlen)
1237 netif_dbg(dev, link, dev->net,
1238 "rxqlen %d --> %d\n",
1239 temp, dev->rxq.qlen);
1240 if (dev->rxq.qlen < qlen)
1241 tasklet_schedule (&dev->bh);
1242 }
1243 if (dev->txq.qlen < TX_QLEN (dev))
1244 netif_wake_queue (dev->net);
1245 }
1246 }
1247
1248
1249 /*-------------------------------------------------------------------------
1250 *
1251 * USB Device Driver support
1252 *
1253 *-------------------------------------------------------------------------*/
1254
1255 // precondition: never called in_interrupt
1256
1257 void usbnet_disconnect (struct usb_interface *intf)
1258 {
1259 struct usbnet *dev;
1260 struct usb_device *xdev;
1261 struct net_device *net;
1262
1263 dev = usb_get_intfdata(intf);
1264 usb_set_intfdata(intf, NULL);
1265 if (!dev)
1266 return;
1267
1268 xdev = interface_to_usbdev (intf);
1269
1270 netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
1271 intf->dev.driver->name,
1272 xdev->bus->bus_name, xdev->devpath,
1273 dev->driver_info->description);
1274
1275 net = dev->net;
1276 unregister_netdev (net);
1277
1278 cancel_work_sync(&dev->kevent);
1279
1280 if (dev->driver_info->unbind)
1281 dev->driver_info->unbind (dev, intf);
1282
1283 usb_kill_urb(dev->interrupt);
1284 usb_free_urb(dev->interrupt);
1285
1286 free_netdev(net);
1287 usb_put_dev (xdev);
1288 }
1289 EXPORT_SYMBOL_GPL(usbnet_disconnect);
1290
1291 static const struct net_device_ops usbnet_netdev_ops = {
1292 .ndo_open = usbnet_open,
1293 .ndo_stop = usbnet_stop,
1294 .ndo_start_xmit = usbnet_start_xmit,
1295 .ndo_tx_timeout = usbnet_tx_timeout,
1296 .ndo_change_mtu = usbnet_change_mtu,
1297 .ndo_set_mac_address = eth_mac_addr,
1298 .ndo_validate_addr = eth_validate_addr,
1299 };
1300
1301 /*-------------------------------------------------------------------------*/
1302
1303 // precondition: never called in_interrupt
1304
1305 static struct device_type wlan_type = {
1306 .name = "wlan",
1307 };
1308
1309 static struct device_type wwan_type = {
1310 .name = "wwan",
1311 };
1312
1313 int
1314 usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1315 {
1316 struct usbnet *dev;
1317 struct net_device *net;
1318 struct usb_host_interface *interface;
1319 struct driver_info *info;
1320 struct usb_device *xdev;
1321 int status;
1322 const char *name;
1323 struct usb_driver *driver = to_usb_driver(udev->dev.driver);
1324
1325 /* usbnet already took usb runtime pm, so have to enable the feature
1326 * for usb interface, otherwise usb_autopm_get_interface may return
1327 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
1328 */
1329 if (!driver->supports_autosuspend) {
1330 driver->supports_autosuspend = 1;
1331 pm_runtime_enable(&udev->dev);
1332 }
1333
1334 name = udev->dev.driver->name;
1335 info = (struct driver_info *) prod->driver_info;
1336 if (!info) {
1337 dev_dbg (&udev->dev, "blacklisted by %s\n", name);
1338 return -ENODEV;
1339 }
1340 xdev = interface_to_usbdev (udev);
1341 interface = udev->cur_altsetting;
1342
1343 usb_get_dev (xdev);
1344
1345 status = -ENOMEM;
1346
1347 // set up our own records
1348 net = alloc_etherdev(sizeof(*dev));
1349 if (!net)
1350 goto out;
1351
1352 /* netdev_printk() needs this so do it as early as possible */
1353 SET_NETDEV_DEV(net, &udev->dev);
1354
1355 dev = netdev_priv(net);
1356 dev->udev = xdev;
1357 dev->intf = udev;
1358 dev->driver_info = info;
1359 dev->driver_name = name;
1360 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1361 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1362 skb_queue_head_init (&dev->rxq);
1363 skb_queue_head_init (&dev->txq);
1364 skb_queue_head_init (&dev->done);
1365 skb_queue_head_init(&dev->rxq_pause);
1366 dev->bh.func = usbnet_bh;
1367 dev->bh.data = (unsigned long) dev;
1368 INIT_WORK (&dev->kevent, kevent);
1369 init_usb_anchor(&dev->deferred);
1370 dev->delay.function = usbnet_bh;
1371 dev->delay.data = (unsigned long) dev;
1372 init_timer (&dev->delay);
1373 mutex_init (&dev->phy_mutex);
1374
1375 dev->net = net;
1376 strcpy (net->name, "usb%d");
1377 memcpy (net->dev_addr, node_id, sizeof node_id);
1378
1379 /* rx and tx sides can use different message sizes;
1380 * bind() should set rx_urb_size in that case.
1381 */
1382 dev->hard_mtu = net->mtu + net->hard_header_len;
1383 #if 0
1384 // dma_supported() is deeply broken on almost all architectures
1385 // possible with some EHCI controllers
1386 if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
1387 net->features |= NETIF_F_HIGHDMA;
1388 #endif
1389
1390 net->netdev_ops = &usbnet_netdev_ops;
1391 net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
1392 net->ethtool_ops = &usbnet_ethtool_ops;
1393
1394 // allow device-specific bind/init procedures
1395 // NOTE net->name still not usable ...
1396 if (info->bind) {
1397 status = info->bind (dev, udev);
1398 if (status < 0)
1399 goto out1;
1400
1401 // heuristic: "usb%d" for links we know are two-host,
1402 // else "eth%d" when there's reasonable doubt. userspace
1403 // can rename the link if it knows better.
1404 if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
1405 ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
1406 (net->dev_addr [0] & 0x02) == 0))
1407 strcpy (net->name, "eth%d");
1408 /* WLAN devices should always be named "wlan%d" */
1409 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1410 strcpy(net->name, "wlan%d");
1411 /* WWAN devices should always be named "wwan%d" */
1412 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1413 strcpy(net->name, "wwan%d");
1414
1415 /* maybe the remote can't receive an Ethernet MTU */
1416 if (net->mtu > (dev->hard_mtu - net->hard_header_len))
1417 net->mtu = dev->hard_mtu - net->hard_header_len;
1418 } else if (!info->in || !info->out)
1419 status = usbnet_get_endpoints (dev, udev);
1420 else {
1421 dev->in = usb_rcvbulkpipe (xdev, info->in);
1422 dev->out = usb_sndbulkpipe (xdev, info->out);
1423 if (!(info->flags & FLAG_NO_SETINT))
1424 status = usb_set_interface (xdev,
1425 interface->desc.bInterfaceNumber,
1426 interface->desc.bAlternateSetting);
1427 else
1428 status = 0;
1429
1430 }
1431 if (status >= 0 && dev->status)
1432 status = init_status (dev, udev);
1433 if (status < 0)
1434 goto out3;
1435
1436 if (!dev->rx_urb_size)
1437 dev->rx_urb_size = dev->hard_mtu;
1438 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
1439
1440 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1441 SET_NETDEV_DEVTYPE(net, &wlan_type);
1442 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1443 SET_NETDEV_DEVTYPE(net, &wwan_type);
1444
1445 status = register_netdev (net);
1446 if (status)
1447 goto out4;
1448 netif_info(dev, probe, dev->net,
1449 "register '%s' at usb-%s-%s, %s, %pM\n",
1450 udev->dev.driver->name,
1451 xdev->bus->bus_name, xdev->devpath,
1452 dev->driver_info->description,
1453 net->dev_addr);
1454
1455 // ok, it's ready to go.
1456 usb_set_intfdata (udev, dev);
1457
1458 netif_device_attach (net);
1459
1460 if (dev->driver_info->flags & FLAG_LINK_INTR)
1461 netif_carrier_off(net);
1462
1463 return 0;
1464
1465 out4:
1466 usb_free_urb(dev->interrupt);
1467 out3:
1468 if (info->unbind)
1469 info->unbind (dev, udev);
1470 out1:
1471 free_netdev(net);
1472 out:
1473 usb_put_dev(xdev);
1474 return status;
1475 }
1476 EXPORT_SYMBOL_GPL(usbnet_probe);
1477
1478 /*-------------------------------------------------------------------------*/
1479
1480 /*
1481 * suspend the whole driver as soon as the first interface is suspended
1482 * resume only when the last interface is resumed
1483 */
1484
1485 int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
1486 {
1487 struct usbnet *dev = usb_get_intfdata(intf);
1488
1489 if (!dev->suspend_count++) {
1490 spin_lock_irq(&dev->txq.lock);
1491 /* don't autosuspend while transmitting */
1492 if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
1493 spin_unlock_irq(&dev->txq.lock);
1494 return -EBUSY;
1495 } else {
1496 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
1497 spin_unlock_irq(&dev->txq.lock);
1498 }
1499 /*
1500 * accelerate emptying of the rx and tx queues, to avoid
1501 * having everything error out.
1502 */
1503 netif_device_detach (dev->net);
1504 usbnet_terminate_urbs(dev);
1505 usb_kill_urb(dev->interrupt);
1506
1507 /*
1508 * reattach so runtime management can use and
1509 * wake the device
1510 */
1511 netif_device_attach (dev->net);
1512 }
1513 return 0;
1514 }
1515 EXPORT_SYMBOL_GPL(usbnet_suspend);
1516
1517 int usbnet_resume (struct usb_interface *intf)
1518 {
1519 struct usbnet *dev = usb_get_intfdata(intf);
1520 struct sk_buff *skb;
1521 struct urb *res;
1522 int retval;
1523
1524 if (!--dev->suspend_count) {
1525 /* resume interrupt URBs */
1526 if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
1527 usb_submit_urb(dev->interrupt, GFP_NOIO);
1528
1529 spin_lock_irq(&dev->txq.lock);
1530 while ((res = usb_get_from_anchor(&dev->deferred))) {
1531
1532 skb = (struct sk_buff *)res->context;
1533 retval = usb_submit_urb(res, GFP_ATOMIC);
1534 if (retval < 0) {
1535 dev_kfree_skb_any(skb);
1536 usb_free_urb(res);
1537 usb_autopm_put_interface_async(dev->intf);
1538 } else {
1539 dev->net->trans_start = jiffies;
1540 __skb_queue_tail(&dev->txq, skb);
1541 }
1542 }
1543
1544 smp_mb();
1545 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
1546 spin_unlock_irq(&dev->txq.lock);
1547
1548 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
1549 if (!(dev->txq.qlen >= TX_QLEN(dev)))
1550 netif_tx_wake_all_queues(dev->net);
1551 tasklet_schedule (&dev->bh);
1552 }
1553 }
1554 return 0;
1555 }
1556 EXPORT_SYMBOL_GPL(usbnet_resume);
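/* Illustrative only (hypothetical "example" minidriver): the glue a
 * minidriver supplies is a driver_info structure, an id table whose
 * driver_info field points at it, and a usb_driver wired to the generic
 * entry points exported by this file, e.g.
 *
 *	static const struct usb_device_id example_ids[] = {
 *		{ USB_DEVICE(0x1234, 0x5678),
 *		  .driver_info = (unsigned long) &example_info },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(usb, example_ids);
 *
 *	static struct usb_driver example_driver = {
 *		.name		= "example",
 *		.id_table	= example_ids,
 *		.probe		= usbnet_probe,
 *		.disconnect	= usbnet_disconnect,
 *		.suspend	= usbnet_suspend,
 *		.resume		= usbnet_resume,
 *	};
 */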
1557
1558
1559 /*-------------------------------------------------------------------------*/
1560
1561 static int __init usbnet_init(void)
1562 {
1563 /* Compiler should optimize this out. */
1564 BUILD_BUG_ON(
1565 FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
1566
1567 random_ether_addr(node_id);
1568 return 0;
1569 }
1570 module_init(usbnet_init);
1571
1572 static void __exit usbnet_exit(void)
1573 {
1574 }
1575 module_exit(usbnet_exit);
1576
1577 MODULE_AUTHOR("David Brownell");
1578 MODULE_DESCRIPTION("USB network driver framework");
1579 MODULE_LICENSE("GPL");