drivers/net/usb/cdc-phonet.c
/*
 * phonet.c -- USB CDC Phonet host driver
 *
 * Copyright (C) 2008-2009 Nokia Corporation. All rights reserved.
 *
 * Author: Rémi Denis-Courmont
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_phonet.h>

#define PN_MEDIA_USB	0x1B

static const unsigned rxq_size = 17;

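/*
 * Per-interface driver state, stored as the network device's private data.
 * The structure is followed by rxq_size URB pointers for the receive queue
 * (see the alloc_netdev() call in usbpn_probe()).
 */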
struct usbpn_dev {
	struct net_device *dev;

	struct usb_interface *intf, *data_intf;
	struct usb_device *usb;
	unsigned int tx_pipe, rx_pipe;
	u8 active_setting;
	u8 disconnected;

	unsigned tx_queue;
	spinlock_t tx_lock;

	spinlock_t rx_lock;
	struct sk_buff *rx_skb;
	struct urb *urbs[0];
};

static void tx_complete(struct urb *req);
static void rx_complete(struct urb *req);

/*
 * Network device callbacks
 */
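/*
 * Transmit path: each Phonet skb is sent as one bulk OUT URB.  The number
 * of URBs in flight is capped at dev->tx_queue_len; the queue is stopped
 * when that limit is reached and woken again from tx_complete().
 */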
static netdev_tx_t usbpn_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct usbpn_dev *pnd = netdev_priv(dev);
	struct urb *req = NULL;
	unsigned long flags;
	int err;

	if (skb->protocol != htons(ETH_P_PHONET))
		goto drop;

	req = usb_alloc_urb(0, GFP_ATOMIC);
	if (!req)
		goto drop;
	usb_fill_bulk_urb(req, pnd->usb, pnd->tx_pipe, skb->data, skb->len,
			  tx_complete, skb);
	req->transfer_flags = URB_ZERO_PACKET;
	err = usb_submit_urb(req, GFP_ATOMIC);
	if (err) {
		usb_free_urb(req);
		goto drop;
	}

	spin_lock_irqsave(&pnd->tx_lock, flags);
	pnd->tx_queue++;
	if (pnd->tx_queue >= dev->tx_queue_len)
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&pnd->tx_lock, flags);
	return NETDEV_TX_OK;

drop:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

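/* Bulk OUT completion: update statistics, free the skb and URB, unthrottle. */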
static void tx_complete(struct urb *req)
{
	struct sk_buff *skb = req->context;
	struct net_device *dev = skb->dev;
	struct usbpn_dev *pnd = netdev_priv(dev);

	switch (req->status) {
	case 0:
		dev->stats.tx_bytes += skb->len;
		break;

	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		dev->stats.tx_aborted_errors++;
		/* fall through: aborted transfers also count as TX errors */
	default:
		dev->stats.tx_errors++;
		dev_dbg(&dev->dev, "TX error (%d)\n", req->status);
	}
	dev->stats.tx_packets++;

	spin_lock(&pnd->tx_lock);
	pnd->tx_queue--;
	netif_wake_queue(dev);
	spin_unlock(&pnd->tx_lock);

	dev_kfree_skb_any(skb);
	usb_free_urb(req);
}

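/* Allocate one page as the receive buffer and submit a bulk IN URB for it. */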
static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
{
	struct net_device *dev = pnd->dev;
	struct page *page;
	int err;

	page = __netdev_alloc_page(dev, gfp_flags);
	if (!page)
		return -ENOMEM;

	usb_fill_bulk_urb(req, pnd->usb, pnd->rx_pipe, page_address(page),
			  PAGE_SIZE, rx_complete, dev);
	req->transfer_flags = 0;
	err = usb_submit_urb(req, gfp_flags);
	if (unlikely(err)) {
		dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
		netdev_free_page(dev, page);
	}
	return err;
}

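/*
 * Bulk IN completion.  A Phonet packet may span several PAGE_SIZE reads:
 * completed pages are chained as fragments onto pnd->rx_skb, and the skb
 * is handed to the stack when a short read (less than PAGE_SIZE) marks the
 * last fragment.  The buffer page is either consumed by the skb or freed,
 * and the URB is resubmitted unless the device is going away.
 */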
static void rx_complete(struct urb *req)
{
	struct net_device *dev = req->context;
	struct usbpn_dev *pnd = netdev_priv(dev);
	struct page *page = virt_to_page(req->transfer_buffer);
	struct sk_buff *skb;
	unsigned long flags;

	switch (req->status) {
	case 0:
		spin_lock_irqsave(&pnd->rx_lock, flags);
		skb = pnd->rx_skb;
		if (!skb) {
			skb = pnd->rx_skb = netdev_alloc_skb(dev, 12);
			if (likely(skb)) {
				/* Can't use pskb_pull() on page in IRQ */
				memcpy(skb_put(skb, 1), page_address(page), 1);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						page, 1, req->actual_length);
				page = NULL;
			}
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, 0, req->actual_length);
			page = NULL;
		}
		if (req->actual_length < PAGE_SIZE)
			pnd->rx_skb = NULL; /* Last fragment */
		else
			skb = NULL;
		spin_unlock_irqrestore(&pnd->rx_lock, flags);
		if (skb) {
			skb->protocol = htons(ETH_P_PHONET);
			skb_reset_mac_header(skb);
			__skb_pull(skb, 1);
			skb->dev = dev;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += skb->len;

			netif_rx(skb);
		}
		goto resubmit;

	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		req = NULL;
		break;

	case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		dev_dbg(&dev->dev, "RX overflow\n");
		break;

	case -EILSEQ:
		dev->stats.rx_crc_errors++;
		break;
	}

	dev->stats.rx_errors++;
resubmit:
	if (page)
		netdev_free_page(dev, page);
	if (req)
		rx_submit(pnd, req, GFP_ATOMIC);
}

static int usbpn_close(struct net_device *dev);

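/*
 * Bring-up: select the active altsetting of the data interface and prime
 * the receive queue with rxq_size bulk URBs before enabling transmission.
 */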
static int usbpn_open(struct net_device *dev)
{
	struct usbpn_dev *pnd = netdev_priv(dev);
	int err;
	unsigned i;
	unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber;

	err = usb_set_interface(pnd->usb, num, pnd->active_setting);
	if (err)
		return err;

	for (i = 0; i < rxq_size; i++) {
		struct urb *req = usb_alloc_urb(0, GFP_KERNEL);

		if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
			usbpn_close(dev);
			return -ENOMEM;
		}
		pnd->urbs[i] = req;
	}

	netif_wake_queue(dev);
	return 0;
}

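/*
 * Shutdown: kill and free all outstanding RX URBs, then switch the data
 * interface back to its inactive altsetting.
 */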
static int usbpn_close(struct net_device *dev)
{
	struct usbpn_dev *pnd = netdev_priv(dev);
	unsigned i;
	unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber;

	netif_stop_queue(dev);

	for (i = 0; i < rxq_size; i++) {
		struct urb *req = pnd->urbs[i];

		if (!req)
			continue;
		usb_kill_urb(req);
		usb_free_urb(req);
		pnd->urbs[i] = NULL;
	}

	return usb_set_interface(pnd->usb, num, !pnd->active_setting);
}

static int usbpn_set_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU))
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops usbpn_ops = {
	.ndo_open	= usbpn_open,
	.ndo_stop	= usbpn_close,
	.ndo_start_xmit	= usbpn_xmit,
	.ndo_change_mtu	= usbpn_set_mtu,
};

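/*
 * Network device initialisation for the Phonet point-to-point link:
 * no ARP, a single-byte Phonet media address and a one-byte header.
 */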
static void usbpn_setup(struct net_device *dev)
{
	dev->features = 0;
	dev->netdev_ops = &usbpn_ops;
	dev->header_ops = &phonet_header_ops;
	dev->type = ARPHRD_PHONET;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = PHONET_MAX_MTU;
	dev->hard_header_len = 1;
	dev->dev_addr[0] = PN_MEDIA_USB;
	dev->addr_len = 1;
	dev->tx_queue_len = 3;

	dev->destructor = free_netdev;
}

/*
 * USB driver callbacks
 */
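/*
 * Match Nokia (vendor 0x0421) interfaces of the CDC communication class
 * with the vendor-specific subclass 0xFE.
 */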
static struct usb_device_id usbpn_ids[] = {
	{
		.match_flags = USB_DEVICE_ID_MATCH_VENDOR
			| USB_DEVICE_ID_MATCH_INT_CLASS
			| USB_DEVICE_ID_MATCH_INT_SUBCLASS,
		.idVendor = 0x0421, /* Nokia */
		.bInterfaceClass = USB_CLASS_COMM,
		.bInterfaceSubClass = 0xFE,
	},
	{ },
};

MODULE_DEVICE_TABLE(usb, usbpn_ids);

static struct usb_driver usbpn_driver;

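/*
 * Probe: parse the class-specific descriptors of the control interface to
 * find the CDC union descriptor and the Phonet functional descriptor
 * (subtype 0xAB), locate the data interface's active and inactive
 * altsettings and its bulk endpoints, then register a Phonet network device.
 */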
int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	static const char ifname[] = "usbpn%d";
	const struct usb_cdc_union_desc *union_header = NULL;
	const struct usb_cdc_header_desc *phonet_header = NULL;
	const struct usb_host_interface *data_desc;
	struct usb_interface *data_intf;
	struct usb_device *usbdev = interface_to_usbdev(intf);
	struct net_device *dev;
	struct usbpn_dev *pnd;
	u8 *data;
	int len, err;

	data = intf->altsetting->extra;
	len = intf->altsetting->extralen;
	while (len >= 3) {
		u8 dlen = data[0];
		if (dlen < 3)
			return -EINVAL;

		/* bDescriptorType */
		if (data[1] == USB_DT_CS_INTERFACE) {
			/* bDescriptorSubType */
			switch (data[2]) {
			case USB_CDC_UNION_TYPE:
				if (union_header || dlen < 5)
					break;
				union_header =
					(struct usb_cdc_union_desc *)data;
				break;
			case 0xAB:
				if (phonet_header || dlen < 5)
					break;
				phonet_header =
					(struct usb_cdc_header_desc *)data;
				break;
			}
		}
		data += dlen;
		len -= dlen;
	}

	if (!union_header || !phonet_header)
		return -EINVAL;

	data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0);
	if (data_intf == NULL)
		return -ENODEV;
	/* Data interface has one inactive and one active setting */
	if (data_intf->num_altsetting != 2)
		return -EINVAL;
	if (data_intf->altsetting[0].desc.bNumEndpoints == 0
	 && data_intf->altsetting[1].desc.bNumEndpoints == 2)
		data_desc = data_intf->altsetting + 1;
	else
	if (data_intf->altsetting[0].desc.bNumEndpoints == 2
	 && data_intf->altsetting[1].desc.bNumEndpoints == 0)
		data_desc = data_intf->altsetting;
	else
		return -EINVAL;

	dev = alloc_netdev(sizeof(*pnd) + sizeof(pnd->urbs[0]) * rxq_size,
			   ifname, usbpn_setup);
	if (!dev)
		return -ENOMEM;

	pnd = netdev_priv(dev);
	SET_NETDEV_DEV(dev, &intf->dev);
	netif_stop_queue(dev);

	pnd->dev = dev;
	pnd->usb = usb_get_dev(usbdev);
	pnd->intf = intf;
	pnd->data_intf = data_intf;
	spin_lock_init(&pnd->tx_lock);
	spin_lock_init(&pnd->rx_lock);
	/* Endpoints */
	if (usb_pipein(data_desc->endpoint[0].desc.bEndpointAddress)) {
		pnd->rx_pipe = usb_rcvbulkpipe(usbdev,
			data_desc->endpoint[0].desc.bEndpointAddress);
		pnd->tx_pipe = usb_sndbulkpipe(usbdev,
			data_desc->endpoint[1].desc.bEndpointAddress);
	} else {
		pnd->rx_pipe = usb_rcvbulkpipe(usbdev,
			data_desc->endpoint[1].desc.bEndpointAddress);
		pnd->tx_pipe = usb_sndbulkpipe(usbdev,
			data_desc->endpoint[0].desc.bEndpointAddress);
	}
	pnd->active_setting = data_desc - data_intf->altsetting;

	err = usb_driver_claim_interface(&usbpn_driver, data_intf, pnd);
	if (err)
		goto out;

	/* Force inactive mode until the network device is brought UP */
	usb_set_interface(usbdev, union_header->bSlaveInterface0,
			  !pnd->active_setting);
	usb_set_intfdata(intf, pnd);

	err = register_netdev(dev);
	if (err) {
		usb_driver_release_interface(&usbpn_driver, data_intf);
		goto out;
	}

	dev_dbg(&dev->dev, "USB CDC Phonet device found\n");
	return 0;

out:
	usb_set_intfdata(intf, NULL);
	free_netdev(dev);
	return err;
}

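/*
 * Disconnect is invoked for both the control and the data interface, since
 * the driver claims both; the "disconnected" flag ensures the network device
 * is only torn down once.
 */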
static void usbpn_disconnect(struct usb_interface *intf)
{
	struct usbpn_dev *pnd = usb_get_intfdata(intf);
	struct usb_device *usb = pnd->usb;

	if (pnd->disconnected)
		return;

	pnd->disconnected = 1;
	usb_driver_release_interface(&usbpn_driver,
		(pnd->intf == intf) ? pnd->data_intf : pnd->intf);
	unregister_netdev(pnd->dev);
	usb_put_dev(usb);
}

static struct usb_driver usbpn_driver = {
	.name = "cdc_phonet",
	.probe = usbpn_probe,
	.disconnect = usbpn_disconnect,
	.id_table = usbpn_ids,
};

static int __init usbpn_init(void)
{
	return usb_register(&usbpn_driver);
}

static void __exit usbpn_exit(void)
{
	usb_deregister(&usbpn_driver);
}

module_init(usbpn_init);
module_exit(usbpn_exit);

MODULE_AUTHOR("Remi Denis-Courmont");
MODULE_DESCRIPTION("USB CDC Phonet host interface");
MODULE_LICENSE("GPL");