uml: Eliminate temporary buffer in eth_configure
arch/um/drivers/net_kern.c
/*
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 * Licensed under the GPL.
 */

#include "linux/kernel.h"
#include "linux/netdevice.h"
#include "linux/rtnetlink.h"
#include "linux/skbuff.h"
#include "linux/socket.h"
#include "linux/spinlock.h"
#include "linux/module.h"
#include "linux/init.h"
#include "linux/etherdevice.h"
#include "linux/list.h"
#include "linux/inetdevice.h"
#include "linux/ctype.h"
#include "linux/bootmem.h"
#include "linux/ethtool.h"
#include "linux/platform_device.h"
#include "asm/uaccess.h"
#include "kern_util.h"
#include "net_kern.h"
#include "net_user.h"
#include "mconsole_kern.h"
#include "init.h"
#include "irq_user.h"
#include "irq_kern.h"

static inline void set_ether_mac(struct net_device *dev, unsigned char *addr)
{
	memcpy(dev->dev_addr, addr, ETH_ALEN);
}

#define DRIVER_NAME "uml-netdev"

static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);

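/*
 * Receive path: pull at most one packet from the host file descriptor
 * into an MTU-sized skb and feed it to the stack via netif_rx().
 */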
static int uml_net_rx(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	int pkt_len;
	struct sk_buff *skb;

	/* If we can't allocate memory, try again next round. */
	skb = dev_alloc_skb(dev->mtu);
	if (skb == NULL) {
		lp->stats.rx_dropped++;
		return 0;
	}

	skb->dev = dev;
	skb_put(skb, dev->mtu);
	skb_reset_mac_header(skb);
	pkt_len = (*lp->read)(lp->fd, &skb, lp);

	if (pkt_len > 0) {
		skb_trim(skb, pkt_len);
		skb->protocol = (*lp->protocol)(skb);
		netif_rx(skb);

		lp->stats.rx_bytes += skb->len;
		lp->stats.rx_packets++;
		return pkt_len;
	}

	kfree_skb(skb);
	return pkt_len;
}

static void uml_dev_close(struct work_struct *work)
{
	struct uml_net_private *lp =
		container_of(work, struct uml_net_private, work);
	dev_close(lp->dev);
}

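/*
 * SIGIO handler: drain the host descriptor until uml_net_rx() reports
 * no more data, or shut the device down (from process context, via the
 * work item above) if the read fails.
 */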
irqreturn_t uml_net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct uml_net_private *lp = dev->priv;
	int err;

	if(!netif_running(dev))
		return(IRQ_NONE);

	spin_lock(&lp->lock);
	while((err = uml_net_rx(dev)) > 0) ;
	if(err < 0) {
		printk(KERN_ERR
		       "Device '%s' read returned %d, shutting it down\n",
		       dev->name, err);
		/* dev_close can't be called in interrupt context, and takes
		 * again lp->lock.
		 * And dev_close() can be safely called multiple times on the
		 * same device, since it tests for (dev->flags & IFF_UP). So
		 * there's no harm in delaying the device shutdown.
		 * Furthermore, the workqueue will not re-enqueue an already
		 * enqueued work item. */
		schedule_work(&lp->work);
		goto out;
	}
	reactivate_fd(lp->fd, UM_ETH_IRQ);

out:
	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}

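/*
 * Open the transport on the host side, attach the read IRQ to the
 * resulting descriptor, and drain anything the host queued before the
 * IRQ was registered.
 */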
static int uml_net_open(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	int err;

	if(lp->fd >= 0){
		err = -ENXIO;
		goto out;
	}

	lp->fd = (*lp->open)(&lp->user);
	if(lp->fd < 0){
		err = lp->fd;
		goto out;
	}

	err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
			     IRQF_DISABLED | IRQF_SHARED, dev->name, dev);
	if(err != 0){
		printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
		err = -ENETUNREACH;
		goto out_close;
	}

	lp->tl.data = (unsigned long) &lp->user;
	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here. In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */
	while((err = uml_net_rx(dev)) > 0) ;

	spin_lock(&opened_lock);
	list_add(&lp->list, &opened);
	spin_unlock(&opened_lock);

	return 0;
out_close:
	if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;
out:
	return err;
}

static int uml_net_close(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;

	netif_stop_queue(dev);

	free_irq(dev->irq, dev);
	if(lp->close != NULL)
		(*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;

	spin_lock(&opened_lock);
	list_del(&lp->list);
	spin_unlock(&opened_lock);

	return 0;
}

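/*
 * Transmit path - the write to the host is synchronous, so the queue
 * is restarted as soon as the skb has been handed to the transport.
 */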
static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	unsigned long flags;
	int len;

	netif_stop_queue(dev);

	spin_lock_irqsave(&lp->lock, flags);

	len = (*lp->write)(lp->fd, &skb, lp);

	if(len == skb->len) {
		lp->stats.tx_packets++;
		lp->stats.tx_bytes += skb->len;
		dev->trans_start = jiffies;
		netif_start_queue(dev);

		/* this is normally done in the interrupt when tx finishes */
		netif_wake_queue(dev);
	}
	else if(len == 0){
		netif_start_queue(dev);
		lp->stats.tx_dropped++;
	}
	else {
		netif_start_queue(dev);
		printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_kfree_skb(skb);

	return 0;
}

static struct net_device_stats *uml_net_get_stats(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	return &lp->stats;
}

static void uml_net_set_multicast_list(struct net_device *dev)
{
	if (dev->flags & IFF_PROMISC) return;
	else if (dev->mc_count) dev->flags |= IFF_ALLMULTI;
	else dev->flags &= ~IFF_ALLMULTI;
}

static void uml_net_tx_timeout(struct net_device *dev)
{
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static int uml_net_set_mac(struct net_device *dev, void *addr)
{
	struct uml_net_private *lp = dev->priv;
	struct sockaddr *hwaddr = addr;

	spin_lock_irq(&lp->lock);
	set_ether_mac(dev, hwaddr->sa_data);
	spin_unlock_irq(&lp->lock);

	return 0;
}

static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
{
	struct uml_net_private *lp = dev->priv;
	int err = 0;

	spin_lock_irq(&lp->lock);

	new_mtu = (*lp->set_mtu)(new_mtu, &lp->user);
	if(new_mtu < 0){
		err = new_mtu;
		goto out;
	}

	dev->mtu = new_mtu;

 out:
	spin_unlock_irq(&lp->lock);
	return err;
}

static void uml_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRIVER_NAME);
	strcpy(info->version, "42");
}

static struct ethtool_ops uml_net_ethtool_ops = {
	.get_drvinfo = uml_net_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

void uml_net_user_timer_expire(unsigned long _conn)
{
#ifdef undef
	struct connection *conn = (struct connection *)_conn;

	dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
	do_connect(conn);
#endif
}

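/*
 * Parse a command-line MAC address, falling back to a random locally
 * administered address if it is missing, malformed, multicast or
 * otherwise invalid.
 */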
static void setup_etheraddr(char *str, unsigned char *addr, char *name)
{
	char *end;
	int i;

	if(str == NULL)
		goto random;

	for(i=0;i<6;i++){
		addr[i] = simple_strtoul(str, &end, 16);
		if((end == str) ||
		   ((*end != ':') && (*end != ',') && (*end != '\0'))){
			printk(KERN_ERR
			       "setup_etheraddr: failed to parse '%s' "
			       "as an ethernet address\n", str);
			goto random;
		}
		str = end + 1;
	}
	if (is_multicast_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign a multicast ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_valid_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign an invalid ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_local_ether_addr(addr)) {
		printk(KERN_WARNING
		       "Warning: attempt to assign a globally valid ethernet address to a "
		       "device\n");
		printk(KERN_WARNING "You should better enable the 2nd rightmost bit "
		       "in the first byte of the MAC, i.e. "
		       "%02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4], addr[5]);
	}
	return;

random:
	printk(KERN_INFO
	       "Choosing a random ethernet address for device %s\n", name);
	random_ether_addr(addr);
}

static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};
static int driver_registered;

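/*
 * Instantiate one UML network device: allocate the net_device together
 * with the transport's private area, register the platform device, fill
 * in the transport callbacks and register the netdevice proper.
 */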
static void eth_configure(int n, void *init, char *mac,
			  struct transport *transport)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;
	int save, err, size;

	size = transport->private_size + sizeof(struct uml_net_private) +
		sizeof(((struct uml_net_private *) 0)->user);

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate struct "
		       "uml_net\n");
		return;
	}

	dev = alloc_etherdev(size);
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate struct "
		       "net_device for eth%d\n", n);
		goto out_free_device;
	}

	INIT_LIST_HEAD(&device->list);
	device->index = n;

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
	 */
	snprintf(dev->name, sizeof(dev->name), "eth%d", n);

	setup_etheraddr(mac, device->mac, dev->name);

	printk(KERN_INFO "Netdevice %d ", n);
	printk("(%02x:%02x:%02x:%02x:%02x:%02x) ",
	       device->mac[0], device->mac[1],
	       device->mac[2], device->mac[3],
	       device->mac[4], device->mac[5]);
	printk(": ");

	lp = dev->priv;
	/* This points to the transport private data. It's still clear, but we
	 * must memset it to 0 *now*. Let's help the drivers. */
	memset(lp, 0, size);
	INIT_WORK(&lp->work, uml_dev_close);

	/* sysfs register */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	if(platform_device_register(&device->pdev))
		goto out_free_netdev;
	SET_NETDEV_DEV(dev,&device->pdev.dev);

	device->dev = dev;

	/*
	 * These just fill in a data structure, so there's no failure
	 * to be worried about.
	 */
	(*transport->kern->init)(dev, init);

	/* lp.user is the first four bytes of the transport data, which
	 * has already been initialized. This structure assignment will
	 * overwrite that, so we make sure that .user gets overwritten with
	 * what it already has.
	 */
	save = lp->user[0];
	*lp = ((struct uml_net_private)
		{ .list = LIST_HEAD_INIT(lp->list),
		  .dev = dev,
		  .fd = -1,
		  .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
		  .protocol = transport->kern->protocol,
		  .open = transport->user->open,
		  .close = transport->user->close,
		  .remove = transport->user->remove,
		  .read = transport->kern->read,
		  .write = transport->kern->write,
		  .add_address = transport->user->add_address,
		  .delete_address = transport->user->delete_address,
		  .set_mtu = transport->user->set_mtu,
		  .user = { save } });

	init_timer(&lp->tl);
	spin_lock_init(&lp->lock);
	lp->tl.function = uml_net_user_timer_expire;
	memcpy(lp->mac, device->mac, sizeof(lp->mac));

	if ((transport->user->init != NULL) &&
	    ((*transport->user->init)(&lp->user, dev) != 0))
		goto out_unregister;

	set_ether_mac(dev, device->mac);
	dev->mtu = transport->user->max_packet;
	dev->open = uml_net_open;
	dev->hard_start_xmit = uml_net_start_xmit;
	dev->stop = uml_net_close;
	dev->get_stats = uml_net_get_stats;
	dev->set_multicast_list = uml_net_set_multicast_list;
	dev->tx_timeout = uml_net_tx_timeout;
	dev->set_mac_address = uml_net_set_mac;
	dev->change_mtu = uml_net_change_mtu;
	dev->ethtool_ops = &uml_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	dev->irq = UM_ETH_IRQ;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		goto out_undo_user_init;

	spin_lock(&devices_lock);
	list_add(&device->list, &devices);
	spin_unlock(&devices_lock);

	return;

out_undo_user_init:
	if (transport->user->remove != NULL)
		(*transport->user->remove)(&lp->user);
out_unregister:
	platform_device_unregister(&device->pdev);
out_free_netdev:
	free_netdev(dev);
out_free_device:
	kfree(device);
}

static struct uml_net *find_device(int n)
{
	struct uml_net *device;
	struct list_head *ele;

	spin_lock(&devices_lock);
	list_for_each(ele, &devices){
		device = list_entry(ele, struct uml_net, list);
		if(device->index == n)
			goto out;
	}
	device = NULL;
 out:
	spin_unlock(&devices_lock);
	return device;
}

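/* Parse the "<n>=<transport args>" part of an "eth<n>=..." option. */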
static int eth_parse(char *str, int *index_out, char **str_out,
		     char **error_out)
{
	char *end;
	int n, err = -EINVAL;

	n = simple_strtoul(str, &end, 0);
	if(end == str){
		*error_out = "Bad device number";
		return err;
	}

	str = end;
	if(*str != '='){
		*error_out = "Expected '=' after device number";
		return err;
	}

	str++;
	if(find_device(n)){
		*error_out = "Device already configured";
		return err;
	}

	*index_out = n;
	*str_out = str;
	return 0;
}

struct eth_init {
	struct list_head list;
	char *init;
	int index;
};

static DEFINE_SPINLOCK(transports_lock);
static LIST_HEAD(transports);

/* Filled in during early boot */
static LIST_HEAD(eth_cmd_line);

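/*
 * Match a command-line specification against a transport by name; on a
 * match, let the transport's setup routine parse the remaining options.
 */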
static int check_transport(struct transport *transport, char *eth, int n,
			   void **init_out, char **mac_out)
{
	int len;

	len = strlen(transport->name);
	if(strncmp(eth, transport->name, len))
		return 0;

	eth += len;
	if(*eth == ',')
		eth++;
	else if(*eth != '\0')
		return 0;

	*init_out = kmalloc(transport->setup_size, GFP_KERNEL);
	if(*init_out == NULL)
		return 1;

	if(!transport->setup(eth, mac_out, *init_out)){
		kfree(*init_out);
		*init_out = NULL;
	}
	return 1;
}

void register_transport(struct transport *new)
{
	struct list_head *ele, *next;
	struct eth_init *eth;
	void *init;
	char *mac = NULL;
	int match;

	spin_lock(&transports_lock);
	BUG_ON(!list_empty(&new->list));
	list_add(&new->list, &transports);
	spin_unlock(&transports_lock);

	list_for_each_safe(ele, next, &eth_cmd_line){
		eth = list_entry(ele, struct eth_init, list);
		match = check_transport(new, eth->init, eth->index, &init,
					&mac);
		if(!match)
			continue;
		else if(init != NULL){
			eth_configure(eth->index, init, mac, new);
			kfree(init);
		}
		list_del(&eth->list);
	}
}

static int eth_setup_common(char *str, int index)
{
	struct list_head *ele;
	struct transport *transport;
	void *init;
	char *mac = NULL;
	int found = 0;

	spin_lock(&transports_lock);
	list_for_each(ele, &transports){
		transport = list_entry(ele, struct transport, list);
		if(!check_transport(transport, str, index, &init, &mac))
			continue;
		if(init != NULL){
			eth_configure(index, init, mac, transport);
			kfree(init);
		}
		found = 1;
		break;
	}

	spin_unlock(&transports_lock);
	return found;
}

static int eth_setup(char *str)
{
	struct eth_init *new;
	char *error;
	int n, err;

	err = eth_parse(str, &n, &str, &error);
	if(err){
		printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n",
		       str, error);
		return 1;
	}

	new = alloc_bootmem(sizeof(*new));
	if (new == NULL){
		printk("eth_init : alloc_bootmem failed\n");
		return 1;
	}

	INIT_LIST_HEAD(&new->list);
	new->index = n;
	new->init = str;

	list_add_tail(&new->list, &eth_cmd_line);
	return 1;
}

__setup("eth", eth_setup);
__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
"    Configure a network device.\n\n"
);

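/* mconsole handlers: hot-plug ("config eth<n>=...") and removal. */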
static int net_config(char *str, char **error_out)
{
	int n, err;

	err = eth_parse(str, &n, &str, error_out);
	if(err)
		return err;

	/* This string is broken up and the pieces used by the underlying
	 * driver. So, it is freed only if eth_setup_common fails.
	 */
	str = kstrdup(str, GFP_KERNEL);
	if(str == NULL){
		*error_out = "net_config failed to strdup string";
		return -ENOMEM;
	}
	err = !eth_setup_common(str, n);
	if(err)
		kfree(str);
	return(err);
}

static int net_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}

static int net_remove(int n, char **error_out)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;

	device = find_device(n);
	if(device == NULL)
		return -ENODEV;

	dev = device->dev;
	lp = dev->priv;
	if(lp->fd > 0)
		return -EBUSY;
	if(lp->remove != NULL) (*lp->remove)(&lp->user);
	unregister_netdev(dev);
	platform_device_unregister(&device->pdev);

	list_del(&device->list);
	kfree(device);
	free_netdev(dev);
	return 0;
}

static struct mc_device net_mc = {
	.list = LIST_HEAD_INIT(net_mc.list),
	.name = "eth",
	.config = net_config,
	.get_config = NULL,
	.id = net_id,
	.remove = net_remove,
};

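/*
 * Mirror interface address changes to the transport's add_address and
 * delete_address hooks so the host side of the interface can track them.
 */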
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct uml_net_private *lp;
	void (*proc)(unsigned char *, unsigned char *, void *);
	unsigned char addr_buf[4], netmask_buf[4];

	if(dev->open != uml_net_open)
		return NOTIFY_DONE;

	lp = dev->priv;

	proc = NULL;
	switch (event){
	case NETDEV_UP:
		proc = lp->add_address;
		break;
	case NETDEV_DOWN:
		proc = lp->delete_address;
		break;
	}
	if(proc != NULL){
		memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
		memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
		(*proc)(addr_buf, netmask_buf, &lp->user);
	}
	return NOTIFY_DONE;
}

/* uml_net_init shouldn't be called twice on two CPUs at the same time */
struct notifier_block uml_inetaddr_notifier = {
	.notifier_call = uml_inetaddr_event,
};

static int uml_net_init(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;
	struct in_device *ip;
	struct in_ifaddr *in;

	mconsole_register_dev(&net_mc);
	register_inetaddr_notifier(&uml_inetaddr_notifier);

	/* Devices may have been opened already, so the uml_inetaddr_notifier
	 * didn't get a chance to run for them. This fakes it so that
	 * addresses which have already been set up get handled properly.
	 */
	spin_lock(&opened_lock);
	list_for_each(ele, &opened){
		lp = list_entry(ele, struct uml_net_private, list);
		ip = lp->dev->ip_ptr;
		if(ip == NULL)
			continue;
		in = ip->ifa_list;
		while(in != NULL){
			uml_inetaddr_event(NULL, NETDEV_UP, in);
			in = in->ifa_next;
		}
	}
	spin_unlock(&opened_lock);

	return 0;
}

__initcall(uml_net_init);

static void close_devices(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;

	spin_lock(&opened_lock);
	list_for_each(ele, &opened){
		lp = list_entry(ele, struct uml_net_private, list);
		free_irq(lp->dev->irq, lp->dev);
		if((lp->close != NULL) && (lp->fd >= 0))
			(*lp->close)(lp->fd, &lp->user);
		if(lp->remove != NULL)
			(*lp->remove)(&lp->user);
	}
	spin_unlock(&opened_lock);
}

__uml_exitcall(close_devices);

struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
{
	if((skb != NULL) && (skb_tailroom(skb) < extra)){
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
		dev_kfree_skb(skb);
		skb = skb2;
	}
	if(skb != NULL) skb_put(skb, extra);
	return(skb);
}

void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
					void *),
		    void *arg)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	unsigned char address[4], netmask[4];

	if(ip == NULL) return;
	in = ip->ifa_list;
	while(in != NULL){
		memcpy(address, &in->ifa_address, sizeof(address));
		memcpy(netmask, &in->ifa_mask, sizeof(netmask));
		(*cb)(address, netmask, arg);
		in = in->ifa_next;
	}
}

int dev_netmask(void *d, void *m)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	__be32 *mask_out = m;

	if(ip == NULL)
		return(1);

	in = ip->ifa_list;
	if(in == NULL)
		return(1);

	*mask_out = in->ifa_mask;
	return(0);
}

void *get_output_buffer(int *len_out)
{
	void *ret;

	ret = (void *) __get_free_pages(GFP_KERNEL, 0);
	if(ret) *len_out = PAGE_SIZE;
	else *len_out = 0;
	return ret;
}

void free_output_buffer(void *buffer)
{
	free_pages((unsigned long) buffer, 0);
}

int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
		     char **gate_addr)
{
	char *remain;

	remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
	if(remain != NULL){
		printk("tap_setup_common - Extra garbage on specification : "
		       "'%s'\n", remain);
		return(1);
	}

	return(0);
}

unsigned short eth_protocol(struct sk_buff *skb)
{
	return(eth_type_trans(skb, skb->dev));
}