xen-netback: add control protocol implementation
drivers/net/xen-netback/interface.c
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because core driver calls into xenvif_zerocopy_callback
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}

int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

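/* Interrupt handling: with feature-split-event-channels the frontend
 * signals Tx and Rx work on separate event channels, handled by
 * xenvif_tx_interrupt() and xenvif_rx_interrupt(); with a single shared
 * event channel xenvif_interrupt() simply invokes both. Tx work is
 * deferred to NAPI, Rx work to the per-queue kthread.
 */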
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, we pretend there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete(napi);
		xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

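/* The control ring has its own event channel. The interrupt handler only
 * wakes the control waitqueue; request processing happens in the
 * "%s-control" kthread created in xenvif_connect_ctrl().
 */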
irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	wake_up(&vif->ctrl_wq);

	return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

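/* Queue selection: if the frontend has not configured a hash algorithm
 * via the control ring, fall back to the core's default selection.
 * Otherwise compute the packet hash and, if a mapping table was supplied,
 * use it to translate the hash into a queue number.
 */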
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return fallback(dev, skb) % dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[skb_get_hash_raw(skb) % size];
}

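/* Transmit path (from the local stack towards the guest): the skb is
 * stamped with an expiry time, appended to the queue's internal Rx queue
 * and the per-queue kthread is kicked to push it across the shared ring.
 * Packets are dropped if the queues are not yet set up, if the queue's
 * kthreads are not running, or if multicast filtering rejects the
 * destination address.
 */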
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

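/* ndo_get_stats: the per-queue counters are summed on demand into the
 * netdev stats structure each time the core asks for them.
 */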
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

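/* Restrict the advertised netdev features to what the frontend supports:
 * SG, TSO/TSO6 and checksum offloads are dropped unless the corresponding
 * vif capability flags (can_sg, gso_mask, ip_csum, ipv6_csum) are set.
 */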
static netdev_features_t xenvif_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = vif->num_queues;
	int i;
	unsigned int queue_index;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit = xenvif_start_xmit,
	.ndo_get_stats = xenvif_get_stats,
	.ndo_open = xenvif_open,
	.ndo_stop = xenvif_close,
	.ndo_change_mtu = xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

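/* Allocate and register the netdev backing one frontend vif. The interface
 * starts carrier-off and with no queues; the control and data rings are
 * wired up later by xenvif_connect_ctrl() and xenvif_connect_data().
 */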
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops = &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

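/* Per-queue initialisation: credit-scheduler state, the internal Tx/Rx
 * queues, the pending ring and the pages used for grant-mapped Tx buffers.
 */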
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_timeout.function = xenvif_tx_credit_callback;
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

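/* Connect the control ring: map the shared page granted by the frontend,
 * bind the control event channel and start the "%s-control" kthread that
 * services control requests (notably the packet-hash configuration used
 * by xenvif_select_queue()).
 */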
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	struct task_struct *task;
	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

	init_waitqueue_head(&vif->ctrl_wq);

	err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
						    xenvif_ctrl_interrupt,
						    0, dev->name, vif);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
			      "%s-control", dev->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", dev->name);
		err = PTR_ERR(task);
		goto err_deinit;
	}

	get_task_struct(task);
	vif->ctrl_task = task;

	wake_up_process(vif->ctrl_task);

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
				vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

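/* Connect the data path for one queue: map the Tx/Rx shared rings, set up
 * NAPI, bind either a single shared event channel or split Tx/Rx event
 * channels (feature-split-event-channels), and start the per-queue
 * "%s-guest-rx" and "%s-dealloc" kthreads.
 */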
int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
		       XENVIF_NAPI_WEIGHT);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	queue->stalled = true;

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;
	get_task_struct(task);

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_data_rings(queue);
	netif_napi_del(&queue->napi);
err:
	module_put(THIS_MODULE);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

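/* Tear down the data path: stop the per-queue kthreads, unbind the event
 * channels and unmap the shared rings. The reverse of xenvif_connect_data().
 */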
void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		netif_napi_del(&queue->napi);

		if (queue->task) {
			kthread_stop(queue->task);
			put_task_struct(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_data_rings(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

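/* Tear down the control ring: the reverse of xenvif_connect_ctrl(). */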
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_task) {
		kthread_stop(vif->ctrl_task);
		put_task_struct(vif->ctrl_task);
		vif->ctrl_task = NULL;
	}

	xenvif_deinit_hash(vif);

	if (vif->ctrl_irq) {
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}