/*
 * drivers/net/bonding/bond_netlink.c - Netlink interface for bonding
 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "bonding.h"

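/* Upper bound on the netlink attribute space that bond_fill_slave_info()
 * may consume for one slave device.
 */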
static size_t bond_get_slave_size(const struct net_device *bond_dev,
				  const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u8)) +	/* IFLA_BOND_SLAVE_STATE */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_SLAVE_MII_STATUS */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
		nla_total_size(MAX_ADDR_LEN) +	/* IFLA_BOND_SLAVE_PERM_HWADDR */
		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_QUEUE_ID */
		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
		0;
}

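/* Fill the IFLA_BOND_SLAVE_* attributes for one slave; runs under RTNL,
 * which is what makes bond_slave_get_rtnl() safe here.
 */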
static int bond_fill_slave_info(struct sk_buff *skb,
				const struct net_device *bond_dev,
				const struct net_device *slave_dev)
{
	struct slave *slave = bond_slave_get_rtnl(slave_dev);

	if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
			slave->link_failure_count))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
		    slave_dev->addr_len, slave->perm_hwaddr))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
		goto nla_put_failure;

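	/* The aggregator ID is only meaningful in 802.3ad mode, and only
	 * once the port has been attached to an aggregator.
	 */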
	if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
		const struct aggregator *agg;

		agg = SLAVE_AD_INFO(slave)->port.aggregator;
		if (agg)
			if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
					agg->aggregator_identifier))
				goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

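/* Attribute policy for the IFLA_BOND_* options accepted from userspace. */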
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
	[IFLA_BOND_MODE]		= { .type = NLA_U8 },
	[IFLA_BOND_ACTIVE_SLAVE]	= { .type = NLA_U32 },
	[IFLA_BOND_MIIMON]		= { .type = NLA_U32 },
	[IFLA_BOND_UPDELAY]		= { .type = NLA_U32 },
	[IFLA_BOND_DOWNDELAY]		= { .type = NLA_U32 },
	[IFLA_BOND_USE_CARRIER]		= { .type = NLA_U8 },
	[IFLA_BOND_ARP_INTERVAL]	= { .type = NLA_U32 },
	[IFLA_BOND_ARP_IP_TARGET]	= { .type = NLA_NESTED },
	[IFLA_BOND_ARP_VALIDATE]	= { .type = NLA_U32 },
	[IFLA_BOND_ARP_ALL_TARGETS]	= { .type = NLA_U32 },
	[IFLA_BOND_PRIMARY]		= { .type = NLA_U32 },
	[IFLA_BOND_PRIMARY_RESELECT]	= { .type = NLA_U8 },
	[IFLA_BOND_FAIL_OVER_MAC]	= { .type = NLA_U8 },
	[IFLA_BOND_XMIT_HASH_POLICY]	= { .type = NLA_U8 },
	[IFLA_BOND_RESEND_IGMP]		= { .type = NLA_U32 },
	[IFLA_BOND_NUM_PEER_NOTIF]	= { .type = NLA_U8 },
	[IFLA_BOND_ALL_SLAVES_ACTIVE]	= { .type = NLA_U8 },
	[IFLA_BOND_MIN_LINKS]		= { .type = NLA_U32 },
	[IFLA_BOND_LP_INTERVAL]		= { .type = NLA_U32 },
	[IFLA_BOND_PACKETS_PER_SLAVE]	= { .type = NLA_U32 },
	[IFLA_BOND_AD_LACP_RATE]	= { .type = NLA_U8 },
	[IFLA_BOND_AD_SELECT]		= { .type = NLA_U8 },
	[IFLA_BOND_AD_INFO]		= { .type = NLA_NESTED },
};

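/* Basic validation of generic link attributes: a supplied MAC address must
 * be a well-formed unicast Ethernet address.
 */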
static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

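/* Apply each IFLA_BOND_* attribute present in the request through the
 * bonding options API; the first failure aborts the change. Runs with
 * RTNL held, as required by __bond_opt_set().
 */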
static int bond_changelink(struct net_device *bond_dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_opt_value newval;
	int miimon = 0;
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BOND_MODE]) {
		int mode = nla_get_u8(data[IFLA_BOND_MODE]);

		bond_opt_initval(&newval, mode);
		err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ACTIVE_SLAVE]) {
		int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
		struct net_device *slave_dev;
		char *active_slave = "";

		if (ifindex != 0) {
			slave_dev = __dev_get_by_index(dev_net(bond_dev),
						       ifindex);
			if (!slave_dev)
				return -ENODEV;
			active_slave = slave_dev->name;
		}
		bond_opt_initstr(&newval, active_slave);
		err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_MIIMON]) {
		miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);

		bond_opt_initval(&newval, miimon);
		err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_UPDELAY]) {
		int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);

		bond_opt_initval(&newval, updelay);
		err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_DOWNDELAY]) {
		int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);

		bond_opt_initval(&newval, downdelay);
		err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_USE_CARRIER]) {
		int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);

		bond_opt_initval(&newval, use_carrier);
		err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ARP_INTERVAL]) {
		int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);

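		/* miimon is non-zero only if IFLA_BOND_MIIMON was carried in
		 * this same request, so this only rejects conflicting values
		 * supplied together.
		 */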
		if (arp_interval && miimon) {
			netdev_err(bond->dev, "ARP monitoring cannot be used with MII monitoring\n");
			return -EINVAL;
		}

		bond_opt_initval(&newval, arp_interval);
		err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
		if (err)
			return err;
	}
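	/* New ARP targets replace the existing list: clear it first, then
	 * add each nested address in order.
	 */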
	if (data[IFLA_BOND_ARP_IP_TARGET]) {
		struct nlattr *attr;
		int i = 0, rem;

		bond_option_arp_ip_targets_clear(bond);
		nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
			__be32 target = nla_get_be32(attr);

			bond_opt_initval(&newval, (__force u64)target);
			err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
					     &newval);
			if (err)
				break;
			i++;
		}
		if (i == 0 && bond->params.arp_interval)
			netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n");
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ARP_VALIDATE]) {
		int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);

		if (arp_validate && miimon) {
			netdev_err(bond->dev, "ARP validating cannot be used with MII monitoring\n");
			return -EINVAL;
		}

		bond_opt_initval(&newval, arp_validate);
		err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
		int arp_all_targets =
			nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);

		bond_opt_initval(&newval, arp_all_targets);
		err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_PRIMARY]) {
		int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
		struct net_device *dev;
		char *primary = "";

		dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
		if (dev)
			primary = dev->name;

		bond_opt_initstr(&newval, primary);
		err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_PRIMARY_RESELECT]) {
		int primary_reselect =
			nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);

		bond_opt_initval(&newval, primary_reselect);
		err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_FAIL_OVER_MAC]) {
		int fail_over_mac =
			nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);

		bond_opt_initval(&newval, fail_over_mac);
		err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
		int xmit_hash_policy =
			nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);

		bond_opt_initval(&newval, xmit_hash_policy);
		err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_RESEND_IGMP]) {
		int resend_igmp =
			nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);

		bond_opt_initval(&newval, resend_igmp);
		err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
		int num_peer_notif =
			nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);

		bond_opt_initval(&newval, num_peer_notif);
		err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
		int all_slaves_active =
			nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);

		bond_opt_initval(&newval, all_slaves_active);
		err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_MIN_LINKS]) {
		int min_links =
			nla_get_u32(data[IFLA_BOND_MIN_LINKS]);

		bond_opt_initval(&newval, min_links);
		err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_LP_INTERVAL]) {
		int lp_interval =
			nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);

		bond_opt_initval(&newval, lp_interval);
		err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
		int packets_per_slave =
			nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);

		bond_opt_initval(&newval, packets_per_slave);
		err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_AD_LACP_RATE]) {
		int lacp_rate =
			nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);

		bond_opt_initval(&newval, lacp_rate);
		err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_AD_SELECT]) {
		int ad_select =
			nla_get_u8(data[IFLA_BOND_AD_SELECT]);

		bond_opt_initval(&newval, ad_select);
		err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
		if (err)
			return err;
	}
	return 0;
}

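/* Link creation: configure the bond from the supplied attributes before
 * registering the new device, so a bad configuration fails the whole add.
 */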
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	err = bond_changelink(bond_dev, tb, data);
	if (err < 0)
		return err;

	return register_netdevice(bond_dev);
}

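/* Worst-case netlink attribute space needed by bond_fill_info(). */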
static size_t bond_get_size(const struct net_device *bond_dev)
{
	return nla_total_size(sizeof(u8)) +	/* IFLA_BOND_MODE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ACTIVE_SLAVE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_MIIMON */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_UPDELAY */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_DOWNDELAY */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_USE_CARRIER */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_INTERVAL */
						/* IFLA_BOND_ARP_IP_TARGET */
		nla_total_size(sizeof(struct nlattr)) +
		nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_VALIDATE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_ALL_TARGETS */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_PRIMARY */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_PRIMARY_RESELECT */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_FAIL_OVER_MAC */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_XMIT_HASH_POLICY */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_RESEND_IGMP */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_NUM_PEER_NOTIF */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_ALL_SLAVES_ACTIVE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_MIN_LINKS */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_LP_INTERVAL */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_PACKETS_PER_SLAVE */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_AD_LACP_RATE */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_AD_SELECT */
		nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
		nla_total_size(ETH_ALEN) +    /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
		0;
}

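/* Look up the current active slave's ifindex under RCU; 0 means none. */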
static int bond_option_active_slave_get_ifindex(struct bonding *bond)
{
	const struct net_device *slave;
	int ifindex;

	rcu_read_lock();
	slave = bond_option_active_slave_get_rcu(bond);
	ifindex = slave ? slave->ifindex : 0;
	rcu_read_unlock();
	return ifindex;
}

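/* Report the bond's current configuration (plus 802.3ad aggregator state,
 * when applicable) as IFLA_BOND_* attributes.
 */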
static int bond_fill_info(struct sk_buff *skb,
			  const struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	unsigned int packets_per_slave;
	int ifindex, i, targets_added;
	struct nlattr *targets;

	if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
		goto nla_put_failure;

	ifindex = bond_option_active_slave_get_ifindex(bond);
	if (ifindex && nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, ifindex))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
		goto nla_put_failure;

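	/* updelay/downdelay are stored as a multiple of miimon; report them
	 * back to userspace in milliseconds.
	 */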
	if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
			bond->params.updelay * bond->params.miimon))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
			bond->params.downdelay * bond->params.miimon))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
		goto nla_put_failure;

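	/* ARP targets go out as a nested attribute, one entry per configured
	 * address; the nest is cancelled again if the list turns out empty.
	 */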
	targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
	if (!targets)
		goto nla_put_failure;

	targets_added = 0;
	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
		if (bond->params.arp_targets[i]) {
			nla_put_be32(skb, i, bond->params.arp_targets[i]);
			targets_added = 1;
		}
	}

	if (targets_added)
		nla_nest_end(skb, targets);
	else
		nla_nest_cancel(skb, targets);

	if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
			bond->params.arp_all_targets))
		goto nla_put_failure;

	if (bond->primary_slave &&
	    nla_put_u32(skb, IFLA_BOND_PRIMARY,
			bond->primary_slave->dev->ifindex))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
		       bond->params.primary_reselect))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
		       bond->params.fail_over_mac))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
		       bond->params.xmit_policy))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
			bond->params.resend_igmp))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
		       bond->params.num_peer_notif))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
		       bond->params.all_slaves_active))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
			bond->params.min_links))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
			bond->params.lp_interval))
		goto nla_put_failure;

	packets_per_slave = bond->params.packets_per_slave;
	if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
			packets_per_slave))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
		       bond->params.lacp_fast))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
		       bond->params.ad_select))
		goto nla_put_failure;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		struct ad_info info;

		if (!bond_3ad_get_active_agg_info(bond, &info)) {
			struct nlattr *nest;

			nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
			if (!nest)
				goto nla_put_failure;

			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
					info.aggregator_id))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
					info.ports))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
					info.actor_key))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
					info.partner_key))
				goto nla_put_failure;
			if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
				    sizeof(info.partner_system),
				    &info.partner_system))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

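/* rtnl_link_ops glue that lets userspace create and manage bonds entirely
 * over netlink. An illustrative iproute2 session (device names are just
 * examples) would be:
 *
 *	ip link add bond0 type bond mode 802.3ad miimon 100
 *	ip link set eth0 master bond0
 *	ip link set eth1 master bond0
 *	ip -d link show bond0	# attributes from bond_fill_info()
 *	ip -d link show eth0	# slave attributes from bond_fill_slave_info()
 */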
struct rtnl_link_ops bond_link_ops __read_mostly = {
	.kind			= "bond",
	.priv_size		= sizeof(struct bonding),
	.setup			= bond_setup,
	.maxtype		= IFLA_BOND_MAX,
	.policy			= bond_policy,
	.validate		= bond_validate,
	.newlink		= bond_newlink,
	.changelink		= bond_changelink,
	.get_size		= bond_get_size,
	.fill_info		= bond_fill_info,
	.get_num_tx_queues	= bond_get_num_tx_queues,
	.get_num_rx_queues	= bond_get_num_tx_queues, /* Use the same number
							     as for TX queues */
	.get_slave_size		= bond_get_slave_size,
	.fill_slave_info	= bond_fill_slave_info,
};

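/* Called from the bonding module's init/exit paths to register and
 * unregister the "bond" rtnl link type.
 */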
int __init bond_netlink_init(void)
{
	return rtnl_link_register(&bond_link_ops);
}

void bond_netlink_fini(void)
{
	rtnl_link_unregister(&bond_link_ops);
}

MODULE_ALIAS_RTNL_LINK("bond");