bonding: get rid of bond_option_active_slave_get()
[deliverable/linux.git] / drivers / net / bonding / bond_netlink.c
1 /*
2 * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
3 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
4 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/errno.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/if_link.h>
19 #include <linux/if_ether.h>
20 #include <net/netlink.h>
21 #include <net/rtnetlink.h>
22 #include "bonding.h"
23
24 static size_t bond_get_slave_size(const struct net_device *bond_dev,
25 const struct net_device *slave_dev)
26 {
27 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_STATE */
28 nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_MII_STATUS */
29 nla_total_size(sizeof(u32)) + /* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
30 nla_total_size(MAX_ADDR_LEN) + /* IFLA_BOND_SLAVE_PERM_HWADDR */
31 nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_QUEUE_ID */
32 nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
33 0;
34 }
35
36 static int bond_fill_slave_info(struct sk_buff *skb,
37 const struct net_device *bond_dev,
38 const struct net_device *slave_dev)
39 {
40 struct slave *slave = bond_slave_get_rtnl(slave_dev);
41
42 if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
43 goto nla_put_failure;
44
45 if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
46 goto nla_put_failure;
47
48 if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
49 slave->link_failure_count))
50 goto nla_put_failure;
51
52 if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
53 slave_dev->addr_len, slave->perm_hwaddr))
54 goto nla_put_failure;
55
56 if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
57 goto nla_put_failure;
58
59 if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
60 const struct aggregator *agg;
61
62 agg = SLAVE_AD_INFO(slave)->port.aggregator;
63 if (agg)
64 if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
65 agg->aggregator_identifier))
66 goto nla_put_failure;
67 }
68
69 return 0;
70
71 nla_put_failure:
72 return -EMSGSIZE;
73 }
74
/* Netlink attribute validation policy for IFLA_BOND_* attributes.
 * rtnetlink checks incoming attribute types/sizes against this table
 * before bond_changelink() ever sees them.
 */
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
	[IFLA_BOND_MODE]		= { .type = NLA_U8 },
	[IFLA_BOND_ACTIVE_SLAVE]	= { .type = NLA_U32 },
	[IFLA_BOND_MIIMON]		= { .type = NLA_U32 },
	[IFLA_BOND_UPDELAY]		= { .type = NLA_U32 },
	[IFLA_BOND_DOWNDELAY]		= { .type = NLA_U32 },
	[IFLA_BOND_USE_CARRIER]		= { .type = NLA_U8 },
	[IFLA_BOND_ARP_INTERVAL]	= { .type = NLA_U32 },
	[IFLA_BOND_ARP_IP_TARGET]	= { .type = NLA_NESTED },
	[IFLA_BOND_ARP_VALIDATE]	= { .type = NLA_U32 },
	[IFLA_BOND_ARP_ALL_TARGETS]	= { .type = NLA_U32 },
	[IFLA_BOND_PRIMARY]		= { .type = NLA_U32 },
	[IFLA_BOND_PRIMARY_RESELECT]	= { .type = NLA_U8 },
	[IFLA_BOND_FAIL_OVER_MAC]	= { .type = NLA_U8 },
	[IFLA_BOND_XMIT_HASH_POLICY]	= { .type = NLA_U8 },
	[IFLA_BOND_RESEND_IGMP]		= { .type = NLA_U32 },
	[IFLA_BOND_NUM_PEER_NOTIF]	= { .type = NLA_U8 },
	[IFLA_BOND_ALL_SLAVES_ACTIVE]	= { .type = NLA_U8 },
	[IFLA_BOND_MIN_LINKS]		= { .type = NLA_U32 },
	[IFLA_BOND_LP_INTERVAL]		= { .type = NLA_U32 },
	[IFLA_BOND_PACKETS_PER_SLAVE]	= { .type = NLA_U32 },
	[IFLA_BOND_AD_LACP_RATE]	= { .type = NLA_U8 },
	[IFLA_BOND_AD_SELECT]		= { .type = NLA_U8 },
	[IFLA_BOND_AD_INFO]		= { .type = NLA_NESTED },
};
100
101 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
102 {
103 if (tb[IFLA_ADDRESS]) {
104 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
105 return -EINVAL;
106 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
107 return -EADDRNOTAVAIL;
108 }
109 return 0;
110 }
111
112 static int bond_changelink(struct net_device *bond_dev,
113 struct nlattr *tb[], struct nlattr *data[])
114 {
115 struct bonding *bond = netdev_priv(bond_dev);
116 struct bond_opt_value newval;
117 int miimon = 0;
118 int err;
119
120 if (!data)
121 return 0;
122
123 if (data[IFLA_BOND_MODE]) {
124 int mode = nla_get_u8(data[IFLA_BOND_MODE]);
125
126 bond_opt_initval(&newval, mode);
127 err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
128 if (err)
129 return err;
130 }
131 if (data[IFLA_BOND_ACTIVE_SLAVE]) {
132 int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
133 struct net_device *slave_dev;
134 char *active_slave = "";
135
136 if (ifindex != 0) {
137 slave_dev = __dev_get_by_index(dev_net(bond_dev),
138 ifindex);
139 if (!slave_dev)
140 return -ENODEV;
141 active_slave = slave_dev->name;
142 }
143 bond_opt_initstr(&newval, active_slave);
144 err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
145 if (err)
146 return err;
147 }
148 if (data[IFLA_BOND_MIIMON]) {
149 miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
150
151 bond_opt_initval(&newval, miimon);
152 err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
153 if (err)
154 return err;
155 }
156 if (data[IFLA_BOND_UPDELAY]) {
157 int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
158
159 bond_opt_initval(&newval, updelay);
160 err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
161 if (err)
162 return err;
163 }
164 if (data[IFLA_BOND_DOWNDELAY]) {
165 int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
166
167 bond_opt_initval(&newval, downdelay);
168 err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
169 if (err)
170 return err;
171 }
172 if (data[IFLA_BOND_USE_CARRIER]) {
173 int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
174
175 bond_opt_initval(&newval, use_carrier);
176 err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
177 if (err)
178 return err;
179 }
180 if (data[IFLA_BOND_ARP_INTERVAL]) {
181 int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
182
183 if (arp_interval && miimon) {
184 pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
185 bond->dev->name);
186 return -EINVAL;
187 }
188
189 bond_opt_initval(&newval, arp_interval);
190 err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
191 if (err)
192 return err;
193 }
194 if (data[IFLA_BOND_ARP_IP_TARGET]) {
195 struct nlattr *attr;
196 int i = 0, rem;
197
198 bond_option_arp_ip_targets_clear(bond);
199 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
200 __be32 target = nla_get_be32(attr);
201
202 bond_opt_initval(&newval, (__force u64)target);
203 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
204 &newval);
205 if (err)
206 break;
207 i++;
208 }
209 if (i == 0 && bond->params.arp_interval)
210 pr_warn("%s: Removing last arp target with arp_interval on\n",
211 bond->dev->name);
212 if (err)
213 return err;
214 }
215 if (data[IFLA_BOND_ARP_VALIDATE]) {
216 int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
217
218 if (arp_validate && miimon) {
219 pr_err("%s: ARP validating cannot be used with MII monitoring\n",
220 bond->dev->name);
221 return -EINVAL;
222 }
223
224 bond_opt_initval(&newval, arp_validate);
225 err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
226 if (err)
227 return err;
228 }
229 if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
230 int arp_all_targets =
231 nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
232
233 bond_opt_initval(&newval, arp_all_targets);
234 err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
235 if (err)
236 return err;
237 }
238 if (data[IFLA_BOND_PRIMARY]) {
239 int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
240 struct net_device *dev;
241 char *primary = "";
242
243 dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
244 if (dev)
245 primary = dev->name;
246
247 bond_opt_initstr(&newval, primary);
248 err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
249 if (err)
250 return err;
251 }
252 if (data[IFLA_BOND_PRIMARY_RESELECT]) {
253 int primary_reselect =
254 nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
255
256 bond_opt_initval(&newval, primary_reselect);
257 err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
258 if (err)
259 return err;
260 }
261 if (data[IFLA_BOND_FAIL_OVER_MAC]) {
262 int fail_over_mac =
263 nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
264
265 bond_opt_initval(&newval, fail_over_mac);
266 err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
267 if (err)
268 return err;
269 }
270 if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
271 int xmit_hash_policy =
272 nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
273
274 bond_opt_initval(&newval, xmit_hash_policy);
275 err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
276 if (err)
277 return err;
278 }
279 if (data[IFLA_BOND_RESEND_IGMP]) {
280 int resend_igmp =
281 nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
282
283 bond_opt_initval(&newval, resend_igmp);
284 err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
285 if (err)
286 return err;
287 }
288 if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
289 int num_peer_notif =
290 nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
291
292 bond_opt_initval(&newval, num_peer_notif);
293 err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
294 if (err)
295 return err;
296 }
297 if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
298 int all_slaves_active =
299 nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
300
301 bond_opt_initval(&newval, all_slaves_active);
302 err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
303 if (err)
304 return err;
305 }
306 if (data[IFLA_BOND_MIN_LINKS]) {
307 int min_links =
308 nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
309
310 bond_opt_initval(&newval, min_links);
311 err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
312 if (err)
313 return err;
314 }
315 if (data[IFLA_BOND_LP_INTERVAL]) {
316 int lp_interval =
317 nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
318
319 bond_opt_initval(&newval, lp_interval);
320 err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
321 if (err)
322 return err;
323 }
324 if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
325 int packets_per_slave =
326 nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
327
328 bond_opt_initval(&newval, packets_per_slave);
329 err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
330 if (err)
331 return err;
332 }
333 if (data[IFLA_BOND_AD_LACP_RATE]) {
334 int lacp_rate =
335 nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
336
337 bond_opt_initval(&newval, lacp_rate);
338 err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
339 if (err)
340 return err;
341 }
342 if (data[IFLA_BOND_AD_SELECT]) {
343 int ad_select =
344 nla_get_u8(data[IFLA_BOND_AD_SELECT]);
345
346 bond_opt_initval(&newval, ad_select);
347 err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
348 if (err)
349 return err;
350 }
351 return 0;
352 }
353
/* rtnl_link_ops->newlink: apply the creation-time attributes, then
 * register the new bond device.  Any changelink error aborts creation
 * before the device is registered.
 */
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err = bond_changelink(bond_dev, tb, data);

	if (err < 0)
		return err;

	return register_netdevice(bond_dev);
}
365
366 static size_t bond_get_size(const struct net_device *bond_dev)
367 {
368 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
369 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
370 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
371 nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
372 nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
373 nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
374 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
375 /* IFLA_BOND_ARP_IP_TARGET */
376 nla_total_size(sizeof(struct nlattr)) +
377 nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
378 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
379 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
380 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
381 nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
382 nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
383 nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
384 nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
385 nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
386 nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
387 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
388 nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
389 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
390 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
391 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
392 nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
393 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
394 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
395 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
396 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
397 nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
398 0;
399 }
400
401 static int bond_option_active_slave_get_ifindex(struct bonding *bond)
402 {
403 const struct net_device *slave;
404 int ifindex;
405
406 rcu_read_lock();
407 slave = bond_option_active_slave_get_rcu(bond);
408 ifindex = slave ? slave->ifindex : 0;
409 rcu_read_unlock();
410 return ifindex;
411 }
412
413 static int bond_fill_info(struct sk_buff *skb,
414 const struct net_device *bond_dev)
415 {
416 struct bonding *bond = netdev_priv(bond_dev);
417 unsigned int packets_per_slave;
418 int ifindex, i, targets_added;
419 struct nlattr *targets;
420
421 if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
422 goto nla_put_failure;
423
424 ifindex = bond_option_active_slave_get_ifindex(bond);
425 if (ifindex && nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, ifindex))
426 goto nla_put_failure;
427
428 if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
429 goto nla_put_failure;
430
431 if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
432 bond->params.updelay * bond->params.miimon))
433 goto nla_put_failure;
434
435 if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
436 bond->params.downdelay * bond->params.miimon))
437 goto nla_put_failure;
438
439 if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
440 goto nla_put_failure;
441
442 if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
443 goto nla_put_failure;
444
445 targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
446 if (!targets)
447 goto nla_put_failure;
448
449 targets_added = 0;
450 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
451 if (bond->params.arp_targets[i]) {
452 nla_put_be32(skb, i, bond->params.arp_targets[i]);
453 targets_added = 1;
454 }
455 }
456
457 if (targets_added)
458 nla_nest_end(skb, targets);
459 else
460 nla_nest_cancel(skb, targets);
461
462 if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
463 goto nla_put_failure;
464
465 if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
466 bond->params.arp_all_targets))
467 goto nla_put_failure;
468
469 if (bond->primary_slave &&
470 nla_put_u32(skb, IFLA_BOND_PRIMARY,
471 bond->primary_slave->dev->ifindex))
472 goto nla_put_failure;
473
474 if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
475 bond->params.primary_reselect))
476 goto nla_put_failure;
477
478 if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
479 bond->params.fail_over_mac))
480 goto nla_put_failure;
481
482 if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
483 bond->params.xmit_policy))
484 goto nla_put_failure;
485
486 if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
487 bond->params.resend_igmp))
488 goto nla_put_failure;
489
490 if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
491 bond->params.num_peer_notif))
492 goto nla_put_failure;
493
494 if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
495 bond->params.all_slaves_active))
496 goto nla_put_failure;
497
498 if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
499 bond->params.min_links))
500 goto nla_put_failure;
501
502 if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
503 bond->params.lp_interval))
504 goto nla_put_failure;
505
506 packets_per_slave = bond->params.packets_per_slave;
507 if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
508 packets_per_slave))
509 goto nla_put_failure;
510
511 if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
512 bond->params.lacp_fast))
513 goto nla_put_failure;
514
515 if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
516 bond->params.ad_select))
517 goto nla_put_failure;
518
519 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
520 struct ad_info info;
521
522 if (!bond_3ad_get_active_agg_info(bond, &info)) {
523 struct nlattr *nest;
524
525 nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
526 if (!nest)
527 goto nla_put_failure;
528
529 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
530 info.aggregator_id))
531 goto nla_put_failure;
532 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
533 info.ports))
534 goto nla_put_failure;
535 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
536 info.actor_key))
537 goto nla_put_failure;
538 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
539 info.partner_key))
540 goto nla_put_failure;
541 if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
542 sizeof(info.partner_system),
543 &info.partner_system))
544 goto nla_put_failure;
545
546 nla_nest_end(skb, nest);
547 }
548 }
549
550 return 0;
551
552 nla_put_failure:
553 return -EMSGSIZE;
554 }
555
/* rtnetlink operations for "bond" links: creation, validation,
 * configuration, and dump of bond devices and their slaves.
 */
struct rtnl_link_ops bond_link_ops __read_mostly = {
	.kind			= "bond",
	.priv_size		= sizeof(struct bonding),
	.setup			= bond_setup,
	.maxtype		= IFLA_BOND_MAX,
	.policy			= bond_policy,
	.validate		= bond_validate,
	.newlink		= bond_newlink,
	.changelink		= bond_changelink,
	.get_size		= bond_get_size,
	.fill_info		= bond_fill_info,
	.get_num_tx_queues	= bond_get_num_tx_queues,
	.get_num_rx_queues	= bond_get_num_tx_queues, /* Use the same number
							     as for TX queues */
	.get_slave_size		= bond_get_slave_size,
	.fill_slave_info	= bond_fill_slave_info,
};
573
/* Register the "bond" rtnetlink link type at module init.
 * Returns 0 on success or a negative errno from rtnl_link_register().
 */
int __init bond_netlink_init(void)
{
	return rtnl_link_register(&bond_link_ops);
}
578
/* Unregister the "bond" rtnetlink link type at module exit. */
void bond_netlink_fini(void)
{
	rtnl_link_unregister(&bond_link_ops);
}
583
584 MODULE_ALIAS_RTNL_LINK("bond");
This page took 0.045533 seconds and 5 git commands to generate.