/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from a parent list, it is "freed" (dst_free).
 * After this it enters the dead state (dst->obsolete > 0) and, if its refcnt
 * is zero, it can be destroyed immediately; otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */

struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long		expires;
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020
#define DST_FAKE_RTABLE		0x0040
#define DST_XFRM_TUNNEL		0x0080
#define DST_XFRM_QUEUE		0x0100
#define DST_METADATA		0x0200

	unsigned short		pending_confirm;

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

#ifdef CONFIG_64BIT
	struct lwtunnel_state	*lwtstate;
	/*
	 * Align __refcnt on a 64-byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;
	unsigned long		lastuse;
#ifndef CONFIG_64BIT
	struct lwtunnel_state	*lwtstate;
#endif
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];

#define DST_METRICS_READ_ONLY	0x1UL
#define DST_METRICS_FLAGS	0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

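/* Illustrative sketch (editor's addition, not part of the original header):
 * _metrics packs a pointer to a u32 metrics array together with flag bits
 * in its low bits, which works because the array is at least 4-byte
 * aligned, leaving the two low bits free.  A shared read-only default
 * table is tagged rather than copied:
 *
 *	dst->_metrics = (unsigned long)dst_default_metrics |
 *			DST_METRICS_READ_ONLY;
 *	u32 *p = DST_METRICS_PTR(dst);	// masks the flag bits back out
 */
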
static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

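/* Illustrative sketch (editor's addition, not from the original header):
 * RTAX_* identifiers are 1-based (RTAX_UNSPEC is 0 and unused), hence the
 * p[metric-1] indexing above.  Reading the window metric, for example:
 *
 *	u32 window = dst_metric(dst, RTAX_WINDOW);	// p[RTAX_WINDOW-1]
 */
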
static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

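/* Illustrative sketch (editor's assumption, not from this header): since
 * dst_metrics_write_ptr() goes through ->cow_metrics() for read-only
 * tables, the first write transparently switches the entry to a private
 * copy of the metrics:
 *
 *	dst_metric_set(dst, RTAX_MTU, 1400);	// may COW the metrics array
 *	// later reads see 1400 via dst_metric_raw(dst, RTAX_MTU)
 */
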
/* Kernel-internal feature bits that are unallocated in user space. */
#define DST_FEATURE_ECN_CA	(1 << 31)

#define DST_FEATURE_MASK	(DST_FEATURE_ECN_CA)
#define DST_FEATURE_ECN_MASK	(DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

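/* Illustrative sketch (editor's addition, not from this header): user space
 * configures RTT metrics in milliseconds, while kernel timers run in
 * jiffies, hence the conversion above:
 *
 *	unsigned long rtt = dst_metric_rtt(dst, RTAX_RTT);
 *	// e.g. a 100ms metric with HZ=1000 yields rtt == 100 jiffies
 */
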
static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

void dst_release(struct dst_entry *dst);

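/* Illustrative sketch (hypothetical caller, editor's addition): every
 * dst_hold()/dst_clone() must eventually be paired with a dst_release()
 * once the caller is done with the entry:
 *
 *	dst_hold(dst);			// pin the entry
 *	...				// use dst->dev, metrics, ->output(), ...
 *	dst_release(dst);		// may free it once refcnt drops to zero
 */
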
static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
	nskb->_skb_refdst = refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}

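/* Illustrative sketch (editor's assumption, not from this header): a noref
 * dst is only valid inside the RCU read-side section that attached it, so
 * any path that queues the skb beyond that section must take a real
 * reference first:
 *
 *	rcu_read_lock();
 *	skb_dst_force(skb);	// noref -> refcounted, safe to queue later
 *	rcu_read_unlock();
 */
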
/**
 * dst_hold_safe - Take a reference on a dst if possible
 * @dst: pointer to dst entry
 *
 * This helper returns false if it could not safely
 * take a reference on a dst.
 */
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	if (dst->flags & DST_NOCACHE)
		return atomic_inc_not_zero(&dst->__refcnt);
	dst_hold(dst);
	return true;
}

/**
 * skb_dst_force_safe - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted and not destroyed, grab a ref on it.
 */
static inline void skb_dst_force_safe(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		struct dst_entry *dst = skb_dst(skb);

		if (!dst_hold_safe(dst))
			dst = NULL;

		skb->_skb_refdst = (unsigned long)dst;
	}
}

/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}

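/* Illustrative sketch (hypothetical tunnel driver, editor's addition):
 * after stripping the outer headers, a receive handler hands the inner
 * packet back to the stack through the tunnel's net_device:
 *
 *	skb_tunnel_rx(skb, tunnel_dev, tunnel_net);	// stats + cleanup
 *	netif_rx(skb);					// re-enter the rx path
 */
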
static inline u32 dst_tclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst;

	dst = skb_dst(skb);
	if (dst)
		return dst->tclassid;
#endif
	return 0;
}

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
		int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags);
void __dst_free(struct dst_entry *dst);
struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 0)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

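/* Illustrative sketch (editor's assumption, not from this header): a
 * protocol typically embeds struct dst_entry in its own route type and
 * lets dst_alloc() initialize it; dst_free() then either destroys the
 * entry at once or defers it to the gc list (see the comment at the top):
 *
 *	struct rtable *rt = dst_alloc(&ipv4_dst_ops, dev, 1,
 *				      DST_OBSOLETE_FORCE_CHK, flags);
 *	...
 *	dst_free(&rt->dst);
 */
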
static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}

static inline void dst_confirm(struct dst_entry *dst)
{
	dst->pending_confirm = 1;
}

static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
				   struct sk_buff *skb)
{
	const struct hh_cache *hh;

	if (dst->pending_confirm) {
		unsigned long now = jiffies;

		dst->pending_confirm = 0;
		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
	}

	hh = &n->hh;
	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}

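/* Illustrative sketch (editor's assumption, not from this header): a
 * transport that has proof of reachability (e.g. an acknowledged segment)
 * marks the dst cheaply, and the next transmit through dst_neigh_output()
 * clears the flag and refreshes the neighbour's confirmation timestamp:
 *
 *	dst_confirm(skb_dst(skb));	// just sets pending_confirm
 *	// later: dst_neigh_output() updates n->confirmed from it
 */
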
static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
	return IS_ERR(n) ? NULL : n;
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(net, sk, skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

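/* Illustrative sketch (editor's assumption, not from this header): callers
 * that cache a dst revalidate it against a cookie before reuse; a NULL
 * return means the cached route is stale and must be looked up again:
 *
 *	dst = dst_check(dst, cookie);
 *	if (!dst)
 *		dst = ip6_route_output(net, sk, &fl6);	// hypothetical re-lookup
 */
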
void dst_subsys_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    const struct sock *sk,
					    int flags)
{
	return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  const struct sock *sk,
						  int flags)
{
	return dst_orig;
}

static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl, const struct sock *sk,
				    int flags);

/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
#endif

#endif /* _NET_DST_H */