/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from a parent list, it is "freed" (dst_free).
 * After this it enters the dead state (dst->obsolete > 0) and, if its refcnt
 * is zero, it can be destroyed immediately; otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */
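
/* Illustrative sketch (not part of the original header): the typical
 * lifecycle as driven by a protocol.  my_dst_ops and dev are hypothetical
 * names standing in for a protocol's dst_ops table and output device.
 *
 *	struct dst_entry *dst;
 *
 *	dst = dst_alloc(&my_dst_ops, dev, 1, DST_OBSOLETE_NONE, 0);
 *	...				// entry linked into a parent list
 *	dst_free(dst);			// unlink: destroyed now if refcnt is 0,
 *					// otherwise handed to the gc list
 */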

struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long		expires;
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	struct lwtunnel_state	*lwtstate;
	int			(*input)(struct sk_buff *);
	int			(*output)(struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020
#define DST_FAKE_RTABLE		0x0040
#define DST_XFRM_TUNNEL		0x0080
#define DST_XFRM_QUEUE		0x0100
#define DST_METADATA		0x0200

	unsigned short		pending_confirm;

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

	/*
	 * Align __refcnt to a 64-byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;
	unsigned long		lastuse;
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];

#define DST_METRICS_READ_ONLY	0x1UL
#define DST_METRICS_FLAGS	0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;

	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}
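
/* Illustrative sketch (not from the original header): _metrics packs a
 * pointer and flag bits into one word, so writers must go through the
 * copy-on-write helper while readers may use the plain pointer.
 *
 *	u32 mtu = dst_metric_raw(dst, RTAX_MTU);	// read: no copy
 *
 *	u32 *p = dst_metrics_write_ptr(dst);		// write: may COW a
 *	if (p)						// shared/read-only
 *		p[RTAX_MTU - 1] = 1400;			// metrics array
 */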

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}
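
/* Illustrative note (not from the original header): RTAX_* values start
 * at 1 (RTAX_UNSPEC is 0), hence the p[metric - 1] indexing above, and
 * dst_metric() warns for metrics that have dedicated accessors.
 *
 *	u32 win = dst_metric(dst, RTAX_WINDOW);	// fine
 *	u32 mtu = dst_mtu(dst);			// use this, not
 *						// dst_metric(dst, RTAX_MTU)
 */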

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);

	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}
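
/* Illustrative sketch (not from the original header): RTAX_LOCK is a
 * bitmask of metrics pinned by userspace ("ip route ... mtu lock ..."),
 * so PMTU-style updates should check it first.  new_mtu is hypothetical.
 *
 *	if (!dst_metric_locked(dst, RTAX_MTU))
 *		dst_metric_set(dst, RTAX_MTU, new_mtu);
 */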

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}
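
/* Illustrative sketch (not from the original header): a routing cache
 * calls dst_use() when handing out a cached entry, bumping the refcount
 * and the usage statistics together.
 *
 *	dst_use(dst, jiffies);		// take a ref + account the hit
 *	...
 *	dst_release(dst);		// caller drops its ref when done
 */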

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

void dst_release(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}
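
/* Illustrative sketch (not from the original header): an skb can carry its
 * dst either refcounted or RCU-protected (SKB_DST_NOREF).  Before queueing
 * an skb beyond the current RCU read-side section, force a real reference.
 *
 *	rcu_read_lock();
 *	skb_dst_set_noref(skb, dst);	// borrowed, RCU-protected pointer
 *	skb_dst_force(skb);		// upgrade to a counted reference
 *	rcu_read_unlock();
 *	...				// skb may now outlive the RCU section
 */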

/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}
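
/* Illustrative sketch (not from the original header): a tunnel driver's
 * receive path, after stripping the outer headers, hands the inner packet
 * back to the stack.  tunnel_dev and inner_proto are hypothetical.
 *
 *	skb->protocol = inner_proto;
 *	skb_tunnel_rx(skb, tunnel_dev, dev_net(tunnel_dev));
 *	netif_rx(skb);
 */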

int dst_discard_sk(struct sock *sk, struct sk_buff *skb);

static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_sk(skb->sk, skb);
}

void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
		int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags);
void __dst_free(struct dst_entry *dst);
struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 0)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst_free(dst);
}
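
/* Illustrative sketch (not from the original header): when readers may
 * still hold RCU-protected references, defer the free past a grace period
 * instead of calling dst_free() directly.
 *
 *	call_rcu(&dst->rcu_head, dst_rcu_free);
 */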

static inline void dst_confirm(struct dst_entry *dst)
{
	dst->pending_confirm = 1;
}

static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
				   struct sk_buff *skb)
{
	const struct hh_cache *hh;

	if (dst->pending_confirm) {
		unsigned long now = jiffies;

		dst->pending_confirm = 0;
		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
	}

	hh = &n->hh;
	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}

static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);

	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);

	return IS_ERR(n) ? NULL : n;
}
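
/* Illustrative sketch (not from the original header): an output path looks
 * up the neighbour for the next hop and lets dst_neigh_output() take the
 * cached-header fast path when the neighbour is NUD_CONNECTED.  next_hop
 * is a hypothetical address variable.
 *
 *	struct neighbour *n = dst_neigh_lookup(dst, &next_hop);
 *
 *	if (n) {
 *		int res = dst_neigh_output(dst, n, skb);
 *
 *		neigh_release(n);
 *		return res;
 *	}
 */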

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}
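
/* Illustrative note (not from the original header): timeout is in jiffies;
 * an expires value of zero is reserved to mean "no expiry", which is why a
 * computed value of 0 is nudged to 1 above.
 *
 *	dst_set_expires(dst, 10 * HZ);	// expire this entry in ~10 seconds
 */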

/* Output packet to network from transport.  */
static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(sk, skb);
}

static inline int dst_output(struct sk_buff *skb)
{
	return dst_output_sk(skb->sk, skb);
}

/* Input packet from network to transport.  */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
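
/* Illustrative sketch (not from the original header): cached-route users
 * (e.g. a socket) revalidate a stored dst before reusing it; check() may
 * return NULL when the route has become obsolete.  saved_cookie and
 * relookup_route() are hypothetical.
 *
 *	dst = dst_check(dst, saved_cookie);
 *	if (!dst)
 *		dst = relookup_route(sk);
 */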

void dst_subsys_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;

#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    struct sock *sk,
					    int flags)
{
	return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  struct sock *sk,
						  int flags)
{
	return dst_orig;
}

static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, struct sock *sk,
			      int flags);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl, struct sock *sk,
				    int flags);

/* An skb attached to this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
#endif
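
/* Illustrative sketch (not from the original header): callers route first,
 * then pass the result through xfrm_lookup() so IPsec policy can wrap it
 * in transformation state; without CONFIG_XFRM the stubs above make this
 * a no-op.  fl4 is a hypothetical flow key.
 *
 *	dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 */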

#endif /* _NET_DST_H */