/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has reference count and sits in some parent list(s).
 * When it is removed from parent list, it is "freed" (dst_free).
 * After this it enters dead state (dst->obsolete > 0) and if its refcnt
 * is zero, it can be destroyed immediately, otherwise it is added
 * to gc list and garbage collector periodically checks the refcnt.
 */

struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long		expires;
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020
#define DST_FAKE_RTABLE		0x0040
#define DST_XFRM_TUNNEL		0x0080
#define DST_XFRM_QUEUE		0x0100
#define DST_METADATA		0x0200

	unsigned short		pending_confirm;

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

#ifdef CONFIG_64BIT
	struct lwtunnel_state	*lwtstate;
	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references	*/
	int			__use;
	unsigned long		lastuse;
#ifndef CONFIG_64BIT
	struct lwtunnel_state	*lwtstate;
#endif
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];

#define DST_METRICS_READ_ONLY		0x1UL
#define DST_METRICS_FLAGS		0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

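/* Illustrative sketch (not a kernel API): ->_metrics is a tagged
 * pointer.  Metric arrays are at least word-aligned, which leaves the
 * low bits free for flags such as DST_METRICS_READ_ONLY, and
 * __DST_METRICS_PTR() masks those bits off again.  Metric slots are
 * indexed by RTAX_* - 1.
 */
static inline u32 example_read_lock_metric(const struct dst_entry *dst)
{
	unsigned long raw = dst->_metrics;	/* tagged pointer */
	u32 *metrics = __DST_METRICS_PTR(raw);	/* strip flag bits */

	return metrics[RTAX_LOCK - 1];		/* cf. dst_metric_raw() below */
}
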
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}

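/* Illustrative sketch (not a kernel API): a hypothetical dst
 * implementation points a freshly allocated entry at the shared
 * read-only default table; the first metric write then goes through
 * ->cow_metrics() via dst_metrics_write_ptr() above.
 */
static inline void example_start_with_default_metrics(struct dst_entry *dst)
{
	dst_init_metrics(dst, dst_default_metrics, true);
}
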
static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

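/* Illustrative sketch (not a kernel API): reads never trigger
 * copy-on-write, while dst_metric_set() does when the entry still
 * shares a read-only table.  RTAX_CWND is just an example index.
 */
static inline void example_double_cwnd(struct dst_entry *dst)
{
	u32 cwnd = dst_metric(dst, RTAX_CWND);		/* read, no COW */

	if (cwnd)
		dst_metric_set(dst, RTAX_CWND, cwnd * 2);	/* may COW */
}
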
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

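/* Illustrative sketch (not a kernel API): RTAX_RTO_MIN is carried in
 * milliseconds on the user ABI, while a hypothetical timer setup wants
 * jiffies; dst_metric_rtt() hides exactly that conversion.
 */
static inline unsigned long example_rto_min_jiffies(const struct dst_entry *dst)
{
	return dst_metric_rtt(dst, RTAX_RTO_MIN);
}
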
static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

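/* Illustrative sketch (not a kernel API): RTAX_LOCK is a bitmask of
 * metrics pinned by the administrator (e.g. "ip route ... lock mtu"),
 * so a hypothetical PMTU update would first check the lock bit.
 */
static inline bool example_may_update_mtu(const struct dst_entry *dst)
{
	return !dst_metric_locked(dst, RTAX_MTU);
}
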
static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

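/* Illustrative sketch (not a kernel API): a cached lookup that hands
 * the entry to another context takes a real reference via dst_use(),
 * while an RCU-protected fast path can use dst_use_noref() and skip
 * the atomic increment.
 */
static inline void example_touch_entry(struct dst_entry *dst, bool noref)
{
	if (noref)
		dst_use_noref(dst, jiffies);	/* caller inside rcu_read_lock() */
	else
		dst_use(dst, jiffies);		/* bumps __refcnt */
}
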
static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

void dst_release(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
	nskb->_skb_refdst = refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}

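/* Illustrative sketch (not a kernel API): any path that queues an skb
 * past the current RCU section must pin its dst first, or a noref
 * pointer could outlive the grace period that protects it.
 */
static inline void example_prepare_skb_for_queueing(struct sk_buff *skb)
{
	skb_dst_force(skb);	/* noref -> refcounted, if needed */
	/* ... hand the skb to a queue drained in another context ... */
}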

/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}

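/* Illustrative sketch (not a kernel API): after stripping the outer
 * headers, a hypothetical tunnel driver reinjects the inner packet
 * like this.
 */
static inline void example_tunnel_deliver(struct sk_buff *skb,
					  struct net_device *tunnel_dev,
					  struct net *net)
{
	skb_tunnel_rx(skb, tunnel_dev, net);	/* stats + scrubbing */
	netif_rx(skb);				/* re-enter the stack */
}
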
int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_sk(skb->sk, skb);
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
		int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags);
void __dst_free(struct dst_entry *dst);
struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 0)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

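/* Illustrative sketch (not a kernel API), matching the lifecycle
 * comment at the top of this file: once an entry is unlinked from its
 * parent list, dst_free() either destroys it immediately (refcnt zero)
 * or leaves it to the garbage collector.
 */
static inline void example_unlink_and_free(struct dst_entry *dst)
{
	/* ... hypothetical removal from the parent list happens here ... */
	dst_free(dst);	/* destroy now, or queue for GC */
}
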
static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}

static inline void dst_confirm(struct dst_entry *dst)
{
	dst->pending_confirm = 1;
}

static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
				   struct sk_buff *skb)
{
	const struct hh_cache *hh;

	if (dst->pending_confirm) {
		unsigned long now = jiffies;

		dst->pending_confirm = 0;
		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
	}

	hh = &n->hh;
	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}

static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
	return IS_ERR(n) ? NULL : n;
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(sk, skb);
}
static inline int dst_output(struct sk_buff *skb)
{
	return dst_output_sk(skb->sk, skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

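/* Illustrative sketch (not a kernel API): the usual revalidation
 * pattern (compare sk_dst_check()): a cached entry is only trusted
 * once ->check() has accepted the caller's cookie; otherwise drop the
 * reference and force a fresh lookup.
 */
static inline struct dst_entry *example_revalidate(struct dst_entry *dst,
						   u32 cookie)
{
	if (dst && !dst_check(dst, cookie)) {
		dst_release(dst);	/* stale: drop our reference */
		dst = NULL;		/* caller must relookup */
	}
	return dst;
}
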
void dst_subsys_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl, struct sock *sk,
					    int flags)
{
	return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  struct sock *sk,
						  int flags)
{
	return dst_orig;
}

static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, struct sock *sk,
			      int flags);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl, struct sock *sk,
				    int flags);

/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
#endif

#endif /* _NET_DST_H */