net: Add initial_ref arg to dst_alloc().
[deliverable/linux.git] include/net/dst.h
/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

/*
 * 0 - no debugging messages
 * 1 - rare events and bugs (default)
 * 2 - trace mode.
 */
#define RT_CACHE_DEBUG		0

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from a parent list, it is "freed" (dst_free).
 * After this it enters a dead state (dst->obsolete > 0) and, if its refcnt
 * is zero, it can be destroyed immediately; otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */

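/*
 * Editor's sketch, not part of the original header: the lifecycle above
 * as seen by a hypothetical client.  my_route_lookup() is a made-up
 * stand-in for a protocol's route lookup.
 *
 *	dst = my_route_lookup();	entry sits in some parent list
 *	dst_hold(dst);			take a client reference
 *	... transmit via dst->output(skb) ...
 *	dst_release(dst);		drop the client reference
 *
 * When the owner later unlinks the entry and calls dst_free(), the entry
 * goes obsolete and is destroyed once its refcnt reaches zero (via the
 * gc list if clients still hold references).
 */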
struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long		expires;
	struct dst_entry	*path;
	struct neighbour	*neighbour;
	struct hh_cache		*hh;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff*);
	int			(*output)(struct sk_buff*);

	short			error;
	short			obsolete;
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

	/*
	 * Align __refcnt to a 64-byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references	*/
	int			__use;
	unsigned long		lastuse;
	int			flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

#ifdef __KERNEL__

extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[RTAX_MAX];

#define DST_METRICS_READ_ONLY	0x1UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}

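/*
 * Editor's sketch, not part of the original header.  _metrics stores a
 * pointer to a u32[RTAX_MAX] array with the read-only flag folded into
 * bit 0.  A new entry can share the read-only dst_default_metrics array;
 * the first write goes through dst_metrics_write_ptr(), whose read-only
 * check makes ->cow_metrics() hand back a private, writable copy.
 */
static inline void dst_metrics_example(struct dst_entry *dst)
{
	u32 *p;

	/* Before the entry is globally visible: share the defaults. */
	dst_init_metrics(dst, dst_default_metrics, true);

	/* First write: the read-only bit forces a copy-on-write. */
	p = dst_metrics_write_ptr(dst);
	if (p)
		p[RTAX_WINDOW - 1] = 65535;	/* RTAX_* values are 1-based */
}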
static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	u32 mtu = dst_metric_raw(dst, RTAX_MTU);

	if (!mtu)
		mtu = dst->ops->default_mtu(dst);

	return mtu;
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
				      unsigned long rtt)
{
	dst_metric_set(dst, metric, jiffies_to_msecs(rtt));
}

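/*
 * Editor's sketch, not part of the original header: the ms <-> jiffies
 * conversion above lets callers work entirely in jiffies while the value
 * exported to userspace stays in milliseconds.  rtt_sample is a
 * hypothetical smoothed RTT measurement in jiffies.
 */
static inline void dst_rtt_example(struct dst_entry *dst,
				   unsigned long rtt_sample)
{
	unsigned long old_rtt = dst_metric_rtt(dst, RTAX_RTT);	/* jiffies */

	/* store back in jiffies; the helper converts to milliseconds */
	set_dst_metric_rtt(dst, RTAX_RTT, (old_rtt + rtt_sample) / 2);
}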
static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

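/*
 * Editor's sketch, not part of the original header: RTAX_LOCK is a
 * bitmask of metrics the administrator has pinned (e.g. "ip route ...
 * mtu lock N"), so writers are expected to check it before updating.
 * new_mtu is a hypothetical value learned from the path.
 */
static inline void dst_update_mtu_example(struct dst_entry *dst, u32 new_mtu)
{
	if (!dst_metric_locked(dst, RTAX_MTU))
		dst_metric_set(dst, RTAX_MTU, new_mtu);
}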
static inline void dst_hold(struct dst_entry * dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline
struct dst_entry * dst_clone(struct dst_entry * dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

extern void dst_release(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}

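/*
 * Editor's sketch, not part of the original header: a noref dst is only
 * valid inside the RCU read-side section that installed it, so a path
 * that keeps the skb around for later (a socket write queue, a timer,
 * etc.) takes a real reference first.
 */
static inline void skb_queue_example(struct sk_buff *skb)
{
	skb_dst_force(skb);	/* convert a noref dst into a held reference */
	/* ... queue skb somewhere that outlives the RCU read-side section ... */
}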

/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb->rxhash = 0;
	skb_set_queue_mapping(skb, 0);
	skb_dst_drop(skb);
	nf_reset(skb);
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev);
}

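/*
 * Editor's sketch, not part of the original header: typical use in a
 * hypothetical tunnel driver's receive path, after the outer headers
 * have been stripped.  tunnel_dev is the tunnel's own net_device.
 */
static inline void tunnel_recv_example(struct sk_buff *skb,
				       struct net_device *tunnel_dev)
{
	skb_tunnel_rx(skb, tunnel_dev);	/* reset skb state, count rx stats */
	netif_rx(skb);			/* reinject into the stack */
}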
/* Children define the path of the packet through the
 * Linux networking stack.  Thus, destinations are stackable.
 */

static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = skb_dst(skb)->child;

	skb_dst_drop(skb);
	return child;
}

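/*
 * Editor's sketch, not part of the original header: walking a stacked
 * dst chain via ->child, e.g. the chain IPsec builds where transformation
 * dsts sit on top of the real route.
 */
static inline int dst_stack_depth_example(const struct dst_entry *dst)
{
	int depth = 0;

	for (; dst; dst = dst->child)
		depth++;	/* last entry is the actual route */
	return depth;
}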
extern int dst_discard(struct sk_buff *skb);
extern void *dst_alloc(struct dst_ops * ops, int initial_ref);
extern void __dst_free(struct dst_entry * dst);
extern struct dst_entry *dst_destroy(struct dst_entry * dst);

static inline void dst_free(struct dst_entry * dst)
{
	if (dst->obsolete > 1)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

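/*
 * Editor's sketch of the dst_alloc() signature named in the commit
 * subject, not part of the original header.  my_dst_ops is a
 * hypothetical dst_ops instance; passing initial_ref = 1 starts the
 * entry with one client reference, so the caller does not need a
 * separate dst_hold() after allocation.
 */
extern struct dst_ops my_dst_ops;	/* hypothetical, for illustration */

static inline struct dst_entry *dst_alloc_example(void)
{
	struct dst_entry *dst = dst_alloc(&my_dst_ops, 1);

	if (!dst)
		return NULL;
	/* ... set dst->dev, dst->input, dst->output, metrics, ... */
	return dst;	/* caller already owns the reference passed as initial_ref */
}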
static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}

static inline void dst_confirm(struct dst_entry *dst)
{
	if (dst)
		neigh_confirm(dst->neighbour);
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

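/*
 * Editor's sketch, not part of the original header: dst_set_expires()
 * only ever moves the expiry earlier, so it can simply be called for
 * each lifetime hint that arrives.  lifetime_secs is a hypothetical
 * value, e.g. taken from a router advertisement.
 */
static inline void dst_expiry_example(struct dst_entry *dst,
				      unsigned int lifetime_secs)
{
	dst_set_expires(dst, lifetime_secs * HZ);	/* timeout is in jiffies */
}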
/* Output packet to network from transport.  */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}

/* Input packet from network to transport.  */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

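/*
 * Editor's sketch, not part of the original header: the usual pattern
 * for revalidating a cached route.  "cached" and "cookie" stand for a
 * hypothetical entry and validation cookie the caller stored earlier;
 * when the entry is obsolete, ->check() decides whether it is still
 * usable or returns NULL.
 */
static inline struct dst_entry *dst_revalidate_example(struct dst_entry *cached,
							u32 cookie)
{
	struct dst_entry *dst = dst_check(cached, cookie);

	if (!dst) {
		/* entry went stale: the caller must redo the route lookup */
		return NULL;
	}
	return dst;
}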
extern void		dst_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_WAIT = 1 << 0,
	XFRM_LOOKUP_ICMP = 1 << 1,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			      struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
				struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
#else
extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
		       struct flowi *fl, struct sock *sk, int flags);
extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			 struct flowi *fl, struct sock *sk, int flags);
#endif
#endif

#endif /* _NET_DST_H */