#include <linux/percpu_counter.h>

struct netns_frags {
	struct list_head	lru_list;
	spinlock_t		lru_lock;

	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;
};

struct inet_frag_queue {
	struct timer_list	timer;		/* when will this queue expire? */
	struct list_head	lru_list;	/* lru list member */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail;
	int			len;		/* total length of orig datagram */
	__u8			last_in;	/* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	struct netns_frags	*net;
};

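/* Illustrative sketch (not part of the original header): how the last_in
 * flag bits are typically consumed. A reassembly path ORs in
 * INET_FRAG_FIRST_IN/INET_FRAG_LAST_IN as the matching fragments arrive,
 * and considers the datagram complete once both ends have been seen and
 * the accumulated payload ("meat", kept in protocol bookkeeping and
 * passed in here for the example) equals the expected total length.
 */
static inline bool inet_frag_queue_complete_example(const struct inet_frag_queue *q,
						    int meat)
{
	return q->last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	       meat == q->len;
}
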
#define INETFRAGS_HASHSZ	64

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUESIZE(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;

	struct timer_list	secret_timer;

	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};

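/* Illustrative sketch (not part of the original header): a protocol
 * registers its reassembly callbacks by filling in a struct inet_frags
 * and handing it to inet_frags_init() (declared below). Every name
 * prefixed "example_" is hypothetical.
 */
static unsigned int example_hashfn(struct inet_frag_queue *q)
{
	/* A real hashfn mixes the protocol's addresses and fragment id
	 * with a periodically reseeded random value.
	 */
	return 0;
}

static struct inet_frags example_frags = {
	.hashfn	= example_hashfn,
	/* .match, .constructor, .destructor, .skb_free and .frag_expire
	 * would be filled in the same way.
	 */
};
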
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
		       struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
	__releases(&f->lock);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f, NULL);
}

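/* Illustrative usage (not part of the original header): a caller that
 * looked a queue up via inet_frag_find() owns a reference and must drop
 * it with inet_frag_put() when done; the queue is freed only once the
 * last reference goes away.
 */
static inline void inet_frag_lookup_example(struct netns_frags *nf,
					    struct inet_frags *f,
					    void *key, unsigned int hash)
{
	struct inet_frag_queue *q;

	read_lock(&f->lock);			/* inet_frag_find() releases f->lock */
	q = inet_frag_find(nf, f, key, hash);	/* returns with refcnt held */
	if (IS_ERR_OR_NULL(q))
		return;
	/* ... queue the fragment, possibly reassemble ... */
	inet_frag_put(q, f);			/* drop the lookup reference */
}
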
/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

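/* With this batch size, __percpu_counter_add() accumulates deltas in the
 * per-CPU counters and folds them into the shared mem.count only when a
 * CPU's local delta reaches the batch, i.e. at most about once per
 * maximum-size datagram. The fast path thus avoids dirtying the shared
 * cacheline, at the cost of percpu_counter_read() lagging the true value
 * by up to num_online_cpus() * frag_percpu_counter_batch.
 */
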
static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}

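/* Two read-side helpers with different costs: frag_mem_limit() above is
 * a cheap, racy read of the shared count that fast-path limit checks can
 * afford, while sum_frag_mem_limit() walks every per-CPU counter for an
 * accurate total (clamped at zero), which suits occasional reporting.
 */
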
static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_move_tail(&q->lru_list, &q->net->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_del(&q->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	spin_unlock(&nf->lru_lock);
}

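/* Illustrative sketch (not part of the original header): because new and
 * freshly touched queues go to the tail, the head of lru_list is always
 * the least-recently-used queue, so an evictor can reclaim memory by
 * killing from the front until usage drops under a threshold. The
 * "low_thresh" parameter stands in for the per-netns sysctl; the real
 * inet_frag_evictor() additionally skips queues already marked
 * INET_FRAG_COMPLETE under the per-queue lock.
 */
static inline void inet_frag_evict_example(struct netns_frags *nf,
					   struct inet_frags *f,
					   int low_thresh)
{
	struct inet_frag_queue *q;

	while (frag_mem_limit(nf) > low_thresh) {
		spin_lock(&nf->lru_lock);
		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}
		q = list_first_entry(&nf->lru_list,
				     struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);	/* hold q across the unlock */
		spin_unlock(&nf->lru_lock);

		inet_frag_kill(q, f);	/* unhash and unlink from the LRU */
		inet_frag_put(q, f);	/* drop our reference */
	}
}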