/* include/linux/percpu_counter.h */

#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

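/*
 * Design note (summarizing the lib/percpu_counter.c implementation):
 * fbc->count holds the approximate global value under fbc->lock, while
 * each CPU accumulates small deltas in its private s32; a CPU's delta is
 * folded into fbc->count only when it reaches the batch size, keeping
 * the common add path free of cross-CPU traffic.
 */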
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

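/*
 * percpu_counter_init() is a macro rather than an inline function so that
 * each call site gets its own static lock_class_key, giving lockdep a
 * distinct lock class per counter initialization site.
 */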
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);

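/*
 * Minimal usage sketch (illustrative only, not part of the original
 * header; "nr_items" is a made-up counter name). On SMP, init allocates
 * the per-cpu counters with the given gfp flags and can fail:
 *
 *	struct percpu_counter nr_items;
 *
 *	if (percpu_counter_init(&nr_items, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_items);
 *	pr_info("items: %lld\n", percpu_counter_sum(&nr_items));
 *	percpu_counter_destroy(&nr_items);
 */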
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

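/*
 * percpu_counter_read() below returns the shared fbc->count without
 * touching the per-cpu deltas, so it may be off by roughly
 * percpu_counter_batch * num_online_cpus(). __percpu_counter_sum()
 * takes fbc->lock and folds in every per-cpu counter for an accurate,
 * but much more expensive, result.
 */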
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter that should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

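/*
 * Illustrative limit check (not part of the original header; "nr_items"
 * and "max_items" are made-up names). On SMP, percpu_counter_compare()
 * trusts the approximate count when it is far from rhs and falls back to
 * an exact sum only when the result is too close to call:
 *
 *	if (percpu_counter_compare(&nr_items, max_items) >= 0)
 *		return -ENOSPC;
 *	percpu_counter_inc(&nr_items);
 */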
#endif /* _LINUX_PERCPU_COUNTER_H */