percpu: move accessors from include/linux/percpu.h to percpu-defs.h
1 #ifndef __LINUX_PERCPU_H
2 #define __LINUX_PERCPU_H
3
4 #include <linux/mmdebug.h>
5 #include <linux/preempt.h>
6 #include <linux/smp.h>
7 #include <linux/cpumask.h>
8 #include <linux/pfn.h>
9 #include <linux/init.h>
10
11 #include <asm/percpu.h>
12
13 /* enough to cover all DEFINE_PER_CPUs in modules */
14 #ifdef CONFIG_MODULES
15 #define PERCPU_MODULE_RESERVE (8 << 10)
16 #else
17 #define PERCPU_MODULE_RESERVE 0
18 #endif
19
20 #ifndef PERCPU_ENOUGH_ROOM
21 #define PERCPU_ENOUGH_ROOM \
22 (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
23 PERCPU_MODULE_RESERVE)
24 #endif
25
26 /* minimum unit size, also is the maximum supported allocation size */
27 #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
28
29 /*
30 * The percpu allocator can serve percpu allocations before slab is
31 * initialized, which allows slab to depend on the percpu allocator.
32 * The following two parameters decide how much resource to
33 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
34 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
35 */
36 #define PERCPU_DYNAMIC_EARLY_SLOTS 128
37 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
38
39 /*
40 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
41 * back on the first chunk for dynamic percpu allocation if arch is
42 * manually allocating and mapping it for faster access (as a part of
43 * large page mapping for example).
44 *
45 * The following values give between one and two pages of free space
46 * after typical minimal boot (2-way SMP, single disk and NIC) with
47 * both defconfig and a distro config on x86_64 and 32. A more
48 * intelligent way to determine this would be nice.
49 */
50 #if BITS_PER_LONG > 32
51 #define PERCPU_DYNAMIC_RESERVE (20 << 10)
52 #else
53 #define PERCPU_DYNAMIC_RESERVE (12 << 10)
54 #endif
55
56 extern void *pcpu_base_addr;
57 extern const unsigned long *pcpu_unit_offsets;
58
59 struct pcpu_group_info {
60 int nr_units; /* aligned # of units */
61 unsigned long base_offset; /* base address offset */
62 unsigned int *cpu_map; /* unit->cpu map, empty
63 * entries contain NR_CPUS */
64 };
65
66 struct pcpu_alloc_info {
67 size_t static_size;
68 size_t reserved_size;
69 size_t dyn_size;
70 size_t unit_size;
71 size_t atom_size;
72 size_t alloc_size;
73 size_t __ai_size; /* internal, don't use */
74 int nr_groups; /* 0 if grouping unnecessary */
75 struct pcpu_group_info groups[];
76 };
77
78 enum pcpu_fc {
79 PCPU_FC_AUTO,
80 PCPU_FC_EMBED,
81 PCPU_FC_PAGE,
82
83 PCPU_FC_NR,
84 };
85 extern const char * const pcpu_fc_names[PCPU_FC_NR];
86
87 extern enum pcpu_fc pcpu_chosen_fc;
88
89 typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
90 size_t align);
91 typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
92 typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
93 typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
94
95 extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
96 int nr_units);
97 extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
98
99 extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
100 void *base_addr);
101
102 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
103 extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
104 size_t atom_size,
105 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
106 pcpu_fc_alloc_fn_t alloc_fn,
107 pcpu_fc_free_fn_t free_fn);
108 #endif
109
110 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
111 extern int __init pcpu_page_first_chunk(size_t reserved_size,
112 pcpu_fc_alloc_fn_t alloc_fn,
113 pcpu_fc_free_fn_t free_fn,
114 pcpu_fc_populate_pte_fn_t populate_pte_fn);
115 #endif
116
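/*
 * Example (a rough sketch, not lifted from any in-tree architecture):
 * a port selecting CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK wires its boot
 * allocator into pcpu_embed_first_chunk() through callbacks matching
 * the pcpu_fc_*_fn_t typedefs above.  pcpu_fc_alloc(), pcpu_fc_free(),
 * arch_early_alloc() and arch_early_free() are hypothetical names; a
 * real port would back them with its early memory allocator, and
 * __per_cpu_offset[] is the arch's own per-cpu offset table.
 *
 *	static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		return arch_early_alloc(size, align);
 *	}
 *
 *	static void __init pcpu_fc_free(void *ptr, size_t size)
 *	{
 *		arch_early_free(ptr, size);
 *	}
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		unsigned long delta;
 *		unsigned int cpu;
 *
 *		if (pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *					   PERCPU_DYNAMIC_RESERVE,
 *					   PAGE_SIZE, NULL,
 *					   pcpu_fc_alloc, pcpu_fc_free) < 0)
 *			panic("failed to initialize first percpu chunk");
 *
 *		delta = (unsigned long)pcpu_base_addr -
 *			(unsigned long)__per_cpu_start;
 *		for_each_possible_cpu(cpu)
 *			__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 *	}
 */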
117 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
118 extern bool is_kernel_percpu_address(unsigned long addr);
119
120 #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
121 extern void __init setup_per_cpu_areas(void);
122 #endif
123 extern void __init percpu_init_late(void);
124
125 extern void __percpu *__alloc_percpu(size_t size, size_t align);
126 extern void free_percpu(void __percpu *__pdata);
127 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
128
129 #define alloc_percpu(type) \
130 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
131
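/*
 * Example (a minimal sketch of the dynamic interface above; the struct
 * and function names are made up for illustration): allocate a per-cpu
 * object with alloc_percpu(), update the local copy with this_cpu_*()
 * accessors, walk all copies with per_cpu_ptr(), and release it with
 * free_percpu().
 *
 *	struct my_stats {
 *		unsigned long events;
 *	};
 *	static struct my_stats __percpu *stats;
 *
 *	static int __init my_init(void)
 *	{
 *		stats = alloc_percpu(struct my_stats);
 *		return stats ? 0 : -ENOMEM;
 *	}
 *
 *	static void my_event(void)
 *	{
 *		this_cpu_inc(stats->events);
 *	}
 *
 *	static unsigned long my_total(void)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += per_cpu_ptr(stats, cpu)->events;
 *		return sum;
 *	}
 *
 *	static void my_exit(void)
 *	{
 *		free_percpu(stats);
 *	}
 */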
132 /*
133 * Branching function to split up a function into a set of functions that
134 * are called for different scalar sizes of the objects handled.
135 */
136
137 extern void __bad_size_call_parameter(void);
138
139 #ifdef CONFIG_DEBUG_PREEMPT
140 extern void __this_cpu_preempt_check(const char *op);
141 #else
142 static inline void __this_cpu_preempt_check(const char *op) { }
143 #endif
144
145 #define __pcpu_size_call_return(stem, variable) \
146 ({ typeof(variable) pscr_ret__; \
147 __verify_pcpu_ptr(&(variable)); \
148 switch(sizeof(variable)) { \
149 case 1: pscr_ret__ = stem##1(variable);break; \
150 case 2: pscr_ret__ = stem##2(variable);break; \
151 case 4: pscr_ret__ = stem##4(variable);break; \
152 case 8: pscr_ret__ = stem##8(variable);break; \
153 default: \
154 __bad_size_call_parameter();break; \
155 } \
156 pscr_ret__; \
157 })
158
159 #define __pcpu_size_call_return2(stem, variable, ...) \
160 ({ \
161 typeof(variable) pscr2_ret__; \
162 __verify_pcpu_ptr(&(variable)); \
163 switch(sizeof(variable)) { \
164 case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
165 case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
166 case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
167 case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
168 default: \
169 __bad_size_call_parameter(); break; \
170 } \
171 pscr2_ret__; \
172 })
173
174 /*
175 * Special handling for cmpxchg_double. cmpxchg_double is passed two
176 * percpu variables. The first has to be aligned to a double word
177 * boundary and the second has to follow directly thereafter.
178 * We enforce this on all architectures even if they don't support
179 * a double cmpxchg instruction, since it's a cheap requirement and
180 * enforcing it everywhere avoids breakage on architectures that do have one.
181 */
182 #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
183 ({ \
184 bool pdcrb_ret__; \
185 __verify_pcpu_ptr(&pcp1); \
186 BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
187 VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \
188 VM_BUG_ON((unsigned long)(&pcp2) != \
189 (unsigned long)(&pcp1) + sizeof(pcp1)); \
190 switch(sizeof(pcp1)) { \
191 case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
192 case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
193 case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
194 case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
195 default: \
196 __bad_size_call_parameter(); break; \
197 } \
198 pdcrb_ret__; \
199 })
200
201 #define __pcpu_size_call(stem, variable, ...) \
202 do { \
203 __verify_pcpu_ptr(&(variable)); \
204 switch(sizeof(variable)) { \
205 case 1: stem##1(variable, __VA_ARGS__);break; \
206 case 2: stem##2(variable, __VA_ARGS__);break; \
207 case 4: stem##4(variable, __VA_ARGS__);break; \
208 case 8: stem##8(variable, __VA_ARGS__);break; \
209 default: \
210 __bad_size_call_parameter();break; \
211 } \
212 } while (0)
213
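/*
 * Illustration of the size dispatch above (not additional API): for a
 * variable declared as
 *
 *	DEFINE_PER_CPU(unsigned int, nr_events);
 *
 * an access such as this_cpu_read(nr_events) expands through
 * __pcpu_size_call_return(this_cpu_read_, nr_events).  sizeof() selects
 * the "case 4" branch, so the access becomes this_cpu_read_4(), which
 * is either an arch-supplied definition from asm/percpu.h or the
 * generic _this_cpu_generic_read() fallback below.  Sizes other than
 * 1, 2, 4 and 8 reach __bad_size_call_parameter(), which has no
 * definition and therefore fails at link time.
 */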
214 /*
215 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
216 *
217 * Optimized manipulation for memory allocated through the per cpu
218 * allocator or for addresses of per cpu variables.
219 *
220 * These operations guarantee exclusivity of access against other operations
221 * on the *same* processor. The assumption is that per cpu data is only
222 * accessed by a single processor instance (the current one).
223 *
224 * The first group is used for accesses that must be done in a
225 * preemption-safe way since the calling context is not preempt safe.
226 * Interrupts may still occur; if an interrupt also modifies the
227 * variable, then RMW actions will not be reliable.
228 *
229 * The arch code can provide optimized functions in two ways:
230 *
231 * 1. Override the function completely. F.e. define this_cpu_add().
232 * The arch must then ensure that the various scalar formats passed
233 * are handled correctly.
234 *
235 * 2. Provide functions for certain scalar sizes. F.e. provide
236 * this_cpu_add_2() to provide per cpu atomic operations for 2 byte
237 * sized RMW actions. If arch code does not provide operations for
238 * a scalar size then the fallback in the generic code will be
239 * used.
240 */
241
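/*
 * Example (a minimal sketch; names are illustrative): a counter that
 * may be bumped from preemptible context.  this_cpu_inc() guarantees
 * that the read-modify-write hits the local CPU's copy even if the
 * task could otherwise be migrated between calculating the address and
 * storing the result.
 *
 *	static DEFINE_PER_CPU(unsigned long, nr_dropped);
 *
 *	static void note_drop(void)
 *	{
 *		this_cpu_inc(nr_dropped);
 *	}
 */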
242 #define _this_cpu_generic_read(pcp) \
243 ({ typeof(pcp) ret__; \
244 preempt_disable(); \
245 ret__ = *this_cpu_ptr(&(pcp)); \
246 preempt_enable(); \
247 ret__; \
248 })
249
250 #ifndef this_cpu_read
251 # ifndef this_cpu_read_1
252 # define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
253 # endif
254 # ifndef this_cpu_read_2
255 # define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
256 # endif
257 # ifndef this_cpu_read_4
258 # define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
259 # endif
260 # ifndef this_cpu_read_8
261 # define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
262 # endif
263 # define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
264 #endif
265
266 #define _this_cpu_generic_to_op(pcp, val, op) \
267 do { \
268 unsigned long flags; \
269 raw_local_irq_save(flags); \
270 *raw_cpu_ptr(&(pcp)) op val; \
271 raw_local_irq_restore(flags); \
272 } while (0)
273
274 #ifndef this_cpu_write
275 # ifndef this_cpu_write_1
276 # define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
277 # endif
278 # ifndef this_cpu_write_2
279 # define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
280 # endif
281 # ifndef this_cpu_write_4
282 # define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
283 # endif
284 # ifndef this_cpu_write_8
285 # define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
286 # endif
287 # define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
288 #endif
289
290 #ifndef this_cpu_add
291 # ifndef this_cpu_add_1
292 # define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
293 # endif
294 # ifndef this_cpu_add_2
295 # define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
296 # endif
297 # ifndef this_cpu_add_4
298 # define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
299 # endif
300 # ifndef this_cpu_add_8
301 # define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
302 # endif
303 # define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
304 #endif
305
306 #ifndef this_cpu_sub
307 # define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val))
308 #endif
309
310 #ifndef this_cpu_inc
311 # define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
312 #endif
313
314 #ifndef this_cpu_dec
315 # define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
316 #endif
317
318 #ifndef this_cpu_and
319 # ifndef this_cpu_and_1
320 # define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
321 # endif
322 # ifndef this_cpu_and_2
323 # define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
324 # endif
325 # ifndef this_cpu_and_4
326 # define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
327 # endif
328 # ifndef this_cpu_and_8
329 # define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
330 # endif
331 # define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
332 #endif
333
334 #ifndef this_cpu_or
335 # ifndef this_cpu_or_1
336 # define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
337 # endif
338 # ifndef this_cpu_or_2
339 # define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
340 # endif
341 # ifndef this_cpu_or_4
342 # define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
343 # endif
344 # ifndef this_cpu_or_8
345 # define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
346 # endif
347 # define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
348 #endif
349
350 #define _this_cpu_generic_add_return(pcp, val) \
351 ({ \
352 typeof(pcp) ret__; \
353 unsigned long flags; \
354 raw_local_irq_save(flags); \
355 raw_cpu_add(pcp, val); \
356 ret__ = raw_cpu_read(pcp); \
357 raw_local_irq_restore(flags); \
358 ret__; \
359 })
360
361 #ifndef this_cpu_add_return
362 # ifndef this_cpu_add_return_1
363 # define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val)
364 # endif
365 # ifndef this_cpu_add_return_2
366 # define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val)
367 # endif
368 # ifndef this_cpu_add_return_4
369 # define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val)
370 # endif
371 # ifndef this_cpu_add_return_8
372 # define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val)
373 # endif
374 # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
375 #endif
376
377 #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val))
378 #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
379 #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
380
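/*
 * Example (a minimal sketch; names are illustrative): the *_return
 * variants hand back the new value, which makes them convenient for
 * per-cpu batching thresholds.
 *
 *	static DEFINE_PER_CPU(unsigned int, batch_count);
 *
 *	static bool batch_full(void)
 *	{
 *		return this_cpu_inc_return(batch_count) >= 64;
 *	}
 */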
381 #define _this_cpu_generic_xchg(pcp, nval) \
382 ({ typeof(pcp) ret__; \
383 unsigned long flags; \
384 raw_local_irq_save(flags); \
385 ret__ = raw_cpu_read(pcp); \
386 raw_cpu_write(pcp, nval); \
387 raw_local_irq_restore(flags); \
388 ret__; \
389 })
390
391 #ifndef this_cpu_xchg
392 # ifndef this_cpu_xchg_1
393 # define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
394 # endif
395 # ifndef this_cpu_xchg_2
396 # define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
397 # endif
398 # ifndef this_cpu_xchg_4
399 # define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
400 # endif
401 # ifndef this_cpu_xchg_8
402 # define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
403 # endif
404 # define this_cpu_xchg(pcp, nval) \
405 __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
406 #endif
407
408 #define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
409 ({ \
410 typeof(pcp) ret__; \
411 unsigned long flags; \
412 raw_local_irq_save(flags); \
413 ret__ = raw_cpu_read(pcp); \
414 if (ret__ == (oval)) \
415 raw_cpu_write(pcp, nval); \
416 raw_local_irq_restore(flags); \
417 ret__; \
418 })
419
420 #ifndef this_cpu_cmpxchg
421 # ifndef this_cpu_cmpxchg_1
422 # define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
423 # endif
424 # ifndef this_cpu_cmpxchg_2
425 # define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
426 # endif
427 # ifndef this_cpu_cmpxchg_4
428 # define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
429 # endif
430 # ifndef this_cpu_cmpxchg_8
431 # define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
432 # endif
433 # define this_cpu_cmpxchg(pcp, oval, nval) \
434 __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
435 #endif
436
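/*
 * Example (a minimal sketch; names are illustrative): this_cpu_cmpxchg()
 * supports the usual lockless update loop, here keeping a per-cpu
 * high-water mark that an interrupt on the same CPU might also update.
 *
 *	static DEFINE_PER_CPU(unsigned long, high_water);
 *
 *	static void note_usage(unsigned long val)
 *	{
 *		unsigned long old = this_cpu_read(high_water);
 *
 *		while (val > old) {
 *			unsigned long prev;
 *
 *			prev = this_cpu_cmpxchg(high_water, old, val);
 *			if (prev == old)
 *				break;
 *			old = prev;
 *		}
 *	}
 */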
437 /*
438 * cmpxchg_double replaces two adjacent scalars at once. The first
439 * two parameters are per cpu variables which have to be of the same
440 * size. A truth value is returned to indicate success or failure
441 * (since a double register result is difficult to handle). There is
442 * very limited hardware support for these operations, so only certain
443 * sizes may work.
444 */
445 #define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
446 ({ \
447 int ret__; \
448 unsigned long flags; \
449 raw_local_irq_save(flags); \
450 ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
451 oval1, oval2, nval1, nval2); \
452 raw_local_irq_restore(flags); \
453 ret__; \
454 })
455
456 #ifndef this_cpu_cmpxchg_double
457 # ifndef this_cpu_cmpxchg_double_1
458 # define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
459 _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
460 # endif
461 # ifndef this_cpu_cmpxchg_double_2
462 # define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
463 _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
464 # endif
465 # ifndef this_cpu_cmpxchg_double_4
466 # define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
467 _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
468 # endif
469 # ifndef this_cpu_cmpxchg_double_8
470 # define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
471 _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
472 # endif
473 # define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
474 __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
475 #endif
476
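/*
 * Example (a rough sketch of the layout this_cpu_cmpxchg_double()
 * expects, modelled loosely on the freelist/transaction-id pairing used
 * by SLUB): both members must have the same size, the first must be
 * aligned to twice that size and the second must follow immediately.
 *
 *	struct freelist_pair {
 *		void *head;
 *		unsigned long tid;
 *	} __aligned(2 * sizeof(void *));
 *
 *	static DEFINE_PER_CPU(struct freelist_pair, fl);
 *
 *	static bool try_push(void *old_head, unsigned long old_tid,
 *			     void *new_head, unsigned long new_tid)
 *	{
 *		return this_cpu_cmpxchg_double(fl.head, fl.tid,
 *					       old_head, old_tid,
 *					       new_head, new_tid);
 *	}
 */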
477 /*
478 * Generic percpu operations for contexts where we do not want to do
479 * any checks for preemption.
480 *
481 * If there is no other protection through preempt disable and/or
482 * disabling interrupts then one of these RMW operations can show unexpected
483 * behavior because the execution thread was rescheduled on another processor
484 * or an interrupt occurred and the same percpu variable was modified from
485 * the interrupt context.
486 */
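/*
 * Example (a minimal sketch; names are illustrative): raw_cpu_*()
 * leaves every check and all protection to the caller, so it is only
 * correct where preemption (and, for data shared with interrupt
 * handlers, interrupts) is already excluded by other means.
 *
 *	static DEFINE_PER_CPU(unsigned long, scratch);
 *
 *	static void update_scratch(unsigned long v)
 *	{
 *		preempt_disable();
 *		raw_cpu_write(scratch, v);
 *		preempt_enable();
 *	}
 */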
487 #ifndef raw_cpu_read
488 # ifndef raw_cpu_read_1
489 # define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp)))
490 # endif
491 # ifndef raw_cpu_read_2
492 # define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp)))
493 # endif
494 # ifndef raw_cpu_read_4
495 # define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp)))
496 # endif
497 # ifndef raw_cpu_read_8
498 # define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp)))
499 # endif
500 # define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp))
501 #endif
502
503 #define raw_cpu_generic_to_op(pcp, val, op) \
504 do { \
505 *raw_cpu_ptr(&(pcp)) op val; \
506 } while (0)
507
508
509 #ifndef raw_cpu_write
510 # ifndef raw_cpu_write_1
511 # define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
512 # endif
513 # ifndef raw_cpu_write_2
514 # define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
515 # endif
516 # ifndef raw_cpu_write_4
517 # define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
518 # endif
519 # ifndef raw_cpu_write_8
520 # define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
521 # endif
522 # define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val))
523 #endif
524
525 #ifndef raw_cpu_add
526 # ifndef raw_cpu_add_1
527 # define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
528 # endif
529 # ifndef raw_cpu_add_2
530 # define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
531 # endif
532 # ifndef raw_cpu_add_4
533 # define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
534 # endif
535 # ifndef raw_cpu_add_8
536 # define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
537 # endif
538 # define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val))
539 #endif
540
541 #ifndef raw_cpu_sub
542 # define raw_cpu_sub(pcp, val) raw_cpu_add((pcp), -(val))
543 #endif
544
545 #ifndef raw_cpu_inc
546 # define raw_cpu_inc(pcp) raw_cpu_add((pcp), 1)
547 #endif
548
549 #ifndef raw_cpu_dec
550 # define raw_cpu_dec(pcp) raw_cpu_sub((pcp), 1)
551 #endif
552
553 #ifndef raw_cpu_and
554 # ifndef raw_cpu_and_1
555 # define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
556 # endif
557 # ifndef raw_cpu_and_2
558 # define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
559 # endif
560 # ifndef raw_cpu_and_4
561 # define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
562 # endif
563 # ifndef raw_cpu_and_8
564 # define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
565 # endif
566 # define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val))
567 #endif
568
569 #ifndef raw_cpu_or
570 # ifndef raw_cpu_or_1
571 # define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
572 # endif
573 # ifndef raw_cpu_or_2
574 # define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
575 # endif
576 # ifndef raw_cpu_or_4
577 # define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
578 # endif
579 # ifndef raw_cpu_or_8
580 # define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
581 # endif
582 # define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val))
583 #endif
584
585 #define raw_cpu_generic_add_return(pcp, val) \
586 ({ \
587 raw_cpu_add(pcp, val); \
588 raw_cpu_read(pcp); \
589 })
590
591 #ifndef raw_cpu_add_return
592 # ifndef raw_cpu_add_return_1
593 # define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val)
594 # endif
595 # ifndef raw_cpu_add_return_2
596 # define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val)
597 # endif
598 # ifndef raw_cpu_add_return_4
599 # define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val)
600 # endif
601 # ifndef raw_cpu_add_return_8
602 # define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val)
603 # endif
604 # define raw_cpu_add_return(pcp, val) \
605 __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
606 #endif
607
608 #define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val))
609 #define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1)
610 #define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1)
611
612 #define raw_cpu_generic_xchg(pcp, nval) \
613 ({ typeof(pcp) ret__; \
614 ret__ = raw_cpu_read(pcp); \
615 raw_cpu_write(pcp, nval); \
616 ret__; \
617 })
618
619 #ifndef raw_cpu_xchg
620 # ifndef raw_cpu_xchg_1
621 # define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
622 # endif
623 # ifndef raw_cpu_xchg_2
624 # define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
625 # endif
626 # ifndef raw_cpu_xchg_4
627 # define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
628 # endif
629 # ifndef raw_cpu_xchg_8
630 # define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
631 # endif
632 # define raw_cpu_xchg(pcp, nval) \
633 __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
634 #endif
635
636 #define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
637 ({ \
638 typeof(pcp) ret__; \
639 ret__ = raw_cpu_read(pcp); \
640 if (ret__ == (oval)) \
641 raw_cpu_write(pcp, nval); \
642 ret__; \
643 })
644
645 #ifndef raw_cpu_cmpxchg
646 # ifndef raw_cpu_cmpxchg_1
647 # define raw_cpu_cmpxchg_1(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
648 # endif
649 # ifndef raw_cpu_cmpxchg_2
650 # define raw_cpu_cmpxchg_2(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
651 # endif
652 # ifndef raw_cpu_cmpxchg_4
653 # define raw_cpu_cmpxchg_4(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
654 # endif
655 # ifndef raw_cpu_cmpxchg_8
656 # define raw_cpu_cmpxchg_8(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
657 # endif
658 # define raw_cpu_cmpxchg(pcp, oval, nval) \
659 __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
660 #endif
661
662 #define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
663 ({ \
664 int __ret = 0; \
665 if (raw_cpu_read(pcp1) == (oval1) && \
666 raw_cpu_read(pcp2) == (oval2)) { \
667 raw_cpu_write(pcp1, (nval1)); \
668 raw_cpu_write(pcp2, (nval2)); \
669 __ret = 1; \
670 } \
671 (__ret); \
672 })
673
674 #ifndef raw_cpu_cmpxchg_double
675 # ifndef raw_cpu_cmpxchg_double_1
676 # define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
677 raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
678 # endif
679 # ifndef raw_cpu_cmpxchg_double_2
680 # define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
681 raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
682 # endif
683 # ifndef raw_cpu_cmpxchg_double_4
684 # define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
685 raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
686 # endif
687 # ifndef raw_cpu_cmpxchg_double_8
688 # define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
689 raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
690 # endif
691 # define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
692 __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
693 #endif
694
695 /*
696 * Generic percpu operations for contexts that are safe from preemption/interrupts.
697 */
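/*
 * Example (a minimal sketch; names are illustrative): __this_cpu_*() is
 * for callers that have already disabled preemption, e.g. code running
 * in hard interrupt context; with CONFIG_DEBUG_PREEMPT,
 * __this_cpu_preempt_check() will warn if that is not the case.
 * irqreturn_t and IRQ_HANDLED come from <linux/interrupt.h>, which is
 * not included here and is used purely for illustration.
 *
 *	static DEFINE_PER_CPU(unsigned long, nr_handled);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		__this_cpu_inc(nr_handled);
 *		return IRQ_HANDLED;
 *	}
 */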
698 #ifndef __this_cpu_read
699 # define __this_cpu_read(pcp) \
700 (__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
701 #endif
702
703 #ifndef __this_cpu_write
704 # define __this_cpu_write(pcp, val) \
705 do { __this_cpu_preempt_check("write"); \
706 __pcpu_size_call(raw_cpu_write_, (pcp), (val)); \
707 } while (0)
708 #endif
709
710 #ifndef __this_cpu_add
711 # define __this_cpu_add(pcp, val) \
712 do { __this_cpu_preempt_check("add"); \
713 __pcpu_size_call(raw_cpu_add_, (pcp), (val)); \
714 } while (0)
715 #endif
716
717 #ifndef __this_cpu_sub
718 # define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val))
719 #endif
720
721 #ifndef __this_cpu_inc
722 # define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
723 #endif
724
725 #ifndef __this_cpu_dec
726 # define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
727 #endif
728
729 #ifndef __this_cpu_and
730 # define __this_cpu_and(pcp, val) \
731 do { __this_cpu_preempt_check("and"); \
732 __pcpu_size_call(raw_cpu_and_, (pcp), (val)); \
733 } while (0)
734
735 #endif
736
737 #ifndef __this_cpu_or
738 # define __this_cpu_or(pcp, val) \
739 do { __this_cpu_preempt_check("or"); \
740 __pcpu_size_call(raw_cpu_or_, (pcp), (val)); \
741 } while (0)
742 #endif
743
744 #ifndef __this_cpu_add_return
745 # define __this_cpu_add_return(pcp, val) \
746 (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
747 #endif
748
749 #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
750 #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
751 #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
752
753 #ifndef __this_cpu_xchg
754 # define __this_cpu_xchg(pcp, nval) \
755 (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
756 #endif
757
758 #ifndef __this_cpu_cmpxchg
759 # define __this_cpu_cmpxchg(pcp, oval, nval) \
760 (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
761 #endif
762
763 #ifndef __this_cpu_cmpxchg_double
764 # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
765 (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
766 #endif
767
768 #endif /* __LINUX_PERCPU_H */