#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/config.h>

extern void cpu_idle(void);

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>
#include <asm/bug.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

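/*
 * Roughly how the generic startup code is expected to drive the three
 * hooks above (an illustrative sketch, not something defined by this
 * header; the actual call sites live in the architecture-independent
 * boot code, and the iteration helper named here is hypothetical):
 *
 *	smp_prepare_cpus(max_cpus);
 *	for_each_cpu_to_boot(cpu)		// hypothetical loop
 *		__cpu_up(cpu);			// usually reached via cpu_up()
 *	smp_cpus_done(max_cpus);
 */
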
/*
 * Call a function on all other processors
 */
extern int smp_call_function (void (*func) (void *info), void *info,
			      int retry, int wait);

/*
 * Call a function on all processors
 */
static inline int on_each_cpu(void (*func) (void *info), void *info,
			      int retry, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, retry, wait);
	func(info);
	preempt_enable();
	return ret;
}

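/*
 * Example use of the interfaces above (an illustrative sketch;
 * bump_local_counter() and its counter are hypothetical, not part of
 * this header):
 *
 *	static void bump_local_counter(void *info)
 *	{
 *		atomic_t *counter = info;
 *		atomic_inc(counter);
 *	}
 *
 *	on_each_cpu(bump_local_counter, &my_counter, 0, 1);
 *
 * With wait set, the call does not return until every CPU has run the
 * handler.  On remote CPUs the handler runs from IPI context and must
 * not sleep; smp_call_function() itself must not be called with
 * interrupts disabled or from a hardware interrupt handler.
 */
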
#define MSG_ALL_BUT_SELF	0x8000	/* Assume <32768 CPU's */
#define MSG_ALL			0x8001

#define MSG_INVALIDATE_TLB	0x0001	/* Remote processor TLB invalidate */
#define MSG_STOP_CPU		0x0002	/* Sent to shut down slave CPU's
					 * when rebooting
					 */
#define MSG_RESCHEDULE		0x0003	/* Reschedule request from master CPU */
#define MSG_CALL_FUNCTION	0x0004	/* Call function on all other CPUs */

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

#else /* !SMP */

/*
 *	These macros fold the SMP functionality into a single CPU system
 */

#if !defined(__smp_processor_id) || !defined(CONFIG_PREEMPT)
# define smp_processor_id()			0
#endif
#define hard_smp_processor_id()			0
#define smp_call_function(func,info,retry,wait)	({ 0; })
#define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus()			1
#define smp_prepare_boot_cpu()			do {} while (0)

#endif /* !SMP */

/*
 * DEBUG_PREEMPT support: check whether smp_processor_id() is being
 * used in a preemption-safe way.
 *
 * An architecture has to enable this debugging code explicitly.
 * It can do so by renaming the smp_processor_id() macro to
 * __smp_processor_id().  This should only be done after some minimal
 * testing, because usually there are a number of false positives
 * that an architecture will trigger.
 *
 * To fix a false positive (i.e. smp_processor_id() use that the
 * debugging code reports but which use for some reason is legal),
 * change the smp_processor_id() reference to _smp_processor_id(),
 * which is the nondebug variant.  NOTE: don't use this to hack around
 * real bugs.
 */
#ifdef __smp_processor_id
# if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
   extern unsigned int smp_processor_id(void);
# else
#  define smp_processor_id() __smp_processor_id()
# endif
# define _smp_processor_id() __smp_processor_id()
#else
# define _smp_processor_id() smp_processor_id()
#endif

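/*
 * Illustrative sketch of how an architecture might opt in to the
 * checking described above (the exact definition is per-arch; this
 * particular form is only an assumption, not mandated by this header):
 *
 *	In <asm/smp.h>:
 *		#define __smp_processor_id()	(current_thread_info()->cpu)
 *
 * With CONFIG_PREEMPT and CONFIG_DEBUG_PREEMPT both set, callers of
 * smp_processor_id() then go through the checking function declared
 * above, while _smp_processor_id() remains the raw, unchecked variant.
 */
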
#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()
#define put_cpu_no_resched()	preempt_enable_no_resched()

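/*
 * Typical usage of the helpers above (an illustrative sketch): pin the
 * caller to its current CPU while per-cpu state is touched, then allow
 * preemption again:
 *
 *	int cpu = get_cpu();
 *	...			// preemption disabled; 'cpu' stays valid here
 *	put_cpu();
 */
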
#endif /* __LINUX_SMP_H */