/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
 * not have done anything significant (but they may have had interrupts
 * enabled briefly - prom_smp_finish() should not be responsible for enabling
 * interrupts...)
 */

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>

#include <asm/r4k-timer.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

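/*
 * The handshake is pairwise: count_start_flag names the slave CPU being
 * synchronised and count_reference carries the count value it should
 * load. count_count_start and count_count_stop are the entry and exit
 * rendezvous counters of each loop pass; each side increments once, so
 * a value of 2 means both CPUs have arrived.
 */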
static atomic_t count_start_flag = ATOMIC_INIT(0);
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);
static atomic_t count_reference = ATOMIC_INIT(0);

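/*
 * COUNTON is how many count cycles ahead of "now" the compare register
 * is re-armed once a sync completes; NR_LOOPS is the number of passes
 * through the rendezvous loop, the first NR_LOOPS - 1 of which only
 * warm the caches before the final synchronised write.
 */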
#define COUNTON 100
#define NR_LOOPS 5

void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;
	unsigned int initcount;

	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * Notify the slaves that it's time to start
	 */
	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, cpu);
	smp_wmb();

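	/*
	 * Only the slave whose CPU number matches count_start_flag will
	 * leave its wait loop, so CPUs are synchronised one at a time.
	 */
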
	/* Count will be initialised to current timer for all CPUs */
	initcount = read_c0_count();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

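	/*
	 * Each pass is a two-step dance: the slave bumps count_count_start
	 * to 1 and spins; the master sees the 1, clears count_count_stop,
	 * then bumps count_count_start to 2 to release the slave. On the
	 * final pass both sides write the count register, and the same
	 * exchange on count_count_stop lets them leave the pass together.
	 */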
	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read(&count_count_start) != 1)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* this lets the slaves write their count register */
		atomic_inc(&count_count_start);

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS - 1)
			write_c0_count(initcount);

		/*
		 * Wait for all slaves to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
	atomic_set(&count_start_flag, 0);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}

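/*
 * The two routines run as a pair: synchronise_count_master() above on
 * an already-running CPU (with interrupts disabled for the duration),
 * synchronise_count_slave() below on the CPU being brought online.
 * Because count_start_flag carries the slave's CPU number, only one
 * slave is synchronised at a time.
 */
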
void synchronise_count_slave(int cpu)
{
	int i;
	unsigned int initcount;

	/*
	 * Not every CPU is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	while (atomic_read(&count_start_flag) != cpu)
		mb();

	/* Count will be initialised to next expire for all CPUs */
	initcount = atomic_read(&count_reference);

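	/*
	 * Mirror image of the master's loop: the slave increments first,
	 * taking each rendezvous counter from 0 to 1, then spins until the
	 * master's increment takes it to 2.
	 */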
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS - 1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}
#undef NR_LOOPS