1 /*
2 * linux/arch/m32r/kernel/smp.c
3 *
4 * M32R SMP support routines.
5 *
6 * Copyright (c) 2001, 2002 Hitoshi Yamamoto
7 *
8 * Taken from i386 version.
9 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
10 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
11 *
12 * This code is released under the GNU General Public License version 2 or
13 * later.
14 */
15
16 #undef DEBUG_SMP
17
18 #include <linux/irq.h>
19 #include <linux/interrupt.h>
20 #include <linux/spinlock.h>
21 #include <linux/mm.h>
22 #include <linux/smp.h>
23 #include <linux/profile.h>
24 #include <linux/cpu.h>
25
26 #include <asm/cacheflush.h>
27 #include <asm/pgalloc.h>
28 #include <asm/atomic.h>
29 #include <asm/io.h>
30 #include <asm/mmu_context.h>
31 #include <asm/m32r.h>
32
33 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
34 /* Data structures and variables */
35 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
36
37 /*
38 * For flush_cache_all()
39 */
40 static DEFINE_SPINLOCK(flushcache_lock);
41 static volatile unsigned long flushcache_cpumask = 0;
42
43 /*
44 * For flush_tlb_others()
45 */
46 static volatile cpumask_t flush_cpumask;
47 static struct mm_struct *flush_mm;
48 static struct vm_area_struct *flush_vma;
49 static volatile unsigned long flush_va;
50 static DEFINE_SPINLOCK(tlbstate_lock);
51 #define FLUSH_ALL 0xffffffff
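/*
 * flush_mm / flush_vma / flush_va and flush_cpumask are only written
 * while tlbstate_lock is held; the CPUs named in flush_cpumask read
 * them in smp_invalidate_interrupt() and acknowledge by clearing
 * their own bit.
 */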
52
53 DECLARE_PER_CPU(int, prof_multiplier);
54 DECLARE_PER_CPU(int, prof_old_multiplier);
55 DECLARE_PER_CPU(int, prof_counter);
56
57 extern spinlock_t ipi_lock[];
58
59 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
60 /* Function Prototypes */
61 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
62
63 void smp_send_reschedule(int);
64 void smp_reschedule_interrupt(void);
65
66 void smp_flush_cache_all(void);
67 void smp_flush_cache_all_interrupt(void);
68
69 void smp_flush_tlb_all(void);
70 static void flush_tlb_all_ipi(void *);
71
72 void smp_flush_tlb_mm(struct mm_struct *);
73 void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
74 unsigned long);
75 void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
76 static void flush_tlb_others(cpumask_t, struct mm_struct *,
77 struct vm_area_struct *, unsigned long);
78 void smp_invalidate_interrupt(void);
79
80 void smp_send_stop(void);
81 static void stop_this_cpu(void *);
82
83 void smp_send_timer(void);
84 void smp_ipi_timer_interrupt(struct pt_regs *);
85 void smp_local_timer_interrupt(void);
86
87 static void send_IPI_allbutself(int, int);
88 static void send_IPI_mask(cpumask_t, int, int);
89 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
90
91 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
92 /* Rescheduling request Routines */
93 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
94
95 /*==========================================================================*
96 * Name: smp_send_reschedule
97 *
98 * Description: This routine requests another CPU to reschedule.
99 * 1.Send 'RESCHEDULE_IPI' to the target CPU.
100 * Request the target CPU to execute 'smp_reschedule_interrupt()'.
101 *
102 * Born on Date: 2002.02.05
103 *
104 * Arguments: cpu_id - Target CPU ID
105 *
106 * Returns: void (cannot fail)
107 *
108 * Modification log:
109 * Date Who Description
110 * ---------- --- --------------------------------------------------------
111 *
112 *==========================================================================*/
113 void smp_send_reschedule(int cpu_id)
114 {
115 WARN_ON(cpu_is_offline(cpu_id));
116 send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
117 }
118
119 /*==========================================================================*
120 * Name: smp_reschedule_interrupt
121 *
122 * Description: This routine executes on the CPU which received
123 * 'RESCHEDULE_IPI'.
124 * Rescheduling is processed on exit from the
125 * interrupt.
126 *
127 * Born on Date: 2002.02.05
128 *
129 * Arguments: NONE
130 *
131 * Returns: void (cannot fail)
132 *
133 * Modification log:
134 * Date Who Description
135 * ---------- --- --------------------------------------------------------
136 *
137 *==========================================================================*/
138 void smp_reschedule_interrupt(void)
139 {
140 /* nothing to do */
141 }
142
143 /*==========================================================================*
144 * Name: smp_flush_cache_all
145 *
146 * Description: This routine sends an 'INVALIDATE_CACHE_IPI' to all other
147 * CPUs in the system.
148 *
149 * Born on Date: 2003-05-28
150 *
151 * Arguments: NONE
152 *
153 * Returns: void (cannot fail)
154 *
155 * Modification log:
156 * Date Who Description
157 * ---------- --- --------------------------------------------------------
158 *
159 *==========================================================================*/
160 void smp_flush_cache_all(void)
161 {
162 cpumask_t cpumask;
163 unsigned long *mask;
164
165 preempt_disable();
166 cpumask = cpu_online_map;
167 cpu_clear(smp_processor_id(), cpumask);
168 spin_lock(&flushcache_lock);
169 mask = cpus_addr(cpumask);
170 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
171 send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
172 _flush_cache_copyback_all();
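/*
 * Each IPI'd CPU runs smp_flush_cache_all_interrupt() below and
 * clears its bit in flushcache_cpumask; spin until they all have.
 */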
173 while (flushcache_cpumask)
174 mb();
175 spin_unlock(&flushcache_lock);
176 preempt_enable();
177 }
178
179 void smp_flush_cache_all_interrupt(void)
180 {
181 _flush_cache_copyback_all();
182 clear_bit(smp_processor_id(), &flushcache_cpumask);
183 }
184
185 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
186 /* TLB flush request Routines */
187 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
188
189 /*==========================================================================*
190 * Name: smp_flush_tlb_all
191 *
192 * Description: This routine flushes all processes' TLBs.
193 * 1.Flush the local TLB with '__flush_tlb_all()'.
194 * 2.Request the other CPUs to execute 'flush_tlb_all_ipi()'.
195 *
196 * Born on Date: 2002.02.05
197 *
198 * Arguments: NONE
199 *
200 * Returns: void (cannot fail)
201 *
202 * Modification log:
203 * Date Who Description
204 * ---------- --- --------------------------------------------------------
205 *
206 *==========================================================================*/
207 void smp_flush_tlb_all(void)
208 {
209 unsigned long flags;
210
211 preempt_disable();
212 local_irq_save(flags);
213 __flush_tlb_all();
214 local_irq_restore(flags);
215 smp_call_function(flush_tlb_all_ipi, NULL, 1);
216 preempt_enable();
217 }
218
219 /*==========================================================================*
220 * Name: flush_tlb_all_ipi
221 *
222 * Description: This routine flushes the local TLB on the receiving CPU.
223 * 1.Execute '__flush_tlb_all()'.
224 *
225 * Born on Date: 2002.02.05
226 *
227 * Arguments: *info - not used
228 *
229 * Returns: void (cannot fail)
230 *
231 * Modification log:
232 * Date Who Description
233 * ---------- --- --------------------------------------------------------
234 *
235 *==========================================================================*/
236 static void flush_tlb_all_ipi(void *info)
237 {
238 __flush_tlb_all();
239 }
240
241 /*==========================================================================*
242 * Name: smp_flush_tlb_mm
243 *
244 * Description: This routine flushes the specified mm context TLB's.
245 *
246 * Born on Date: 2002.02.05
247 *
248 * Arguments: *mm - a pointer to the mm struct whose TLB entries are flushed
249 *
250 * Returns: void (cannot fail)
251 *
252 * Modification log:
253 * Date Who Description
254 * ---------- --- --------------------------------------------------------
255 *
256 *==========================================================================*/
257 void smp_flush_tlb_mm(struct mm_struct *mm)
258 {
259 int cpu_id;
260 cpumask_t cpu_mask;
261 unsigned long *mmc;
262 unsigned long flags;
263
264 preempt_disable();
265 cpu_id = smp_processor_id();
266 mmc = &mm->context[cpu_id];
267 cpu_mask = mm->cpu_vm_mask;
268 cpu_clear(cpu_id, cpu_mask);
269
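/*
 * Drop this CPU's context (ASID) for the mm; if it is the current mm,
 * allocate a fresh context right away, otherwise just remove this CPU
 * from the mm's cpu_vm_mask.
 */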
270 if (*mmc != NO_CONTEXT) {
271 local_irq_save(flags);
272 *mmc = NO_CONTEXT;
273 if (mm == current->mm)
274 activate_context(mm);
275 else
276 cpu_clear(cpu_id, mm->cpu_vm_mask);
277 local_irq_restore(flags);
278 }
279 if (!cpus_empty(cpu_mask))
280 flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
281
282 preempt_enable();
283 }
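/*
 * Sketch of the usual call path, assuming the common m32r tlbflush.h
 * wiring under CONFIG_SMP (flush_tlb_mm() mapping to smp_flush_tlb_mm()):
 * generic mm code such as exit_mmap() calls flush_tlb_mm(mm), which lands
 * here and IPIs the other CPUs that have a context for this mm.
 */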
284
285 /*==========================================================================*
286 * Name: smp_flush_tlb_range
287 *
288 * Description: This routine flushes a range of pages.
289 *
290 * Born on Date: 2002.02.05
291 *
292 * Arguments: *vma - a pointer to the vma struct whose mm's TLB entries are flushed
293 * start - not used
294 * end - not used
295 *
296 * Returns: void (cannot fail)
297 *
298 * Modification log:
299 * Date Who Description
300 * ---------- --- --------------------------------------------------------
301 *
302 *==========================================================================*/
303 void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
304 unsigned long end)
305 {
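/*
 * No fine-grained range flush is attempted here; flushing the whole
 * mm context is the simple over-approximation (start/end are unused).
 */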
306 smp_flush_tlb_mm(vma->vm_mm);
307 }
308
309 /*==========================================================================*
310 * Name: smp_flush_tlb_page
311 *
312 * Description: This routine flushes one page.
313 *
314 * Born on Date: 2002.02.05
315 *
316 * Arguments: *vma - a pointer to the vma struct that includes va
317 * va - virtual address to flush from the TLB
318 *
319 * Returns: void (cannot fail)
320 *
321 * Modification log:
322 * Date Who Description
323 * ---------- --- --------------------------------------------------------
324 *
325 *==========================================================================*/
326 void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
327 {
328 struct mm_struct *mm = vma->vm_mm;
329 int cpu_id;
330 cpumask_t cpu_mask;
331 unsigned long *mmc;
332 unsigned long flags;
333
334 preempt_disable();
335 cpu_id = smp_processor_id();
336 mmc = &mm->context[cpu_id];
337 cpu_mask = mm->cpu_vm_mask;
338 cpu_clear(cpu_id, cpu_mask);
339
340 #ifdef DEBUG_SMP
341 if (!mm)
342 BUG();
343 #endif
344
345 if (*mmc != NO_CONTEXT) {
346 local_irq_save(flags);
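/*
 * TLB entries are tagged with the ASID, so combine the page address
 * with this CPU's ASID for the mm before the single-entry flush.
 */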
347 va &= PAGE_MASK;
348 va |= (*mmc & MMU_CONTEXT_ASID_MASK);
349 __flush_tlb_page(va);
350 local_irq_restore(flags);
351 }
352 if (!cpus_empty(cpu_mask))
353 flush_tlb_others(cpu_mask, mm, vma, va);
354
355 preempt_enable();
356 }
357
358 /*==========================================================================*
359 * Name: flush_tlb_others
360 *
361 * Description: This routine requests the other CPUs to flush their TLBs.
362 * 1.Set up the parameters.
363 * 2.Send 'INVALIDATE_TLB_IPI' to the target CPUs.
364 * Request them to execute 'smp_invalidate_interrupt()'.
365 * 3.Wait until the other CPUs have finished.
366 *
367 * Born on Date: 2002.02.05
368 *
369 * Arguments: cpumask - bitmap of target CPUs
370 * *mm - a pointer to the mm struct whose TLB entries are flushed
371 * *vma - a pointer to the vma struct that includes va
372 * va - virtual address to flush from the TLB
373 *
374 * Returns: void (cannot fail)
375 *
376 * Modification log:
377 * Date Who Description
378 * ---------- --- --------------------------------------------------------
379 *
380 *==========================================================================*/
381 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
382 struct vm_area_struct *vma, unsigned long va)
383 {
384 unsigned long *mask;
385 #ifdef DEBUG_SMP
386 unsigned long flags;
387 __save_flags(flags);
388 if (!(flags & 0x0040)) /* Interrupt Disable NONONO */
389 BUG();
390 #endif /* DEBUG_SMP */
391
392 /*
393 * A couple of (to be removed) sanity checks:
394 *
395 * - we do not send IPIs to not-yet booted CPUs.
396 * - current CPU must not be in mask
397 * - mask must exist :)
398 */
399 BUG_ON(cpus_empty(cpumask));
400
401 BUG_ON(cpu_isset(smp_processor_id(), cpumask));
402 BUG_ON(!mm);
403
404 /* If a CPU which we ran on has gone down, OK. */
405 cpus_and(cpumask, cpumask, cpu_online_map);
406 if (cpus_empty(cpumask))
407 return;
408
409 /*
410 * I'm not happy about this global shared spinlock in the
411 * MM hot path, but we'll see how contended it is.
412 * Temporarily this turns IRQs off, so that lockups are
413 * detected by the NMI watchdog.
414 */
415 spin_lock(&tlbstate_lock);
416
417 flush_mm = mm;
418 flush_vma = vma;
419 flush_va = va;
420 mask = cpus_addr(cpumask);
421 atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
422
423 /*
424 * We have to send the IPI only to
425 * CPUs affected.
426 */
427 send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
428
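/*
 * Each receiver clears its bit in flush_cpumask at the end of
 * smp_invalidate_interrupt(); wait here until they all have done so.
 */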
429 while (!cpus_empty(flush_cpumask)) {
430 /* nothing. lockup detection does not belong here */
431 mb();
432 }
433
434 flush_mm = NULL;
435 flush_vma = NULL;
436 flush_va = 0;
437 spin_unlock(&tlbstate_lock);
438 }
439
440 /*==========================================================================*
441 * Name: smp_invalidate_interrupt
442 *
443 * Description: This routine executes on the CPU which received
444 * 'INVALIDATE_TLB_IPI'.
445 * 1.Flush the local TLB.
446 * 2.Report that the TLB flush has finished.
447 *
448 * Born on Date: 2002.02.05
449 *
450 * Arguments: NONE
451 *
452 * Returns: void (cannot fail)
453 *
454 * Modification log:
455 * Date Who Description
456 * ---------- --- --------------------------------------------------------
457 *
458 *==========================================================================*/
459 void smp_invalidate_interrupt(void)
460 {
461 int cpu_id = smp_processor_id();
462 unsigned long *mmc = &flush_mm->context[cpu_id];
463
464 if (!cpu_isset(cpu_id, flush_cpumask))
465 return;
466
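/*
 * FLUSH_ALL: drop this CPU's context for flush_mm (reloading it if it
 * is the active mm). Otherwise flush only the single page, tagged
 * with this CPU's ASID for the mm.
 */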
467 if (flush_va == FLUSH_ALL) {
468 *mmc = NO_CONTEXT;
469 if (flush_mm == current->active_mm)
470 activate_context(flush_mm);
471 else
472 cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
473 } else {
474 unsigned long va = flush_va;
475
476 if (*mmc != NO_CONTEXT) {
477 va &= PAGE_MASK;
478 va |= (*mmc & MMU_CONTEXT_ASID_MASK);
479 __flush_tlb_page(va);
480 }
481 }
482 cpu_clear(cpu_id, flush_cpumask);
483 }
484
485 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
486 /* Stop CPU request Routines */
487 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
488
489 /*==========================================================================*
490 * Name: smp_send_stop
491 *
492 * Description: This routine requests all other CPUs to stop.
493 * 1.Request the other CPUs to execute 'stop_this_cpu()'.
494 *
495 * Born on Date: 2002.02.05
496 *
497 * Arguments: NONE
498 *
499 * Returns: void (cannot fail)
500 *
501 * Modification log:
502 * Date Who Description
503 * ---------- --- --------------------------------------------------------
504 *
505 *==========================================================================*/
506 void smp_send_stop(void)
507 {
508 smp_call_function(stop_this_cpu, NULL, 0);
509 }
510
511 /*==========================================================================*
512 * Name: stop_this_cpu
513 *
514 * Description: This routine halts the CPU.
515 *
516 * Born on Date: 2002.02.05
517 *
518 * Arguments: NONE
519 *
520 * Returns: void (cannot fail)
521 *
522 * Modification log:
523 * Date Who Description
524 * ---------- --- --------------------------------------------------------
525 *
526 *==========================================================================*/
527 static void stop_this_cpu(void *dummy)
528 {
529 int cpu_id = smp_processor_id();
530
531 /*
532 * Remove this CPU:
533 */
534 cpu_clear(cpu_id, cpu_online_map);
535
536 /*
537 * PSW IE = 1;
538 * IMASK = 0;
539 * goto SLEEP
540 */
541 local_irq_disable();
542 outl(0, M32R_ICU_IMASK_PORTL);
543 inl(M32R_ICU_IMASK_PORTL); /* dummy read */
544 local_irq_enable();
545
546 for ( ; ; );
547 }
548
549 void arch_send_call_function_ipi(cpumask_t mask)
550 {
551 send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
552 }
553
554 void arch_send_call_function_single_ipi(int cpu)
555 {
556 send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
557 }
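/*
 * Note: these two hooks are expected to be invoked by the generic
 * smp_call_function*() helpers in kernel/smp.c; the receiving CPUs
 * then enter the interrupt handlers below via CALL_FUNCTION_IPI /
 * CALL_FUNC_SINGLE_IPI.
 */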
558
559 /*==========================================================================*
560 * Name: smp_call_function_interrupt
561 *
562 * Description: This routine executes on the CPU which received
563 * 'CALL_FUNCTION_IPI'.
564 *
565 * Born on Date: 2002.02.05
566 *
567 * Arguments: NONE
568 *
569 * Returns: void (cannot fail)
570 *
571 * Modification log:
572 * Date Who Description
573 * ---------- --- --------------------------------------------------------
574 *
575 *==========================================================================*/
576 void smp_call_function_interrupt(void)
577 {
578 irq_enter();
579 generic_smp_call_function_interrupt();
580 irq_exit();
581 }
582
583 void smp_call_function_single_interrupt(void)
584 {
585 irq_enter();
586 generic_smp_call_function_single_interrupt();
587 irq_exit();
588 }
589
590 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
591 /* Timer Routines */
592 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
593
594 /*==========================================================================*
595 * Name: smp_send_timer
596 *
597 * Description: This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
598 * in the system.
599 *
600 * Born on Date: 2002.02.05
601 *
602 * Arguments: NONE
603 *
604 * Returns: void (cannot fail)
605 *
606 * Modification log:
607 * Date Who Description
608 * ---------- --- --------------------------------------------------------
609 *
610 *==========================================================================*/
611 void smp_send_timer(void)
612 {
613 send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
614 }
615
616 /*==========================================================================*
617 * Name: smp_ipi_timer_interrupt
618 *
619 * Description: This routine executes on the CPU which received
620 * 'LOCAL_TIMER_IPI'.
621 *
622 * Born on Date: 2002.02.05
623 *
624 * Arguments: *regs - a pointer to the saved register info
625 *
626 * Returns: void (cannot fail)
627 *
628 * Modification log:
629 * Date Who Description
630 * ---------- --- --------------------------------------------------------
631 *
632 *==========================================================================*/
633 void smp_ipi_timer_interrupt(struct pt_regs *regs)
634 {
635 struct pt_regs *old_regs;
636 old_regs = set_irq_regs(regs);
637 irq_enter();
638 smp_local_timer_interrupt();
639 irq_exit();
640 set_irq_regs(old_regs);
641 }
642
643 /*==========================================================================*
644 * Name: smp_local_timer_interrupt
645 *
646 * Description: Local timer interrupt handler. It does both profiling and
647 * process statistics/rescheduling.
648 * We do profiling in every local tick, statistics/rescheduling
649 * happen only every 'profiling multiplier' ticks. The default
650 * multiplier is 1 and it can be changed by writing the new
651 * multiplier value into /proc/profile.
652 *
653 * Born on Date: 2002.02.05
654 *
655 * Arguments: NONE (the saved register info is obtained via get_irq_regs())
656 *
657 * Returns: void (cannot fail)
658 *
659 * Original: arch/i386/kernel/apic.c
660 *
661 * Modification log:
662 * Date Who Description
663 * ---------- --- --------------------------------------------------------
664 * 2003-06-24 hy use per_cpu structure.
665 *==========================================================================*/
666 void smp_local_timer_interrupt(void)
667 {
668 int user = user_mode(get_irq_regs());
669 int cpu_id = smp_processor_id();
670
671 /*
672 * The profiling function is SMP safe. (nothing can mess
673 * around with "current", and the profiling counters are
674 * updated with atomic operations). This is especially
675 * useful with a profiling multiplier != 1
676 */
677
678 profile_tick(CPU_PROFILING);
679
680 if (--per_cpu(prof_counter, cpu_id) <= 0) {
681 /*
682 * The multiplier may have changed since the last time we got
683 * to this point as a result of the user writing to
684 * /proc/profile. In this case we need to adjust the local
685 * timer accordingly.
686 *
687 * Interrupts are already masked off at this point.
688 */
689 per_cpu(prof_counter, cpu_id)
690 = per_cpu(prof_multiplier, cpu_id);
691 if (per_cpu(prof_counter, cpu_id)
692 != per_cpu(prof_old_multiplier, cpu_id))
693 {
694 per_cpu(prof_old_multiplier, cpu_id)
695 = per_cpu(prof_counter, cpu_id);
696 }
697
698 update_process_times(user);
699 }
700 }
701
702 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
703 /* Send IPI Routines */
704 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
705
706 /*==========================================================================*
707 * Name: send_IPI_allbutself
708 *
709 * Description: This routine sends an IPI to all other CPUs in the system.
710 *
711 * Born on Date: 2002.02.05
712 *
713 * Arguments: ipi_num - IPI number
714 * try - 0 : Always send the IPI (wait for any previous one).
715 * !0 : Do not send if the target CPU has not yet
716 * accepted the previous IPI.
717 *
718 * Returns: void (cannot fail)
719 *
720 * Modification log:
721 * Date Who Description
722 * ---------- --- --------------------------------------------------------
723 *
724 *==========================================================================*/
725 static void send_IPI_allbutself(int ipi_num, int try)
726 {
727 cpumask_t cpumask;
728
729 cpumask = cpu_online_map;
730 cpu_clear(smp_processor_id(), cpumask);
731
732 send_IPI_mask(cpumask, ipi_num, try);
733 }
734
735 /*==========================================================================*
736 * Name: send_IPI_mask
737 *
738 * Description: This routine sends an IPI to the specified CPUs in the system.
739 *
740 * Born on Date: 2002.02.05
741 *
742 * Arguments: cpumask - Bitmap of the target CPUs' logical IDs
743 * ipi_num - IPI number
744 * try - 0 : Always send the IPI (wait for any previous one).
745 * !0 : Do not send if the target CPU has not yet
746 * accepted the previous IPI.
747 *
748 * Returns: void (cannot fail)
749 *
750 * Modification log:
751 * Date Who Description
752 * ---------- --- --------------------------------------------------------
753 *
754 *==========================================================================*/
755 static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
756 {
757 cpumask_t physid_mask, tmp;
758 int cpu_id, phys_id;
759 int num_cpus = num_online_cpus();
760
761 if (num_cpus <= 1) /* NO MP */
762 return;
763
764 cpus_and(tmp, cpumask, cpu_online_map);
765 BUG_ON(!cpus_equal(cpumask, tmp));
766
767 physid_mask = CPU_MASK_NONE;
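/*
 * Translate each logical CPU ID in the mask into its physical ID;
 * CPUs without a physical mapping (cpu_to_physid() == -1) are skipped.
 */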
768 for_each_cpu_mask(cpu_id, cpumask){
769 if ((phys_id = cpu_to_physid(cpu_id)) != -1)
770 cpu_set(phys_id, physid_mask);
771 }
772
773 send_IPI_mask_phys(physid_mask, ipi_num, try);
774 }
775
776 /*==========================================================================*
777 * Name: send_IPI_mask_phys
778 *
779 * Description: This routine sends an IPI to other CPUs in the system.
780 *
781 * Born on Date: 2002.02.05
782 *
783 * Arguments: physid_mask - Bitmap of the target CPUs' physical IDs
784 * ipi_num - IPI number
785 * try - 0 : Always send the IPI (wait for any previous one).
786 * !0 : Do not send if the target CPU has not yet
787 * accepted the previous IPI.
788 *
789 * Returns: IPICRi register value.
790 *
791 * Modification log:
792 * Date Who Description
793 * ---------- --- --------------------------------------------------------
794 *
795 *==========================================================================*/
796 unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
797 int try)
798 {
799 spinlock_t *ipilock;
800 volatile unsigned long *ipicr_addr;
801 unsigned long ipicr_val;
802 unsigned long my_physid_mask;
803 unsigned long mask = cpus_addr(physid_mask)[0];
804
805
806 if (mask & ~physids_coerce(phys_cpu_present_map))
807 BUG();
808 if (ipi_num >= NR_IPIS)
809 BUG();
810
811 mask <<= IPI_SHIFT;
812 ipilock = &ipi_lock[ipi_num];
813 ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
814 + (ipi_num << 2));
815 my_physid_mask = ~(1 << smp_processor_id());
816
817 /*
818 * lock ipi_lock[i]
819 * check IPICRi == 0
820 * write IPICRi (send IPIi)
821 * unlock ipi_lock[i]
822 */
823 spin_lock(ipilock);
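/*
 * asm operands: %0 = scratch / returned IPICR value, %1 = IPICR address,
 * %2 = request bits to write (mask << IPI_SHIFT), %3 = 'try' flag,
 * %4 = my_physid_mask (ignore this CPU's own bit when checking).
 */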
824 __asm__ __volatile__ (
825 ";; CHECK IPICRi == 0 \n\t"
826 ".fillinsn \n"
827 "1: \n\t"
828 "ld %0, @%1 \n\t"
829 "and %0, %4 \n\t"
830 "beqz %0, 2f \n\t"
831 "bnez %3, 3f \n\t"
832 "bra 1b \n\t"
833 ";; WRITE IPICRi (send IPIi) \n\t"
834 ".fillinsn \n"
835 "2: \n\t"
836 "st %2, @%1 \n\t"
837 ".fillinsn \n"
838 "3: \n\t"
839 : "=&r"(ipicr_val)
840 : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
841 : "memory"
842 );
843 spin_unlock(ipilock);
844
845 return ipicr_val;
846 }