/* Copyright (C) 2004 Mips Technologies, Inc */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>
/*
 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
 */
/*
 * MIPSCPU_INT_BASE is identically defined in both
 * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
 * but as yet there's no properly organized include structure that
 * will ensure that the right *int.h file will be included for a
 * given platform build.
 */

#define MIPSCPU_INT_BASE	16

#define MIPS_CPU_IPI_IRQ	1
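/*
 * Note: IPI IRQ 1 selects the second software interrupt bit
 * (Cause.IP1, i.e. C_SW1), which is why the IPI code below uses
 * C_SW1 and (0x100 << MIPS_CPU_IPI_IRQ) as its Cause/Status masks.
 */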
#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)
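/*
 * Usage note: these macros are deliberately unhygienic - they expect
 * "flags" and "mtflags" variables to be declared in the invoking
 * function, e.g.:
 *
 *	long flags;
 *	int mtflags;
 *
 *	LOCK_MT_PRA();
 *	... per-VPE/per-TC register accesses ...
 *	UNLOCK_MT_PRA();
 */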
/*
 * Data structures purely associated with SMTC parallelism
 */

/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
/*
 * Clock interrupt "latch" buffers, per "CPU"
 */

unsigned int ipi_timer_latch[NR_CPUS];
/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4

struct smtc_ipi_q IPIQ[NR_CPUS];
struct smtc_ipi_q freeIPIq;
/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
void setup_cross_vpe_interrupts(void);
void init_smtc_stats(void);
/* Global SMTC Status */

unsigned int smtc_status = 0;

/* Boot command line configuration overrides */

static int vpelimit = 0;
static int tclimit = 0;
static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;
static int __init maxvpes(char *str)
{
	get_option(&str, &vpelimit);
	return 1;
}

static int __init maxtcs(char *str)
{
	get_option(&str, &tclimit);
	return 1;
}

static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}
147 __setup("maxvpes=", maxvpes
);
148 __setup("maxtcs=", maxtcs
);
149 __setup("ipibufs=", ipibufs
);
150 __setup("nostlb", stlb_disable
);
151 __setup("asidmask=", asidmask_set
);
/* Enable additional debug checks before going into CPU idle loop */
#define SMTC_IDLE_HOOK_DEBUG

#ifdef SMTC_IDLE_HOOK_DEBUG

static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}

__setup("hangtrig", hangtrig_enable);
#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);
int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
int vpemask[2][8] = {{0, 1, 1, 0, 0, 0, 0, 1}, {0, 1, 0, 0, 0, 0, 0, 1}};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];

#endif /* SMTC_IDLE_HOOK_DEBUG */
/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */

void __init sanitize_tlb_entries(void)
{
	printk("Deprecated sanitize_tlb_entries() invoked\n");
}
/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~ MVPCONTROL_VPC);
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}
					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC);
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			tlbsiz = tlbsiz & 0x3f; /* MIPS32 limits TLB indices to 64 */
			cpu_data[0].tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}
/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time.
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 */
int __init mipsmt_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);

	/* One of those TC's is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}
/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */

static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	write_tc_c0_tccontext(0);
	/* Bind TC to VPE */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
}
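/*
 * Note that smtc_tc_setup() deliberately leaves the TC halted (though
 * marked Activated, with interrupt exemption cleared); the Halt bit is
 * cleared later by smtc_boot_secondary(), once the TC's PC, SP and GP
 * have been set up.
 */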
void mipsmt_prepare_cpus(void)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);
	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		ipi_timer_latch[i] = 0;
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu++;
	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb)
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

#ifdef SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* SMTC_IDLE_HOOK_DEBUG */
	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC);

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	tcpervpe = ntc / nvpe;
	slop = ntc % nvpe;	/* Residual TCs, < NVPE */
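	/*
	 * Worked example: with ntc = 5 TCs and nvpe = 2 VPEs, tcpervpe
	 * is 2 and slop is 1, so the distribution loop below hands the
	 * first VPE three TCs and the second VPE two.
	 */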
	/* Set up shared TLB */
	smtc_configure_tlb();
	for (tc = 0, vpe = 0; (vpe < nvpe) && (tc < ntc); vpe++) {
		settc(tc);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);

		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (slop) {
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
			slop--;
		}
		printk("\n");
		if (vpe != 0) {
			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count());
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}
	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol(read_c0_mvpcontrol() & ~ MVPCONTROL_VPC);
	/* Set up coprocessor affinity CPU mask(s) */

	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}
	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	if (nvpe > 1)
		setup_cross_vpe_interrupts();

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}
	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	evpe(EVPE_ENABLE);
	emt(EMT_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 *
 */
void smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)idle->thread_info);

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}
void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * plat_timer_setup has already been invoked by init/main
	 * on the "boot" TC. Like the per_cpu_trap_init() hack, this
	 * assumes that SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE)
	    != cpu_data[smp_processor_id() - 1].vpe_id)) {
		write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	}

	local_irq_enable();
}
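/*
 * Note: the write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ)
 * above arms that VPE's CP0 timer one tick into the future; Count and
 * Compare are per-VPE resources, so this only needs doing once per
 * VPE, by the first TC brought up on it.
 */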
void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}
/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in irq_hwmask[].
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}
/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */
void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}
/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
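/*
 * Semantically, atomic_postincrement(pv) below is an LL/SC-based
 * atomic rendering of:
 *
 *	int tmp = *pv;
 *	*pv = tmp + 1;
 *	return tmp;
 *
 * i.e. a fetch-and-increment that returns the pre-increment value.
 */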
static __inline__ int atomic_postincrement(unsigned int *pv)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	addu	%1, %0, 1				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	"	sync						\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}
/* No longer used in IPI dispatch, but retained for future recycling */

static __inline__ int atomic_postclear(unsigned int *pv)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	or	%1, $0, $0				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	"	sync						\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC.
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}
/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}
static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}

static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}
void ipi_decode(struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int dest_copy = pipi->dest;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		/* Invoke Clock "Interrupt" */
		ipi_timer_latch[dest_copy] = 0;
#ifdef SMTC_IDLE_HOOK_DEBUG
		clock_hang_reported[dest_copy] = 0;
#endif /* SMTC_IDLE_HOOK_DEBUG */
		local_timer_interrupt(0, NULL);
		break;
	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}
void deferred_smtc_ipi(void)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
	int q = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 */
	if (IPIQ[q].head != NULL) {
		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
			/* ipi_decode() should be called with interrupts off */
			local_irq_save(flags);
			ipi_decode(pipi);
			local_irq_restore(flags);
		}
	}
}
/*
 * Send clock tick to all TCs except the one executing the function
 */

void smtc_timer_broadcast(int vpe)
{
	int cpu;
	int myTC = cpu_data[smp_processor_id()].tc_id;
	int myVPE = cpu_data[smp_processor_id()].vpe_id;

	smtc_cpu_stats[smp_processor_id()].timerints++;
	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id == myVPE &&
		    cpu_data[cpu].tc_id != myTC)
			smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
	}
}
/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell"
 * hardware interrupts.
 */

static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ;
static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					/* Requeue for the target TC to pick up later */
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}
static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi;

void setup_cross_vpe_interrupts(void)
{
	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	irq_ipi.handler = ipi_interrupt;
	irq_ipi.flags = IRQF_DISABLED;
	irq_ipi.name = "SMTC_IPI";

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 */

void smtc_ipi_replay(void)
{
	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * If we use the smtc_ipi_qdepth() macro, we'll get an
	 * exact number - but we'll also disable interrupts
	 * and create a window of failure where a new IPI gets
	 * queued after we test the depth but before we re-enable
	 * interrupts. So long as IXMT never gets set, however,
	 * we should be OK: If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt.
	 */
	if (IPIQ[smp_processor_id()].depth > 0) {
		struct smtc_ipi *pipi;
		extern void self_ipi(struct smtc_ipi *);

		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
			self_ipi(pipi);
			smtc_cpu_stats[smp_processor_id()].selfipis++;
		}
	}
}
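/*
 * Note (an inference from the "Instant Replay" comment further below):
 * when CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY is enabled, deferred IPIs are
 * replayed as interrupts are re-enabled, so the explicit call to
 * smtc_ipi_replay() in the idle hook is compiled out in that
 * configuration.
 */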
void smtc_idle_loop_hook(void)
{
#ifdef SMTC_IDLE_HOOK_DEBUG
	int im;
	unsigned long flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768]; /* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = cpu_data[smp_processor_id()].vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	/*
	 * Now that we limit outstanding timer IPIs, check for hung TC
	 */
	for (tc = 0; tc < NR_CPUS; tc++) {
		/* Don't check ourself - we'll dequeue IPIs just below */
		if ((tc != smp_processor_id()) &&
		    ipi_timer_latch[tc] > timerq_limit) {
			if (clock_hang_reported[tc] == 0) {
				pdb_msg += sprintf(pdb_msg,
					"TC %d looks hung with timer latch at %d\n",
					tc, ipi_timer_latch[tc]);
				clock_hang_reported[tc]++;
			}
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* SMTC_IDLE_HOOK_DEBUG */
	/*
	 * Replay any accumulated deferred IPIs. If "Instant Replay"
	 * is in use, there should never be any.
	 */
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	smtc_ipi_replay();
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}
void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("Timer IPI Backlogs:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %d\n", i, ipi_timer_latch[i]);
	}
	printk("%d Recoveries of \"stolen\" FPU\n",
	       atomic_read(&smtc_fpu_recoveries));
}
/*
 * TLB management routines special to SMTC
 */

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even so most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		if (!((asid += ASID_INC) & ASID_MASK)) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for (i = 0; i < num_online_cpus(); i++) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for (i = 0; i < num_online_cpus(); i++) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}
/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}
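/*
 * Design note on the invalidation scheme above: writing EntryHi with
 * CKSEG0 + (entry << (PAGE_SHIFT + 1)) parks each invalidated entry at
 * a distinct address in the unmapped kseg0 region, guaranteeing that
 * no two TLB entries ever match the same VPN2 - duplicate matches
 * would raise a machine check on MIPS.
 */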
/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}
/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}