diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 92880cee449e147043f4007204f286b6cd869f88..7429ad09fbe3e1178ad37f0e34ed2f1651ac70ac 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -30,6 +30,7 @@
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
+#include <asm/dsemul.h>
 #include <asm/dsp.h>
 #include <asm/fpu.h>
 #include <asm/msa.h>
@@ -68,17 +69,20 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
        lose_fpu(0);
        clear_thread_flag(TIF_MSA_CTX_LIVE);
        clear_used_math();
+       atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
        init_dsp();
        regs->cp0_epc = pc;
        regs->regs[29] = sp;
 }
 
-void exit_thread(void)
-{
-}
-
-void flush_thread(void)
+void exit_thread(struct task_struct *tsk)
 {
+       /*
+        * User threads may have allocated a delay slot emulation frame.
+        * If so, clean up that allocation.
+        */
+       if (!(current->flags & PF_KTHREAD))
+               dsemul_thread_cleanup(tsk);
 }
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
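
The exit path above relies on a sentinel: thread.bd_emu_frame holds either a frame index or BD_EMUFRAME_NONE, so only threads that actually allocated a delay slot emulation frame pay for cleanup. A minimal user-space sketch of that pattern follows; the bitmap pool and every name in it are illustrative, not the kernel's dsemul implementation.

/*
 * Illustrative sketch only: a per-thread atomic slot index where
 * SLOT_NONE means "nothing allocated". Frames live in a small pool
 * tracked by one bit each; cleanup is a no-op for threads that never
 * allocated. None of these names are the kernel's.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NFRAMES   32
#define SLOT_NONE (-1)

static atomic_uint frame_bitmap;                  /* one bit per frame */
static _Thread_local atomic_int bd_frame = SLOT_NONE;

static int frame_alloc(void)
{
	unsigned int old, new;
	int bit;

	do {
		old = atomic_load(&frame_bitmap);
		bit = __builtin_ffs(~old) - 1;    /* lowest clear bit */
		if (bit < 0 || bit >= NFRAMES)
			return SLOT_NONE;         /* pool exhausted */
		new = old | (1u << bit);
	} while (!atomic_compare_exchange_weak(&frame_bitmap, &old, new));

	return bit;
}

static void thread_cleanup(void)
{
	int fr = atomic_exchange(&bd_frame, SLOT_NONE);

	/* Threads that never allocated a frame exit without work. */
	if (fr != SLOT_NONE)
		atomic_fetch_and(&frame_bitmap, ~(1u << fr));
}

int main(void)
{
	atomic_store(&bd_frame, frame_alloc());
	printf("allocated frame %d\n", atomic_load(&bd_frame));
	thread_cleanup();
	printf("bitmap after cleanup: %#x\n", atomic_load(&frame_bitmap));
	return 0;
}
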
@@ -167,6 +171,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        clear_tsk_thread_flag(p, TIF_FPUBOUND);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
+       atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+
        if (clone_flags & CLONE_SETTLS)
                ti->tp_value = regs->regs[7];
 
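
For the same reason, the copy_thread() hunk above resets the child's bd_emu_frame: the duplicated task struct would otherwise carry the parent's frame index, and the same frame would be freed twice at exit. A hypothetical analogue in the sketch's terms:

#include <stdatomic.h>

#define SLOT_NONE (-1)

/* A forked child starts with no frame, whatever was copied from the
 * parent; only the parent still owns its index. (Illustrative.) */
static void child_frame_init(atomic_int *child_frame)
{
	atomic_store(child_frame, SLOT_NONE);
}
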
@@ -353,7 +359,7 @@ static int get_frame_info(struct mips_frame_info *info)
                return 0;
        if (info->pc_offset < 0) /* leaf */
                return 1;
-       /* prologue seems boggus... */
+       /* prologue seems bogus... */
 err:
        return -1;
 }
@@ -455,7 +461,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
                    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
                        regs = (struct pt_regs *)*sp;
                        pc = regs->cp0_epc;
-                       if (__kernel_text_address(pc)) {
+                       if (!user_mode(regs) && __kernel_text_address(pc)) {
                                *sp = regs->regs[29];
                                *ra = regs->regs[31];
                                return pc;
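
The added !user_mode(regs) test matters because the unwinder has just found a saved pt_regs block on the kernel stack: if those registers were saved on entry from user mode, cp0_epc, regs[29] and regs[31] point into user space and must not be followed as kernel return state. A generic sketch of that stopping rule, with illustrative types and field names:

/* Stop unwinding at register frames saved on entry from user mode;
 * their pc/sp/ra are user addresses, not kernel return state. */
struct saved_frame { unsigned long pc, sp, ra; int from_user; };

int unwind_through(const struct saved_frame *f,
		   unsigned long *pc, unsigned long *sp, unsigned long *ra)
{
	if (f->from_user)
		return 0;        /* nothing trustworthy past this point */
	*pc = f->pc;
	*sp = f->sp;
	*ra = f->ra;
	return 1;                /* caller may keep walking */
}
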
@@ -580,11 +586,19 @@ int mips_get_process_fp_mode(struct task_struct *task)
        return value;
 }
 
+static void prepare_for_fp_mode_switch(void *info)
+{
+       struct mm_struct *mm = info;
+
+       if (current->mm == mm)
+               lose_fpu(1);
+}
+
 int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 {
        const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
-       unsigned long switch_count;
        struct task_struct *t;
+       int max_users;
 
        /* Check the value is valid */
        if (value & ~known_bits)
@@ -601,6 +615,9 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
        if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
                return -EOPNOTSUPP;
 
+       /* Proceed with the mode switch */
+       preempt_disable();
+
        /* Save FP & vector context, then disable FPU & MSA */
        if (task->signal == current->signal)
                lose_fpu(1);
@@ -610,31 +627,17 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
        smp_mb__after_atomic();
 
        /*
-        * If there are multiple online CPUs then wait until all threads whose
-        * FP mode is about to change have been context switched. This approach
-        * allows us to only worry about whether an FP mode switch is in
-        * progress when FP is first used in a tasks time slice. Pretty much all
-        * of the mode switch overhead can thus be confined to cases where mode
-        * switches are actually occurring. That is, to here. However for the
-        * thread performing the mode switch it may take a while...
+        * If there are multiple online CPUs then force any which are running
+        * threads in this process to lose their FPU context, which they can't
+        * regain until fp_mode_switching is cleared later.
         */
        if (num_online_cpus() > 1) {
-               spin_lock_irq(&task->sighand->siglock);
-
-               for_each_thread(task, t) {
-                       if (t == current)
-                               continue;
+               /* No need to send an IPI for the local CPU */
+               max_users = (task->mm == current->mm) ? 1 : 0;
 
-                       switch_count = t->nvcsw + t->nivcsw;
-
-                       do {
-                               spin_unlock_irq(&task->sighand->siglock);
-                               cond_resched();
-                               spin_lock_irq(&task->sighand->siglock);
-                       } while ((t->nvcsw + t->nivcsw) == switch_count);
-               }
-
-               spin_unlock_irq(&task->sighand->siglock);
+               if (atomic_read(&current->mm->mm_users) > max_users)
+                       smp_call_function(prepare_for_fp_mode_switch,
+                                         (void *)current->mm, 1);
        }
 
        /*
@@ -659,6 +662,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 
        /* Allow threads to use FP again */
        atomic_set(&task->mm->context.fp_mode_switching, 0);
+       preempt_enable();
 
        return 0;
 }
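
The rewritten section replaces the old scheme, which polled each sibling thread's context switch counters until every thread had been scheduled out, with a direct broadcast: preemption is disabled, and every other CPU running a thread of this mm is sent an IPI whose handler calls lose_fpu(1), so no thread can keep stale FPU state across the mode change. The max_users comparison simply avoids an IPI when the caller's CPU is the only user of the mm. Below is a user-space sketch of the broadcast pattern, with pthread_kill() standing in for smp_call_function(); all names are illustrative.

#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 4

static atomic_int fp_mode;                         /* the global mode */
static _Thread_local volatile sig_atomic_t cached_mode = -1;
static pthread_t peers[NTHREADS];

/* Runs in the signalled thread: drop cached state, like lose_fpu(). */
static void drop_cached_state(int sig)
{
	(void)sig;
	cached_mode = -1;
}

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		if (cached_mode < 0)               /* reload on next use */
			cached_mode = atomic_load(&fp_mode);
		usleep(1000);
	}
	return NULL;
}

static void set_mode(int new_mode)
{
	atomic_store(&fp_mode, new_mode);

	/* Skip ourselves -- mirrors the max_users check above. */
	for (int i = 0; i < NTHREADS; i++)
		if (!pthread_equal(peers[i], pthread_self()))
			pthread_kill(peers[i], SIGUSR1);
}

int main(void)
{
	signal(SIGUSR1, drop_cached_state);

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&peers[i], NULL, worker, NULL);

	sleep(1);
	set_mode(1);     /* every peer re-reads fp_mode before reuse */
	sleep(1);
	puts("mode switch broadcast complete");
	return 0;
}

In the kernel version, the surrounding preempt_disable()/preempt_enable() pair additionally keeps the caller from migrating between deciding which CPUs need the notification and sending it.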