Diffstat (limited to 'arch/arm/vfp/vfpmodule.c')
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 74
1 file changed, 53 insertions(+), 21 deletions(-)
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index b68efe643a12..d44867fc0c5e 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -56,6 +56,34 @@ extern unsigned int VFP_arch_feroceon __alias(VFP_arch);
union vfp_state *vfp_current_hw_state[NR_CPUS];
/*
+ * Claim ownership of the VFP unit.
+ *
+ * The caller may change VFP registers until vfp_state_release() is called.
+ *
+ * local_bh_disable() is used to disable preemption and to disable VFP
+ * processing in softirq context. On PREEMPT_RT kernels local_bh_disable() is
+ * not sufficient because it only serializes soft interrupt related sections
+ * via a local lock while remaining preemptible. Disabling preemption is the
+ * right choice here: on RT kernels bottom half processing always runs in
+ * thread context, so disabling preemption implicitly prevents it as well.
+ */
+static void vfp_state_hold(void)
+{
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_disable();
+ else
+ preempt_disable();
+}
+
+static void vfp_state_release(void)
+{
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_enable();
+ else
+ preempt_enable();
+}
+
+/*
* Is 'thread's most up to date state stored in this CPUs hardware?
* Must be called from non-preemptible context.
*/
@@ -240,7 +268,7 @@ static void vfp_panic(char *reason, u32 inst)
/*
* Process bitmask of exception conditions.
*/
-static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
+static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr)
{
int si_code = 0;
@@ -248,8 +276,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
if (exceptions == VFP_EXCEPTION_ERROR) {
vfp_panic("unhandled bounce", inst);
- vfp_raise_sigfpe(FPE_FLTINV, regs);
- return;
+ return FPE_FLTINV;
}
/*
@@ -277,8 +304,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
- if (si_code)
- vfp_raise_sigfpe(si_code, regs);
+ return si_code;
}
/*
@@ -324,6 +350,8 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
u32 fpscr, orig_fpscr, fpsid, exceptions;
+ int si_code2 = 0;
+ int si_code = 0;
pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
@@ -369,8 +397,8 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
* unallocated VFP instruction but with FPSCR.IXE set and not
* on VFP subarch 1.
*/
- vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
- return;
+ si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr);
+ goto exit;
}
/*
@@ -394,14 +422,14 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
*/
exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
if (exceptions)
- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
/*
* If there isn't a second FP instruction, exit now. Note that
* the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
*/
if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
- return;
+ goto exit;
/*
* The barrier() here prevents fpinst2 being read
@@ -413,7 +441,13 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
emulate:
exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
if (exceptions)
- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
+exit:
+ vfp_state_release();
+ if (si_code2)
+ vfp_raise_sigfpe(si_code2, regs);
+ if (si_code)
+ vfp_raise_sigfpe(si_code, regs);
}
static void vfp_enable(void *unused)
@@ -512,11 +546,9 @@ static inline void vfp_pm_init(void) { }
*/
void vfp_sync_hwstate(struct thread_info *thread)
{
- unsigned int cpu = get_cpu();
+ vfp_state_hold();
- local_bh_disable();
-
- if (vfp_state_in_hw(cpu, thread)) {
+ if (vfp_state_in_hw(raw_smp_processor_id(), thread)) {
u32 fpexc = fmrx(FPEXC);
/*
@@ -527,8 +559,7 @@ void vfp_sync_hwstate(struct thread_info *thread)
fmxr(FPEXC, fpexc);
}
- local_bh_enable();
- put_cpu();
+ vfp_state_release();
}
/* Ensure that the thread reloads the hardware VFP state on the next use. */
@@ -683,7 +714,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
if (!user_mode(regs))
return vfp_kmode_exception(regs, trigger);
- local_bh_disable();
+ vfp_state_hold();
fpexc = fmrx(FPEXC);
/*
@@ -748,6 +779,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
* replay the instruction that trapped.
*/
fmxr(FPEXC, fpexc);
+ vfp_state_release();
} else {
/* Check for synchronous or asynchronous exceptions */
if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
@@ -762,17 +794,17 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
if (!(fpscr & FPSCR_IXE)) {
if (!(fpscr & FPSCR_LENGTH_MASK)) {
pr_debug("not VFP\n");
- local_bh_enable();
+ vfp_state_release();
return -ENOEXEC;
}
fpexc |= FPEXC_DEX;
}
}
bounce: regs->ARM_pc += 4;
+ /* VFP_bounce() will invoke vfp_state_release() */
VFP_bounce(trigger, fpexc, regs);
}
- local_bh_enable();
return 0;
}
@@ -837,7 +869,7 @@ void kernel_neon_begin(void)
unsigned int cpu;
u32 fpexc;
- local_bh_disable();
+ vfp_state_hold();
/*
* Kernel mode NEON is only allowed outside of hardirq context with
@@ -868,7 +900,7 @@ void kernel_neon_end(void)
{
/* Disable the NEON/VFP unit. */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
- local_bh_enable();
+ vfp_state_release();
}
EXPORT_SYMBOL(kernel_neon_end);
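
For reference, a minimal standalone sketch of the ownership pattern this patch introduces: vfp_state_hold() maps to local_bh_disable() on non-PREEMPT_RT kernels and to preempt_disable() on PREEMPT_RT, and every hold must be paired with exactly one vfp_state_release(). The vfp_example_consumer() caller below is hypothetical and only illustrates the pairing; the real callers are vfp_sync_hwstate(), vfp_support_entry(), VFP_bounce() (which releases before raising SIGFPE) and kernel_neon_begin()/kernel_neon_end(), as seen in the hunks above.

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/bottom_half.h>	/* local_bh_disable()/local_bh_enable() */
#include <linux/preempt.h>	/* preempt_disable()/preempt_enable() */

/* Claim ownership of the VFP unit; same shape as the helper added above. */
static void vfp_state_hold(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_disable();	/* also disables preemption on !RT */
	else
		preempt_disable();	/* softirqs run in thread context on RT */
}

static void vfp_state_release(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_enable();
	else
		preempt_enable();
}

/* Hypothetical caller, for illustration only: hold and release must pair up. */
static void vfp_example_consumer(void)
{
	vfp_state_hold();
	/* ... this CPU's VFP/NEON register state may be touched here ... */
	vfp_state_release();
}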