Commit 61eece2d authored by Christophe Leroy, committed by Michael Ellerman

powerpc/interrupt: Refactor prep_irq_for_{user/kernel_enabled}_exit()


prep_irq_for_user_exit() is a superset of
prep_irq_for_kernel_enabled_exit().

Rename prep_irq_for_kernel_enabled_exit() as prep_irq_for_enabled_exit()
and have prep_irq_for_user_exit() use it.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210617155116.2167984-17-npiggin@gmail.com
parent 99f98f84
@@ -50,7 +50,7 @@ static inline bool exit_must_hard_disable(void)
  * restartable is true then EE/RI can be left on because interrupts are handled
  * with a restart sequence.
  */
-static notrace __always_inline bool prep_irq_for_kernel_enabled_exit(bool restartable)
+static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
 {
 	/* This must be done with RI=1 because tracing may touch vmaps */
 	trace_hardirqs_on();
@@ -77,29 +77,14 @@ static notrace __always_inline bool prep_irq_for_kernel_enabled_exit(bool restar
 static notrace __always_inline bool prep_irq_for_user_exit(void)
 {
-	user_enter_irqoff();
-	/* This must be done with RI=1 because tracing may touch vmaps */
-	trace_hardirqs_on();
-
-#ifdef CONFIG_PPC32
-	__hard_EE_RI_disable();
-#else
-	if (exit_must_hard_disable())
-		__hard_EE_RI_disable();
-
-	/* This pattern matches prep_irq_for_idle */
-	if (unlikely(lazy_irq_pending_nocheck())) {
-		if (exit_must_hard_disable()) {
-			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-			__hard_RI_enable();
-		}
-		trace_hardirqs_off();
-		user_exit_irqoff();
-		return false;
-	}
-#endif
-	return true;
+	bool ret;
+
+	user_enter_irqoff();
+	ret = prep_irq_for_enabled_exit(true);
+	if (!ret)
+		user_exit_irqoff();
+
+	return ret;
 }
 
 /* Has to run notrace because it is entered not completely "reconciled" */
@@ -469,7 +454,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
 	 * Stack store exit can't be restarted because the interrupt
 	 * stack frame might have been clobbered.
 	 */
-	if (!prep_irq_for_kernel_enabled_exit(unlikely(stack_store))) {
+	if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
 		/*
 		 * Replay pending soft-masked interrupts now. Don't
 		 * just local_irq_enabe(); local_irq_disable(); because
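
For readers skimming the diff, here is a small standalone C sketch of the resulting control flow. Everything except the body of prep_irq_for_user_exit() is a made-up stub for illustration (fake_pending_irq and the printed messages do not exist in the kernel); only the wrapper's structure and its failure handling are taken from the patch above.

/*
 * Toy model of the refactor: prep_irq_for_user_exit() becomes a thin
 * wrapper around prep_irq_for_enabled_exit(). All helpers here are stubs;
 * only the wrapper's call structure mirrors the actual patch.
 */
#include <stdbool.h>
#include <stdio.h>

static bool fake_pending_irq;  /* stand-in for a pending soft-masked interrupt */

static void user_enter_irqoff(void) { puts("enter user context (irqs off)"); }
static void user_exit_irqoff(void)  { puts("back out of user context"); }

/* Stub for the renamed superset helper: false means the exit must be retried. */
static bool prep_irq_for_enabled_exit(bool restartable)
{
	(void)restartable;
	puts("trace_hardirqs_on / hard-disable as needed");
	return !fake_pending_irq;
}

/* After the patch: delegate, and undo the user-context switch only on failure. */
static bool prep_irq_for_user_exit(void)
{
	bool ret;

	user_enter_irqoff();
	ret = prep_irq_for_enabled_exit(true);
	if (!ret)
		user_exit_irqoff();

	return ret;
}

int main(void)
{
	fake_pending_irq = false;
	printf("first attempt succeeds: %d\n", prep_irq_for_user_exit());

	fake_pending_irq = true;
	printf("second attempt must retry: %d\n", prep_irq_for_user_exit());
	return 0;
}

The point the sketch tries to make is that user_exit_irqoff() on the failure path is the only user-specific cleanup, so the rest of the exit preparation can live in the shared helper used by both the user and kernel exit paths.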