mirror of
https://github.com/torvalds/linux.git
synced 2026-01-12 00:42:35 +08:00
powerpc/32: Restore disabling of interrupts at interrupt/syscall exit
Commit 2997876c4a ("powerpc/32: Restore clearing of MSR[RI] at interrupt/syscall exit") delayed clearing of MSR[RI], but missed that both MSR[RI] and MSR[EE] are cleared at the same time, so the commit also delayed the disabling of interrupts, leading to unexpected behaviour. To fix that, mostly revert the blamed commit and restore the clearing of MSR[RI] in interrupt_exit_kernel_prepare() instead. For 8xx it implies adding a synchronising instruction after the mtspr in order to make sure no instruction counter interrupt (used for perf events) will fire just after clearing MSR[RI]. Reported-by: Christian Zigotzky <chzigotzky@xenosoft.de> Closes: https://lore.kernel.org/all/4d0bd05d-6158-1323-3509-744d3fbe8fc7@xenosoft.de/ Reported-by: Guenter Roeck <linux@roeck-us.net> Closes: https://lore.kernel.org/all/6b05eb1c-fdef-44e0-91a7-8286825e68f1@roeck-us.net/ Fixes: 2997876c4a ("powerpc/32: Restore clearing of MSR[RI] at interrupt/syscall exit") Signed-off-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org> Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com> Link: https://patch.msgid.link/585ea521b2be99d293b539bbfae148366cfb3687.1766146895.git.chleroy@kernel.org
This commit is contained in:
committed by
Madhavan Srinivasan
parent
fbe409d138
commit
608328ba5b
@@ -90,7 +90,7 @@ static inline void __hard_EE_RI_disable(void)
|
||||
if (IS_ENABLED(CONFIG_BOOKE))
|
||||
wrtee(0);
|
||||
else if (IS_ENABLED(CONFIG_PPC_8xx))
|
||||
wrtspr(SPRN_NRI);
|
||||
wrtspr_sync(SPRN_NRI);
|
||||
else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
|
||||
__mtmsrd(0, 1);
|
||||
else
|
||||
|
||||
@@ -1400,6 +1400,7 @@ static inline void mtmsr_isync(unsigned long val)
|
||||
: "r" ((unsigned long)(v)) \
|
||||
: "memory")
|
||||
#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",2" : : : "memory")
|
||||
#define wrtspr_sync(rn) asm volatile("mtspr " __stringify(rn) ",2; sync" : : : "memory")
|
||||
|
||||
static inline void wrtee(unsigned long val)
|
||||
{
|
||||
|
||||
@@ -101,17 +101,6 @@ SYM_FUNC_END(__kuep_unlock)
|
||||
.endm
|
||||
#endif
|
||||
|
||||
.macro clr_ri trash
|
||||
#ifndef CONFIG_BOOKE
|
||||
#ifdef CONFIG_PPC_8xx
|
||||
mtspr SPRN_NRI, \trash
|
||||
#else
|
||||
li \trash, MSR_KERNEL & ~MSR_RI
|
||||
mtmsr \trash
|
||||
#endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.globl transfer_to_syscall
|
||||
transfer_to_syscall:
|
||||
stw r3, ORIG_GPR3(r1)
|
||||
@@ -160,7 +149,6 @@ ret_from_syscall:
|
||||
cmpwi r3,0
|
||||
REST_GPR(3, r1)
|
||||
syscall_exit_finish:
|
||||
clr_ri r4
|
||||
mtspr SPRN_SRR0,r7
|
||||
mtspr SPRN_SRR1,r8
|
||||
|
||||
@@ -237,7 +225,6 @@ fast_exception_return:
|
||||
/* Clear the exception marker on the stack to avoid confusing stacktrace */
|
||||
li r10, 0
|
||||
stw r10, 8(r11)
|
||||
clr_ri r10
|
||||
mtspr SPRN_SRR1,r9
|
||||
mtspr SPRN_SRR0,r12
|
||||
REST_GPR(9, r11)
|
||||
@@ -270,7 +257,6 @@ interrupt_return:
|
||||
.Lfast_user_interrupt_return:
|
||||
lwz r11,_NIP(r1)
|
||||
lwz r12,_MSR(r1)
|
||||
clr_ri r4
|
||||
mtspr SPRN_SRR0,r11
|
||||
mtspr SPRN_SRR1,r12
|
||||
|
||||
@@ -313,7 +299,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
|
||||
cmpwi cr1,r3,0
|
||||
lwz r11,_NIP(r1)
|
||||
lwz r12,_MSR(r1)
|
||||
clr_ri r4
|
||||
mtspr SPRN_SRR0,r11
|
||||
mtspr SPRN_SRR1,r12
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ static inline bool exit_must_hard_disable(void)
|
||||
#else
|
||||
static inline bool exit_must_hard_disable(void)
|
||||
{
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -443,6 +443,9 @@ again:
|
||||
|
||||
if (unlikely(stack_store))
|
||||
__hard_EE_RI_disable();
|
||||
#else
|
||||
} else {
|
||||
__hard_EE_RI_disable();
|
||||
#endif /* CONFIG_PPC64 */
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user