mirror of
https://github.com/torvalds/linux.git
Merge tag 'riscv-for-linus-6.3-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V fixes from Palmer Dabbelt:

 - fixes to the ASID allocator to avoid leaking stale mappings between
   tasks

 - fix the vmalloc fault handler to tolerate huge pages

* tag 'riscv-for-linus-6.3-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  RISC-V: mm: Support huge page in vmalloc_fault()
  riscv: asid: Fixup stale TLB entry cause application crash
  Revert "riscv: mm: notify remote harts about mmu cache updates"

arch/riscv/include/asm/mmu.h
@@ -19,8 +19,6 @@ typedef struct {
 #ifdef CONFIG_SMP
 	/* A local icache flush is needed before user execution can resume. */
 	cpumask_t icache_stale_mask;
-	/* A local tlb flush is needed before user execution can resume. */
-	cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;

arch/riscv/include/asm/tlbflush.h
@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma x0, %0"
-			:
-			: "r" (asid)
-			: "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-		unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma %0, %1"
-			:
-			: "r" (addr), "r" (asid)
-			: "memory");
-}
-
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()			do { } while (0)
 #define local_flush_tlb_page(addr)		do { } while (0)
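
The two helpers removed from the header above are thin wrappers around sfence.vma. As a quick reference, the following standalone sketch (plain C, invented names, not kernel code) spells out roughly what each operand combination of sfence.vma invalidates, per the RISC-V privileged spec; the comments map them back to the kernel helpers.

/*
 * Illustrative sketch only: which TLB entries each sfence.vma form covers.
 */
#include <stdbool.h>
#include <stdio.h>

static const char *sfence_scope(bool have_addr, bool have_asid)
{
	if (!have_addr && !have_asid)
		return "all entries, all address spaces";    /* sfence.vma            */
	if (!have_addr)
		return "all entries tagged with one ASID";    /* sfence.vma x0, asid   */
	if (!have_asid)
		return "one page, all address spaces";        /* sfence.vma addr       */
	return "one page within one ASID";                    /* sfence.vma addr, asid */
}

int main(void)
{
	printf("%s\n", sfence_scope(false, true));  /* local_flush_tlb_all_asid()  */
	printf("%s\n", sfence_scope(true, true));   /* local_flush_tlb_page_asid() */
	return 0;
}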

arch/riscv/mm/context.c
@@ -196,16 +196,6 @@ switch_mm_fast:
 
 	if (need_flush_tlb)
 		local_flush_tlb_all();
-#ifdef CONFIG_SMP
-	else {
-		cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-		if (cpumask_test_cpu(cpu, mask)) {
-			cpumask_clear_cpu(cpu, mask);
-			local_flush_tlb_all_asid(cntx & asid_mask);
-		}
-	}
-#endif
 }
 
 static void set_mm_noasid(struct mm_struct *mm)
@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
 	local_flush_tlb_all();
 }
 
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+			  struct mm_struct *next, unsigned int cpu)
 {
-	if (static_branch_unlikely(&use_asid_allocator))
-		set_mm_asid(mm, cpu);
-	else
-		set_mm_noasid(mm);
+	/*
+	 * The mm_cpumask indicates which harts' TLBs contain the virtual
+	 * address mapping of the mm. Compared to noasid, using asid
+	 * can't guarantee that stale TLB entries are invalidated because
+	 * the asid mechanism wouldn't flush TLB for every switch_mm for
+	 * performance. So when using asid, keep all CPUs footmarks in
+	 * cpumask() until mm reset.
+	 */
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+	if (static_branch_unlikely(&use_asid_allocator)) {
+		set_mm_asid(next, cpu);
+	} else {
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+		set_mm_noasid(next);
+	}
 }
 
 static int __init asids_init(void)
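
The block comment added to set_mm() is the heart of the ASID fix: a hart that has ever run an mm may still hold TLB entries tagged with that mm's ASID, so it must remain a flush target until the mm is torn down. Below is a minimal userspace simulation of that reasoning; all names (toy_tlb, mm_mask, and so on) are invented, and this is an illustration, not the kernel implementation.

/*
 * Hypothetical sketch: each "CPU" has a tiny ASID-tagged TLB that is NOT
 * flushed on context switch, and mm_mask[] models mm_cpumask().
 */
#include <stdbool.h>
#include <stdio.h>

#define NCPU     2
#define TLB_SIZE 4

struct tlb_entry { bool valid; unsigned asid, vpn, ppn; };
static struct tlb_entry tlb[NCPU][TLB_SIZE];
static bool mm_mask[NCPU];		/* harts whose TLB may cache this mm */

static void tlb_fill(int cpu, unsigned asid, unsigned vpn, unsigned ppn)
{
	for (int i = 0; i < TLB_SIZE; i++)
		if (!tlb[cpu][i].valid) {
			tlb[cpu][i] = (struct tlb_entry){ true, asid, vpn, ppn };
			return;
		}
}

/* local "sfence.vma x0, asid": drop every entry tagged with this ASID */
static void local_flush_asid(int cpu, unsigned asid)
{
	for (int i = 0; i < TLB_SIZE; i++)
		if (tlb[cpu][i].valid && tlb[cpu][i].asid == asid)
			tlb[cpu][i].valid = false;
}

/* remote flush: only harts still present in the mm's mask are reached */
static void flush_mm_asid(unsigned asid)
{
	for (int cpu = 0; cpu < NCPU; cpu++)
		if (mm_mask[cpu])
			local_flush_asid(cpu, asid);
}

static bool has_stale(int cpu, unsigned asid, unsigned vpn)
{
	for (int i = 0; i < TLB_SIZE; i++)
		if (tlb[cpu][i].valid && tlb[cpu][i].asid == asid &&
		    tlb[cpu][i].vpn == vpn)
			return true;
	return false;
}

int main(void)
{
	unsigned asid = 1, vpn = 0x10, ppn = 0x99;

	/* CPU0 runs the mm and caches a translation. */
	mm_mask[0] = true;
	tlb_fill(0, asid, vpn, ppn);

	/* CPU0 switches away.  With ASIDs nothing is flushed here.
	 * Buggy variant: also drop CPU0 from the mask on switch-out. */
	bool buggy_clear_on_switch = true;
	if (buggy_clear_on_switch)
		mm_mask[0] = false;

	/* The mm, now on CPU1, unmaps vpn and flushes all harts in the mask. */
	mm_mask[1] = true;
	flush_mm_asid(asid);

	printf("stale entry left on CPU0: %s\n",
	       has_stale(0, asid, vpn) ? "yes (bug)" : "no");
	return 0;
}

Flipping buggy_clear_on_switch to false keeps CPU0 in the mask, the flush reaches it, and no stale translation survives; that is exactly the behaviour the new set_mm() preserves for the ASID case.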
@@ -274,7 +276,8 @@ static int __init asids_init(void)
 }
 early_initcall(asids_init);
 #else
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+			  struct mm_struct *next, unsigned int cpu)
 {
 	/* Nothing to do here when there is no MMU */
 }
@@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 */
 	cpu = smp_processor_id();
 
-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
-	cpumask_set_cpu(cpu, mm_cpumask(next));
-
-	set_mm(next, cpu);
+	set_mm(prev, next, cpu);
 
 	flush_icache_deferred(next, cpu);
 }

arch/riscv/mm/fault.c
@@ -143,6 +143,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
 		no_context(regs, addr);
 		return;
 	}
+	if (pud_leaf(*pud_k))
+		goto flush_tlb;
 
 	/*
 	 * Since the vmalloc area is global, it is unnecessary
@@ -153,6 +155,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
 		no_context(regs, addr);
 		return;
 	}
+	if (pmd_leaf(*pmd_k))
+		goto flush_tlb;
 
 	/*
 	 * Make sure the actual PTE exists as well to
@@ -172,6 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
 	 * ordering constraint, not a cache flush; it is
 	 * necessary even after writing invalid entries.
 	 */
+flush_tlb:
 	local_flush_tlb_page(addr);
 }
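
The vmalloc_fault() change stops the software page-table walk as soon as a leaf entry is found at the PUD or PMD level, since a huge mapping has no lower-level table to validate. A self-contained sketch of the same walk-and-stop idea over a toy three-level table (structure and field names invented for illustration, not the kernel's page-table API):

/*
 * Illustrative userspace sketch: stop walking once a "leaf" (huge)
 * entry is met, mirroring the pud_leaf()/pmd_leaf() early-outs above.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_entry {
	bool present;
	bool leaf;			/* maps a huge page directly at this level */
	struct toy_entry *next;		/* next-level table when !leaf */
};

/* Returns true when a valid mapping (of any size) covers the address. */
static bool toy_walk(struct toy_entry *pud)
{
	if (!pud->present)
		return false;
	if (pud->leaf)			/* 1 GiB-style mapping: nothing below it */
		return true;

	struct toy_entry *pmd = pud->next;
	if (!pmd->present)
		return false;
	if (pmd->leaf)			/* 2 MiB-style mapping */
		return true;

	struct toy_entry *pte = pmd->next;
	return pte->present;		/* base page */
}

int main(void)
{
	struct toy_entry pte = { .present = true };
	struct toy_entry pmd = { .present = true, .leaf = true, .next = &pte };
	struct toy_entry pud = { .present = true, .next = &pmd };

	/* A walker without the leaf check would treat pmd.next as another
	 * table and misjudge the huge mapping; with the check we stop here. */
	printf("mapped: %s\n", toy_walk(&pud) ? "yes" : "no");
	return 0;
}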

arch/riscv/mm/tlbflush.c
@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+		unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma %0, %1"
+			:
+			: "r" (addr), "r" (asid)
+			: "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 				  unsigned long size, unsigned long stride)
 {
-	struct cpumask *pmask = &mm->context.tlb_stale_mask;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
 	bool broadcast;
@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	if (static_branch_unlikely(&use_asid_allocator)) {
 		unsigned long asid = atomic_long_read(&mm->context.id);
 
-		/*
-		 * TLB will be immediately flushed on harts concurrently
-		 * executing this MM context. TLB flush on other harts
-		 * is deferred until this MM context migrates there.
-		 */
-		cpumask_setall(pmask);
-		cpumask_clear_cpu(cpuid, pmask);
-		cpumask_andnot(pmask, pmask, cmask);
-
 		if (broadcast) {
 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
 		} else if (size <= stride) {
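
With tlb_stale_mask gone, __sbi_tlb_flush_range() keys its flush decision solely off mm_cpumask(mm): broadcast an ASID-tagged flush through SBI when any other hart may cache the mm, otherwise flush locally, per page for small ranges and whole-ASID for larger ones. A hedged sketch of that decision structure follows; the helper is invented, and the local-flush mapping in the comments is an assumption, since the hunk above is cut off after the size check.

/*
 * Illustrative sketch only: the broadcast-vs-local choice visible in the
 * hunk above, expressed as a standalone function.
 */
#include <stdbool.h>
#include <stdio.h>

enum flush_kind {
	FLUSH_REMOTE_ASID,	/* sbi_remote_sfence_vma_asid() over mm_cpumask */
	FLUSH_LOCAL_PAGE_ASID,	/* assumed: local_flush_tlb_page_asid() */
	FLUSH_LOCAL_ALL_ASID	/* assumed: local_flush_tlb_all_asid() */
};

static enum flush_kind pick_flush(bool other_harts_in_mask,
				  unsigned long size, unsigned long stride)
{
	if (other_harts_in_mask)
		return FLUSH_REMOTE_ASID;
	if (size <= stride)
		return FLUSH_LOCAL_PAGE_ASID;
	return FLUSH_LOCAL_ALL_ASID;
}

int main(void)
{
	/* Single-hart mm flushing one 4 KiB page: stays local and per-page. */
	printf("%d\n", pick_flush(false, 4096, 4096));	/* prints 1 */
	/* Multi-hart mm: must take the SBI broadcast path. */
	printf("%d\n", pick_flush(true, 4096, 4096));	/* prints 0 */
	return 0;
}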