KVM/arm64 changes for 6.17, round #3

 - Invalidate nested MMUs upon freeing the PGD to avoid WARNs when
   visiting them from an MMU notifier

 - Fixes to the TLB match process and TLB invalidation range for
   managing the VNCR pseudo-TLB

 - Prevent SPE from erroneously profiling guests due to UNKNOWN reset
   values in PMSCR_EL1

 - Fix save/restore of host MDCR_EL2 to account for eagerly programming
   it at vcpu_load() on VHE systems

 - Correct lock ordering when dealing with VGIC LPIs, avoiding scenarios
   where an xarray's spinlock was nested with a *raw* spinlock

 - Permit stage-2 read permission aborts which are possible in the case
   of NV depending on the guest hypervisor's stage-2 translation

 - Call raw_spin_unlock() instead of the internal spinlock API

 - Fix parameter ordering when assigning VBAR_EL1

[Pull into kvm/master to fix conflicts. - Paolo]
Paolo Bonzini, 2025-09-30 13:23:06 -04:00
322 changed files with 3024 additions and 2057 deletions


@@ -3222,6 +3222,10 @@ D: AIC5800 IEEE 1394, RAW I/O on 1394
D: Starter of Linux1394 effort
S: ask per mail for current address
N: Boris Pismenny
E: borisp@mellanox.com
D: Kernel TLS implementation and offload support.
N: Nicolas Pitre
E: nico@fluxnic.net
D: StrongARM SA1100 support integrator & hacker
@@ -4168,6 +4172,9 @@ S: 1513 Brewster Dr.
S: Carrollton, TX 75010
S: USA
N: Dave Watson
D: Kernel TLS implementation.
N: Tim Waugh
E: tim@cyberelk.net
D: Co-architect of the parallel-port sharing system


@@ -215,7 +215,7 @@ Spectre_v2 X X
Spectre_v2_user X X * (Note 1)
SRBDS X X X X
SRSO X X X X
SSB (Note 4)
SSB X
TAA X X X X * (Note 2)
TSA X X X X
=============== ============== ============ ============= ============== ============ ========
@@ -229,9 +229,6 @@ Notes:
3 -- Disables SMT if cross-thread mitigations are fully enabled, the CPU is
vulnerable, and STIBP is not supported
4 -- Speculative store bypass is always enabled by default (no kernel
mitigation applied) unless overridden with spec_store_bypass_disable option
When an attack-vector is disabled, all mitigations for the vulnerabilities
listed in the above table are disabled, unless mitigation is required for a
different enabled attack-vector or a mitigation is explicitly selected via a


@@ -60,7 +60,6 @@ properties:
- const: bus
- const: core
- const: vsync
- const: lut
- const: tbu
- const: tbu_rt
# MSM8996 has additional iommu clock


@@ -507,6 +507,8 @@ patternProperties:
description: Espressif Systems Co. Ltd.
"^est,.*":
description: ESTeem Wireless Modems
"^eswin,.*":
description: Beijing ESWIN Technology Group Co. Ltd.
"^ettus,.*":
description: NI Ettus Research
"^eukrea,.*":


@@ -931,13 +931,13 @@ F: Documentation/devicetree/bindings/dma/altr,msgdma.yaml
F: drivers/dma/altera-msgdma.c
ALTERA PIO DRIVER
M: Mun Yew Tham <mun.yew.tham@intel.com>
M: Adrian Ng <adrianhoyin.ng@altera.com>
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-altera.c
ALTERA TRIPLE SPEED ETHERNET DRIVER
M: Joyce Ooi <joyce.ooi@intel.com>
M: Boon Khai Ng <boon.khai.ng@altera.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/altera/
@@ -4205,7 +4205,7 @@ W: http://www.baycom.org/~tom/ham/ham.html
F: drivers/net/hamradio/baycom*
BCACHE (BLOCK LAYER CACHE)
M: Coly Li <colyli@kernel.org>
M: Coly Li <colyli@fnnas.com>
M: Kent Overstreet <kent.overstreet@linux.dev>
L: linux-bcache@vger.kernel.org
S: Maintained
@@ -4216,7 +4216,7 @@ F: drivers/md/bcache/
BCACHEFS
M: Kent Overstreet <kent.overstreet@linux.dev>
L: linux-bcachefs@vger.kernel.org
S: Supported
S: Externally maintained
C: irc://irc.oftc.net/bcache
P: Documentation/filesystems/bcachefs/SubmittingPatches.rst
T: git https://evilpiepirate.org/git/bcachefs.git
@@ -17848,7 +17848,6 @@ F: net/ipv6/syncookies.c
F: net/ipv6/tcp*.c
NETWORKING [TLS]
M: Boris Pismenny <borisp@nvidia.com>
M: John Fastabend <john.fastabend@gmail.com>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
@@ -20878,8 +20877,8 @@ S: Maintained
F: drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
QUALCOMM RMNET DRIVER
M: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
M: Sean Tranchetti <quic_stranche@quicinc.com>
M: Subash Abhinov Kasiviswanathan <subash.a.kasiviswanathan@oss.qualcomm.com>
M: Sean Tranchetti <sean.tranchetti@oss.qualcomm.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst


@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 17
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@@ -2,8 +2,9 @@
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H
#include <asm/ptrace.h>
#include <linux/llist.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
struct stackframe {
/*


@@ -1160,115 +1160,8 @@ u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
__v; \
})
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
/*
* *** VHE ONLY ***
*
* System registers listed in the switch are not saved on every
* exit from the guest but are only saved on vcpu_put.
*
* SYSREGS_ON_CPU *MUST* be checked before using this helper.
*
* Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
* should never be listed below, because the guest cannot modify its
* own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
* thread when emulating cross-VCPU communication.
*/
if (!has_vhe())
return false;
switch (reg) {
case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break;
case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break;
case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
case TCR_EL1: *val = read_sysreg_s(SYS_TCR_EL12); break;
case TCR2_EL1: *val = read_sysreg_s(SYS_TCR2_EL12); break;
case PIR_EL1: *val = read_sysreg_s(SYS_PIR_EL12); break;
case PIRE0_EL1: *val = read_sysreg_s(SYS_PIRE0_EL12); break;
case POR_EL1: *val = read_sysreg_s(SYS_POR_EL12); break;
case ESR_EL1: *val = read_sysreg_s(SYS_ESR_EL12); break;
case AFSR0_EL1: *val = read_sysreg_s(SYS_AFSR0_EL12); break;
case AFSR1_EL1: *val = read_sysreg_s(SYS_AFSR1_EL12); break;
case FAR_EL1: *val = read_sysreg_s(SYS_FAR_EL12); break;
case MAIR_EL1: *val = read_sysreg_s(SYS_MAIR_EL12); break;
case VBAR_EL1: *val = read_sysreg_s(SYS_VBAR_EL12); break;
case CONTEXTIDR_EL1: *val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
case TPIDR_EL0: *val = read_sysreg_s(SYS_TPIDR_EL0); break;
case TPIDRRO_EL0: *val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
case TPIDR_EL1: *val = read_sysreg_s(SYS_TPIDR_EL1); break;
case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break;
case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break;
case SPSR_EL1: *val = read_sysreg_s(SYS_SPSR_EL12); break;
case PAR_EL1: *val = read_sysreg_par(); break;
case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break;
case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
case ZCR_EL1: *val = read_sysreg_s(SYS_ZCR_EL12); break;
case SCTLR2_EL1: *val = read_sysreg_s(SYS_SCTLR2_EL12); break;
default: return false;
}
return true;
}
static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
/*
* *** VHE ONLY ***
*
* System registers listed in the switch are not restored on every
* entry to the guest but are only restored on vcpu_load.
*
* SYSREGS_ON_CPU *MUST* be checked before using this helper.
*
* Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
* should never be listed below, because the MPIDR should only be set
* once, before running the VCPU, and never changed later.
*/
if (!has_vhe())
return false;
switch (reg) {
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
default: return false;
}
return true;
}
u64 vcpu_read_sys_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
void vcpu_write_sys_reg(struct kvm_vcpu *, u64, enum vcpu_sysreg);
struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
@@ -1476,6 +1369,7 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
}
void kvm_init_host_debug_data(void);
void kvm_debug_init_vhe(void);
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu);


@@ -180,6 +180,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
phys_addr_t pa, unsigned long size, bool writable);
int kvm_handle_guest_sea(struct kvm_vcpu *vcpu);
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);


@@ -1,25 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018 - Arm Ltd */
#ifndef __ARM64_KVM_RAS_H__
#define __ARM64_KVM_RAS_H__
#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/acpi.h>
/*
* Was this synchronous external abort a RAS notification?
* Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
*/
static inline int kvm_handle_guest_sea(void)
{
/* apei_claim_sea(NULL) expects to mask interrupts itself */
lockdep_assert_irqs_enabled();
return apei_claim_sea(NULL);
}
#endif /* __ARM64_KVM_RAS_H__ */


@@ -17,6 +17,13 @@
#include <linux/refcount.h>
#include <asm/cpufeature.h>
enum pgtable_type {
TABLE_PTE,
TABLE_PMD,
TABLE_PUD,
TABLE_P4D,
};
typedef struct {
atomic64_t id;
#ifdef CONFIG_COMPAT


@@ -1142,9 +1142,6 @@
#define ARM64_FEATURE_FIELD_BITS 4
/* Defined for compatibility only, do not add new users. */
#define ARM64_FEATURE_MASK(x) (x##_MASK)
#ifdef __ASSEMBLY__
.macro mrs_s, rt, sreg


@@ -84,6 +84,7 @@
#include <asm/hwcap.h>
#include <asm/insn.h>
#include <asm/kvm_host.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/hypervisor.h>
@@ -1945,11 +1946,11 @@ static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
extern
void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(int), int flags);
phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags);
static phys_addr_t __initdata kpti_ng_temp_alloc;
static phys_addr_t __init kpti_ng_pgd_alloc(int shift)
static phys_addr_t __init kpti_ng_pgd_alloc(enum pgtable_type type)
{
kpti_ng_temp_alloc -= PAGE_SIZE;
return kpti_ng_temp_alloc;
@@ -2269,6 +2270,24 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
/* Firmware may have left a deferred SError in this register. */
write_sysreg_s(0, SYS_DISR_EL1);
}
static bool has_rasv1p1(const struct arm64_cpu_capabilities *__unused, int scope)
{
const struct arm64_cpu_capabilities rasv1p1_caps[] = {
{
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, V1P1)
},
{
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP)
},
{
ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, RAS_frac, RASv1p1)
},
};
return (has_cpuid_feature(&rasv1p1_caps[0], scope) ||
(has_cpuid_feature(&rasv1p1_caps[1], scope) &&
has_cpuid_feature(&rasv1p1_caps[2], scope)));
}
#endif /* CONFIG_ARM64_RAS_EXTN */
#ifdef CONFIG_ARM64_PTR_AUTH
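The has_rasv1p1() matcher added above accepts two shapes of hardware: either ID_AA64PFR0_EL1.RAS reports v1.1 directly, or it reports the base RAS level and ID_AA64PFR1_EL1.RAS_frac upgrades it to v1.1. A standalone sketch of that check, assuming simplified field encodings in place of the real cpufeature matchers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the ID register field encodings. */
    enum { RAS_NI = 0, RAS_IMP = 1, RAS_V1P1 = 2 };
    enum { RAS_FRAC_NI = 0, RAS_FRAC_RASV1P1 = 1 };

    /* RASv1p1: either reported directly, or base RAS plus the frac field. */
    static bool rasv1p1_present(unsigned int ras, unsigned int ras_frac)
    {
            return ras >= RAS_V1P1 ||
                   (ras == RAS_IMP && ras_frac >= RAS_FRAC_RASV1P1);
    }

    int main(void)
    {
            printf("%d\n", rasv1p1_present(RAS_IMP, RAS_FRAC_RASV1P1)); /* 1 */
            printf("%d\n", rasv1p1_present(RAS_IMP, RAS_FRAC_NI));      /* 0 */
            return 0;
    }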
@@ -2687,6 +2706,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_clear_disr,
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP)
},
{
.desc = "RASv1p1 Extension Support",
.capability = ARM64_HAS_RASV1P1_EXTN,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_rasv1p1,
},
#endif /* CONFIG_ARM64_RAS_EXTN */
#ifdef CONFIG_ARM64_AMU_EXTN
{


@@ -2113,8 +2113,10 @@ static void cpu_hyp_init_features(void)
{
cpu_set_hyp_vector();
if (is_kernel_in_hyp_mode())
if (is_kernel_in_hyp_mode()) {
kvm_timer_init_vhe();
kvm_debug_init_vhe();
}
if (vgic_present)
kvm_vgic_init_cpu_hardware();
@@ -2408,12 +2410,12 @@ static u64 get_hyp_id_aa64pfr0_el1(void)
*/
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
val &= ~(ID_AA64PFR0_EL1_CSV2 |
ID_AA64PFR0_EL1_CSV3);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV2,
arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV3,
arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
return val;


@@ -1420,10 +1420,10 @@ void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
return;
/*
* If we only have a single stage of translation (E2H=0 or
* TGE=1), exit early. Same thing if {VM,DC}=={0,0}.
* If we only have a single stage of translation (EL2&0), exit
* early. Same thing if {VM,DC}=={0,0}.
*/
if (!vcpu_el2_e2h_is_set(vcpu) || vcpu_el2_tge_is_set(vcpu) ||
if (compute_translation_regime(vcpu, op) == TR_EL20 ||
!(vcpu_read_sys_reg(vcpu, HCR_EL2) & (HCR_VM | HCR_DC)))
return;


@@ -96,6 +96,13 @@ void kvm_init_host_debug_data(void)
}
}
void kvm_debug_init_vhe(void)
{
/* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1)))
write_sysreg_el1(0, SYS_PMSCR);
}
/*
* Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
* has taken over MDSCR_EL1.
@@ -138,6 +145,9 @@ void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
/* Must be called before kvm_vcpu_load_vhe() */
KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm);
if (has_vhe())
*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
/*
* Determine which of the possible debug states we're in:
*
@@ -184,6 +194,9 @@ void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu)
{
if (has_vhe())
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
return;
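On VHE the guest's MDCR_EL2 is now programmed eagerly at vcpu_load(), so the hunks above snapshot the host's MDCR_EL2 in kvm_vcpu_load_debug() and write it back in kvm_vcpu_put_debug() rather than saving and restoring it around every guest entry. A minimal sketch of that load/put pairing, with plain variables standing in for the real per-CPU host data and sysreg accessors:

    #include <stdint.h>

    static uint64_t cpu_mdcr_el2;          /* pretend hardware register */
    static uint64_t host_saved_mdcr_el2;   /* per-CPU host snapshot */

    static void vcpu_load_debug_sketch(uint64_t guest_mdcr_el2)
    {
            /* Save the host value once per load... */
            host_saved_mdcr_el2 = cpu_mdcr_el2;
            /* ...because the guest view is installed eagerly on VHE. */
            cpu_mdcr_el2 = guest_mdcr_el2;
    }

    static void vcpu_put_debug_sketch(void)
    {
            /* Put back exactly what the host had at load time. */
            cpu_mdcr_el2 = host_saved_mdcr_el2;
    }

    int main(void)
    {
            cpu_mdcr_el2 = 0x1111;           /* host configuration */
            vcpu_load_debug_sketch(0x2222);  /* run the guest... */
            vcpu_put_debug_sketch();         /* ...then restore the host value */
            return cpu_mdcr_el2 == 0x1111 ? 0 : 1;
    }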


@@ -2833,7 +2833,7 @@ int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
iabt ? ESR_ELx_EC_IABT_LOW : ESR_ELx_EC_DABT_LOW);
esr |= ESR_ELx_FSC_EXTABT | ESR_ELx_IL;
vcpu_write_sys_reg(vcpu, FAR_EL2, addr);
vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
if (__vcpu_sys_reg(vcpu, SCTLR2_EL2) & SCTLR2_EL1_EASE)
return kvm_inject_nested(vcpu, esr, except_type_serror);


@@ -22,36 +22,28 @@
static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
u64 val;
if (unlikely(vcpu_has_nv(vcpu)))
if (has_vhe())
return vcpu_read_sys_reg(vcpu, reg);
else if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
__vcpu_read_sys_reg_from_cpu(reg, &val))
return val;
return __vcpu_sys_reg(vcpu, reg);
}
static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
if (unlikely(vcpu_has_nv(vcpu)))
if (has_vhe())
vcpu_write_sys_reg(vcpu, val, reg);
else if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU) ||
!__vcpu_write_sys_reg_to_cpu(val, reg))
else
__vcpu_assign_sys_reg(vcpu, reg, val);
}
static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
u64 val)
{
if (unlikely(vcpu_has_nv(vcpu))) {
if (has_vhe()) {
if (target_mode == PSR_MODE_EL1h)
vcpu_write_sys_reg(vcpu, val, SPSR_EL1);
else
vcpu_write_sys_reg(vcpu, val, SPSR_EL2);
} else if (has_vhe()) {
write_sysreg_el1(val, SYS_SPSR);
} else {
__vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
}
@@ -59,7 +51,7 @@ static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
{
if (has_vhe())
if (has_vhe() && vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
write_sysreg(val, spsr_abt);
else
vcpu->arch.ctxt.spsr_abt = val;
@@ -67,7 +59,7 @@ static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
{
if (has_vhe())
if (has_vhe() && vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
write_sysreg(val, spsr_und);
else
vcpu->arch.ctxt.spsr_und = val;


@@ -431,9 +431,6 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
}
*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
u64 hcrx = vcpu->arch.hcrx_el2;
if (is_nested_ctxt(vcpu)) {
@@ -454,8 +451,6 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
write_sysreg(0, hstr_el2);
if (system_supports_pmuv3()) {
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);


@@ -17,7 +17,7 @@ static inline __must_check bool nvhe_check_data_corruption(bool v)
bool corruption = unlikely(condition); \
if (corruption) { \
if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
BUG_ON(1); \
BUG(); \
} else \
WARN_ON(1); \
} \


@@ -50,6 +50,10 @@ extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
static void __activate_traps(struct kvm_vcpu *vcpu)
{
___activate_traps(vcpu, vcpu->arch.hcr_el2);
*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
__activate_traps_common(vcpu);
__activate_cptr_traps(vcpu);
@@ -93,6 +97,8 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
isb();
}
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
__deactivate_traps_common(vcpu);
write_sysreg_hcr(this_cpu_ptr(&kvm_init_params)->hcr_el2);


@@ -253,6 +253,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
__vcpu_assign_sys_reg(vcpu, VBAR_EL1, read_sysreg_el1(SYS_VBAR));
kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
@@ -372,6 +373,9 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
/* Debug and Trace Registers are restricted. */
/* Group 1 ID registers */
HOST_HANDLED(SYS_REVIDR_EL1),
/* AArch64 mappings of the AArch32 ID registers */
/* CRm=1 */
AARCH32(SYS_ID_PFR0_EL1),
@@ -460,6 +464,7 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
HOST_HANDLED(SYS_CCSIDR_EL1),
HOST_HANDLED(SYS_CLIDR_EL1),
HOST_HANDLED(SYS_AIDR_EL1),
HOST_HANDLED(SYS_CSSELR_EL1),
HOST_HANDLED(SYS_CTR_EL0),


@@ -20,7 +20,7 @@ static bool __is_be(struct kvm_vcpu *vcpu)
if (vcpu_mode_is_32bit(vcpu))
return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT);
return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
return !!(read_sysreg_el1(SYS_SCTLR) & SCTLR_ELx_EE);
}
/*


@@ -43,8 +43,11 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
*
* - API/APK: they are already accounted for by vcpu_load(), and can
* only take effect across a load/put cycle (such as ERET)
*
* - FIEN: no way we let a guest have access to the RAS "Common Fault
* Injection" thing, whatever that does
*/
#define NV_HCR_GUEST_EXCLUDE (HCR_TGE | HCR_API | HCR_APK)
#define NV_HCR_GUEST_EXCLUDE (HCR_TGE | HCR_API | HCR_APK | HCR_FIEN)
static u64 __compute_hcr(struct kvm_vcpu *vcpu)
{


@@ -4,19 +4,20 @@
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/acpi.h>
#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/acpi.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>
@@ -1073,6 +1074,10 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
mmu->pgt = NULL;
free_percpu(mmu->last_vcpu_ran);
}
if (kvm_is_nested_s2_mmu(kvm, mmu))
kvm_init_nested_s2_mmu(mmu);
write_unlock(&kvm->mmu_lock);
if (pgt) {
@@ -1892,6 +1897,19 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
read_unlock(&vcpu->kvm->mmu_lock);
}
int kvm_handle_guest_sea(struct kvm_vcpu *vcpu)
{
/*
* Give APEI the opportunity to claim the abort before handling it
* within KVM. apei_claim_sea() expects to be called with IRQs enabled.
*/
lockdep_assert_irqs_enabled();
if (apei_claim_sea(NULL) == 0)
return 1;
return kvm_inject_serror(vcpu);
}
/**
* kvm_handle_guest_abort - handles all 2nd stage aborts
* @vcpu: the VCPU pointer
@@ -1915,17 +1933,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
gfn_t gfn;
int ret, idx;
/* Synchronous External Abort? */
if (kvm_vcpu_abt_issea(vcpu)) {
/*
* For RAS the host kernel may handle this abort.
* There is no need to pass the error into the guest.
*/
if (kvm_handle_guest_sea())
return kvm_inject_serror(vcpu);
return 1;
}
if (kvm_vcpu_abt_issea(vcpu))
return kvm_handle_guest_sea(vcpu);
esr = kvm_vcpu_get_esr(vcpu);


@@ -847,7 +847,7 @@ static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end)
ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
vt->wr.level));
ipa_start = vt->wr.pa & (ipa_size - 1);
ipa_start = vt->wr.pa & ~(ipa_size - 1);
ipa_end = ipa_start + ipa_size;
if (ipa_end <= start || ipa_start >= end)
@@ -887,7 +887,7 @@ static void invalidate_vncr_va(struct kvm *kvm,
va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
vt->wr.level));
va_start = vt->gva & (va_size - 1);
va_start = vt->gva & ~(va_size - 1);
va_end = va_start + va_size;
switch (scope->type) {
@@ -1292,7 +1292,7 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
!(tcr & TCR_ASID16))
asid &= GENMASK(7, 0);
return asid != vt->wr.asid;
return asid == vt->wr.asid;
}
return true;
@@ -1303,7 +1303,10 @@ int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu)
struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
u64 esr = kvm_vcpu_get_esr(vcpu);
BUG_ON(!(esr & ESR_ELx_VNCR_SHIFT));
WARN_ON_ONCE(!(esr & ESR_ELx_VNCR));
if (kvm_vcpu_abt_issea(vcpu))
return kvm_handle_guest_sea(vcpu);
if (esr_fsc_is_permission_fault(esr)) {
inject_vncr_perm(vcpu);
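Two of the pseudo-TLB fixes are visible above: the base of the range covered by a cached entry must be computed by aligning down with & ~(size - 1) (the previous & (size - 1) yields only the offset within the block), and a lookup matches only when the ASIDs are equal rather than when they differ. A tiny standalone check of the masking difference, using an arbitrary address and a 2MiB block size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t pa = 0x40012345;   /* arbitrary address */
            uint64_t size = 0x200000;   /* e.g. a 2MiB block */

            /* Old expression: keeps only the offset inside the block. */
            printf("offset: 0x%llx\n", (unsigned long long)(pa & (size - 1)));

            /* Fixed expression: aligns down to the start of the block. */
            printf("base:   0x%llx\n", (unsigned long long)(pa & ~(size - 1)));
            return 0;
    }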


@@ -82,43 +82,105 @@ static bool write_to_read_only(struct kvm_vcpu *vcpu,
"sys_reg write to read-only register");
}
#define PURE_EL2_SYSREG(el2) \
case el2: { \
*el1r = el2; \
return true; \
}
enum sr_loc_attr {
SR_LOC_MEMORY = 0, /* Register definitely in memory */
SR_LOC_LOADED = BIT(0), /* Register on CPU, unless it cannot */
SR_LOC_MAPPED = BIT(1), /* Register in a different CPU register */
SR_LOC_XLATED = BIT(2), /* Register translated to fit another reg */
SR_LOC_SPECIAL = BIT(3), /* Demanding register, implies loaded */
};
#define MAPPED_EL2_SYSREG(el2, el1, fn) \
case el2: { \
*xlate = fn; \
*el1r = el1; \
return true; \
}
struct sr_loc {
enum sr_loc_attr loc;
enum vcpu_sysreg map_reg;
u64 (*xlate)(u64);
};
static bool get_el2_to_el1_mapping(unsigned int reg,
unsigned int *el1r, u64 (**xlate)(u64))
static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
enum vcpu_sysreg reg)
{
switch (reg) {
PURE_EL2_SYSREG( VPIDR_EL2 );
PURE_EL2_SYSREG( VMPIDR_EL2 );
PURE_EL2_SYSREG( ACTLR_EL2 );
PURE_EL2_SYSREG( HCR_EL2 );
PURE_EL2_SYSREG( MDCR_EL2 );
PURE_EL2_SYSREG( HSTR_EL2 );
PURE_EL2_SYSREG( HACR_EL2 );
PURE_EL2_SYSREG( VTTBR_EL2 );
PURE_EL2_SYSREG( VTCR_EL2 );
PURE_EL2_SYSREG( TPIDR_EL2 );
PURE_EL2_SYSREG( HPFAR_EL2 );
PURE_EL2_SYSREG( HCRX_EL2 );
PURE_EL2_SYSREG( HFGRTR_EL2 );
PURE_EL2_SYSREG( HFGWTR_EL2 );
PURE_EL2_SYSREG( HFGITR_EL2 );
PURE_EL2_SYSREG( HDFGRTR_EL2 );
PURE_EL2_SYSREG( HDFGWTR_EL2 );
PURE_EL2_SYSREG( HAFGRTR_EL2 );
PURE_EL2_SYSREG( CNTVOFF_EL2 );
PURE_EL2_SYSREG( CNTHCTL_EL2 );
case SCTLR_EL1:
case CPACR_EL1:
case TTBR0_EL1:
case TTBR1_EL1:
case TCR_EL1:
case TCR2_EL1:
case PIR_EL1:
case PIRE0_EL1:
case POR_EL1:
case ESR_EL1:
case AFSR0_EL1:
case AFSR1_EL1:
case FAR_EL1:
case MAIR_EL1:
case VBAR_EL1:
case CONTEXTIDR_EL1:
case AMAIR_EL1:
case CNTKCTL_EL1:
case ELR_EL1:
case SPSR_EL1:
case ZCR_EL1:
case SCTLR2_EL1:
/*
* EL1 registers which have an ELx2 mapping are loaded if
* we're not in hypervisor context.
*/
return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;
case TPIDR_EL0:
case TPIDRRO_EL0:
case TPIDR_EL1:
case PAR_EL1:
case DACR32_EL2:
case IFSR32_EL2:
case DBGVCR32_EL2:
/* These registers are always loaded, no matter what */
return SR_LOC_LOADED;
default:
/* Non-mapped EL2 registers are by definition in memory. */
return SR_LOC_MEMORY;
}
}
static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
enum vcpu_sysreg reg,
enum vcpu_sysreg map_reg,
u64 (*xlate)(u64),
struct sr_loc *loc)
{
if (!is_hyp_ctxt(vcpu)) {
loc->loc = SR_LOC_MEMORY;
return;
}
loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
loc->map_reg = map_reg;
WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);
if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
loc->loc |= SR_LOC_XLATED;
loc->xlate = xlate;
}
}
#define MAPPED_EL2_SYSREG(r, m, t) \
case r: { \
locate_mapped_el2_register(vcpu, r, m, t, loc); \
break; \
}
static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
struct sr_loc *loc)
{
if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
loc->loc = SR_LOC_MEMORY;
return;
}
switch (reg) {
MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
translate_sctlr_el2_to_sctlr_el1 );
MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
@@ -144,125 +206,189 @@ static bool get_el2_to_el1_mapping(unsigned int reg,
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
case CNTHCTL_EL2:
/* CNTHCTL_EL2 is super special, until we support NV2.1 */
loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
SR_LOC_SPECIAL : SR_LOC_MEMORY);
break;
default:
return false;
loc->loc = locate_direct_register(vcpu, reg);
}
}
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
{
u64 val = 0x8badf00d8badf00d;
u64 (*xlate)(u64) = NULL;
unsigned int el1r;
if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
goto memory_read;
switch (reg) {
case SCTLR_EL1: val = read_sysreg_s(SYS_SCTLR_EL12); break;
case CPACR_EL1: val = read_sysreg_s(SYS_CPACR_EL12); break;
case TTBR0_EL1: val = read_sysreg_s(SYS_TTBR0_EL12); break;
case TTBR1_EL1: val = read_sysreg_s(SYS_TTBR1_EL12); break;
case TCR_EL1: val = read_sysreg_s(SYS_TCR_EL12); break;
case TCR2_EL1: val = read_sysreg_s(SYS_TCR2_EL12); break;
case PIR_EL1: val = read_sysreg_s(SYS_PIR_EL12); break;
case PIRE0_EL1: val = read_sysreg_s(SYS_PIRE0_EL12); break;
case POR_EL1: val = read_sysreg_s(SYS_POR_EL12); break;
case ESR_EL1: val = read_sysreg_s(SYS_ESR_EL12); break;
case AFSR0_EL1: val = read_sysreg_s(SYS_AFSR0_EL12); break;
case AFSR1_EL1: val = read_sysreg_s(SYS_AFSR1_EL12); break;
case FAR_EL1: val = read_sysreg_s(SYS_FAR_EL12); break;
case MAIR_EL1: val = read_sysreg_s(SYS_MAIR_EL12); break;
case VBAR_EL1: val = read_sysreg_s(SYS_VBAR_EL12); break;
case CONTEXTIDR_EL1: val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
case AMAIR_EL1: val = read_sysreg_s(SYS_AMAIR_EL12); break;
case CNTKCTL_EL1: val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
case ELR_EL1: val = read_sysreg_s(SYS_ELR_EL12); break;
case SPSR_EL1: val = read_sysreg_s(SYS_SPSR_EL12); break;
case ZCR_EL1: val = read_sysreg_s(SYS_ZCR_EL12); break;
case SCTLR2_EL1: val = read_sysreg_s(SYS_SCTLR2_EL12); break;
case TPIDR_EL0: val = read_sysreg_s(SYS_TPIDR_EL0); break;
case TPIDRRO_EL0: val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
case TPIDR_EL1: val = read_sysreg_s(SYS_TPIDR_EL1); break;
case PAR_EL1: val = read_sysreg_par(); break;
case DACR32_EL2: val = read_sysreg_s(SYS_DACR32_EL2); break;
case IFSR32_EL2: val = read_sysreg_s(SYS_IFSR32_EL2); break;
case DBGVCR32_EL2: val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
default: WARN_ON_ONCE(1);
}
if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
if (!is_hyp_ctxt(vcpu))
goto memory_read;
return val;
}
static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
{
switch (reg) {
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
default: WARN_ON_ONCE(1);
}
}
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
{
struct sr_loc loc = {};
locate_register(vcpu, reg, &loc);
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
if (loc.loc & SR_LOC_SPECIAL) {
u64 val;
WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
/*
* CNTHCTL_EL2 requires some special treatment to
* account for the bits that can be set via CNTKCTL_EL1.
* CNTHCTL_EL2 requires some special treatment to account
* for the bits that can be set via CNTKCTL_EL1 when E2H==1.
*/
switch (reg) {
case CNTHCTL_EL2:
if (vcpu_el2_e2h_is_set(vcpu)) {
val = read_sysreg_el1(SYS_CNTKCTL);
val &= CNTKCTL_VALID_BITS;
val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
return val;
}
break;
val = read_sysreg_el1(SYS_CNTKCTL);
val &= CNTKCTL_VALID_BITS;
val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
return val;
default:
WARN_ON_ONCE(1);
}
/*
* If this register does not have an EL1 counterpart,
* then read the stored EL2 version.
*/
if (reg == el1r)
goto memory_read;
/*
* If we have a non-VHE guest and that the sysreg
* requires translation to be used at EL1, use the
* in-memory copy instead.
*/
if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
goto memory_read;
/* Get the current version of the EL1 counterpart. */
WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
if (reg >= __SANITISED_REG_START__)
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
return val;
}
/* EL1 register can't be on the CPU if the guest is in vEL2. */
if (unlikely(is_hyp_ctxt(vcpu)))
goto memory_read;
if (loc.loc & SR_LOC_LOADED) {
enum vcpu_sysreg map_reg = reg;
if (__vcpu_read_sys_reg_from_cpu(reg, &val))
return val;
if (loc.loc & SR_LOC_MAPPED)
map_reg = loc.map_reg;
if (!(loc.loc & SR_LOC_XLATED)) {
u64 val = read_sr_from_cpu(map_reg);
if (reg >= __SANITISED_REG_START__)
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
return val;
}
}
memory_read:
return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
{
u64 (*xlate)(u64) = NULL;
unsigned int el1r;
struct sr_loc loc = {};
if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
goto memory_write;
locate_register(vcpu, reg, &loc);
if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
if (!is_hyp_ctxt(vcpu))
goto memory_write;
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
/*
* Always store a copy of the write to memory to avoid having
* to reverse-translate virtual EL2 system registers for a
* non-VHE guest hypervisor.
*/
__vcpu_assign_sys_reg(vcpu, reg, val);
if (loc.loc & SR_LOC_SPECIAL) {
WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
switch (reg) {
case CNTHCTL_EL2:
/*
* If E2H=0, CNHTCTL_EL2 is a pure shadow register.
* Otherwise, some of the bits are backed by
* If E2H=1, some of the bits are backed by
* CNTKCTL_EL1, while the rest is kept in memory.
* Yes, this is fun stuff.
*/
if (vcpu_el2_e2h_is_set(vcpu))
write_sysreg_el1(val, SYS_CNTKCTL);
return;
write_sysreg_el1(val, SYS_CNTKCTL);
break;
default:
WARN_ON_ONCE(1);
}
/* No EL1 counterpart? We're done here.? */
if (reg == el1r)
return;
if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
val = xlate(val);
/* Redirect this to the EL1 version of the register. */
WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
return;
}
/* EL1 register can't be on the CPU if the guest is in vEL2. */
if (unlikely(is_hyp_ctxt(vcpu)))
goto memory_write;
if (loc.loc & SR_LOC_LOADED) {
enum vcpu_sysreg map_reg = reg;
u64 xlated_val;
if (__vcpu_write_sys_reg_to_cpu(val, reg))
return;
if (reg >= __SANITISED_REG_START__)
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
if (loc.loc & SR_LOC_MAPPED)
map_reg = loc.map_reg;
if (loc.loc & SR_LOC_XLATED)
xlated_val = loc.xlate(val);
else
xlated_val = val;
write_sr_to_cpu(map_reg, xlated_val);
/*
* Fall through to write the backing store anyway, which
* allows translated registers to be directly read without a
* reverse translation.
*/
}
memory_write:
__vcpu_assign_sys_reg(vcpu, reg, val);
}
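The reworked accessors above first classify where a register currently lives (memory only, loaded on the CPU, mapped onto a different EL1 register, needing translation, or "special" like CNTHCTL_EL2) and then dispatch on that classification. A compact sketch of the read-side dispatch only, with invented names and none of the real sysreg plumbing:

    #include <stdint.h>
    #include <stdio.h>

    enum loc_attr {
            LOC_MEMORY = 0,        /* backed only by the in-memory copy */
            LOC_LOADED = 1 << 0,   /* currently live on the CPU */
            LOC_MAPPED = 1 << 1,   /* lives in a different hardware register */
            LOC_XLATED = 1 << 2,   /* needs translation, so read the memory copy */
    };

    struct loc {
            unsigned int attr;
            int map_reg;
    };

    static uint64_t from_cpu(int reg) { return 0xc0ffee00u + reg; }
    static uint64_t from_mem(int reg) { return 0x11110000u + reg; }

    /* Read path: use the live CPU copy only when it is usable as-is. */
    static uint64_t read_reg(struct loc loc, int reg)
    {
            if ((loc.attr & LOC_LOADED) && !(loc.attr & LOC_XLATED)) {
                    int hw = (loc.attr & LOC_MAPPED) ? loc.map_reg : reg;
                    return from_cpu(hw);
            }
            return from_mem(reg);
    }

    int main(void)
    {
            struct loc direct = { .attr = LOC_LOADED };
            struct loc mapped = { .attr = LOC_LOADED | LOC_MAPPED, .map_reg = 7 };
            struct loc memory = { .attr = LOC_MEMORY };

            printf("%llx %llx %llx\n",
                   (unsigned long long)read_reg(direct, 3),
                   (unsigned long long)read_reg(mapped, 42),
                   (unsigned long long)read_reg(memory, 3));
            return 0;
    }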
@@ -1584,6 +1710,7 @@ static u8 pmuver_to_perfmon(u8 pmuver)
}
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
/* Read a sanitised cpufeature ID register by sys_reg_desc */
@@ -1606,19 +1733,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val = sanitise_id_aa64pfr0_el1(vcpu, val);
break;
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm)) {
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
}
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
val = sanitise_id_aa64pfr1_el1(vcpu, val);
break;
case SYS_ID_AA64PFR2_EL1:
val &= ID_AA64PFR2_EL1_FPMR |
@@ -1628,18 +1743,18 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
val &= ~(ID_AA64ISAR1_EL1_APA |
ID_AA64ISAR1_EL1_API |
ID_AA64ISAR1_EL1_GPA |
ID_AA64ISAR1_EL1_GPI);
break;
case SYS_ID_AA64ISAR2_EL1:
if (!vcpu_has_ptrauth(vcpu))
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
val &= ~(ID_AA64ISAR2_EL1_APA3 |
ID_AA64ISAR2_EL1_GPA3);
if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
has_broken_cntvoff())
val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
val &= ~ID_AA64ISAR2_EL1_WFxT;
break;
case SYS_ID_AA64ISAR3_EL1:
val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;
@@ -1655,7 +1770,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
ID_AA64MMFR3_EL1_S1PIE;
break;
case SYS_ID_MMFR4_EL1:
val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
val &= ~ID_MMFR4_EL1_CCIDX;
break;
}
@@ -1836,6 +1951,31 @@ static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
return val;
}
static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
{
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
if (!kvm_has_mte(vcpu->kvm)) {
val &= ~ID_AA64PFR1_EL1_MTE;
val &= ~ID_AA64PFR1_EL1_MTE_frac;
}
if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
val &= ~ID_AA64PFR1_EL1_RAS_frac;
val &= ~ID_AA64PFR1_EL1_SME;
val &= ~ID_AA64PFR1_EL1_RNDR_trap;
val &= ~ID_AA64PFR1_EL1_NMI;
val &= ~ID_AA64PFR1_EL1_GCS;
val &= ~ID_AA64PFR1_EL1_THE;
val &= ~ID_AA64PFR1_EL1_MTEX;
val &= ~ID_AA64PFR1_EL1_PFAR;
val &= ~ID_AA64PFR1_EL1_MPAM_frac;
return val;
}
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
@@ -2697,6 +2837,18 @@ static bool access_ras(struct kvm_vcpu *vcpu,
struct kvm *kvm = vcpu->kvm;
switch(reg_to_encoding(r)) {
case SYS_ERXPFGCDN_EL1:
case SYS_ERXPFGCTL_EL1:
case SYS_ERXPFGF_EL1:
case SYS_ERXMISC2_EL1:
case SYS_ERXMISC3_EL1:
if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
(kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) {
kvm_inject_undefined(vcpu);
return false;
}
break;
default:
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
kvm_inject_undefined(vcpu);
@@ -2929,7 +3081,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
~(ID_AA64PFR0_EL1_AMU |
ID_AA64PFR0_EL1_MPAM |
ID_AA64PFR0_EL1_SVE |
ID_AA64PFR0_EL1_RAS |
ID_AA64PFR0_EL1_AdvSIMD |
ID_AA64PFR0_EL1_FP)),
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
@@ -2943,7 +3094,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64PFR1_EL1_SME |
ID_AA64PFR1_EL1_RES0 |
ID_AA64PFR1_EL1_MPAM_frac |
ID_AA64PFR1_EL1_RAS_frac |
ID_AA64PFR1_EL1_MTE)),
ID_WRITABLE(ID_AA64PFR2_EL1,
ID_AA64PFR2_EL1_FPMR |
@@ -3063,8 +3213,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
{ SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
{ SYS_DESC(SYS_ERXADDR_EL1), access_ras },
{ SYS_DESC(SYS_ERXPFGF_EL1), access_ras },
{ SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras },
{ SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras },
{ SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
{ SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
{ SYS_DESC(SYS_ERXMISC2_EL1), access_ras },
{ SYS_DESC(SYS_ERXMISC3_EL1), access_ras },
MTE_REG(TFSR_EL1),
MTE_REG(TFSRE0_EL1),


@@ -69,7 +69,7 @@ static int iter_mark_lpis(struct kvm *kvm)
int nr_lpis = 0;
xa_for_each(&dist->lpi_xa, intid, irq) {
if (!vgic_try_get_irq_kref(irq))
if (!vgic_try_get_irq_ref(irq))
continue;
xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);


@@ -53,7 +53,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
xa_init(&dist->lpi_xa);
}
/* CREATION */
@@ -208,7 +208,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
raw_spin_lock_init(&irq->irq_lock);
irq->vcpu = NULL;
irq->target_vcpu = vcpu0;
kref_init(&irq->refcount);
refcount_set(&irq->refcount, 0);
switch (dist->vgic_model) {
case KVM_DEV_TYPE_ARM_VGIC_V2:
irq->targets = 0;
@@ -277,7 +277,7 @@ static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type)
irq->intid = i;
irq->vcpu = NULL;
irq->target_vcpu = vcpu;
kref_init(&irq->refcount);
refcount_set(&irq->refcount, 0);
if (vgic_irq_is_sgi(i)) {
/* SGIs */
irq->enabled = 1;


@@ -78,7 +78,6 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
unsigned long flags;
int ret;
/* In this case there is no put, since we keep the reference. */
@@ -89,7 +88,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
if (!irq)
return ERR_PTR(-ENOMEM);
ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
ret = xa_reserve(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
if (ret) {
kfree(irq);
return ERR_PTR(ret);
@@ -99,19 +98,19 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
raw_spin_lock_init(&irq->irq_lock);
irq->config = VGIC_CONFIG_EDGE;
kref_init(&irq->refcount);
refcount_set(&irq->refcount, 1);
irq->intid = intid;
irq->target_vcpu = vcpu;
irq->group = 1;
xa_lock_irqsave(&dist->lpi_xa, flags);
xa_lock(&dist->lpi_xa);
/*
* There could be a race with another vgic_add_lpi(), so we need to
* check that we don't add a second list entry with the same LPI.
*/
oldirq = xa_load(&dist->lpi_xa, intid);
if (vgic_try_get_irq_kref(oldirq)) {
if (vgic_try_get_irq_ref(oldirq)) {
/* Someone was faster with adding this LPI, lets use that. */
kfree(irq);
irq = oldirq;
@@ -126,7 +125,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
}
out_unlock:
xa_unlock_irqrestore(&dist->lpi_xa, flags);
xa_unlock(&dist->lpi_xa);
if (ret)
return ERR_PTR(ret);
@@ -547,7 +546,7 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
rcu_read_lock();
irq = xa_load(&its->translation_cache, cache_key);
if (!vgic_try_get_irq_kref(irq))
if (!vgic_try_get_irq_ref(irq))
irq = NULL;
rcu_read_unlock();
@@ -571,7 +570,7 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
* its_lock, as the ITE (and the reference it holds) cannot be freed.
*/
lockdep_assert_held(&its->its_lock);
vgic_get_irq_kref(irq);
vgic_get_irq_ref(irq);
old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);


@@ -50,6 +50,14 @@ bool vgic_has_its(struct kvm *kvm)
bool vgic_supports_direct_msis(struct kvm *kvm)
{
/*
* Deliberately conflate vLPI and vSGI support on GICv4.1 hardware,
* indirectly allowing userspace to control whether or not vPEs are
* allocated for the VM.
*/
if (system_supports_direct_sgis() && !vgic_supports_direct_sgis(kvm))
return false;
return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
}


@@ -1091,7 +1091,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
len = vgic_v3_init_dist_iodev(io_device);
break;
default:
BUG_ON(1);
BUG();
}
io_device->base_addr = dist_base_address;


@@ -518,7 +518,7 @@ static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq)
if (!irq->hw || irq->host_irq != host_irq)
continue;
if (!vgic_try_get_irq_kref(irq))
if (!vgic_try_get_irq_ref(irq))
return NULL;
return irq;


@@ -28,8 +28,8 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
* kvm->arch.config_lock (mutex)
* its->cmd_lock (mutex)
* its->its_lock (mutex)
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
* vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
* vgic_dist->lpi_xa.xa_lock
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
* vgic_irq->irq_lock must be taken with IRQs disabled
*
* As the ap_list_lock might be taken from the timer interrupt handler,
@@ -71,7 +71,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
rcu_read_lock();
irq = xa_load(&dist->lpi_xa, intid);
if (!vgic_try_get_irq_kref(irq))
if (!vgic_try_get_irq_ref(irq))
irq = NULL;
rcu_read_unlock();
@@ -114,37 +114,66 @@ struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
return vgic_get_irq(vcpu->kvm, intid);
}
/*
* We can't do anything in here, because we lack the kvm pointer to
* lock and remove the item from the lpi_list. So we keep this function
* empty and use the return value of kref_put() to trigger the freeing.
*/
static void vgic_irq_release(struct kref *ref)
static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq)
{
lockdep_assert_held(&dist->lpi_xa.xa_lock);
__xa_erase(&dist->lpi_xa, irq->intid);
kfree_rcu(irq, rcu);
}
static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
if (irq->intid < VGIC_MIN_LPI)
return false;
return refcount_dec_and_test(&irq->refcount);
}
static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq *irq)
{
if (!__vgic_put_irq(kvm, irq))
return false;
irq->pending_release = true;
return true;
}
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;
unsigned long flags;
if (irq->intid < VGIC_MIN_LPI)
if (irq->intid >= VGIC_MIN_LPI)
might_lock(&dist->lpi_xa.xa_lock);
if (!__vgic_put_irq(kvm, irq))
return;
if (!kref_put(&irq->refcount, vgic_irq_release))
return;
xa_lock(&dist->lpi_xa);
vgic_release_lpi_locked(dist, irq);
xa_unlock(&dist->lpi_xa);
}
xa_lock_irqsave(&dist->lpi_xa, flags);
__xa_erase(&dist->lpi_xa, irq->intid);
xa_unlock_irqrestore(&dist->lpi_xa, flags);
static void vgic_release_deleted_lpis(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
unsigned long intid;
struct vgic_irq *irq;
kfree_rcu(irq, rcu);
xa_lock(&dist->lpi_xa);
xa_for_each(&dist->lpi_xa, intid, irq) {
if (irq->pending_release)
vgic_release_lpi_locked(dist, irq);
}
xa_unlock(&dist->lpi_xa);
}
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq, *tmp;
bool deleted = false;
unsigned long flags;
raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
@@ -155,11 +184,14 @@ void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
list_del(&irq->ap_list);
irq->vcpu = NULL;
raw_spin_unlock(&irq->irq_lock);
vgic_put_irq(vcpu->kvm, irq);
deleted |= vgic_put_irq_norelease(vcpu->kvm, irq);
}
}
raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
if (deleted)
vgic_release_deleted_lpis(vcpu->kvm);
}
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
@@ -399,7 +431,7 @@ retry:
* now in the ap_list. This is safe as the caller must already hold a
* reference on the irq.
*/
vgic_get_irq_kref(irq);
vgic_get_irq_ref(irq);
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
irq->vcpu = vcpu;
@@ -630,6 +662,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq, *tmp;
bool deleted_lpis = false;
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
@@ -657,12 +690,12 @@ retry:
/*
* This vgic_put_irq call matches the
* vgic_get_irq_kref in vgic_queue_irq_unlock,
* vgic_get_irq_ref in vgic_queue_irq_unlock,
* where we added the LPI to the ap_list. As
* we remove the irq from the list, we drop
* also drop the refcount.
*/
vgic_put_irq(vcpu->kvm, irq);
deleted_lpis |= vgic_put_irq_norelease(vcpu->kvm, irq);
continue;
}
@@ -725,6 +758,9 @@ retry:
}
raw_spin_unlock(&vgic_cpu->ap_list_lock);
if (unlikely(deleted_lpis))
vgic_release_deleted_lpis(vcpu->kvm);
}
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -818,7 +854,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
* the AP list has been sorted already.
*/
if (multi_sgi && irq->priority > prio) {
_raw_spin_unlock(&irq->irq_lock);
raw_spin_unlock(&irq->irq_lock);
break;
}
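The vgic rework above fixes the lock ordering by splitting LPI release into two phases: vgic_put_irq_norelease() only drops the refcount and marks the entry pending_release while the raw ap_list lock is held, and vgic_release_deleted_lpis() later takes the (non-raw) xarray lock to erase and free whatever was marked. A simplified two-phase sketch of that pattern, with a plain array and a hypothetical fake_irq type in place of the real xarray and vgic_irq:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_irq {
            int refcount;
            bool pending_release;
    };

    /* Phase 1: runs with a raw (IRQ-off) lock held; never frees memory here. */
    static bool put_irq_norelease(struct fake_irq *irq)
    {
            if (--irq->refcount)
                    return false;
            irq->pending_release = true;   /* defer the actual release */
            return true;
    }

    /* Phase 2: runs after the raw lock is dropped; free whatever was marked. */
    static void release_deleted(struct fake_irq **table, int n)
    {
            for (int i = 0; i < n; i++) {
                    if (table[i] && table[i]->pending_release) {
                            free(table[i]);
                            table[i] = NULL;
                    }
            }
    }

    int main(void)
    {
            struct fake_irq *table[1] = { calloc(1, sizeof(struct fake_irq)) };
            bool deleted;

            table[0]->refcount = 1;
            deleted = put_irq_norelease(table[0]);  /* phase 1, under the raw lock */
            if (deleted)
                    release_deleted(table, 1);      /* phase 2, locks dropped */
            printf("%s\n", table[0] ? "kept" : "released");
            return 0;
    }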


@@ -267,7 +267,7 @@ void vgic_v2_put(struct kvm_vcpu *vcpu);
void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
static inline bool vgic_try_get_irq_ref(struct vgic_irq *irq)
{
if (!irq)
return false;
@@ -275,12 +275,12 @@ static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
if (irq->intid < VGIC_MIN_LPI)
return true;
return kref_get_unless_zero(&irq->refcount);
return refcount_inc_not_zero(&irq->refcount);
}
static inline void vgic_get_irq_kref(struct vgic_irq *irq)
static inline void vgic_get_irq_ref(struct vgic_irq *irq)
{
WARN_ON_ONCE(!vgic_try_get_irq_kref(irq));
WARN_ON_ONCE(!vgic_try_get_irq_ref(irq));
}
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
@@ -396,15 +396,7 @@ bool vgic_supports_direct_sgis(struct kvm *kvm);
static inline bool vgic_supports_direct_irqs(struct kvm *kvm)
{
/*
* Deliberately conflate vLPI and vSGI support on GICv4.1 hardware,
* indirectly allowing userspace to control whether or not vPEs are
* allocated for the VM.
*/
if (system_supports_direct_sgis())
return vgic_supports_direct_sgis(kvm);
return vgic_supports_direct_msis(kvm);
return vgic_supports_direct_msis(kvm) || vgic_supports_direct_sgis(kvm);
}
int vgic_v4_init(struct kvm *kvm);


@@ -47,13 +47,6 @@
#define NO_CONT_MAPPINGS BIT(1)
#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */
enum pgtable_type {
TABLE_PTE,
TABLE_PMD,
TABLE_PUD,
TABLE_P4D,
};
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);


@@ -53,6 +53,7 @@ HAS_S1PIE
HAS_S1POE
HAS_SCTLR2
HAS_RAS_EXTN
HAS_RASV1P1_EXTN
HAS_RNG
HAS_SB
HAS_STAGE2_FWB


@@ -243,13 +243,13 @@ $(obj)/wrapper.a: $(obj-wlib) FORCE
hostprogs := addnote hack-coff mktree
targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a) zImage.lds
extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
always-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
$(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
dtstree := $(src)/dts
wrapper := $(src)/wrapper
wrapperbits := $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree) \
wrapperbits := $(always-y) $(addprefix $(obj)/,addnote hack-coff mktree) \
$(wrapper) FORCE
#############
@@ -456,7 +456,7 @@ WRAPPER_DTSDIR := /usr/lib/kernel-wrapper/dts
WRAPPER_BINDIR := /usr/sbin
INSTALL := install
extra-installed := $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y))
extra-installed := $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(always-y))
hostprogs-installed := $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs))
wrapper-installed := $(DESTDIR)$(WRAPPER_BINDIR)/wrapper
dts-installed := $(patsubst $(dtstree)/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(dtstree)/*.dts))

View File

@@ -19,19 +19,19 @@
set -e
# this should work for both the pSeries zImage and the iSeries vmlinux.sm
image_name=`basename $2`
image_name=$(basename "$2")
echo "Warning: '${INSTALLKERNEL}' command not available... Copying" \
"directly to $4/$image_name-$1" >&2
if [ -f $4/$image_name-$1 ]; then
mv $4/$image_name-$1 $4/$image_name-$1.old
if [ -f "$4"/"$image_name"-"$1" ]; then
mv "$4"/"$image_name"-"$1" "$4"/"$image_name"-"$1".old
fi
if [ -f $4/System.map-$1 ]; then
mv $4/System.map-$1 $4/System-$1.old
if [ -f "$4"/System.map-"$1" ]; then
mv "$4"/System.map-"$1" "$4"/System-"$1".old
fi
cat $2 > $4/$image_name-$1
cp $3 $4/System.map-$1
cat "$2" > "$4"/"$image_name"-"$1"
cp "$3" "$4"/System.map-"$1"


@@ -199,7 +199,9 @@ obj-$(CONFIG_ALTIVEC) += vector.o
obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o
obj64-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_entry_64.o
extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init_check
ifdef KBUILD_BUILTIN
always-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init_check
endif
obj-$(CONFIG_PPC64) += $(obj64-y)
obj-$(CONFIG_PPC32) += $(obj32-y)


@@ -632,19 +632,19 @@ static void __init kvm_check_ins(u32 *inst, u32 features)
#endif
}
switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
switch (inst_no_rt & ~KVM_MASK_RB) {
case KVM_INST_MTSRIN:
if (features & KVM_MAGIC_FEAT_SR) {
u32 inst_rb = _inst & KVM_MASK_RB;
kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
}
break;
#endif
}
#endif
switch (_inst) {
#ifdef CONFIG_BOOKE
switch (_inst) {
case KVM_INST_WRTEEI_0:
kvm_patch_ins_wrteei_0(inst);
break;
@@ -652,8 +652,8 @@ static void __init kvm_check_ins(u32 *inst, u32 features)
case KVM_INST_WRTEEI_1:
kvm_patch_ins_wrtee(inst, 0, 1);
break;
#endif
}
#endif
}
extern u32 kvm_template_start[];


@@ -15,8 +15,8 @@
has_renamed_memintrinsics()
{
grep -q "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} && \
! grep -q "^CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y" ${KCONFIG_CONFIG}
grep -q "^CONFIG_KASAN=y$" "${KCONFIG_CONFIG}" && \
! grep -q "^CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y" "${KCONFIG_CONFIG}"
}
if has_renamed_memintrinsics
@@ -42,15 +42,15 @@ check_section()
{
file=$1
section=$2
size=$(objdump -h -j $section $file 2>/dev/null | awk "\$2 == \"$section\" {print \$3}")
size=$(objdump -h -j "$section" "$file" 2>/dev/null | awk "\$2 == \"$section\" {print \$3}")
size=${size:-0}
if [ $size -ne 0 ]; then
if [ "$size" -ne 0 ]; then
ERROR=1
echo "Error: Section $section not empty in prom_init.c" >&2
fi
}
for UNDEF in $($NM -u $OBJ | awk '{print $2}')
for UNDEF in $($NM -u "$OBJ" | awk '{print $2}')
do
# On 64-bit nm gives us the function descriptors, which have
# a leading . on the name, so strip it off here.
@@ -87,8 +87,8 @@ do
fi
done
check_section $OBJ .data
check_section $OBJ .bss
check_section $OBJ .init.data
check_section "$OBJ" .data
check_section "$OBJ" .bss
check_section "$OBJ" .init.data
exit $ERROR


@@ -141,10 +141,7 @@ void __init check_smt_enabled(void)
smt_enabled_at_boot = 0;
else {
int smt;
int rc;
rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
if (!rc)
if (!kstrtoint(smt_enabled_cmdline, 10, &smt))
smt_enabled_at_boot =
min(threads_per_core, smt);
}


@@ -69,7 +69,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
/*
* Common checks before entering the guest world. Call with interrupts
* disabled.
* enabled.
*
* returns:
*


@@ -110,8 +110,7 @@ static int cpm_pic_probe(struct platform_device *pdev)
out_be32(&data->reg->cpic_cimr, 0);
data->host = irq_domain_create_linear(of_fwnode_handle(dev->of_node),
64, &cpm_pic_host_ops, data);
data->host = irq_domain_create_linear(dev_fwnode(dev), 64, &cpm_pic_host_ops, data);
if (!data->host)
return -ENODEV;


@@ -122,16 +122,11 @@ choice
If unsure, select Generic.
config POWERPC64_CPU
bool "Generic (POWER5 and PowerPC 970 and above)"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
bool "Generic 64 bits powerpc"
depends on PPC_BOOK3S_64
select ARCH_HAS_FAST_MULTIPLIER if CPU_LITTLE_ENDIAN
select PPC_64S_HASH_MMU
config POWERPC64_CPU
bool "Generic (POWER8 and above)"
depends on PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
select ARCH_HAS_FAST_MULTIPLIER
select PPC_64S_HASH_MMU
select PPC_HAS_LBARX_LHARX
select PPC_HAS_LBARX_LHARX if CPU_LITTLE_ENDIAN
config POWERPC_CPU
bool "Generic 32 bits powerpc"


@@ -412,9 +412,8 @@ static int fsl_of_msi_probe(struct platform_device *dev)
}
platform_set_drvdata(dev, msi);
msi->irqhost = irq_domain_create_linear(of_fwnode_handle(dev->dev.of_node),
NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);
msi->irqhost = irq_domain_create_linear(dev_fwnode(&dev->dev), NR_MSI_IRQS_MAX,
&fsl_msi_host_ops, msi);
if (msi->irqhost == NULL) {
dev_err(&dev->dev, "No memory for MSI irqhost\n");
err = -ENOMEM;


@@ -39,6 +39,7 @@ int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
unsigned long size, bool writable, bool in_atomic)
{
int ret = 0;
pgprot_t prot;
unsigned long pfn;
phys_addr_t addr, end;
struct kvm_mmu_memory_cache pcache = {
@@ -55,10 +56,12 @@ int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(hpa);
prot = pgprot_noncached(PAGE_WRITE);
for (addr = gpa; addr < end; addr += PAGE_SIZE) {
map.addr = addr;
map.pte = pfn_pte(pfn, PAGE_KERNEL_IO);
map.pte = pfn_pte(pfn, prot);
map.pte = pte_mkdirty(map.pte);
map.level = 0;
if (!writable)


@@ -683,7 +683,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
}
/**
* check_vcpu_requests - check and handle pending vCPU requests
* kvm_riscv_check_vcpu_requests - check and handle pending vCPU requests
* @vcpu: the VCPU pointer
*
* Return: 1 if we should enter the guest

View File

@@ -182,6 +182,8 @@ int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long reg_val;
if (reg_size != sizeof(reg_val))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, reg_size))
return -EFAULT;
if (reg_val != cntx->vector.vlenb)

View File

@@ -416,6 +416,10 @@ static bool __init should_mitigate_vuln(unsigned int bug)
cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
(smt_mitigations != SMT_MITIGATIONS_OFF);
case X86_BUG_SPEC_STORE_BYPASS:
return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
default:
WARN(1, "Unknown bug %x\n", bug);
return false;
@@ -2710,6 +2714,11 @@ static void __init ssb_select_mitigation(void)
ssb_mode = SPEC_STORE_BYPASS_DISABLE;
break;
case SPEC_STORE_BYPASS_CMD_AUTO:
if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
ssb_mode = SPEC_STORE_BYPASS_PRCTL;
else
ssb_mode = SPEC_STORE_BYPASS_NONE;
break;
case SPEC_STORE_BYPASS_CMD_PRCTL:
ssb_mode = SPEC_STORE_BYPASS_PRCTL;
break;

View File

@@ -262,7 +262,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_WILLAMETTE) ||
} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_CEDARMILL) ||
(c->x86_vfm >= INTEL_CORE_YONAH && c->x86_vfm <= INTEL_IVYBRIDGE)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

View File

@@ -171,8 +171,28 @@ static int cmp_id(const void *key, const void *elem)
return 1;
}
static u32 cpuid_to_ucode_rev(unsigned int val)
{
union zen_patch_rev p = {};
union cpuid_1_eax c;
c.full = val;
p.stepping = c.stepping;
p.model = c.model;
p.ext_model = c.ext_model;
p.ext_fam = c.ext_fam;
return p.ucode_rev;
}
static bool need_sha_check(u32 cur_rev)
{
if (!cur_rev) {
cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
}
switch (cur_rev >> 8) {
case 0x80012: return cur_rev <= 0x800126f; break;
case 0x80082: return cur_rev <= 0x800820f; break;
@@ -749,8 +769,6 @@ static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equi
n.equiv_cpu = equiv_cpu;
n.patch_id = uci->cpu_sig.rev;
WARN_ON_ONCE(!n.patch_id);
list_for_each_entry(p, &microcode_cache, plist)
if (patch_cpus_equivalent(p, &n, false))
return p;
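
cpuid_to_ucode_rev() above reconstructs a microcode revision from the CPUID signature when no revision has been programmed yet. As a rough illustration of where those inputs come from, here is a minimal user-space sketch that decodes the architectural stepping/model/family fields of CPUID leaf 1 EAX; how the kernel then packs them into the final revision follows its zen_patch_rev union layout, which is not reproduced here.

#include <stdint.h>
#include <stdio.h>

/* Architectural field positions of CPUID leaf 1, EAX. */
struct cpuid_1_eax_fields {
	uint8_t stepping;   /* bits  3:0  */
	uint8_t model;      /* bits  7:4  */
	uint8_t family;     /* bits 11:8  */
	uint8_t ext_model;  /* bits 19:16 */
	uint8_t ext_family; /* bits 27:20 */
};

static struct cpuid_1_eax_fields decode_cpuid_1_eax(uint32_t eax)
{
	struct cpuid_1_eax_fields f = {
		.stepping   = eax & 0xf,
		.model      = (eax >> 4) & 0xf,
		.family     = (eax >> 8) & 0xf,
		.ext_model  = (eax >> 16) & 0xf,
		.ext_family = (eax >> 20) & 0xff,
	};
	return f;
}

int main(void)
{
	/* Hypothetical raw EAX value, just to exercise the decoder. */
	struct cpuid_1_eax_fields f = decode_cpuid_1_eax(0x00a20f12);

	printf("ext_fam=%#x ext_model=%#x model=%#x stepping=%#x\n",
	       f.ext_family, f.ext_model, f.model, f.stepping);
	return 0;
}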

View File

@@ -81,20 +81,25 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
cpuid_leaf(0x8000001e, &leaf);
tscan->c->topo.initial_apicid = leaf.ext_apic_id;
/*
* If leaf 0xb is available, then the domain shifts are set
* already and nothing to do here. Only valid for family >= 0x17.
* If leaf 0xb/0x26 is available, then the APIC ID and the domain
* shifts are set already.
*/
if (!has_topoext && tscan->c->x86 >= 0x17) {
/*
* Leaf 0x80000008 set the CORE domain shift already.
* Update the SMT domain, but do not propagate it.
*/
unsigned int nthreads = leaf.core_nthreads + 1;
if (!has_topoext) {
tscan->c->topo.initial_apicid = leaf.ext_apic_id;
topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
/*
* Leaf 0x80000008 sets the CORE domain shift but not the
* SMT domain shift. On CPUs with family >= 0x17, there
* might be hyperthreads.
*/
if (tscan->c->x86 >= 0x17) {
/* Update the SMT domain, but do not propagate it. */
unsigned int nthreads = leaf.core_nthreads + 1;
topology_update_dom(tscan, TOPO_SMT_DOMAIN,
get_count_order(nthreads), nthreads);
}
}
store_node(tscan, leaf.nnodes_per_socket + 1, leaf.node_id);

View File

@@ -149,12 +149,15 @@ static inline void rq_qos_done_bio(struct bio *bio)
q = bdev_get_queue(bio->bi_bdev);
/*
* If a bio has BIO_QOS_xxx set, it implicitly implies that
* q->rq_qos is present. So, we skip re-checking q->rq_qos
* here as an extra optimization and directly call
* __rq_qos_done_bio().
* A BIO may carry BIO_QOS_* flags even if the associated request_queue
* does not have rq_qos enabled. This can happen with stacked block
* devices, for example NVMe multipath, where it's possible that the
* bottom device has QoS enabled but the top device does not. Therefore,
* always verify that q->rq_qos is present and QoS is enabled before
* calling __rq_qos_done_bio().
*/
__rq_qos_done_bio(q->rq_qos, bio);
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_done_bio(q->rq_qos, bio);
}
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
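
The rewritten comment is the key point of this hunk: with stacked block devices the bottom queue may have QoS enabled while the top one does not, so a BIO_QOS_* flag on the bio alone is not proof that q->rq_qos exists. A minimal user-space sketch of that defensive double check, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct qos_ops { void (*done)(void *cookie); };

struct queue {
	bool qos_enabled;    /* analogous to QUEUE_FLAG_QOS_ENABLED */
	struct qos_ops *qos; /* analogous to q->rq_qos, may be NULL  */
};

/* Only call into QoS if this particular queue really has it enabled. */
static void qos_done(struct queue *q, void *cookie)
{
	if (q->qos_enabled && q->qos)
		q->qos->done(cookie);
}

static void report(void *cookie) { printf("done %p\n", cookie); }

int main(void)
{
	struct qos_ops ops = { .done = report };
	struct queue bottom = { .qos_enabled = true, .qos = &ops };
	struct queue top = { 0 };   /* stacked device without QoS */
	int token;

	qos_done(&bottom, &token);  /* dispatches */
	qos_done(&top, &token);     /* safely skipped */
	return 0;
}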

View File

@@ -1286,14 +1286,14 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
struct block_device *bdev;
unsigned long flags;
struct bio *bio;
bool prepared;
/*
* Submit the next plugged BIO. If we do not have any, clear
* the plugged flag.
*/
spin_lock_irqsave(&zwplug->lock, flags);
again:
spin_lock_irqsave(&zwplug->lock, flags);
bio = bio_list_pop(&zwplug->bio_list);
if (!bio) {
zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
@@ -1304,13 +1304,14 @@ again:
trace_blk_zone_wplug_bio(zwplug->disk->queue, zwplug->zone_no,
bio->bi_iter.bi_sector, bio_sectors(bio));
if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
prepared = blk_zone_wplug_prepare_bio(zwplug, bio);
spin_unlock_irqrestore(&zwplug->lock, flags);
if (!prepared) {
blk_zone_wplug_bio_io_error(zwplug, bio);
goto again;
}
spin_unlock_irqrestore(&zwplug->lock, flags);
bdev = bio->bi_bdev;
/*
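
The reshuffled locking above takes the plug lock at the top of every retry and releases it before the failed BIO is completed, so the error path never runs under the spinlock. A condensed pthread sketch of the same lock/pop/unlock-then-handle retry shape (names invented, not the block-layer code):

#include <pthread.h>
#include <stdio.h>

struct item { struct item *next; int ok; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *head;

static void process(struct item *it) { printf("processed %p\n", (void *)it); }
static void fail(struct item *it)    { printf("failed %p\n", (void *)it); }

static void drain_one_good_item(void)
{
again:
	pthread_mutex_lock(&lock);
	struct item *it = head;
	if (!it) {
		pthread_mutex_unlock(&lock);
		return;
	}
	head = it->next;

	/* Decide under the lock, but act on failures outside of it. */
	int ok = it->ok;
	pthread_mutex_unlock(&lock);

	if (!ok) {
		fail(it);
		goto again;   /* try the next queued item */
	}
	process(it);
}

int main(void)
{
	struct item a = { .ok = 0 }, b = { .ok = 1 };
	a.next = &b;
	head = &a;
	drain_one_good_item();
	return 0;
}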

View File

@@ -689,40 +689,50 @@ MODULE_PARM_DESC(mask_port_map,
"where <pci_dev> is the PCI ID of an AHCI controller in the "
"form \"domain:bus:dev.func\"");
static void ahci_apply_port_map_mask(struct device *dev,
struct ahci_host_priv *hpriv, char *mask_s)
static char *ahci_mask_port_ext;
module_param_named(mask_port_ext, ahci_mask_port_ext, charp, 0444);
MODULE_PARM_DESC(mask_port_ext,
"32-bits mask to ignore the external/hotplug capability of ports. "
"Valid values are: "
"\"<mask>\" to apply the same mask to all AHCI controller "
"devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
"specify different masks for the controllers specified, "
"where <pci_dev> is the PCI ID of an AHCI controller in the "
"form \"domain:bus:dev.func\"");
static u32 ahci_port_mask(struct device *dev, char *mask_s)
{
unsigned int mask;
if (kstrtouint(mask_s, 0, &mask)) {
dev_err(dev, "Invalid port map mask\n");
return;
return 0;
}
hpriv->mask_port_map = mask;
return mask;
}
static void ahci_get_port_map_mask(struct device *dev,
struct ahci_host_priv *hpriv)
static u32 ahci_get_port_mask(struct device *dev, char *mask_p)
{
char *param, *end, *str, *mask_s;
char *name;
u32 mask = 0;
if (!strlen(ahci_mask_port_map))
return;
if (!mask_p || !strlen(mask_p))
return 0;
str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
str = kstrdup(mask_p, GFP_KERNEL);
if (!str)
return;
return 0;
/* Handle single mask case */
if (!strchr(str, '=')) {
ahci_apply_port_map_mask(dev, hpriv, str);
mask = ahci_port_mask(dev, str);
goto free;
}
/*
* Mask list case: parse the parameter to apply the mask only if
* Mask list case: parse the parameter to get the mask only if
* the device name matches.
*/
param = str;
@@ -752,11 +762,13 @@ static void ahci_get_port_map_mask(struct device *dev,
param++;
}
ahci_apply_port_map_mask(dev, hpriv, mask_s);
mask = ahci_port_mask(dev, mask_s);
}
free:
kfree(str);
return mask;
}
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
@@ -782,8 +794,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
}
/* Handle port map masks passed as module parameter. */
if (ahci_mask_port_map)
ahci_get_port_map_mask(&pdev->dev, hpriv);
hpriv->mask_port_map =
ahci_get_port_mask(&pdev->dev, ahci_mask_port_map);
hpriv->mask_port_ext =
ahci_get_port_mask(&pdev->dev, ahci_mask_port_ext);
ahci_save_initial_config(&pdev->dev, hpriv);
}
@@ -1757,11 +1771,20 @@ static void ahci_mark_external_port(struct ata_port *ap)
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
/* mark external ports (hotplug-capable, eSATA) */
/*
* Mark external ports (hotplug-capable, eSATA), unless we were asked to
* ignore this feature.
*/
tmp = readl(port_mmio + PORT_CMD);
if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
(tmp & PORT_CMD_HPCP))
(tmp & PORT_CMD_HPCP)) {
if (hpriv->mask_port_ext & (1U << ap->port_no)) {
ata_port_info(ap,
"Ignoring external/hotplug capability\n");
return;
}
ap->pflags |= ATA_PFLAG_EXTERNAL;
}
}
static void ahci_update_initial_lpm_policy(struct ata_port *ap)
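
Like mask_port_map, the new mask_port_ext parameter accepts either one global mask or a comma-separated list of "<pci_dev>=<mask>" entries. The stand-alone sketch below parses that list form with standard C string routines; the device ID is treated here as an opaque string rather than a parsed PCI address.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the mask for `dev` out of "dev=mask,dev=mask,..." or 0 if absent. */
static unsigned long mask_for_device(const char *param, const char *dev)
{
	char *copy = strdup(param);
	unsigned long mask = 0;

	if (!copy)
		return 0;

	for (char *entry = strtok(copy, ","); entry; entry = strtok(NULL, ",")) {
		char *eq = strchr(entry, '=');

		if (!eq)
			continue;
		*eq = '\0';
		if (strcmp(entry, dev) == 0) {
			mask = strtoul(eq + 1, NULL, 0);
			break;
		}
	}
	free(copy);
	return mask;
}

int main(void)
{
	const char *param = "0000:00:1f.2=0x3,0000:03:00.0=0x1";

	printf("mask = %#lx\n", mask_for_device(param, "0000:03:00.0"));
	return 0;
}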

View File

@@ -330,6 +330,7 @@ struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
u32 mask_port_map; /* Mask of valid ports */
u32 mask_port_ext; /* Mask of ports ext capability */
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */

View File

@@ -450,7 +450,6 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
{
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
u32 rc;
void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
@@ -463,9 +462,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);
rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
return rc;
return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
/**
@@ -500,7 +497,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
u32 rc;
int rc;
port_fbs_save = readl(port_mmio + PORT_FBS);
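
The u32-to-int change matters because ahci_do_softreset() reports failures as a negative errno, and a negative value stored in a u32 no longer compares as less than zero. A tiny stand-alone demonstration of the pitfall:

#include <stdint.h>
#include <stdio.h>

static int do_reset(void)
{
	return -5;   /* stand-in for a -EIO style error */
}

int main(void)
{
	uint32_t rc_u32 = do_reset();   /* value becomes 0xfffffffb */
	int rc_int = do_reset();

	/*
	 * Comparing an unsigned value against 0 is always false (compilers
	 * typically warn about this), so the error check is silently skipped.
	 */
	printf("u32 sees the error: %s\n", rc_u32 < 0 ? "yes" : "no");
	printf("int sees the error: %s\n", rc_int < 0 ? "yes" : "no");
	return 0;
}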

View File

@@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
return NULL;
}
static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct atmtcp_hdr *hdr;
if (skb->len < sizeof(struct atmtcp_hdr))
return -EINVAL;
hdr = (struct atmtcp_hdr *)skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC)
return -EINVAL;
return 0;
}
static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
@@ -288,9 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct sk_buff *new_skb;
int result = 0;
if (skb->len < sizeof(struct atmtcp_hdr))
goto done;
dev = vcc->dev_data;
hdr = (struct atmtcp_hdr *) skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC) {
@@ -347,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = {
static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
.pre_send = atmtcp_c_pre_send,
.send = atmtcp_c_send
};

View File

@@ -675,7 +675,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" consumers. */
list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->consumer, func);
@@ -1330,7 +1330,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" suppliers. */
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->supplier, func);

View File

@@ -139,20 +139,26 @@ static int part_shift;
static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
{
struct kstat stat;
loff_t loopsize;
int ret;
/*
* Get the accurate file size. This provides better results than
* cached inode data, particularly for network filesystems where
* metadata may be stale.
*/
ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
if (ret)
return 0;
if (S_ISBLK(file_inode(file)->i_mode)) {
loopsize = i_size_read(file->f_mapping->host);
} else {
struct kstat stat;
/*
* Get the accurate file size. This provides better results than
* cached inode data, particularly for network filesystems where
* metadata may be stale.
*/
ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
if (ret)
return 0;
loopsize = stat.size;
}
loopsize = stat.size;
if (lo->lo_offset > 0)
loopsize -= lo->lo_offset;
/* offset is beyond i_size, weird but possible */
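
The restructured helper queries the filesystem for a fresh size only for regular files, while block devices read the device inode size directly, and the offset is subtracted in either case. A rough user-space approximation of that decision using stat(2) and the BLKGETSIZE64 ioctl (Linux-only, and only a sketch of the idea):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>

/* Size of the backing store minus an optional starting offset. */
static int64_t backing_size(const char *path, int64_t offset)
{
	struct stat st;
	int64_t size;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (fstat(fd, &st) < 0) {
		close(fd);
		return -1;
	}

	if (S_ISBLK(st.st_mode)) {
		uint64_t bytes = 0;

		ioctl(fd, BLKGETSIZE64, &bytes);   /* device capacity in bytes */
		size = (int64_t)bytes;
	} else {
		size = st.st_size;   /* freshly queried, not a cached value */
	}
	close(fd);

	if (offset > 0)
		size -= offset;
	return size > 0 ? size : 0;
}

int main(int argc, char **argv)
{
	if (argc < 2)
		return 1;
	printf("%lld\n", (long long)backing_size(argv[1], 0));
	return 0;
}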

View File

@@ -239,6 +239,7 @@ struct ublk_device {
struct mutex cancel_mutex;
bool canceling;
pid_t ublksrv_tgid;
struct delayed_work exit_work;
};
/* header of ublk_params */
@@ -1595,12 +1596,62 @@ static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
ublk_get_queue(ub, i)->canceling = canceling;
}
static int ublk_ch_release(struct inode *inode, struct file *filp)
static bool ublk_check_and_reset_active_ref(struct ublk_device *ub)
{
struct ublk_device *ub = filp->private_data;
int i, j;
if (!(ub->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY |
UBLK_F_AUTO_BUF_REG)))
return false;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
struct ublk_queue *ubq = ublk_get_queue(ub, i);
for (j = 0; j < ubq->q_depth; j++) {
struct ublk_io *io = &ubq->ios[j];
unsigned int refs = refcount_read(&io->ref) +
io->task_registered_buffers;
/*
* UBLK_REFCOUNT_INIT or zero means no active
* reference
*/
if (refs != UBLK_REFCOUNT_INIT && refs != 0)
return true;
/* reset to zero if the io has no active references */
refcount_set(&io->ref, 0);
io->task_registered_buffers = 0;
}
}
return false;
}
static void ublk_ch_release_work_fn(struct work_struct *work)
{
struct ublk_device *ub =
container_of(work, struct ublk_device, exit_work.work);
struct gendisk *disk;
int i;
/*
* For zero-copy and auto buffer register modes, I/O references
* might not be dropped naturally when the daemon is killed, but
* io_uring guarantees that registered bvec kernel buffers are
* finally unregistered when the io_uring context is freed, at
* which point the active references are dropped.
*
* Wait until the active references are dropped to avoid use-after-free.
*
* Registered buffers may be unregistered in io_uring's release handler,
* so wait by rescheduling this work function to avoid a dependency
* between the two file releases.
*/
if (ublk_check_and_reset_active_ref(ub)) {
schedule_delayed_work(&ub->exit_work, 1);
return;
}
/*
* The disk isn't attached yet: either the device isn't live or it has
* already been removed, so there is nothing to do here
@@ -1673,6 +1724,23 @@ unlock:
ublk_reset_ch_dev(ub);
out:
clear_bit(UB_STATE_OPEN, &ub->state);
/* put the reference grabbed in ublk_ch_release() */
ublk_put_device(ub);
}
static int ublk_ch_release(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = filp->private_data;
/*
* Grab ublk device reference, so it won't be gone until we are
* really released from work function.
*/
ublk_get_device(ub);
INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn);
schedule_delayed_work(&ub->exit_work, 0);
return 0;
}
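
The comment block above is the heart of the change: rather than waiting synchronously in ->release(), the teardown is pushed to a work item that re-queues itself until every io reference has dropped. A stripped-down user-space sketch of that "retry the teardown while references remain" shape, using an atomic counter and a polling worker in place of delayed work (all names invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int active_refs = 3;

static void *release_work(void *arg)
{
	(void)arg;
	/* Re-check later instead of tearing down while references remain. */
	while (atomic_load(&active_refs) != 0)
		usleep(1000);   /* stands in for re-queuing the delayed work */

	puts("all references dropped, freeing device state");
	return NULL;
}

static void *io_path(void *arg)
{
	(void)arg;
	for (int i = 0; i < 3; i++) {
		usleep(2000);
		atomic_fetch_sub(&active_refs, 1);   /* e.g. buffer unregistered */
	}
	return NULL;
}

int main(void)
{
	pthread_t worker, io;

	pthread_create(&worker, NULL, release_work, NULL);
	pthread_create(&io, NULL, io_path, NULL);
	pthread_join(io, NULL);
	pthread_join(worker, NULL);
	return 0;
}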

View File

@@ -143,6 +143,10 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
return var_hdr->ret_status;
}
#define COMM_BUF_SIZE(__payload_size) (MM_COMMUNICATE_HEADER_SIZE + \
MM_VARIABLE_COMMUNICATE_SIZE + \
(__payload_size))
/**
* setup_mm_hdr() - Allocate a buffer for StandAloneMM and initialize the
* header data.
@@ -150,11 +154,9 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
* @dptr: pointer address to store allocated buffer
* @payload_size: payload size
* @func: standAloneMM function number
* @ret: EFI return code
* Return: pointer to corresponding StandAloneMM function buffer or NULL
*/
static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
efi_status_t *ret)
static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func)
{
const efi_guid_t mm_var_guid = EFI_MM_VARIABLE_GUID;
struct efi_mm_communicate_header *mm_hdr;
@@ -169,17 +171,13 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
if (max_buffer_size &&
max_buffer_size < (MM_COMMUNICATE_HEADER_SIZE +
MM_VARIABLE_COMMUNICATE_SIZE + payload_size)) {
*ret = EFI_INVALID_PARAMETER;
return NULL;
}
comm_buf = kzalloc(MM_COMMUNICATE_HEADER_SIZE +
MM_VARIABLE_COMMUNICATE_SIZE + payload_size,
GFP_KERNEL);
if (!comm_buf) {
*ret = EFI_OUT_OF_RESOURCES;
comm_buf = alloc_pages_exact(COMM_BUF_SIZE(payload_size),
GFP_KERNEL | __GFP_ZERO);
if (!comm_buf)
return NULL;
}
mm_hdr = (struct efi_mm_communicate_header *)comm_buf;
memcpy(&mm_hdr->header_guid, &mm_var_guid, sizeof(mm_hdr->header_guid));
@@ -187,9 +185,7 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
var_hdr = (struct smm_variable_communicate_header *)mm_hdr->data;
var_hdr->function = func;
if (dptr)
*dptr = comm_buf;
*ret = EFI_SUCCESS;
*dptr = comm_buf;
return var_hdr->data;
}
@@ -212,10 +208,9 @@ static efi_status_t get_max_payload(size_t *size)
payload_size = sizeof(*var_payload);
var_payload = setup_mm_hdr(&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE,
&ret);
SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE);
if (!var_payload)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;
ret = mm_communicate(comm_buf, payload_size);
if (ret != EFI_SUCCESS)
@@ -239,7 +234,7 @@ static efi_status_t get_max_payload(size_t *size)
*/
*size -= 2;
out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -259,9 +254,9 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,
smm_property = setup_mm_hdr(
&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET, &ret);
SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET);
if (!smm_property)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;
memcpy(&smm_property->guid, vendor, sizeof(smm_property->guid));
smm_property->name_size = name_size;
@@ -282,7 +277,7 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,
memcpy(var_property, &smm_property->property, sizeof(*var_property));
out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -315,9 +310,9 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
payload_size = MM_VARIABLE_ACCESS_HEADER_SIZE + name_size + tmp_dsize;
var_acc = setup_mm_hdr(&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_GET_VARIABLE, &ret);
SMM_VARIABLE_FUNCTION_GET_VARIABLE);
if (!var_acc)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;
/* Fill in contents */
memcpy(&var_acc->guid, vendor, sizeof(var_acc->guid));
@@ -347,7 +342,7 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
memcpy(data, (u8 *)var_acc->name + var_acc->name_size,
var_acc->data_size);
out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -380,10 +375,9 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,
payload_size = MM_VARIABLE_GET_NEXT_HEADER_SIZE + out_name_size;
var_getnext = setup_mm_hdr(&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME,
&ret);
SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME);
if (!var_getnext)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;
/* Fill in contents */
memcpy(&var_getnext->guid, guid, sizeof(var_getnext->guid));
@@ -404,7 +398,7 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,
memcpy(name, var_getnext->name, var_getnext->name_size);
out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -437,9 +431,9 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
* the properties, if the allocation fails
*/
var_acc = setup_mm_hdr(&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_SET_VARIABLE, &ret);
SMM_VARIABLE_FUNCTION_SET_VARIABLE);
if (!var_acc)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;
/*
* The API has the ability to override RO flags. If no RO check was
@@ -467,7 +461,7 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
ret = mm_communicate(comm_buf, payload_size);
dev_dbg(pvt_data.dev, "Set Variable %s %d %lx\n", __FILE__, __LINE__, ret);
out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -492,10 +486,9 @@ static efi_status_t tee_query_variable_info(u32 attributes,
payload_size = sizeof(*mm_query_info);
mm_query_info = setup_mm_hdr(&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO,
&ret);
SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO);
if (!mm_query_info)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;
mm_query_info->attr = attributes;
ret = mm_communicate(comm_buf, payload_size);
@@ -507,7 +500,7 @@ static efi_status_t tee_query_variable_info(u32 attributes,
*max_variable_size = mm_query_info->max_variable_size;
out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}

View File

@@ -137,7 +137,7 @@ static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
u32 ver;
int ret = 0;
if (offset < 0 || offset > tgpio->gpio.ngpio)
if (offset < 0 || offset >= tgpio->gpio.ngpio)
return -EINVAL;
ver = ioread32(tgpio->membase + TGPIO_VER);
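
The changed comparison is a classic off-by-one fix: valid line offsets run from 0 to ngpio - 1, so ngpio itself has to be rejected. In isolation:

#include <stdio.h>

#define NGPIO 32

static int valid_offset(int offset)
{
	/* offset == NGPIO is one past the last line and must be rejected */
	return offset >= 0 && offset < NGPIO;
}

int main(void)
{
	printf("offset 31: %s\n", valid_offset(31) ? "ok" : "rejected");
	printf("offset 32: %s\n", valid_offset(32) ? "ok" : "rejected");
	return 0;
}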

View File

@@ -344,6 +344,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_interrupt = "AMDI0030:00@8",
},
},
{
/*
* Spurious wakeups from TP_ATTN# pin
* Found in BIOS 5.35
* https://gitlab.freedesktop.org/drm/amd/-/issues/4482
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt PX13"),
},
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
.ignore_wake = "ASCP1A00:00@8",
},
},
{} /* Terminating entry */
};

View File

@@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
AMDGPU_VM_PAGE_EXECUTABLE);
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
AMDGPU_PTE_EXECUTABLE);
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);

View File

@@ -285,6 +285,36 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int ret;
/*
* Pin to keep the buffer in place while it's vmap'ed. The actual
* domain is not that important as long as it's mappable. Using
* GTT and VRAM should be compatible with most use cases.
*/
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM);
if (ret)
return ret;
ret = drm_gem_dmabuf_vmap(dma_buf, map);
if (ret)
amdgpu_bo_unpin(bo);
return ret;
}
static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
drm_gem_dmabuf_vunmap(dma_buf, map);
amdgpu_bo_unpin(bo);
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
.pin = amdgpu_dma_buf_pin,
@@ -294,8 +324,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
.vmap = amdgpu_dma_buf_vmap,
.vunmap = amdgpu_dma_buf_vunmap,
};
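
The new hooks pin the buffer before mapping it, drop the pin again if the mapping fails, and release in reverse order on vunmap. A generic sketch of that paired acquire/undo pattern with stub functions (not the amdgpu or dma-buf API):

#include <stdio.h>

struct buf { int pinned; int mapped; };

static int  pin(struct buf *b)      { b->pinned = 1; return 0; }
static void unpin(struct buf *b)    { b->pinned = 0; }
static int  do_map(struct buf *b)   { b->mapped = 1; return 0; /* could fail */ }
static void do_unmap(struct buf *b) { b->mapped = 0; }

static int buf_vmap(struct buf *b)
{
	int ret = pin(b);    /* keep the buffer resident while mapped */

	if (ret)
		return ret;

	ret = do_map(b);
	if (ret)
		unpin(b);    /* undo the pin on failure */
	return ret;
}

static void buf_vunmap(struct buf *b)
{
	do_unmap(b);         /* release in reverse order of acquisition */
	unpin(b);
}

int main(void)
{
	struct buf b = { 0 };

	if (!buf_vmap(&b))
		buf_vunmap(&b);
	printf("pinned=%d mapped=%d\n", b.pinned, b.mapped);
	return 0;
}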
/**

View File

@@ -471,6 +471,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
kfree(queue);
r = -EINVAL;
goto unlock;
}

View File

@@ -1612,9 +1612,9 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if (!adev->gfx.disable_uq &&
adev->gfx.me_fw_version >= 2390 &&
adev->gfx.pfp_fw_version >= 2530 &&
adev->gfx.mec_fw_version >= 2600 &&
adev->gfx.me_fw_version >= 2420 &&
adev->gfx.pfp_fw_version >= 2580 &&
adev->gfx.mec_fw_version >= 2650 &&
adev->mes.fw_version[0] >= 120) {
adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
@@ -4129,6 +4129,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
if (!prop->kernel_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -4281,8 +4283,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
if (prop->kernel_queue) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
}
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;

View File

@@ -3026,6 +3026,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
if (!prop->kernel_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -3175,8 +3177,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
if (prop->kernel_queue) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
}
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;

View File

@@ -3458,14 +3458,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
(gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
return 0;
if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
if (adev->family == AMDGPU_FAMILY_SI ||
((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
(gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
(amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
return 0;
}
/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||

View File

@@ -40,7 +40,7 @@
* mapping's backing &drm_gem_object buffers.
*
* &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
* all existent GPU VA mappings using this &drm_gem_object as backing buffer.
* all existing GPU VA mappings using this &drm_gem_object as backing buffer.
*
* GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
* keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
@@ -72,7 +72,7 @@
* but it can also be a 'dummy' object, which can be allocated with
* drm_gpuvm_resv_object_alloc().
*
* In order to connect a struct drm_gpuva its backing &drm_gem_object each
* In order to connect a struct drm_gpuva to its backing &drm_gem_object each
* &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
* &drm_gpuvm_bo contains a list of &drm_gpuva structures.
*
@@ -81,7 +81,7 @@
* This is ensured by the API through drm_gpuvm_bo_obtain() and
* drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
* &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
* particular combination. If not existent a new instance is created and linked
* particular combination. If not present, a new instance is created and linked
* to the &drm_gem_object.
*
* &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
@@ -108,7 +108,7 @@
* sequence of operations to satisfy a given map or unmap request.
*
* Therefore the DRM GPU VA manager provides an algorithm implementing splitting
* and merging of existent GPU VA mappings with the ones that are requested to
* and merging of existing GPU VA mappings with the ones that are requested to
* be mapped or unmapped. This feature is required by the Vulkan API to
* implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this
* as VM BIND.
@@ -119,7 +119,7 @@
* execute in order to integrate the new mapping cleanly into the current state
* of the GPU VA space.
*
* Depending on how the new GPU VA mapping intersects with the existent mappings
* Depending on how the new GPU VA mapping intersects with the existing mappings
* of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
* of unmap operations, a maximum of two remap operations and a single map
* operation. The caller might receive no callback at all if no operation is
@@ -139,16 +139,16 @@
* one unmap operation and one or two map operations, such that drivers can
* derive the page table update delta accordingly.
*
* Note that there can't be more than two existent mappings to split up, one at
* Note that there can't be more than two existing mappings to split up, one at
* the beginning and one at the end of the new mapping, hence there is a
* maximum of two remap operations.
*
* Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
* call back into the driver in order to unmap a range of GPU VA space. The
* logic behind this function is way simpler though: For all existent mappings
* logic behind this function is way simpler though: For all existing mappings
* enclosed by the given range unmap operations are created. For mappings which
* are only partically located within the given range, remap operations are
* created such that those mappings are split up and re-mapped partically.
* are only partially located within the given range, remap operations are
* created such that those mappings are split up and re-mapped partially.
*
* As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
* drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
@@ -168,7 +168,7 @@
* provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
* drm_gpuva_unmap() instead.
*
* The following diagram depicts the basic relationships of existent GPU VA
* The following diagram depicts the basic relationships of existing GPU VA
* mappings, a newly requested mapping and the resulting mappings as implemented
* by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
*
@@ -218,7 +218,7 @@
*
*
* 4) Existent mapping is a left aligned subset of the requested one, hence
* replace the existent one.
* replace the existing one.
*
* ::
*
@@ -236,9 +236,9 @@
* and/or non-contiguous BO offset.
*
*
* 5) Requested mapping's range is a left aligned subset of the existent one,
* 5) Requested mapping's range is a left aligned subset of the existing one,
* but backed by a different BO. Hence, map the requested mapping and split
* the existent one adjusting its BO offset.
* the existing one adjusting its BO offset.
*
* ::
*
@@ -271,9 +271,9 @@
* new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
*
*
* 7) Requested mapping's range is a right aligned subset of the existent one,
* 7) Requested mapping's range is a right aligned subset of the existing one,
* but backed by a different BO. Hence, map the requested mapping and split
* the existent one, without adjusting the BO offset.
* the existing one, without adjusting the BO offset.
*
* ::
*
@@ -304,7 +304,7 @@
*
* 9) Existent mapping is overlapped at the end by the requested mapping backed
* by a different BO. Hence, map the requested mapping and split up the
* existent one, without adjusting the BO offset.
* existing one, without adjusting the BO offset.
*
* ::
*
@@ -334,9 +334,9 @@
* new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
*
*
* 11) Requested mapping's range is a centered subset of the existent one
* 11) Requested mapping's range is a centered subset of the existing one
* having a different backing BO. Hence, map the requested mapping and split
* up the existent one in two mappings, adjusting the BO offset of the right
* up the existing one in two mappings, adjusting the BO offset of the right
* one accordingly.
*
* ::
@@ -351,7 +351,7 @@
* new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
*
*
* 12) Requested mapping is a contiguous subset of the existent one. Split it
* 12) Requested mapping is a contiguous subset of the existing one. Split it
* up, but indicate that the backing PTEs could be kept.
*
* ::
@@ -367,7 +367,7 @@
*
*
* 13) Existent mapping is a right aligned subset of the requested one, hence
* replace the existent one.
* replace the existing one.
*
* ::
*
@@ -386,7 +386,7 @@
*
*
* 14) Existent mapping is a centered subset of the requested one, hence
* replace the existent one.
* replace the existing one.
*
* ::
*
@@ -406,7 +406,7 @@
*
* 15) Existent mappings is overlapped at the beginning by the requested mapping
* backed by a different BO. Hence, map the requested mapping and split up
* the existent one, adjusting its BO offset accordingly.
* the existing one, adjusting its BO offset accordingly.
*
* ::
*
@@ -469,8 +469,8 @@
* make use of them.
*
* The below code is strictly limited to illustrate the generic usage pattern.
* To maintain simplicitly, it doesn't make use of any abstractions for common
* code, different (asyncronous) stages with fence signalling critical paths,
* To maintain simplicity, it doesn't make use of any abstractions for common
* code, different (asynchronous) stages with fence signalling critical paths,
* any other helpers or error handling in terms of freeing memory and dropping
* previously taken locks.
*
@@ -479,7 +479,7 @@
* // Allocates a new &drm_gpuva.
* struct drm_gpuva * driver_gpuva_alloc(void);
*
* // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
* // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuvm *gpuvm,
@@ -582,7 +582,7 @@
* .sm_step_unmap = driver_gpuva_unmap,
* };
*
* // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
* // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuvm *gpuvm,
@@ -680,7 +680,7 @@
*
* This helper is here to provide lockless list iteration. Lockless as in, the
* iterator releases the lock immediately after picking the first element from
* the list, so list insertion deletion can happen concurrently.
* the list, so list insertion and deletion can happen concurrently.
*
* Elements popped from the original list are kept in a local list, so removal
* and is_empty checks can still happen while we're iterating the list.
@@ -1160,7 +1160,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
}
/**
* drm_gpuvm_prepare_objects() - prepare all assoiciated BOs
* drm_gpuvm_prepare_objects() - prepare all associated BOs
* @gpuvm: the &drm_gpuvm
* @exec: the &drm_exec locking context
* @num_fences: the amount of &dma_fences to reserve
@@ -1230,13 +1230,13 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
/**
* drm_gpuvm_exec_lock() - lock all dma-resv of all assoiciated BOs
* drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
* @vm_exec: the &drm_gpuvm_exec wrapper
*
* Acquires all dma-resv locks of all &drm_gem_objects the given
* &drm_gpuvm contains mappings of.
*
* Addionally, when calling this function with struct drm_gpuvm_exec::extra
* Additionally, when calling this function with struct drm_gpuvm_exec::extra
* being set the driver receives the given @fn callback to lock additional
* dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
* would call drm_exec_prepare_obj() from within this callback.
@@ -1293,7 +1293,7 @@ fn_lock_array(struct drm_gpuvm_exec *vm_exec)
}
/**
* drm_gpuvm_exec_lock_array() - lock all dma-resv of all assoiciated BOs
* drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
* @vm_exec: the &drm_gpuvm_exec wrapper
* @objs: additional &drm_gem_objects to lock
* @num_objs: the number of additional &drm_gem_objects to lock
@@ -1588,7 +1588,7 @@ drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
/**
* drm_gpuvm_bo_obtain() - obtains and instance of the &drm_gpuvm_bo for the
* drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
* given &drm_gpuvm and &drm_gem_object
* @gpuvm: The &drm_gpuvm the @obj is mapped in.
* @obj: The &drm_gem_object being mapped in the @gpuvm.
@@ -1624,7 +1624,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
/**
* drm_gpuvm_bo_obtain_prealloc() - obtains and instance of the &drm_gpuvm_bo
* drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
* for the given &drm_gpuvm and &drm_gem_object
* @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
*
@@ -1688,7 +1688,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
* @vm_bo: the &drm_gpuvm_bo to add or remove
* @evict: indicates whether the object is evicted
*
* Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvms evicted list.
* Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
*/
void
drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
@@ -1790,7 +1790,7 @@ __drm_gpuva_remove(struct drm_gpuva *va)
* drm_gpuva_remove() - remove a &drm_gpuva
* @va: the &drm_gpuva to remove
*
* This removes the given &va from the underlaying tree.
* This removes the given &va from the underlying tree.
*
* It is safe to use this function using the safe versions of iterating the GPU
* VA space, such as drm_gpuvm_for_each_va_safe() and
@@ -2358,7 +2358,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the operations to
* unmap and, if required, split existent mappings.
* unmap and, if required, split existing mappings.
*
* Drivers may use these callbacks to update the GPU VA space right away within
* the callback. In case the driver decides to copy and store the operations for
@@ -2430,7 +2430,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
* will be newly mapped.
*
* The expected usage is:
* The expected usage is::
*
* .. code-block:: c
*
@@ -2475,7 +2475,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* required without the earlier DRIVER_OP_MAP. This is safe because we've
* already locked the GEM object in the earlier DRIVER_OP_MAP step.
*
* Returns: 0 on success or a negative error codec
* Returns: 0 on success or a negative error code
*/
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
@@ -2619,12 +2619,12 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
* @req_offset: the offset within the &drm_gem_object
*
* This function creates a list of operations to perform splitting and merging
* of existent mapping(s) with the newly requested one.
* of existing mapping(s) with the newly requested one.
*
* The list can be iterated with &drm_gpuva_for_each_op and must be processed
* in the given order. It can contain map, unmap and remap operations, but it
* also can be empty if no operation is required, e.g. if the requested mapping
* already exists is the exact same way.
* already exists in the exact same way.
*
* There can be an arbitrary amount of unmap operations, a maximum of two remap
* operations and a single map operation. The latter one represents the original

View File

@@ -387,19 +387,19 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
of_id = of_match_node(mtk_drm_of_ids, node);
if (!of_id)
continue;
goto next_put_node;
pdev = of_find_device_by_node(node);
if (!pdev)
continue;
goto next_put_node;
drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
if (!drm_dev)
continue;
goto next_put_device_pdev_dev;
temp_drm_priv = dev_get_drvdata(drm_dev);
if (!temp_drm_priv)
continue;
goto next_put_device_drm_dev;
if (temp_drm_priv->data->main_len)
all_drm_priv[CRTC_MAIN] = temp_drm_priv;
@@ -411,10 +411,17 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
if (temp_drm_priv->mtk_drm_bound)
cnt++;
if (cnt == MAX_CRTC) {
of_node_put(node);
next_put_device_drm_dev:
put_device(drm_dev);
next_put_device_pdev_dev:
put_device(&pdev->dev);
next_put_node:
of_node_put(node);
if (cnt == MAX_CRTC)
break;
}
}
if (drm_priv->data->mmsys_dev_num == cnt) {
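
Each iteration of the rewritten loop now drops whatever references it already took before moving on, by falling through a chain of labels in reverse acquisition order instead of bare continue statements. A stripped-down sketch of that goto-unwind shape with dummy get/put helpers:

#include <stdio.h>

struct res { const char *name; int refs; };

static struct res *res_get(struct res *r, int ok)
{
	if (!ok)
		return NULL;
	r->refs++;
	return r;
}

static void res_put(struct res *r) { r->refs--; }

int main(void)
{
	struct res node = { "node", 0 }, dev = { "dev", 0 };

	for (int i = 0; i < 3; i++) {
		struct res *n, *d;

		n = res_get(&node, 1);
		if (!n)
			continue;

		d = res_get(&dev, i != 1);   /* fails on the second pass */
		if (!d)
			goto put_node;

		printf("iteration %d: using both resources\n", i);

		res_put(d);                  /* success path also drops refs */
put_node:
		res_put(n);
	}

	printf("leaked refs: node=%d dev=%d\n", node.refs, dev.refs);
	return 0;
}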

View File

@@ -1002,6 +1002,12 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
return PTR_ERR(dsi->next_bridge);
}
/*
* Set the flag requesting that the DSI host bridge be pre-enabled before
* the device bridge in the chain, so the DSI host is ready when the device
* bridge is pre-enabled.
*/
dsi->next_bridge->pre_enable_prev_first = true;
drm_bridge_add(&dsi->bridge);
ret = component_add(host->dev, &mtk_dsi_component_ops);

View File

@@ -182,8 +182,8 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
{
regmap_update_bits(hdmi->regs, VIDEO_SOURCE_SEL,
VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH);
regmap_update_bits(hdmi->regs, VIDEO_CFG_4,
VIDEO_SOURCE_SEL, black ? GEN_RGB : NORMAL_PATH);
}
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
@@ -310,8 +310,8 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
{
regmap_update_bits(hdmi->regs, AUDIO_PACKET_OFF,
GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF);
regmap_update_bits(hdmi->regs, GRL_SHIFT_R2,
AUDIO_PACKET_OFF, enable ? 0 : AUDIO_PACKET_OFF);
}
static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
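
Both call sites had the register offset and the bit mask swapped: regmap_update_bits() takes (map, reg, mask, val), so the third argument decides which bits of the register get rewritten. Its read-modify-write core boils down to the helper below, shown stand-alone:

#include <stdint.h>
#include <stdio.h>

/* The core of an update_bits(): only the bits in `mask` are replaced. */
static uint32_t update_bits(uint32_t current, uint32_t mask, uint32_t val)
{
	return (current & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	/* Rewrite only the 0x0000ff00 field, leaving the other bits intact. */
	printf("%#x\n", update_bits(reg, 0x0000ff00, 0x00004200));
	return 0;
}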

View File

@@ -292,7 +292,8 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
wmb(); /* Make sure the above parameter is set before update */
mtk_plane_state->pending.dirty = true;
mtk_crtc_plane_disable(old_state->crtc, plane);
if (old_state && old_state->crtc)
mtk_crtc_plane_disable(old_state->crtc, plane);
}
static void mtk_plane_atomic_update(struct drm_plane *plane,

View File

@@ -11,7 +11,7 @@
static const unsigned int *gen7_0_0_external_core_regs[] __always_unused;
static const unsigned int *gen7_2_0_external_core_regs[] __always_unused;
static const unsigned int *gen7_9_0_external_core_regs[] __always_unused;
static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
static const u32 gen7_9_0_cx_debugbus_blocks[] __always_unused;
#include "adreno_gen7_0_0_snapshot.h"
@@ -174,8 +174,15 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
u32 *data)
{
u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
u32 reg;
if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
reg = A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
} else {
reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
}
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
@@ -198,11 +205,18 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
readl((ptr) + ((offset) << 2))
/* read a value from the CX debug bus */
static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
static int cx_debugbus_read(struct msm_gpu *gpu, void __iomem *cxdbg, u32 block, u32 offset,
u32 *data)
{
u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
u32 reg;
if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
reg = A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
} else {
reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
}
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
@@ -315,7 +329,8 @@ static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
ptr += debugbus_read(gpu, block->id, i, ptr);
}
static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
static void a6xx_get_cx_debugbus_block(struct msm_gpu *gpu,
void __iomem *cxdbg,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_debugbus_block *block,
struct a6xx_gpu_state_obj *obj)
@@ -330,7 +345,7 @@ static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
obj->handle = block;
for (ptr = obj->data, i = 0; i < block->count; i++)
ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
ptr += cx_debugbus_read(gpu, cxdbg, block->id, i, ptr);
}
static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu,
@@ -423,8 +438,9 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
a6xx_state, &a7xx_debugbus_blocks[gbif_debugbus_blocks[i]],
&a6xx_state->debugbus[i + debugbus_blocks_count]);
}
}
a6xx_state->nr_debugbus = total_debugbus_blocks;
}
}
static void a6xx_get_debugbus(struct msm_gpu *gpu,
@@ -526,7 +542,8 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
int i;
for (i = 0; i < nr_cx_debugbus_blocks; i++)
a6xx_get_cx_debugbus_block(cxdbg,
a6xx_get_cx_debugbus_block(gpu,
cxdbg,
a6xx_state,
&cx_debugbus_blocks[i],
&a6xx_state->cx_debugbus[i]);
@@ -759,15 +776,15 @@ static void a7xx_get_cluster(struct msm_gpu *gpu,
size_t datasize;
int i, regcount = 0;
/* Some clusters need a selector register to be programmed too */
if (cluster->sel)
in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
in += CRASHDUMP_WRITE(in, REG_A7XX_CP_APERTURE_CNTL_CD,
A7XX_CP_APERTURE_CNTL_CD_PIPE(cluster->pipe_id) |
A7XX_CP_APERTURE_CNTL_CD_CLUSTER(cluster->cluster_id) |
A7XX_CP_APERTURE_CNTL_CD_CONTEXT(cluster->context_id));
/* Some clusters need a selector register to be programmed too */
if (cluster->sel)
in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
for (i = 0; cluster->regs[i] != UINT_MAX; i += 2) {
int count = RANGE(cluster->regs, i);
@@ -1796,6 +1813,7 @@ static void a7xx_show_shader(struct a6xx_gpu_state_obj *obj,
print_name(p, " - type: ", a7xx_statetype_names[block->statetype]);
print_name(p, " - pipe: ", a7xx_pipe_names[block->pipeid]);
drm_printf(p, " - location: %d\n", block->location);
for (i = 0; i < block->num_sps; i++) {
drm_printf(p, " - sp: %d\n", i);
@@ -1873,6 +1891,7 @@ static void a7xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
print_name(p, " - pipe: ", a7xx_pipe_names[dbgahb->pipe_id]);
print_name(p, " - cluster-name: ", a7xx_cluster_names[dbgahb->cluster_id]);
drm_printf(p, " - context: %d\n", dbgahb->context_id);
drm_printf(p, " - location: %d\n", dbgahb->location_id);
a7xx_show_registers_indented(dbgahb->regs, obj->data, p, 4);
}
}

View File

@@ -419,47 +419,47 @@ static const struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
{ "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
{ "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
{ "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
};
static const struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
REG_A6XX_CP_SQE_STAT_DATA, 0x40, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
{ "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
{ "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
REG_A7XX_CP_BV_SQE_STAT_DATA, 0x33, NULL },
{ "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
{ "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
REG_A7XX_CP_BV_SQE_STAT_DATA, 0x40, NULL },
{ "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x100, NULL },
{ "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
{ "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x8000, NULL },
{ "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
REG_A7XX_CP_SQE_AC_STAT_DATA, 0x33, NULL },
{ "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
{ "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
REG_A7XX_CP_SQE_AC_STAT_DATA, 0x40, NULL },
{ "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x100, NULL },
{ "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
{ "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x8000, NULL },
{ "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
{ "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x40, NULL },
{ "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
{ "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size },
};
static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
"CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
};
static const struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
{ "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL },
{ "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2100, NULL },
{ "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2200, NULL },
{ "CP_BV_MEM_POOL_DBG", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2200, NULL },
};
#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }

View File

@@ -81,7 +81,7 @@ static const u32 gen7_0_0_debugbus_blocks[] = {
A7XX_DBGBUS_USPTP_7,
};
static struct gen7_shader_block gen7_0_0_shader_blocks[] = {
static const struct gen7_shader_block gen7_0_0_shader_blocks[] = {
{A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
@@ -668,12 +668,19 @@ static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8));
/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_NONE */
static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = {
0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c,
0x0b60f, 0x0b621, 0x0b630, 0x0b633,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_none_registers), 8));
/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
0x0b600, 0x0b600,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8));
/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */
@@ -695,7 +702,7 @@ static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = {
.val = 0x9,
};
static struct gen7_cluster_registers gen7_0_0_clusters[] = {
static const struct gen7_cluster_registers gen7_0_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -764,7 +771,7 @@ static struct gen7_cluster_registers gen7_0_0_clusters[] = {
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
static struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
static const struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -914,7 +921,7 @@ static const u32 gen7_0_0_dpm_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_dpm_registers), 8));
static struct gen7_reg_list gen7_0_0_reg_list[] = {
static const struct gen7_reg_list gen7_0_0_reg_list[] = {
{ gen7_0_0_gpu_registers, NULL },
{ gen7_0_0_cx_misc_registers, NULL },
{ gen7_0_0_dpm_registers, NULL },

View File

@@ -95,7 +95,7 @@ static const u32 gen7_2_0_debugbus_blocks[] = {
A7XX_DBGBUS_CCHE_2,
};
static struct gen7_shader_block gen7_2_0_shader_blocks[] = {
static const struct gen7_shader_block gen7_2_0_shader_blocks[] = {
{A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
@@ -489,7 +489,7 @@ static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = {
.val = 0x9,
};
static struct gen7_cluster_registers gen7_2_0_clusters[] = {
static const struct gen7_cluster_registers gen7_2_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_2_0_noncontext_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -558,7 +558,7 @@ static struct gen7_cluster_registers gen7_2_0_clusters[] = {
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
static const struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -573,6 +573,8 @@ static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_none_registers, 0xb600 },
{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
{ A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
@@ -737,7 +739,7 @@ static const u32 gen7_2_0_dpm_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_2_0_dpm_registers), 8));
static struct gen7_reg_list gen7_2_0_reg_list[] = {
static const struct gen7_reg_list gen7_2_0_reg_list[] = {
{ gen7_2_0_gpu_registers, NULL },
{ gen7_2_0_cx_misc_registers, NULL },
{ gen7_2_0_dpm_registers, NULL },

View File

@@ -117,7 +117,7 @@ static const u32 gen7_9_0_cx_debugbus_blocks[] = {
A7XX_DBGBUS_GBIF_CX,
};
static struct gen7_shader_block gen7_9_0_shader_blocks[] = {
static const struct gen7_shader_block gen7_9_0_shader_blocks[] = {
{ A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
{ A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
{ A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
@@ -1116,7 +1116,7 @@ static const struct gen7_sel_reg gen7_9_0_rb_rbp_sel = {
.val = 0x9,
};
static struct gen7_cluster_registers gen7_9_0_clusters[] = {
static const struct gen7_cluster_registers gen7_9_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_9_0_non_context_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -1185,7 +1185,7 @@ static struct gen7_cluster_registers gen7_9_0_clusters[] = {
gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
};
static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00},
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -1294,34 +1294,34 @@ static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
};
static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
static const struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x00040},
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x00200},
{ "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
{ "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0x00800},
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
{ "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000},
{ "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
{ "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200},
{ "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
{ "CP_BV_ROQ_DBG", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800},
{ "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
{ "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000},
{ "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
{ "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040},
{ "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
{ "CP_RESOURCE_TABLE_DBG", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA, 0x04100},
{ "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
{ "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200},
{ "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
{ "CP_LPAC_ROQ_DBG", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
REG_A7XX_CP_LPAC_ROQ_DBG_DATA, 0x00200},
{ "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
{ "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x08000},
{ "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
{ "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
REG_A7XX_CP_SQE_AC_STAT_DATA, 0x00040},
{ "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
{ "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x00040},
{ "CP_AQE_ROQ_0", REG_A7XX_CP_AQE_ROQ_DBG_ADDR_0,
REG_A7XX_CP_AQE_ROQ_DBG_DATA_0, 0x00100},
@@ -1337,7 +1337,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A7XX_CP_AQE_STAT_DATA_1, 0x00040},
};
static struct gen7_reg_list gen7_9_0_reg_list[] = {
static const struct gen7_reg_list gen7_9_0_reg_list[] = {
{ gen7_9_0_gpu_registers, NULL},
{ gen7_9_0_cx_misc_registers, NULL},
{ gen7_9_0_cx_dbgc_registers, NULL},
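The gen7_0_0 / gen7_2_0 / gen7_9_0 hunks above all apply the same change: the snapshot description tables are only read while building a crash dump, so both the element arrays and the lists pointing at them can be const. A minimal sketch of that pattern, using hypothetical demo_* names rather than the real a6xx structures:

/*
 * Sketch only: with everything static const, the whole table set lives
 * in .rodata, so a stray write faults instead of silently corrupting it.
 */
struct demo_reg_list {
	const unsigned int *regs;	/* start/end pairs, 0-terminated */
	const void *sel;
};

static const unsigned int demo_gpu_registers[] = {
	0x0800, 0x0803, 0x0840, 0x0843,
	0 /* sentinel */
};

static const struct demo_reg_list demo_reg_list[] = {
	{ demo_gpu_registers, NULL },
	{ NULL, NULL },
};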

View File

@@ -596,7 +596,7 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
spin_lock_irqsave(&dev->event_lock, flags);
if (dpu_crtc->event) {
DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
DRM_DEBUG_VBL("%s: send event: %p\n", dpu_crtc->name,
dpu_crtc->event);
trace_dpu_crtc_complete_flip(DRMID(crtc));
drm_crtc_send_vblank_event(crtc, dpu_crtc->event);

View File

@@ -730,6 +730,8 @@ bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_st
return false;
conn_state = drm_atomic_get_new_connector_state(state, connector);
if (!conn_state)
return false;
/**
* These checks are duplicated from dpu_encoder_update_topology() since

View File

@@ -31,14 +31,14 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
u32 base;
if (!ctx) {
DRM_ERROR("invalid ctx %pK\n", ctx);
DRM_ERROR("invalid ctx %p\n", ctx);
return;
}
base = ctx->cap->sblk->pcc.base;
if (!base) {
DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
DRM_ERROR("invalid ctx %p pcc base 0x%x\n", ctx, base);
return;
}

View File

@@ -1345,7 +1345,7 @@ static int dpu_kms_mmap_mdp5(struct dpu_kms *dpu_kms)
dpu_kms->mmio = NULL;
return ret;
}
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap_mdss(mdss_dev,
dpu_kms->pdev,
@@ -1380,7 +1380,7 @@ static int dpu_kms_mmap_dpu(struct dpu_kms *dpu_kms)
dpu_kms->mmio = NULL;
return ret;
}
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {

View File

@@ -1129,7 +1129,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(state, plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state);
struct drm_crtc_state *crtc_state;
struct drm_crtc_state *crtc_state = NULL;
int ret;
if (IS_ERR(plane_state))
@@ -1162,7 +1162,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
if (!old_plane_state || !old_plane_state->fb ||
old_plane_state->src_w != plane_state->src_w ||
old_plane_state->src_h != plane_state->src_h ||
old_plane_state->src_w != plane_state->src_w ||
old_plane_state->crtc_w != plane_state->crtc_w ||
old_plane_state->crtc_h != plane_state->crtc_h ||
msm_framebuffer_format(old_plane_state->fb) !=
msm_framebuffer_format(plane_state->fb))

View File

@@ -5,6 +5,8 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
#include <dt-bindings/phy/phy.h>
#include "dsi_phy.h"
@@ -511,30 +513,6 @@ int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
struct device *dev = &phy->pdev->dev;
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
pm_runtime_put_sync(dev);
}
return ret;
}
static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
clk_disable_unprepare(phy->ahb_clk);
pm_runtime_put(&phy->pdev->dev);
}
static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
{ .compatible = "qcom,dsi-phy-28nm-hpm",
@@ -698,22 +676,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
if (ret)
return ret;
phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk))
return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
"Unable to get ahb clk\n");
platform_set_drvdata(pdev, phy);
ret = devm_pm_runtime_enable(&pdev->dev);
ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
ret = dsi_phy_enable_resource(phy);
ret = devm_pm_clk_create(dev);
if (ret)
return ret;
ret = pm_clk_add(dev, "iface");
if (ret < 0)
return dev_err_probe(dev, ret, "Unable to get iface clk\n");
if (phy->cfg->ops.pll_init) {
ret = phy->cfg->ops.pll_init(phy);
if (ret)
@@ -727,18 +703,19 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"Failed to register clk provider\n");
dsi_phy_disable_resource(phy);
platform_set_drvdata(pdev, phy);
return 0;
}
static const struct dev_pm_ops dsi_phy_pm_ops = {
SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
};
static struct platform_driver dsi_phy_platform_driver = {
.probe = dsi_phy_driver_probe,
.driver = {
.name = "msm_dsi_phy",
.of_match_table = dsi_phy_dt_match,
.pm = &dsi_phy_pm_ops,
},
};
@@ -764,9 +741,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
dev = &phy->pdev->dev;
ret = dsi_phy_enable_resource(phy);
ret = pm_runtime_resume_and_get(dev);
if (ret) {
DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
DRM_DEV_ERROR(dev, "%s: resume failed, %d\n",
__func__, ret);
goto res_en_fail;
}
@@ -810,7 +787,7 @@ pll_restor_fail:
phy_en_fail:
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
reg_en_fail:
dsi_phy_disable_resource(phy);
pm_runtime_put(dev);
res_en_fail:
return ret;
}
@@ -823,7 +800,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
phy->cfg->ops.disable(phy);
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
dsi_phy_disable_resource(phy);
pm_runtime_put(&phy->pdev->dev);
}
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
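For reference, a minimal sketch of the pm_clk-based runtime PM arrangement the dsi_phy probe above switches to, written as a hypothetical demo_phy platform driver (the driver name and the "iface" con_id are illustrative, not the msm code):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>

static int demo_phy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	ret = devm_pm_clk_create(dev);
	if (ret)
		return ret;

	/* The clock is looked up by con_id and gated by runtime PM. */
	ret = pm_clk_add(dev, "iface");
	if (ret < 0)
		return dev_err_probe(dev, ret, "Unable to get iface clk\n");

	/* Register access must sit inside a resume_and_get()/put() pair. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;
	/* ... program the hardware ... */
	pm_runtime_put(dev);

	return 0;
}

static const struct dev_pm_ops demo_phy_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
};

static struct platform_driver demo_phy_driver = {
	.probe = demo_phy_probe,
	.driver = {
		.name = "demo_phy",
		.pm = &demo_phy_pm_ops,
	},
};
module_platform_driver(demo_phy_driver);
MODULE_LICENSE("GPL");

With this arrangement the clock is enabled by pm_clk_resume() whenever the device is runtime-resumed, which is what allows the explicit ahb_clk prepare/enable handling to be dropped from the probe and enable/disable paths above.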

View File

@@ -104,7 +104,6 @@ struct msm_dsi_phy {
phys_addr_t lane_size;
int id;
struct clk *ahb_clk;
struct regulator_bulk_data *supplies;
struct msm_dsi_dphy_timing timing;

View File

@@ -325,25 +325,28 @@ static struct drm_info_list msm_debugfs_list[] = {
static int late_init_minor(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_device *dev;
struct msm_drm_private *priv;
int ret;
if (!minor)
return 0;
dev = minor->dev;
priv = dev->dev_private;
if (!priv->gpu_pdev)
return 0;
ret = msm_rd_debugfs_init(minor);
if (ret) {
DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
DRM_DEV_ERROR(dev->dev, "could not install rd debugfs\n");
return ret;
}
ret = msm_perf_debugfs_init(minor);
if (ret) {
DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
DRM_DEV_ERROR(dev->dev, "could not install perf debugfs\n");
return ret;
}

View File

@@ -95,7 +95,6 @@ void msm_gem_vma_get(struct drm_gem_object *obj)
void msm_gem_vma_put(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct drm_exec exec;
if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
return;
@@ -103,9 +102,13 @@ void msm_gem_vma_put(struct drm_gem_object *obj)
if (!priv->kms)
return;
#ifdef CONFIG_DRM_MSM_KMS
struct drm_exec exec;
msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
drm_exec_fini(&exec); /* drop locks */
#endif
}
/*
@@ -663,9 +666,13 @@ int msm_gem_set_iova(struct drm_gem_object *obj,
static bool is_kms_vm(struct drm_gpuvm *vm)
{
#ifdef CONFIG_DRM_MSM_KMS
struct msm_drm_private *priv = vm->drm->dev_private;
return priv->kms && (priv->kms->vm == vm);
#else
return false;
#endif
}
/*
@@ -1113,10 +1120,12 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
if (msm_obj->flags & MSM_BO_NO_SHARE) {
if (obj->resv != &obj->_resv) {
struct drm_gem_object *r_obj =
container_of(obj->resv, struct drm_gem_object, _resv);
WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
/* Drop reference we hold to shared resv obj: */
drm_gem_object_put(r_obj);
}
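The freeing path above now keys off obj->resv pointing away from the embedded _resv rather than off the MSM_BO_NO_SHARE flag. A sketch of the container_of() recovery it relies on, as a hypothetical helper:

#include <linux/container_of.h>
#include <drm/drm_gem.h>

/* Sketch only: recover the object that owns a borrowed reservation. */
static struct drm_gem_object *demo_resv_owner(struct drm_gem_object *obj)
{
	if (obj->resv == &obj->_resv)
		return NULL;	/* the object uses its own embedded resv */

	return container_of(obj->resv, struct drm_gem_object, _resv);
}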

View File

@@ -100,7 +100,7 @@ struct msm_gem_vm {
*
* Only used for kernel managed VMs, unused for user managed VMs.
*
* Protected by @mm_lock.
* Protected by vm lock. See msm_gem_lock_vm_and_obj(), for ex.
*/
struct drm_mm mm;

View File

@@ -271,32 +271,37 @@ out:
return ret;
}
static int submit_lock_objects_vmbind(struct msm_gem_submit *submit)
{
unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES;
struct drm_exec *exec = &submit->exec;
int ret = 0;
drm_exec_init(&submit->exec, flags, submit->nr_bos);
drm_exec_until_all_locked (&submit->exec) {
ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
drm_exec_retry_on_contention(exec);
if (ret)
break;
ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
drm_exec_retry_on_contention(exec);
if (ret)
break;
}
return ret;
}
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
struct drm_exec *exec = &submit->exec;
int ret;
int ret = 0;
if (msm_context_is_vmbind(submit->queue->ctx)) {
flags |= DRM_EXEC_IGNORE_DUPLICATES;
drm_exec_init(&submit->exec, flags, submit->nr_bos);
drm_exec_until_all_locked (&submit->exec) {
ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
drm_exec_retry_on_contention(exec);
if (ret)
return ret;
ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
drm_exec_retry_on_contention(exec);
if (ret)
return ret;
}
return 0;
}
if (msm_context_is_vmbind(submit->queue->ctx))
return submit_lock_objects_vmbind(submit);
drm_exec_init(&submit->exec, flags, submit->nr_bos);
@@ -305,17 +310,17 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
drm_gpuvm_resv_obj(submit->vm));
drm_exec_retry_on_contention(&submit->exec);
if (ret)
return ret;
break;
for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
drm_exec_retry_on_contention(&submit->exec);
if (ret)
return ret;
break;
}
}
return 0;
return ret;
}
static int submit_fence_sync(struct msm_gem_submit *submit)
@@ -514,14 +519,15 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
if (error)
submit_unpin_objects(submit);
if (submit->exec.objects)
drm_exec_fini(&submit->exec);
if (error) {
submit_unpin_objects(submit);
/* job wasn't enqueued to scheduler, so early retirement: */
/* if job wasn't enqueued to scheduler, early retirement: */
if (error)
msm_submit_retire(submit);
}
}
void msm_submit_retire(struct msm_gem_submit *submit)
@@ -769,12 +775,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
sync_file = sync_file_create(submit->user_fence);
if (!sync_file) {
if (!sync_file)
ret = -ENOMEM;
} else {
fd_install(out_fence_fd, sync_file->file);
args->fence_fd = out_fence_fd;
}
}
if (ret)
@@ -812,10 +814,14 @@ out:
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
if (ret && (out_fence_fd >= 0)) {
put_unused_fd(out_fence_fd);
if (ret) {
if (out_fence_fd >= 0)
put_unused_fd(out_fence_fd);
if (sync_file)
fput(sync_file->file);
} else if (sync_file) {
fd_install(out_fence_fd, sync_file->file);
args->fence_fd = out_fence_fd;
}
if (!IS_ERR_OR_NULL(submit)) {
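The refactored submit_lock_objects() / submit_lock_objects_vmbind() above both follow the standard drm_exec locking loop. A minimal sketch of that loop with a hypothetical demo_lock_objects() helper and object array (not the driver function):

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

static int demo_lock_objects(struct drm_gem_object **objs, unsigned int nr)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, nr);
	drm_exec_until_all_locked(&exec) {
		for (unsigned int i = 0; i < nr; i++) {
			ret = drm_exec_prepare_obj(&exec, objs[i], 1);
			/* On contention, drop all locks and restart the loop. */
			drm_exec_retry_on_contention(&exec);
			if (ret)
				break;
		}
	}

	/* ... use the locked objects ... */

	drm_exec_fini(&exec);	/* drop locks */
	return ret;
}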

View File

@@ -319,13 +319,10 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
mutex_lock(&vm->mmu_lock);
/*
* NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
* NOTE: if not using pgtable preallocation, we cannot hold
* a lock across map/unmap which is also used in the job_run()
* path, as this can cause deadlock in job_run() vs shrinker/
* reclaim.
*
* Revisit this if we can come up with a scheme to pre-alloc pages
* for the pgtable in map/unmap ops.
*/
ret = vm_map_op(vm, &(struct msm_vm_map_op){
.iova = vma->va.addr,
@@ -454,6 +451,8 @@ msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
struct op_arg {
unsigned flags;
struct msm_vm_bind_job *job;
const struct msm_vm_bind_op *op;
bool kept;
};
static void
@@ -475,14 +474,18 @@ vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
}
static int
msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
{
struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
struct op_arg *arg = _arg;
struct msm_vm_bind_job *job = arg->job;
struct drm_gem_object *obj = op->map.gem.obj;
struct drm_gpuva *vma;
struct sg_table *sgt;
unsigned prot;
if (arg->kept)
return 0;
vma = vma_from_op(arg, &op->map);
if (WARN_ON(IS_ERR(vma)))
return PTR_ERR(vma);
@@ -602,15 +605,41 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
}
static int
msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
{
struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
struct op_arg *arg = _arg;
struct msm_vm_bind_job *job = arg->job;
struct drm_gpuva *vma = op->unmap.va;
struct msm_gem_vma *msm_vma = to_msm_vma(vma);
vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
vma->va.addr, vma->va.range);
/*
* Detect in-place remap. Turnip does this to change the vma flags,
* in particular MSM_VMA_DUMP. In this case we want to avoid actually
* touching the page tables, as that would require synchronization
* against SUBMIT jobs running on the GPU.
*/
if (op->unmap.keep &&
(arg->op->op == MSM_VM_BIND_OP_MAP) &&
(vma->gem.obj == arg->op->obj) &&
(vma->gem.offset == arg->op->obj_offset) &&
(vma->va.addr == arg->op->iova) &&
(vma->va.range == arg->op->range)) {
/* We are only expecting a single in-place unmap+map cb pair: */
WARN_ON(arg->kept);
/* Leave the existing VMA in place, but signal that to the map cb: */
arg->kept = true;
/* Only flags are changing, so update that in-place: */
unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1);
vma->flags = orig_flags | arg->flags;
return 0;
}
if (!msm_vma->mapped)
goto out_close;
@@ -1271,6 +1300,7 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
const struct msm_vm_bind_op *op = &job->ops[i];
struct op_arg arg = {
.job = job,
.op = op,
};
switch (op->op) {
@@ -1460,12 +1490,8 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
sync_file = sync_file_create(job->fence);
if (!sync_file) {
if (!sync_file)
ret = -ENOMEM;
} else {
fd_install(out_fence_fd, sync_file->file);
args->fence_fd = out_fence_fd;
}
}
if (ret)
@@ -1494,10 +1520,14 @@ out:
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
if (ret && (out_fence_fd >= 0)) {
put_unused_fd(out_fence_fd);
if (ret) {
if (out_fence_fd >= 0)
put_unused_fd(out_fence_fd);
if (sync_file)
fput(sync_file->file);
} else if (sync_file) {
fd_install(out_fence_fd, sync_file->file);
args->fence_fd = out_fence_fd;
}
if (!IS_ERR_OR_NULL(job)) {
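Both this hunk and the GEM submit hunk earlier defer fd_install() until the whole ioctl has succeeded, so error paths can still back out with put_unused_fd()/fput() without a half-created fd becoming visible to userspace. A minimal sketch of that ordering as a hypothetical demo_publish_fence_fd() helper (fence plumbing trimmed down):

#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

static int demo_publish_fence_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		/* Nothing was installed yet, so the fd slot can be recycled. */
		put_unused_fd(fd);
		return -ENOMEM;
	}

	/* Publish only once every failure point has been passed. */
	fd_install(fd, sync_file->file);
	return fd;
}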

View File

@@ -465,6 +465,7 @@ static void recover_worker(struct kthread_work *work)
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
struct task_struct *task;
int i;
mutex_lock(&gpu->lock);
@@ -482,16 +483,20 @@ static void recover_worker(struct kthread_work *work)
/* Increment the fault counts */
submit->queue->faults++;
if (submit->vm) {
task = get_pid_task(submit->pid, PIDTYPE_PID);
if (!task)
gpu->global_faults++;
else {
struct msm_gem_vm *vm = to_msm_vm(submit->vm);
vm->faults++;
/*
* If userspace has opted-in to VM_BIND (and therefore userspace
* management of the VM), faults mark the VM as unusuable. This
* management of the VM), faults mark the VM as unusable. This
* matches vulkan expectations (vulkan is the main target for
* VM_BIND)
* VM_BIND).
*/
if (!vm->managed)
msm_gem_vm_unusable(submit->vm);
@@ -553,8 +558,15 @@ static void recover_worker(struct kthread_work *work)
unsigned long flags;
spin_lock_irqsave(&ring->submit_lock, flags);
list_for_each_entry(submit, &ring->submits, node)
list_for_each_entry(submit, &ring->submits, node) {
/*
* If the submit uses an unusable vm make sure
* we don't actually run it
*/
if (to_msm_vm(submit->vm)->unusable)
submit->nr_cmds = 0;
gpu->funcs->submit(gpu, submit);
}
spin_unlock_irqrestore(&ring->submit_lock, flags);
}
}

View File

@@ -14,7 +14,9 @@
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
atomic_t pagetables;
struct mutex init_lock; /* protects pagetables counter and prr_page */
int pagetables;
struct page *prr_page;
struct kmem_cache *pt_cache;
@@ -227,7 +229,8 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
* If this is the last attached pagetable for the parent,
* disable TTBR0 in the arm-smmu driver
*/
if (atomic_dec_return(&iommu->pagetables) == 0) {
mutex_lock(&iommu->init_lock);
if (--iommu->pagetables == 0) {
adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
if (adreno_smmu->set_prr_bit) {
@@ -236,6 +239,7 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
iommu->prr_page = NULL;
}
}
mutex_unlock(&iommu->init_lock);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
@@ -568,9 +572,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
* If this is the first pagetable that we've allocated, send it back to
* the arm-smmu driver as a trigger to set up TTBR0
*/
if (atomic_inc_return(&iommu->pagetables) == 1) {
mutex_lock(&iommu->init_lock);
if (iommu->pagetables++ == 0) {
ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
if (ret) {
iommu->pagetables--;
mutex_unlock(&iommu->init_lock);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
return ERR_PTR(ret);
@@ -595,6 +602,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
}
}
mutex_unlock(&iommu->init_lock);
/* Needed later for TLB flush */
pagetable->parent = parent;
@@ -730,7 +738,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
atomic_set(&iommu->pagetables, 0);
mutex_init(&iommu->init_lock);
ret = iommu_attach_device(iommu->domain, dev);
if (ret) {
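The switch above from an atomic_t to a plain counter under init_lock is needed because the 0 -> 1 and 1 -> 0 transitions carry side effects (TTBR0 configuration, the PRR page) that must happen atomically with the transition itself; a bare atomic counter only serializes the increment/decrement. A generic sketch of the pattern with hypothetical demo_* names:

#include <linux/mutex.h>

struct demo_parent {
	struct mutex init_lock;	/* protects users and the shared state */
	int users;
};

/* Hypothetical stand-ins for the first-user setup / last-user teardown: */
static int demo_first_user_setup(struct demo_parent *p) { return 0; }
static void demo_last_user_teardown(struct demo_parent *p) { }

static int demo_get(struct demo_parent *p)
{
	int ret = 0;

	mutex_lock(&p->init_lock);
	if (p->users++ == 0) {
		ret = demo_first_user_setup(p);
		if (ret)
			p->users--;	/* roll back the failed first get */
	}
	mutex_unlock(&p->init_lock);

	return ret;
}

static void demo_put(struct demo_parent *p)
{
	mutex_lock(&p->init_lock);
	if (--p->users == 0)
		demo_last_user_teardown(p);
	mutex_unlock(&p->init_lock);
}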

View File

@@ -275,6 +275,12 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
if (ret)
return ret;
ret = msm_disp_snapshot_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
return ret;
}
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
@@ -327,10 +333,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
goto err_msm_uninit;
}
ret = msm_disp_snapshot_init(ddev);
if (ret)
DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
drm_mode_config_reset(ddev);
return 0;

View File

@@ -423,7 +423,7 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
if (IS_ERR(msm_mdss->mmio))
return ERR_CAST(msm_mdss->mmio);
dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
dev_dbg(&pdev->dev, "mapped mdss address space @%p\n", msm_mdss->mmio);
ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
if (ret)

View File

@@ -594,10 +594,14 @@ by a particular renderpass/blit.
<reg32 offset="0x0600" name="DBGC_CFG_DBGBUS_SEL_A"/>
<reg32 offset="0x0601" name="DBGC_CFG_DBGBUS_SEL_B"/>
<reg32 offset="0x0602" name="DBGC_CFG_DBGBUS_SEL_C"/>
<reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D">
<reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D" variants="A6XX">
<bitfield high="7" low="0" name="PING_INDEX"/>
<bitfield high="15" low="8" name="PING_BLK_SEL"/>
</reg32>
<reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D" variants="A7XX-">
<bitfield high="7" low="0" name="PING_INDEX"/>
<bitfield high="24" low="16" name="PING_BLK_SEL"/>
</reg32>
<reg32 offset="0x0604" name="DBGC_CFG_DBGBUS_CNTLT">
<bitfield high="5" low="0" name="TRACEEN"/>
<bitfield high="14" low="12" name="GRANU"/>
@@ -3796,6 +3800,14 @@ by a particular renderpass/blit.
<reg32 offset="0x0030" name="CFG_DBGBUS_TRACE_BUF2"/>
</domain>
<domain name="A7XX_CX_DBGC" width="32">
<!-- Bitfields shifted, but otherwise the same: -->
<reg32 offset="0x0000" name="CFG_DBGBUS_SEL_A" variants="A7XX-">
<bitfield high="7" low="0" name="PING_INDEX"/>
<bitfield high="24" low="16" name="PING_BLK_SEL"/>
</reg32>
</domain>
<domain name="A6XX_CX_MISC" width="32" prefix="variant" varset="chip">
<reg32 offset="0x0001" name="SYSTEM_CACHE_CNTL_0"/>
<reg32 offset="0x0002" name="SYSTEM_CACHE_CNTL_1"/>

View File

@@ -159,28 +159,28 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="RGB_SWAP" low="12" high="14" type="dsi_rgb_swap"/>
</reg32>
<reg32 offset="0x00020" name="ACTIVE_H">
<bitfield name="START" low="0" high="11" type="uint"/>
<bitfield name="END" low="16" high="27" type="uint"/>
<bitfield name="START" low="0" high="15" type="uint"/>
<bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00024" name="ACTIVE_V">
<bitfield name="START" low="0" high="11" type="uint"/>
<bitfield name="END" low="16" high="27" type="uint"/>
<bitfield name="START" low="0" high="15" type="uint"/>
<bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00028" name="TOTAL">
<bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
<bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
<bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
<bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x0002c" name="ACTIVE_HSYNC">
<bitfield name="START" low="0" high="11" type="uint"/>
<bitfield name="END" low="16" high="27" type="uint"/>
<bitfield name="START" low="0" high="15" type="uint"/>
<bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00030" name="ACTIVE_VSYNC_HPOS">
<bitfield name="START" low="0" high="11" type="uint"/>
<bitfield name="END" low="16" high="27" type="uint"/>
<bitfield name="START" low="0" high="15" type="uint"/>
<bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00034" name="ACTIVE_VSYNC_VPOS">
<bitfield name="START" low="0" high="11" type="uint"/>
<bitfield name="END" low="16" high="27" type="uint"/>
<bitfield name="START" low="0" high="15" type="uint"/>
<bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00038" name="CMD_DMA_CTRL">
@@ -209,8 +209,8 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="WORD_COUNT" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00058" name="CMD_MDP_STREAM0_TOTAL">
<bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
<bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
<bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
<bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x0005c" name="CMD_MDP_STREAM1_CTRL">
<bitfield name="DATA_TYPE" low="0" high="5" type="uint"/>

Some files were not shown because too many files have changed in this diff.