Mirror of https://github.com/torvalds/linux.git (synced 2026-01-25 15:03:52 +08:00)
Merge tag 'asoc-v6.18-2' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-next
ASoC: Updates for v6.18 round 2

Some more updates for v6.18, mostly fixes for the earlier pull request, along with some cleanups and more minor fixes for older code. We do have one new driver, the TI TAS2783A, and some quirks for new platforms.
@@ -23,11 +23,14 @@ properties:
  - enum:
      - qcom,sc7180-refgen-regulator
      - qcom,sc8180x-refgen-regulator
      - qcom,sdm670-refgen-regulator
      - qcom,sm8150-refgen-regulator
  - const: qcom,sdm845-refgen-regulator

  - items:
      - enum:
          - qcom,qcs8300-refgen-regulator
          - qcom,sa8775p-refgen-regulator
          - qcom,sc7280-refgen-regulator
          - qcom,sc8280xp-refgen-regulator
          - qcom,sm6350-refgen-regulator
@@ -151,6 +151,12 @@ properties:
    minimum: 0
    maximum: 5

  cirrus,subsystem-id:
    $ref: /schemas/types.yaml#/definitions/string
    description:
      Subsystem ID. If this property is present, it sets the system name,
      used to identify the firmware and tuning to load.

required:
  - compatible
  - reg
@@ -0,0 +1,228 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/sound/mediatek,mt8183-audio.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Mediatek AFE PCM controller for mt8183

maintainers:
  - Julien Massot <jmassot@collabora.com>

properties:
  compatible:
    const: mediatek,mt8183-audio

  interrupts:
    maxItems: 1

  resets:
    maxItems: 1

  reset-names:
    const: audiosys

  power-domains:
    maxItems: 1

  memory-region:
    maxItems: 1

  clocks:
    items:
      - description: AFE clock
      - description: ADDA DAC clock
      - description: ADDA DAC pre-distortion clock
      - description: ADDA ADC clock
      - description: ADDA6 ADC clock
      - description: Audio low-jitter 22.5792m clock
      - description: Audio low-jitter 24.576m clock
      - description: Audio PLL1 tuner clock
      - description: Audio PLL2 tuner clock
      - description: I2S1 bit clock
      - description: I2S2 bit clock
      - description: I2S3 bit clock
      - description: I2S4 bit clock
      - description: Audio Time-Division Multiplexing interface clock
      - description: Powerdown Audio test model clock
      - description: Audio infra sys clock
      - description: Audio infra 26M clock
      - description: Mux for audio clock
      - description: Mux for audio internal bus clock
      - description: Mux main divider by 4
      - description: Primary audio mux
      - description: Primary audio PLL
      - description: Secondary audio mux
      - description: Secondary audio PLL
      - description: Primary audio en-generator clock
      - description: Primary PLL divider by 4 for IEC
      - description: Secondary audio en-generator clock
      - description: Secondary PLL divider by 8 for IEC
      - description: Mux selector for I2S port 0
      - description: Mux selector for I2S port 1
      - description: Mux selector for I2S port 2
      - description: Mux selector for I2S port 3
      - description: Mux selector for I2S port 4
      - description: Mux selector for I2S port 5
      - description: APLL1 and APLL2 divider for I2S port 0
      - description: APLL1 and APLL2 divider for I2S port 1
      - description: APLL1 and APLL2 divider for I2S port 2
      - description: APLL1 and APLL2 divider for I2S port 3
      - description: APLL1 and APLL2 divider for I2S port 4
      - description: APLL1 and APLL2 divider for IEC
      - description: 26MHz clock for audio subsystem

  clock-names:
    items:
      - const: aud_afe_clk
      - const: aud_dac_clk
      - const: aud_dac_predis_clk
      - const: aud_adc_clk
      - const: aud_adc_adda6_clk
      - const: aud_apll22m_clk
      - const: aud_apll24m_clk
      - const: aud_apll1_tuner_clk
      - const: aud_apll2_tuner_clk
      - const: aud_i2s1_bclk_sw
      - const: aud_i2s2_bclk_sw
      - const: aud_i2s3_bclk_sw
      - const: aud_i2s4_bclk_sw
      - const: aud_tdm_clk
      - const: aud_tml_clk
      - const: aud_infra_clk
      - const: mtkaif_26m_clk
      - const: top_mux_audio
      - const: top_mux_aud_intbus
      - const: top_syspll_d2_d4
      - const: top_mux_aud_1
      - const: top_apll1_ck
      - const: top_mux_aud_2
      - const: top_apll2_ck
      - const: top_mux_aud_eng1
      - const: top_apll1_d8
      - const: top_mux_aud_eng2
      - const: top_apll2_d8
      - const: top_i2s0_m_sel
      - const: top_i2s1_m_sel
      - const: top_i2s2_m_sel
      - const: top_i2s3_m_sel
      - const: top_i2s4_m_sel
      - const: top_i2s5_m_sel
      - const: top_apll12_div0
      - const: top_apll12_div1
      - const: top_apll12_div2
      - const: top_apll12_div3
      - const: top_apll12_div4
      - const: top_apll12_divb
      - const: top_clk26m_clk

required:
  - compatible
  - interrupts
  - resets
  - reset-names
  - power-domains
  - clocks
  - clock-names

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/mt8183-clk.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/interrupt-controller/irq.h>
    #include <dt-bindings/power/mt8183-power.h>
    #include <dt-bindings/reset/mt8183-resets.h>

    audio-controller {
        compatible = "mediatek,mt8183-audio";
        interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_LOW>;
        resets = <&watchdog MT8183_TOPRGU_AUDIO_SW_RST>;
        reset-names = "audiosys";
        power-domains = <&spm MT8183_POWER_DOMAIN_AUDIO>;
        clocks = <&audiosys CLK_AUDIO_AFE>,
                 <&audiosys CLK_AUDIO_DAC>,
                 <&audiosys CLK_AUDIO_DAC_PREDIS>,
                 <&audiosys CLK_AUDIO_ADC>,
                 <&audiosys CLK_AUDIO_PDN_ADDA6_ADC>,
                 <&audiosys CLK_AUDIO_22M>,
                 <&audiosys CLK_AUDIO_24M>,
                 <&audiosys CLK_AUDIO_APLL_TUNER>,
                 <&audiosys CLK_AUDIO_APLL2_TUNER>,
                 <&audiosys CLK_AUDIO_I2S1>,
                 <&audiosys CLK_AUDIO_I2S2>,
                 <&audiosys CLK_AUDIO_I2S3>,
                 <&audiosys CLK_AUDIO_I2S4>,
                 <&audiosys CLK_AUDIO_TDM>,
                 <&audiosys CLK_AUDIO_TML>,
                 <&infracfg CLK_INFRA_AUDIO>,
                 <&infracfg CLK_INFRA_AUDIO_26M_BCLK>,
                 <&topckgen CLK_TOP_MUX_AUDIO>,
                 <&topckgen CLK_TOP_MUX_AUD_INTBUS>,
                 <&topckgen CLK_TOP_SYSPLL_D2_D4>,
                 <&topckgen CLK_TOP_MUX_AUD_1>,
                 <&topckgen CLK_TOP_APLL1_CK>,
                 <&topckgen CLK_TOP_MUX_AUD_2>,
                 <&topckgen CLK_TOP_APLL2_CK>,
                 <&topckgen CLK_TOP_MUX_AUD_ENG1>,
                 <&topckgen CLK_TOP_APLL1_D8>,
                 <&topckgen CLK_TOP_MUX_AUD_ENG2>,
                 <&topckgen CLK_TOP_APLL2_D8>,
                 <&topckgen CLK_TOP_MUX_APLL_I2S0>,
                 <&topckgen CLK_TOP_MUX_APLL_I2S1>,
                 <&topckgen CLK_TOP_MUX_APLL_I2S2>,
                 <&topckgen CLK_TOP_MUX_APLL_I2S3>,
                 <&topckgen CLK_TOP_MUX_APLL_I2S4>,
                 <&topckgen CLK_TOP_MUX_APLL_I2S5>,
                 <&topckgen CLK_TOP_APLL12_DIV0>,
                 <&topckgen CLK_TOP_APLL12_DIV1>,
                 <&topckgen CLK_TOP_APLL12_DIV2>,
                 <&topckgen CLK_TOP_APLL12_DIV3>,
                 <&topckgen CLK_TOP_APLL12_DIV4>,
                 <&topckgen CLK_TOP_APLL12_DIVB>,
                 <&clk26m>;
        clock-names = "aud_afe_clk",
                      "aud_dac_clk",
                      "aud_dac_predis_clk",
                      "aud_adc_clk",
                      "aud_adc_adda6_clk",
                      "aud_apll22m_clk",
                      "aud_apll24m_clk",
                      "aud_apll1_tuner_clk",
                      "aud_apll2_tuner_clk",
                      "aud_i2s1_bclk_sw",
                      "aud_i2s2_bclk_sw",
                      "aud_i2s3_bclk_sw",
                      "aud_i2s4_bclk_sw",
                      "aud_tdm_clk",
                      "aud_tml_clk",
                      "aud_infra_clk",
                      "mtkaif_26m_clk",
                      "top_mux_audio",
                      "top_mux_aud_intbus",
                      "top_syspll_d2_d4",
                      "top_mux_aud_1",
                      "top_apll1_ck",
                      "top_mux_aud_2",
                      "top_apll2_ck",
                      "top_mux_aud_eng1",
                      "top_apll1_d8",
                      "top_mux_aud_eng2",
                      "top_apll2_d8",
                      "top_i2s0_m_sel",
                      "top_i2s1_m_sel",
                      "top_i2s2_m_sel",
                      "top_i2s3_m_sel",
                      "top_i2s4_m_sel",
                      "top_i2s5_m_sel",
                      "top_apll12_div0",
                      "top_apll12_div1",
                      "top_apll12_div2",
                      "top_apll12_div3",
                      "top_apll12_div4",
                      "top_apll12_divb",
                      "top_clk26m_clk";
    };

...
@@ -0,0 +1,49 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/sound/mediatek,mt8183_da7219.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: MediaTek MT8183 sound card with external codecs

maintainers:
  - Julien Massot <jmassot@collabora.com>

description:
  MediaTek MT8183 SoC-based sound cards with DA7219 as headset codec,
  and MAX98357A, RT1015 or RT1015P as speaker amplifiers. Optionally includes HDMI codec.

properties:
  compatible:
    enum:
      - mediatek,mt8183_da7219_max98357
      - mediatek,mt8183_da7219_rt1015
      - mediatek,mt8183_da7219_rt1015p

  mediatek,headset-codec:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: Phandle to the DA7219 headset codec.

  mediatek,platform:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: Phandle to the MT8183 ASoC platform (e.g., AFE node).

  mediatek,hdmi-codec:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: Optional phandle to the HDMI codec (e.g., IT6505).

required:
  - compatible
  - mediatek,headset-codec
  - mediatek,platform

additionalProperties: false

examples:
  - |
    sound {
        compatible = "mediatek,mt8183_da7219_max98357";
        mediatek,headset-codec = <&da7219>;
        mediatek,hdmi-codec = <&it6505dptx>;
        mediatek,platform = <&afe>;
    };
@@ -0,0 +1,59 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/sound/mediatek,mt8183_mt6358_ts3a227.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: MediaTek MT8183 sound card with MT6358, TS3A227, and MAX98357/RT1015 codecs

maintainers:
  - Julien Massot <julien.massot@collabora.com>

description:
  MediaTek MT8183 SoC-based sound cards using the MT6358 codec,
  with optional TS3A227 headset codec, EC codec (via Chrome EC), and HDMI audio.
  Speaker amplifier can be one of MAX98357A/B, RT1015, or RT1015P.

properties:
  compatible:
    enum:
      - mediatek,mt8183_mt6358_ts3a227_max98357
      - mediatek,mt8183_mt6358_ts3a227_max98357b
      - mediatek,mt8183_mt6358_ts3a227_rt1015
      - mediatek,mt8183_mt6358_ts3a227_rt1015p

  mediatek,platform:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: Phandle to the MT8183 ASoC platform node (e.g., AFE).

  mediatek,headset-codec:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: Phandle to the TS3A227 headset codec.

  mediatek,ec-codec:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: |
      Optional phandle to a ChromeOS EC codec node.
      See bindings in google,cros-ec-codec.yaml.

  mediatek,hdmi-codec:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: Optional phandle to an HDMI audio codec node.

required:
  - compatible
  - mediatek,platform

additionalProperties: false

examples:
  - |
    sound {
        compatible = "mediatek,mt8183_mt6358_ts3a227_max98357";
        mediatek,headset-codec = <&ts3a227>;
        mediatek,ec-codec = <&ec_codec>;
        mediatek,hdmi-codec = <&it6505dptx>;
        mediatek,platform = <&afe>;
    };

...
@@ -1,42 +0,0 @@
Mediatek AFE PCM controller for mt8183

Required properties:
- compatible = "mediatek,mt68183-audio";
- reg: register location and size
- interrupts: should contain AFE interrupt
- resets: Must contain an entry for each entry in reset-names
  See ../reset/reset.txt for details.
- reset-names: should have these reset names:
  "audiosys";
- power-domains: should define the power domain
- clocks: Must contain an entry for each entry in clock-names
- clock-names: should have these clock names:
  "infra_sys_audio_clk",
  "mtkaif_26m_clk",
  "top_mux_audio",
  "top_mux_aud_intbus",
  "top_sys_pll3_d4",
  "top_clk26m_clk";

Example:

	afe: mt8183-afe-pcm@11220000 {
		compatible = "mediatek,mt8183-audio";
		reg = <0 0x11220000 0 0x1000>;
		interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_LOW>;
		resets = <&watchdog MT8183_TOPRGU_AUDIO_SW_RST>;
		reset-names = "audiosys";
		power-domains = <&scpsys MT8183_POWER_DOMAIN_AUDIO>;
		clocks = <&infrasys CLK_INFRA_AUDIO>,
			 <&infrasys CLK_INFRA_AUDIO_26M_BCLK>,
			 <&topckgen CLK_TOP_MUX_AUDIO>,
			 <&topckgen CLK_TOP_MUX_AUD_INTBUS>,
			 <&topckgen CLK_TOP_SYSPLL_D2_D4>,
			 <&clk26m>;
		clock-names = "infra_sys_audio_clk",
			      "mtkaif_26m_clk",
			      "top_mux_audio",
			      "top_mux_aud_intbus",
			      "top_sys_pll_d2_d4",
			      "top_clk26m_clk";
	};
@@ -1,21 +0,0 @@
MT8183 with MT6358, DA7219, MAX98357, and RT1015 CODECS

Required properties:
- compatible : "mediatek,mt8183_da7219_max98357" for MAX98357A codec
               "mediatek,mt8183_da7219_rt1015" for RT1015 codec
               "mediatek,mt8183_da7219_rt1015p" for RT1015P codec
- mediatek,headset-codec: the phandles of da7219 codecs
- mediatek,platform: the phandle of MT8183 ASoC platform

Optional properties:
- mediatek,hdmi-codec: the phandles of HDMI codec

Example:

	sound {
		compatible = "mediatek,mt8183_da7219_max98357";
		mediatek,headset-codec = <&da7219>;
		mediatek,hdmi-codec = <&it6505dptx>;
		mediatek,platform = <&afe>;
	};
@@ -1,25 +0,0 @@
MT8183 with MT6358, TS3A227, MAX98357, and RT1015 CODECS

Required properties:
- compatible : "mediatek,mt8183_mt6358_ts3a227_max98357" for MAX98357A codec
               "mediatek,mt8183_mt6358_ts3a227_max98357b" for MAX98357B codec
               "mediatek,mt8183_mt6358_ts3a227_rt1015" for RT1015 codec
               "mediatek,mt8183_mt6358_ts3a227_rt1015p" for RT1015P codec
- mediatek,platform: the phandle of MT8183 ASoC platform

Optional properties:
- mediatek,headset-codec: the phandles of ts3a227 codecs
- mediatek,ec-codec: the phandle of EC codecs.
  See google,cros-ec-codec.txt for more details.
- mediatek,hdmi-codec: the phandles of HDMI codec

Example:

	sound {
		compatible = "mediatek,mt8183_mt6358_ts3a227_max98357";
		mediatek,headset-codec = <&ts3a227>;
		mediatek,ec-codec = <&ec_codec>;
		mediatek,hdmi-codec = <&it6505dptx>;
		mediatek,platform = <&afe>;
	};
@@ -575,8 +575,8 @@ operations:
            - nat-dst
            - timeout
            - mark
            - counter-orig
            - counter-reply
            - counters-orig
            - counters-reply
            - use
            - id
            - nat-dst
@@ -591,7 +591,6 @@ operations:
        request:
          value: 0x101
          attributes:
            - nfgen-family
            - mark
            - filter
            - status
@@ -608,8 +607,8 @@ operations:
            - nat-dst
            - timeout
            - mark
            - counter-orig
            - counter-reply
            - counters-orig
            - counters-reply
            - use
            - id
            - nat-dst
@@ -28,13 +28,13 @@ definitions:
        traffic-patterns it can take a long time until the
        MPTCP_EVENT_ESTABLISHED is sent.
        Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
        dport, server-side.
        dport, server-side, [flags].
    -
      name: established
      doc: >-
        A MPTCP connection is established (can start new subflows).
        Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
        dport, server-side.
        dport, server-side, [flags].
    -
      name: closed
      doc: >-
@@ -7430,7 +7430,7 @@ S: Supported
F: Documentation/devicetree/bindings/dpll/dpll-device.yaml
F: Documentation/devicetree/bindings/dpll/dpll-pin.yaml
F: Documentation/driver-api/dpll.rst
F: drivers/dpll/*
F: drivers/dpll/
F: include/linux/dpll.h
F: include/uapi/linux/dpll.h

@@ -16204,6 +16204,7 @@ R: Rik van Riel <riel@surriel.com>
R: Liam R. Howlett <Liam.Howlett@oracle.com>
R: Vlastimil Babka <vbabka@suse.cz>
R: Harry Yoo <harry.yoo@oracle.com>
R: Jann Horn <jannh@google.com>
L: linux-mm@kvack.org
S: Maintained
F: include/linux/rmap.h
@@ -16248,6 +16249,7 @@ R: Nico Pache <npache@redhat.com>
R: Ryan Roberts <ryan.roberts@arm.com>
R: Dev Jain <dev.jain@arm.com>
R: Barry Song <baohua@kernel.org>
R: Lance Yang <lance.yang@linux.dev>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
@@ -22060,6 +22062,7 @@ F: drivers/infiniband/ulp/rtrs/

RUNTIME VERIFICATION (RV)
M: Steven Rostedt <rostedt@goodmis.org>
M: Gabriele Monaco <gmonaco@redhat.com>
L: linux-trace-kernel@vger.kernel.org
S: Maintained
F: Documentation/trace/rv/
@@ -24267,7 +24270,7 @@ F: Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
F: drivers/input/keyboard/sun4i-lradc-keys.c

SUNDANCE NETWORK DRIVER
M: Denis Kirjanov <dkirjanov@suse.de>
M: Denis Kirjanov <kirjanov@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/dlink/sundance.c
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 17
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@@ -1369,6 +1369,7 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
|
||||
}
|
||||
|
||||
void kvm_init_host_debug_data(void);
|
||||
void kvm_debug_init_vhe(void);
|
||||
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
|
||||
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
|
||||
void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu);
|
||||
|
||||
@@ -355,11 +355,6 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
|
||||
return pteref;
|
||||
}
|
||||
|
||||
static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
|
||||
{
|
||||
return pteref;
|
||||
}
|
||||
|
||||
static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
|
||||
{
|
||||
/*
|
||||
@@ -389,11 +384,6 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
|
||||
return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
|
||||
}
|
||||
|
||||
static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
|
||||
{
|
||||
return rcu_dereference_raw(pteref);
|
||||
}
|
||||
|
||||
static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
|
||||
{
|
||||
if (walker->flags & KVM_PGTABLE_WALK_SHARED)
|
||||
@@ -561,26 +551,6 @@ static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2
|
||||
*/
|
||||
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
|
||||
|
||||
/**
|
||||
* kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses.
|
||||
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
|
||||
* @addr: Intermediate physical address at which to place the mapping.
|
||||
* @size: Size of the mapping.
|
||||
*
|
||||
* The page-table is assumed to be unreachable by any hardware walkers prior
|
||||
* to freeing and therefore no TLB invalidation is performed.
|
||||
*/
|
||||
void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
|
||||
u64 addr, u64 size);
|
||||
|
||||
/**
|
||||
* kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table.
|
||||
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
|
||||
*
|
||||
* It is assumed that the rest of the page-table is freed before this operation.
|
||||
*/
|
||||
void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
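As context for the split API documented above, here is a minimal sketch of how a caller might use it: destroy the table in ranges (so it can reschedule between batches) and only then free the PGD. This mirrors what the arm64 mmu.c hunk later in this commit does; the function name and batch size below are invented for illustration and this is not standalone-runnable code.

	/* Hypothetical caller: tear the table down in batches, then free the PGD. */
	static void example_stage2_teardown(struct kvm_pgtable *pgt, u64 ia_bits)
	{
		u64 addr, end = BIT(ia_bits);
		u64 chunk = SZ_1G;	/* arbitrary batch size, not the kernel's value */

		for (addr = 0; addr < end; addr += chunk) {
			kvm_pgtable_stage2_destroy_range(pgt, addr, min(chunk, end - addr));
			cond_resched();	/* avoid hogging the CPU on huge ranges */
		}

		kvm_pgtable_stage2_destroy_pgd(pgt);
	}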
|
||||
|
||||
/**
|
||||
* kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
|
||||
* @mm_ops: Memory management callbacks.
|
||||
|
||||
@@ -179,9 +179,7 @@ struct pkvm_mapping {
|
||||
|
||||
int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
|
||||
struct kvm_pgtable_mm_ops *mm_ops);
|
||||
void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
|
||||
u64 addr, u64 size);
|
||||
void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
|
||||
void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
|
||||
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
|
||||
enum kvm_pgtable_prot prot, void *mc,
|
||||
enum kvm_pgtable_walk_flags flags);
|
||||
|
||||
@@ -2113,8 +2113,10 @@ static void cpu_hyp_init_features(void)
|
||||
{
|
||||
cpu_set_hyp_vector();
|
||||
|
||||
if (is_kernel_in_hyp_mode())
|
||||
if (is_kernel_in_hyp_mode()) {
|
||||
kvm_timer_init_vhe();
|
||||
kvm_debug_init_vhe();
|
||||
}
|
||||
|
||||
if (vgic_present)
|
||||
kvm_vgic_init_cpu_hardware();
|
||||
|
||||
@@ -96,6 +96,13 @@ void kvm_init_host_debug_data(void)
|
||||
}
|
||||
}
|
||||
|
||||
void kvm_debug_init_vhe(void)
|
||||
{
|
||||
/* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
|
||||
if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1)))
|
||||
write_sysreg_el1(0, SYS_PMSCR);
|
||||
}
|
||||
|
||||
/*
|
||||
* Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
|
||||
* has taken over MDSCR_EL1.
|
||||
@@ -138,6 +145,9 @@ void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
|
||||
/* Must be called before kvm_vcpu_load_vhe() */
|
||||
KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm);
|
||||
|
||||
if (has_vhe())
|
||||
*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
|
||||
|
||||
/*
|
||||
* Determine which of the possible debug states we're in:
|
||||
*
|
||||
@@ -184,6 +194,9 @@ void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
|
||||
|
||||
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (has_vhe())
|
||||
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
|
||||
|
||||
if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
|
||||
return;
|
||||
|
||||
|
||||
@@ -431,9 +431,6 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
|
||||
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
|
||||
}
|
||||
|
||||
*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
|
||||
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
|
||||
|
||||
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
|
||||
u64 hcrx = vcpu->arch.hcrx_el2;
|
||||
if (is_nested_ctxt(vcpu)) {
|
||||
@@ -454,8 +451,6 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
|
||||
|
||||
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
|
||||
|
||||
write_sysreg(0, hstr_el2);
|
||||
if (system_supports_pmuv3()) {
|
||||
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
|
||||
|
||||
@@ -50,6 +50,10 @@ extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
|
||||
static void __activate_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
___activate_traps(vcpu, vcpu->arch.hcr_el2);
|
||||
|
||||
*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
|
||||
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
|
||||
|
||||
__activate_traps_common(vcpu);
|
||||
__activate_cptr_traps(vcpu);
|
||||
|
||||
@@ -93,6 +97,8 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
|
||||
isb();
|
||||
}
|
||||
|
||||
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
|
||||
|
||||
__deactivate_traps_common(vcpu);
|
||||
|
||||
write_sysreg_hcr(this_cpu_ptr(&kvm_init_params)->hcr_el2);
|
||||
|
||||
@@ -253,7 +253,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)

	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
	__vcpu_assign_sys_reg(vcpu, read_sysreg_el1(SYS_VBAR), VBAR_EL1);
	__vcpu_assign_sys_reg(vcpu, VBAR_EL1, read_sysreg_el1(SYS_VBAR));

	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
@@ -1551,38 +1551,21 @@ static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
|
||||
u64 addr, u64 size)
|
||||
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
|
||||
{
|
||||
size_t pgd_sz;
|
||||
struct kvm_pgtable_walker walker = {
|
||||
.cb = stage2_free_walker,
|
||||
.flags = KVM_PGTABLE_WALK_LEAF |
|
||||
KVM_PGTABLE_WALK_TABLE_POST,
|
||||
};
|
||||
|
||||
WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
|
||||
}
|
||||
|
||||
void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
|
||||
{
|
||||
size_t pgd_sz;
|
||||
|
||||
WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
|
||||
pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
|
||||
|
||||
/*
|
||||
* Since the pgtable is unlinked at this point, and not shared with
|
||||
* other walkers, safely deference pgd with kvm_dereference_pteref_raw()
|
||||
*/
|
||||
pgt->mm_ops->free_pages_exact(kvm_dereference_pteref_raw(pgt->pgd), pgd_sz);
|
||||
pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
|
||||
pgt->pgd = NULL;
|
||||
}
|
||||
|
||||
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
|
||||
{
|
||||
kvm_pgtable_stage2_destroy_range(pgt, 0, BIT(pgt->ia_bits));
|
||||
kvm_pgtable_stage2_destroy_pgd(pgt);
|
||||
}
|
||||
|
||||
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
|
||||
{
|
||||
kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
|
||||
|
||||
@@ -904,38 +904,6 @@ static int kvm_init_ipa_range(struct kvm_s2_mmu *mmu, unsigned long type)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Assume that @pgt is valid and unlinked from the KVM MMU to free the
|
||||
* page-table without taking the kvm_mmu_lock and without performing any
|
||||
* TLB invalidations.
|
||||
*
|
||||
* Also, the range of addresses can be large enough to cause need_resched
|
||||
* warnings, for instance on CONFIG_PREEMPT_NONE kernels. Hence, invoke
|
||||
* cond_resched() periodically to prevent hogging the CPU for a long time
|
||||
* and schedule something else, if required.
|
||||
*/
|
||||
static void stage2_destroy_range(struct kvm_pgtable *pgt, phys_addr_t addr,
|
||||
phys_addr_t end)
|
||||
{
|
||||
u64 next;
|
||||
|
||||
do {
|
||||
next = stage2_range_addr_end(addr, end);
|
||||
KVM_PGT_FN(kvm_pgtable_stage2_destroy_range)(pgt, addr,
|
||||
next - addr);
|
||||
if (next != end)
|
||||
cond_resched();
|
||||
} while (addr = next, addr != end);
|
||||
}
|
||||
|
||||
static void kvm_stage2_destroy(struct kvm_pgtable *pgt)
|
||||
{
|
||||
unsigned int ia_bits = VTCR_EL2_IPA(pgt->mmu->vtcr);
|
||||
|
||||
stage2_destroy_range(pgt, 0, BIT(ia_bits));
|
||||
KVM_PGT_FN(kvm_pgtable_stage2_destroy_pgd)(pgt);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_init_stage2_mmu - Initialise a S2 MMU structure
|
||||
* @kvm: The pointer to the KVM structure
|
||||
@@ -1012,7 +980,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
|
||||
return 0;
|
||||
|
||||
out_destroy_pgtable:
|
||||
kvm_stage2_destroy(pgt);
|
||||
KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
|
||||
out_free_pgtable:
|
||||
kfree(pgt);
|
||||
return err;
|
||||
@@ -1106,10 +1074,14 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
|
||||
mmu->pgt = NULL;
|
||||
free_percpu(mmu->last_vcpu_ran);
|
||||
}
|
||||
|
||||
if (kvm_is_nested_s2_mmu(kvm, mmu))
|
||||
kvm_init_nested_s2_mmu(mmu);
|
||||
|
||||
write_unlock(&kvm->mmu_lock);
|
||||
|
||||
if (pgt) {
|
||||
kvm_stage2_destroy(pgt);
|
||||
KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
|
||||
kfree(pgt);
|
||||
}
|
||||
}
|
||||
@@ -1541,11 +1513,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
|
||||
VM_BUG_ON(write_fault && exec_fault);
|
||||
|
||||
if (fault_is_perm && !write_fault && !exec_fault) {
|
||||
kvm_err("Unexpected L2 read permission error\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (!is_protected_kvm_enabled())
|
||||
memcache = &vcpu->arch.mmu_page_cache;
|
||||
else
|
||||
|
||||
@@ -847,7 +847,7 @@ static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end)

		ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
							    vt->wr.level));
		ipa_start = vt->wr.pa & (ipa_size - 1);
		ipa_start = vt->wr.pa & ~(ipa_size - 1);
		ipa_end = ipa_start + ipa_size;

		if (ipa_end <= start || ipa_start >= end)
@@ -887,7 +887,7 @@ static void invalidate_vncr_va(struct kvm *kvm,

		va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
							   vt->wr.level));
		va_start = vt->gva & (va_size - 1);
		va_start = vt->gva & ~(va_size - 1);
		va_end = va_start + va_size;

		switch (scope->type) {
@@ -1276,7 +1276,7 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
		    !(tcr & TCR_ASID16))
			asid &= GENMASK(7, 0);

		return asid != vt->wr.asid;
		return asid == vt->wr.asid;
	}

	return true;
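The two masking fixes above amount to aligning an address down to its region base rather than extracting the in-region offset (the ASID check is likewise inverted so the lookup reports a match when the ASIDs are equal). A tiny illustrative snippet, with the size constant invented for the example:

	/* For a power-of-two region size, the two masks mean different things: */
	#define EXAMPLE_REGION_SIZE	0x1000UL	/* hypothetical 4KiB region */

	unsigned long offset = addr &  (EXAMPLE_REGION_SIZE - 1);	/* offset inside the region */
	unsigned long base   = addr & ~(EXAMPLE_REGION_SIZE - 1);	/* region base address */

	/* An overlap test of the form "base <= x < base + size" only works with base. */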
|
||||
|
||||
@@ -316,16 +316,9 @@ static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 e
|
||||
return 0;
|
||||
}
|
||||
|
||||
void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
|
||||
u64 addr, u64 size)
|
||||
void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
|
||||
{
|
||||
__pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
|
||||
}
|
||||
|
||||
void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
|
||||
{
|
||||
/* Expected to be called after all pKVM mappings have been released. */
|
||||
WARN_ON_ONCE(!RB_EMPTY_ROOT(&pgt->pkvm_mappings.rb_root));
|
||||
__pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL));
|
||||
}
|
||||
|
||||
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
|
||||
|
||||
@@ -69,7 +69,7 @@ static int iter_mark_lpis(struct kvm *kvm)
|
||||
int nr_lpis = 0;
|
||||
|
||||
xa_for_each(&dist->lpi_xa, intid, irq) {
|
||||
if (!vgic_try_get_irq_kref(irq))
|
||||
if (!vgic_try_get_irq_ref(irq))
|
||||
continue;
|
||||
|
||||
xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
|
||||
|
||||
@@ -53,7 +53,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
|
||||
{
|
||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||
|
||||
xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
|
||||
xa_init(&dist->lpi_xa);
|
||||
}
|
||||
|
||||
/* CREATION */
|
||||
@@ -208,7 +208,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
|
||||
raw_spin_lock_init(&irq->irq_lock);
|
||||
irq->vcpu = NULL;
|
||||
irq->target_vcpu = vcpu0;
|
||||
kref_init(&irq->refcount);
|
||||
refcount_set(&irq->refcount, 0);
|
||||
switch (dist->vgic_model) {
|
||||
case KVM_DEV_TYPE_ARM_VGIC_V2:
|
||||
irq->targets = 0;
|
||||
@@ -277,7 +277,7 @@ static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type)
|
||||
irq->intid = i;
|
||||
irq->vcpu = NULL;
|
||||
irq->target_vcpu = vcpu;
|
||||
kref_init(&irq->refcount);
|
||||
refcount_set(&irq->refcount, 0);
|
||||
if (vgic_irq_is_sgi(i)) {
|
||||
/* SGIs */
|
||||
irq->enabled = 1;
|
||||
|
||||
@@ -78,7 +78,6 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
|
||||
{
|
||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||
struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
/* In this case there is no put, since we keep the reference. */
|
||||
@@ -89,7 +88,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
|
||||
if (!irq)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
|
||||
ret = xa_reserve(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
|
||||
if (ret) {
|
||||
kfree(irq);
|
||||
return ERR_PTR(ret);
|
||||
@@ -99,19 +98,19 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
|
||||
raw_spin_lock_init(&irq->irq_lock);
|
||||
|
||||
irq->config = VGIC_CONFIG_EDGE;
|
||||
kref_init(&irq->refcount);
|
||||
refcount_set(&irq->refcount, 1);
|
||||
irq->intid = intid;
|
||||
irq->target_vcpu = vcpu;
|
||||
irq->group = 1;
|
||||
|
||||
xa_lock_irqsave(&dist->lpi_xa, flags);
|
||||
xa_lock(&dist->lpi_xa);
|
||||
|
||||
/*
|
||||
* There could be a race with another vgic_add_lpi(), so we need to
|
||||
* check that we don't add a second list entry with the same LPI.
|
||||
*/
|
||||
oldirq = xa_load(&dist->lpi_xa, intid);
|
||||
if (vgic_try_get_irq_kref(oldirq)) {
|
||||
if (vgic_try_get_irq_ref(oldirq)) {
|
||||
/* Someone was faster with adding this LPI, lets use that. */
|
||||
kfree(irq);
|
||||
irq = oldirq;
|
||||
@@ -126,7 +125,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
xa_unlock_irqrestore(&dist->lpi_xa, flags);
|
||||
xa_unlock(&dist->lpi_xa);
|
||||
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
@@ -547,7 +546,7 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
|
||||
rcu_read_lock();
|
||||
|
||||
irq = xa_load(&its->translation_cache, cache_key);
|
||||
if (!vgic_try_get_irq_kref(irq))
|
||||
if (!vgic_try_get_irq_ref(irq))
|
||||
irq = NULL;
|
||||
|
||||
rcu_read_unlock();
|
||||
@@ -571,7 +570,7 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
|
||||
* its_lock, as the ITE (and the reference it holds) cannot be freed.
|
||||
*/
|
||||
lockdep_assert_held(&its->its_lock);
|
||||
vgic_get_irq_kref(irq);
|
||||
vgic_get_irq_ref(irq);
|
||||
|
||||
old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
|
||||
|
||||
|
||||
@@ -518,7 +518,7 @@ static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq)
|
||||
if (!irq->hw || irq->host_irq != host_irq)
|
||||
continue;
|
||||
|
||||
if (!vgic_try_get_irq_kref(irq))
|
||||
if (!vgic_try_get_irq_ref(irq))
|
||||
return NULL;
|
||||
|
||||
return irq;
|
||||
|
||||
@@ -28,8 +28,8 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
|
||||
* kvm->arch.config_lock (mutex)
|
||||
* its->cmd_lock (mutex)
|
||||
* its->its_lock (mutex)
|
||||
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
|
||||
* vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
|
||||
* vgic_dist->lpi_xa.xa_lock
|
||||
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
|
||||
* vgic_irq->irq_lock must be taken with IRQs disabled
|
||||
*
|
||||
* As the ap_list_lock might be taken from the timer interrupt handler,
|
||||
@@ -71,7 +71,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
|
||||
rcu_read_lock();
|
||||
|
||||
irq = xa_load(&dist->lpi_xa, intid);
|
||||
if (!vgic_try_get_irq_kref(irq))
|
||||
if (!vgic_try_get_irq_ref(irq))
|
||||
irq = NULL;
|
||||
|
||||
rcu_read_unlock();
|
||||
@@ -114,37 +114,66 @@ struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
|
||||
return vgic_get_irq(vcpu->kvm, intid);
|
||||
}
|
||||
|
||||
/*
|
||||
* We can't do anything in here, because we lack the kvm pointer to
|
||||
* lock and remove the item from the lpi_list. So we keep this function
|
||||
* empty and use the return value of kref_put() to trigger the freeing.
|
||||
*/
|
||||
static void vgic_irq_release(struct kref *ref)
|
||||
static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq)
|
||||
{
|
||||
lockdep_assert_held(&dist->lpi_xa.xa_lock);
|
||||
__xa_erase(&dist->lpi_xa, irq->intid);
|
||||
kfree_rcu(irq, rcu);
|
||||
}
|
||||
|
||||
static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
|
||||
{
|
||||
if (irq->intid < VGIC_MIN_LPI)
|
||||
return false;
|
||||
|
||||
return refcount_dec_and_test(&irq->refcount);
|
||||
}
|
||||
|
||||
static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq *irq)
|
||||
{
|
||||
if (!__vgic_put_irq(kvm, irq))
|
||||
return false;
|
||||
|
||||
irq->pending_release = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
|
||||
{
|
||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||
unsigned long flags;
|
||||
|
||||
if (irq->intid < VGIC_MIN_LPI)
|
||||
if (irq->intid >= VGIC_MIN_LPI)
|
||||
might_lock(&dist->lpi_xa.xa_lock);
|
||||
|
||||
if (!__vgic_put_irq(kvm, irq))
|
||||
return;
|
||||
|
||||
if (!kref_put(&irq->refcount, vgic_irq_release))
|
||||
return;
|
||||
xa_lock(&dist->lpi_xa);
|
||||
vgic_release_lpi_locked(dist, irq);
|
||||
xa_unlock(&dist->lpi_xa);
|
||||
}
|
||||
|
||||
xa_lock_irqsave(&dist->lpi_xa, flags);
|
||||
__xa_erase(&dist->lpi_xa, irq->intid);
|
||||
xa_unlock_irqrestore(&dist->lpi_xa, flags);
|
||||
static void vgic_release_deleted_lpis(struct kvm *kvm)
|
||||
{
|
||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||
unsigned long intid;
|
||||
struct vgic_irq *irq;
|
||||
|
||||
kfree_rcu(irq, rcu);
|
||||
xa_lock(&dist->lpi_xa);
|
||||
|
||||
xa_for_each(&dist->lpi_xa, intid, irq) {
|
||||
if (irq->pending_release)
|
||||
vgic_release_lpi_locked(dist, irq);
|
||||
}
|
||||
|
||||
xa_unlock(&dist->lpi_xa);
|
||||
}
|
||||
|
||||
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
|
||||
struct vgic_irq *irq, *tmp;
|
||||
bool deleted = false;
|
||||
unsigned long flags;
|
||||
|
||||
raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
|
||||
@@ -155,11 +184,14 @@ void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
|
||||
list_del(&irq->ap_list);
|
||||
irq->vcpu = NULL;
|
||||
raw_spin_unlock(&irq->irq_lock);
|
||||
vgic_put_irq(vcpu->kvm, irq);
|
||||
deleted |= vgic_put_irq_norelease(vcpu->kvm, irq);
|
||||
}
|
||||
}
|
||||
|
||||
raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
|
||||
|
||||
if (deleted)
|
||||
vgic_release_deleted_lpis(vcpu->kvm);
|
||||
}
|
||||
|
||||
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
|
||||
@@ -399,7 +431,7 @@ retry:
|
||||
* now in the ap_list. This is safe as the caller must already hold a
|
||||
* reference on the irq.
|
||||
*/
|
||||
vgic_get_irq_kref(irq);
|
||||
vgic_get_irq_ref(irq);
|
||||
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
|
||||
irq->vcpu = vcpu;
|
||||
|
||||
@@ -630,6 +662,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
|
||||
struct vgic_irq *irq, *tmp;
|
||||
bool deleted_lpis = false;
|
||||
|
||||
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
|
||||
|
||||
@@ -657,12 +690,12 @@ retry:
|
||||
|
||||
/*
|
||||
* This vgic_put_irq call matches the
|
||||
* vgic_get_irq_kref in vgic_queue_irq_unlock,
|
||||
* vgic_get_irq_ref in vgic_queue_irq_unlock,
|
||||
* where we added the LPI to the ap_list. As
|
||||
* we remove the irq from the list, we drop
|
||||
* also drop the refcount.
|
||||
*/
|
||||
vgic_put_irq(vcpu->kvm, irq);
|
||||
deleted_lpis |= vgic_put_irq_norelease(vcpu->kvm, irq);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -725,6 +758,9 @@ retry:
|
||||
}
|
||||
|
||||
raw_spin_unlock(&vgic_cpu->ap_list_lock);
|
||||
|
||||
if (unlikely(deleted_lpis))
|
||||
vgic_release_deleted_lpis(vcpu->kvm);
|
||||
}
|
||||
|
||||
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
|
||||
@@ -818,7 +854,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
|
||||
* the AP list has been sorted already.
|
||||
*/
|
||||
if (multi_sgi && irq->priority > prio) {
|
||||
_raw_spin_unlock(&irq->irq_lock);
|
||||
raw_spin_unlock(&irq->irq_lock);
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
@@ -267,7 +267,7 @@ void vgic_v2_put(struct kvm_vcpu *vcpu);
|
||||
void vgic_v2_save_state(struct kvm_vcpu *vcpu);
|
||||
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
|
||||
|
||||
static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
|
||||
static inline bool vgic_try_get_irq_ref(struct vgic_irq *irq)
|
||||
{
|
||||
if (!irq)
|
||||
return false;
|
||||
@@ -275,12 +275,12 @@ static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
|
||||
if (irq->intid < VGIC_MIN_LPI)
|
||||
return true;
|
||||
|
||||
return kref_get_unless_zero(&irq->refcount);
|
||||
return refcount_inc_not_zero(&irq->refcount);
|
||||
}
|
||||
|
||||
static inline void vgic_get_irq_kref(struct vgic_irq *irq)
|
||||
static inline void vgic_get_irq_ref(struct vgic_irq *irq)
|
||||
{
|
||||
WARN_ON_ONCE(!vgic_try_get_irq_kref(irq));
|
||||
WARN_ON_ONCE(!vgic_try_get_irq_ref(irq));
|
||||
}
|
||||
|
||||
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
|
||||
|
||||
@@ -298,6 +298,10 @@ config AS_HAS_LVZ_EXTENSION
|
||||
config CC_HAS_ANNOTATE_TABLEJUMP
|
||||
def_bool $(cc-option,-mannotate-tablejump)
|
||||
|
||||
config RUSTC_HAS_ANNOTATE_TABLEJUMP
|
||||
depends on RUST
|
||||
def_bool $(rustc-option,-Cllvm-args=--loongarch-annotate-tablejump)
|
||||
|
||||
menu "Kernel type and options"
|
||||
|
||||
source "kernel/Kconfig.hz"
|
||||
@@ -563,10 +567,14 @@ config ARCH_STRICT_ALIGN
|
||||
-mstrict-align build parameter to prevent unaligned accesses.
|
||||
|
||||
CPUs with h/w unaligned access support:
|
||||
Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
|
||||
Loongson-2K2000/2K3000 and all of Loongson-3 series processors
|
||||
based on LoongArch.
|
||||
|
||||
CPUs without h/w unaligned access support:
|
||||
Loongson-2K500/2K1000.
|
||||
Loongson-2K0300/2K0500/2K1000.
|
||||
|
||||
If you want to make sure whether to support unaligned memory access
|
||||
on your hardware, please read the bit 20 (UAL) of CPUCFG1 register.
|
||||
|
||||
This option is enabled by default to make the kernel be able to run
|
||||
on all LoongArch systems. But you can disable it manually if you want
|
||||
|
||||
@@ -102,16 +102,21 @@ KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)
|
||||
|
||||
ifdef CONFIG_OBJTOOL
|
||||
ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP
|
||||
KBUILD_CFLAGS += -mannotate-tablejump
|
||||
else
|
||||
KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
|
||||
endif
|
||||
ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP
|
||||
KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump
|
||||
else
|
||||
KBUILD_RUSTFLAGS += -Zno-jump-tables # keep compatibility with older compilers
|
||||
endif
|
||||
ifdef CONFIG_LTO_CLANG
|
||||
# The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
|
||||
# Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to
|
||||
# be passed via '-mllvm' to ld.lld.
|
||||
KBUILD_CFLAGS += -mannotate-tablejump
|
||||
ifdef CONFIG_LTO_CLANG
|
||||
KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump
|
||||
endif
|
||||
else
|
||||
KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
|
||||
endif
|
||||
endif
|
||||
|
||||
KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat -Ccode-model=small
|
||||
|
||||
@@ -10,9 +10,8 @@
|
||||
#ifndef _ASM_LOONGARCH_ACENV_H
|
||||
#define _ASM_LOONGARCH_ACENV_H
|
||||
|
||||
/*
|
||||
* This header is required by ACPI core, but we have nothing to fill in
|
||||
* right now. Will be updated later when needed.
|
||||
*/
|
||||
#ifdef CONFIG_ARCH_STRICT_ALIGN
|
||||
#define ACPI_MISALIGNMENT_NOT_SUPPORTED
|
||||
#endif /* CONFIG_ARCH_STRICT_ALIGN */
|
||||
|
||||
#endif /* _ASM_LOONGARCH_ACENV_H */
|
||||
|
||||
@@ -16,6 +16,13 @@
|
||||
*/
|
||||
#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1)
|
||||
|
||||
/*
|
||||
* _PAGE_MODIFIED is a SW pte bit, it records page ever written on host
|
||||
* kernel, on secondary MMU it records the page writeable attribute, in
|
||||
* order for fast path handling.
|
||||
*/
|
||||
#define KVM_PAGE_WRITEABLE _PAGE_MODIFIED
|
||||
|
||||
#define _KVM_FLUSH_PGTABLE 0x1
|
||||
#define _KVM_HAS_PGMASK 0x2
|
||||
#define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
|
||||
@@ -52,10 +59,10 @@ static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
|
||||
WRITE_ONCE(*ptep, val);
|
||||
}
|
||||
|
||||
static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
|
||||
static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
|
||||
static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
|
||||
static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }
|
||||
static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & __WRITEABLE; }
|
||||
static inline int kvm_pte_writeable(kvm_pte_t pte) { return pte & KVM_PAGE_WRITEABLE; }
|
||||
|
||||
static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
|
||||
{
|
||||
@@ -69,12 +76,12 @@ static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
|
||||
|
||||
static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
|
||||
{
|
||||
return pte | _PAGE_DIRTY;
|
||||
return pte | __WRITEABLE;
|
||||
}
|
||||
|
||||
static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
|
||||
{
|
||||
return pte & ~_PAGE_DIRTY;
|
||||
return pte & ~__WRITEABLE;
|
||||
}
|
||||
|
||||
static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
|
||||
@@ -87,6 +94,11 @@ static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
|
||||
return pte & ~_PAGE_HUGE;
|
||||
}
|
||||
|
||||
static inline kvm_pte_t kvm_pte_mkwriteable(kvm_pte_t pte)
|
||||
{
|
||||
return pte | KVM_PAGE_WRITEABLE;
|
||||
}
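A rough sketch of the fast-path usage these helpers enable (names and control flow simplified; the real logic is in kvm_map_page_fast() in the mmu.c hunk later in this commit): on a write fault the pte may only be marked dirty if KVM_PAGE_WRITEABLE says the host mapping was writeable, otherwise the slow path must resolve it.

	/* Illustrative only, not the exact kernel code. */
	static int example_fast_write_fault(kvm_pte_t *ptep)
	{
		kvm_pte_t new = kvm_pte_mkyoung(*ptep);	/* record the access */

		if (!kvm_pte_dirty(new)) {
			if (!kvm_pte_writeable(new))
				return -EFAULT;		/* fall back to the slow path */
			new = kvm_pte_mkdirty(new);	/* sets __WRITEABLE (write + dirty) */
		}

		kvm_set_pte(ptep, new);
		return 0;
	}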
|
||||
|
||||
static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
|
||||
{
|
||||
return ctx->flag & _KVM_FLUSH_PGTABLE;
|
||||
|
||||
@@ -86,7 +86,7 @@ late_initcall(fdt_cpu_clk_init);
static ssize_t boardinfo_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf,
	return sysfs_emit(buf,
		       "BIOS Information\n"
		       "Vendor\t\t\t: %s\n"
		       "Version\t\t\t: %s\n"
@@ -109,6 +109,8 @@ static int __init boardinfo_init(void)
	struct kobject *loongson_kobj;

	loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
	if (!loongson_kobj)
		return -ENOMEM;

	return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
}
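sysfs_emit() is the bounds-checked replacement for sprintf() in sysfs show() callbacks: it knows the output buffer is a single PAGE_SIZE page and refuses to overrun it. A minimal sketch of the pattern, with an attribute name invented for illustration:

	/* Hypothetical read-only sysfs attribute, illustration only. */
	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "Vendor\t\t\t: %s\n", "SomeVendor");
	}
	static struct kobj_attribute example_attr = __ATTR_RO(example);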
|
||||
|
||||
@@ -51,12 +51,13 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
|
||||
if (task == current) {
|
||||
regs->regs[3] = (unsigned long)__builtin_frame_address(0);
|
||||
regs->csr_era = (unsigned long)__builtin_return_address(0);
|
||||
regs->regs[22] = 0;
|
||||
} else {
|
||||
regs->regs[3] = thread_saved_fp(task);
|
||||
regs->csr_era = thread_saved_ra(task);
|
||||
regs->regs[22] = task->thread.reg22;
|
||||
}
|
||||
regs->regs[1] = 0;
|
||||
regs->regs[22] = 0;
|
||||
|
||||
for (unwind_start(&state, task, regs);
|
||||
!unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
|
||||
|
||||
@@ -54,6 +54,9 @@ static int __init init_vdso(void)
|
||||
vdso_info.code_mapping.pages =
|
||||
kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);
|
||||
|
||||
if (!vdso_info.code_mapping.pages)
|
||||
return -ENOMEM;
|
||||
|
||||
pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
|
||||
for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
|
||||
vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
|
||||
|
||||
@@ -778,10 +778,8 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu)
|
||||
return 0;
|
||||
default:
|
||||
return KVM_HCALL_INVALID_CODE;
|
||||
};
|
||||
|
||||
return KVM_HCALL_INVALID_CODE;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
|
||||
|
||||
@@ -426,21 +426,26 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
|
||||
struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
|
||||
|
||||
data = (void __user *)attr->addr;
|
||||
switch (type) {
|
||||
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
|
||||
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
|
||||
if (copy_from_user(&val, data, 4))
|
||||
return -EFAULT;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&s->lock, flags);
|
||||
switch (type) {
|
||||
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
|
||||
if (copy_from_user(&val, data, 4))
|
||||
ret = -EFAULT;
|
||||
else {
|
||||
if (val >= EIOINTC_ROUTE_MAX_VCPUS)
|
||||
ret = -EINVAL;
|
||||
else
|
||||
s->num_cpu = val;
|
||||
}
|
||||
if (val >= EIOINTC_ROUTE_MAX_VCPUS)
|
||||
ret = -EINVAL;
|
||||
else
|
||||
s->num_cpu = val;
|
||||
break;
|
||||
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
|
||||
if (copy_from_user(&s->features, data, 4))
|
||||
ret = -EFAULT;
|
||||
s->features = val;
|
||||
if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
|
||||
s->status |= BIT(EIOINTC_ENABLE);
|
||||
break;
|
||||
@@ -462,19 +467,17 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
|
||||
|
||||
static int kvm_eiointc_regs_access(struct kvm_device *dev,
|
||||
struct kvm_device_attr *attr,
|
||||
bool is_write)
|
||||
bool is_write, int *data)
|
||||
{
|
||||
int addr, cpu, offset, ret = 0;
|
||||
unsigned long flags;
|
||||
void *p = NULL;
|
||||
void __user *data;
|
||||
struct loongarch_eiointc *s;
|
||||
|
||||
s = dev->kvm->arch.eiointc;
|
||||
addr = attr->attr;
|
||||
cpu = addr >> 16;
|
||||
addr &= 0xffff;
|
||||
data = (void __user *)attr->addr;
|
||||
switch (addr) {
|
||||
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
|
||||
offset = (addr - EIOINTC_NODETYPE_START) / 4;
|
||||
@@ -513,13 +516,10 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&s->lock, flags);
|
||||
if (is_write) {
|
||||
if (copy_from_user(p, data, 4))
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
if (copy_to_user(data, p, 4))
|
||||
ret = -EFAULT;
|
||||
}
|
||||
if (is_write)
|
||||
memcpy(p, data, 4);
|
||||
else
|
||||
memcpy(data, p, 4);
|
||||
spin_unlock_irqrestore(&s->lock, flags);
|
||||
|
||||
return ret;
|
||||
@@ -527,19 +527,17 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
|
||||
|
||||
static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
|
||||
struct kvm_device_attr *attr,
|
||||
bool is_write)
|
||||
bool is_write, int *data)
|
||||
{
|
||||
int addr, ret = 0;
|
||||
unsigned long flags;
|
||||
void *p = NULL;
|
||||
void __user *data;
|
||||
struct loongarch_eiointc *s;
|
||||
|
||||
s = dev->kvm->arch.eiointc;
|
||||
addr = attr->attr;
|
||||
addr &= 0xffff;
|
||||
|
||||
data = (void __user *)attr->addr;
|
||||
switch (addr) {
|
||||
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
|
||||
if (is_write)
|
||||
@@ -561,13 +559,10 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
|
||||
return -EINVAL;
|
||||
}
|
||||
spin_lock_irqsave(&s->lock, flags);
|
||||
if (is_write) {
|
||||
if (copy_from_user(p, data, 4))
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
if (copy_to_user(data, p, 4))
|
||||
ret = -EFAULT;
|
||||
}
|
||||
if (is_write)
|
||||
memcpy(p, data, 4);
|
||||
else
|
||||
memcpy(data, p, 4);
|
||||
spin_unlock_irqrestore(&s->lock, flags);
|
||||
|
||||
return ret;
|
||||
@@ -576,11 +571,27 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
|
||||
static int kvm_eiointc_get_attr(struct kvm_device *dev,
|
||||
struct kvm_device_attr *attr)
|
||||
{
|
||||
int ret, data;
|
||||
|
||||
switch (attr->group) {
|
||||
case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
|
||||
return kvm_eiointc_regs_access(dev, attr, false);
|
||||
ret = kvm_eiointc_regs_access(dev, attr, false, &data);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (copy_to_user((void __user *)attr->addr, &data, 4))
|
||||
ret = -EFAULT;
|
||||
|
||||
return ret;
|
||||
case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
|
||||
return kvm_eiointc_sw_status_access(dev, attr, false);
|
||||
ret = kvm_eiointc_sw_status_access(dev, attr, false, &data);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (copy_to_user((void __user *)attr->addr, &data, 4))
|
||||
ret = -EFAULT;
|
||||
|
||||
return ret;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -589,13 +600,21 @@ static int kvm_eiointc_get_attr(struct kvm_device *dev,
|
||||
static int kvm_eiointc_set_attr(struct kvm_device *dev,
|
||||
struct kvm_device_attr *attr)
|
||||
{
|
||||
int data;
|
||||
|
||||
switch (attr->group) {
|
||||
case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
|
||||
return kvm_eiointc_ctrl_access(dev, attr);
|
||||
case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
|
||||
return kvm_eiointc_regs_access(dev, attr, true);
|
||||
if (copy_from_user(&data, (void __user *)attr->addr, 4))
|
||||
return -EFAULT;
|
||||
|
||||
return kvm_eiointc_regs_access(dev, attr, true, &data);
|
||||
case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
|
||||
return kvm_eiointc_sw_status_access(dev, attr, true);
|
||||
if (copy_from_user(&data, (void __user *)attr->addr, 4))
|
||||
return -EFAULT;
|
||||
|
||||
return kvm_eiointc_sw_status_access(dev, attr, true, &data);
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -348,6 +348,7 @@ static int kvm_pch_pic_regs_access(struct kvm_device *dev,
|
||||
struct kvm_device_attr *attr,
|
||||
bool is_write)
|
||||
{
|
||||
char buf[8];
|
||||
int addr, offset, len = 8, ret = 0;
|
||||
void __user *data;
|
||||
void *p = NULL;
|
||||
@@ -397,17 +398,23 @@ static int kvm_pch_pic_regs_access(struct kvm_device *dev,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock(&s->lock);
|
||||
/* write or read value according to is_write */
|
||||
if (is_write) {
|
||||
if (copy_from_user(p, data, len))
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
if (copy_to_user(data, p, len))
|
||||
ret = -EFAULT;
|
||||
if (copy_from_user(buf, data, len))
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
spin_lock(&s->lock);
|
||||
if (is_write)
|
||||
memcpy(p, buf, len);
|
||||
else
|
||||
memcpy(buf, p, len);
|
||||
spin_unlock(&s->lock);
|
||||
|
||||
if (!is_write) {
|
||||
if (copy_to_user(data, buf, len))
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -569,7 +569,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
|
||||
/* Track access to pages marked old */
|
||||
new = kvm_pte_mkyoung(*ptep);
|
||||
if (write && !kvm_pte_dirty(new)) {
|
||||
if (!kvm_pte_write(new)) {
|
||||
if (!kvm_pte_writeable(new)) {
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
@@ -856,9 +856,9 @@ retry:
|
||||
prot_bits |= _CACHE_SUC;
|
||||
|
||||
if (writeable) {
|
||||
prot_bits |= _PAGE_WRITE;
|
||||
prot_bits = kvm_pte_mkwriteable(prot_bits);
|
||||
if (write)
|
||||
prot_bits |= __WRITEABLE;
|
||||
prot_bits = kvm_pte_mkdirty(prot_bits);
|
||||
}
|
||||
|
||||
/* Disable dirty logging on HugePages */
|
||||
@@ -904,7 +904,7 @@ retry:
|
||||
kvm_release_faultin_page(kvm, page, false, writeable);
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
|
||||
if (prot_bits & _PAGE_DIRTY)
|
||||
if (kvm_pte_dirty(prot_bits))
|
||||
mark_page_dirty_in_slot(kvm, memslot, gfn);
|
||||
|
||||
out:
|
||||
|
||||
@@ -16,11 +16,11 @@
|
||||
#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
|
||||
#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
|
||||
|
||||
/* Load/Store return codes */
|
||||
#define ZPCI_PCI_LS_OK 0
|
||||
#define ZPCI_PCI_LS_ERR 1
|
||||
#define ZPCI_PCI_LS_BUSY 2
|
||||
#define ZPCI_PCI_LS_INVAL_HANDLE 3
|
||||
/* PCI instruction condition codes */
|
||||
#define ZPCI_CC_OK 0
|
||||
#define ZPCI_CC_ERR 1
|
||||
#define ZPCI_CC_BUSY 2
|
||||
#define ZPCI_CC_INVAL_HANDLE 3
|
||||
|
||||
/* Load/Store address space identifiers */
|
||||
#define ZPCI_PCIAS_MEMIO_0 0
|
||||
|
||||
@@ -2778,12 +2778,19 @@ static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)

static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
{
struct mm_struct *mm = kvm->mm;
struct page *page = NULL;
int locked = 1;

if (mmget_not_zero(mm)) {
mmap_read_lock(mm);
get_user_pages_remote(mm, uaddr, 1, FOLL_WRITE,
&page, &locked);
if (locked)
mmap_read_unlock(mm);
mmput(mm);
}

mmap_read_lock(kvm->mm);
get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
&page, NULL);
mmap_read_unlock(kvm->mm);
return page;
}
|
||||
|
||||
|
||||
@@ -4864,12 +4864,12 @@ static void kvm_s390_assert_primary_as(struct kvm_vcpu *vcpu)
|
||||
* @vcpu: the vCPU whose gmap is to be fixed up
|
||||
* @gfn: the guest frame number used for memslots (including fake memslots)
|
||||
* @gaddr: the gmap address, does not have to match @gfn for ucontrol gmaps
|
||||
* @flags: FOLL_* flags
|
||||
* @foll: FOLL_* flags
|
||||
*
|
||||
* Return: 0 on success, < 0 in case of error.
|
||||
* Context: The mm lock must not be held before calling. May sleep.
|
||||
*/
|
||||
int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags)
|
||||
int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int foll)
|
||||
{
|
||||
struct kvm_memory_slot *slot;
|
||||
unsigned int fault_flags;
|
||||
@@ -4883,13 +4883,13 @@ int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, u
|
||||
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
|
||||
return vcpu_post_run_addressing_exception(vcpu);
|
||||
|
||||
fault_flags = flags & FOLL_WRITE ? FAULT_FLAG_WRITE : 0;
|
||||
fault_flags = foll & FOLL_WRITE ? FAULT_FLAG_WRITE : 0;
|
||||
if (vcpu->arch.gmap->pfault_enabled)
|
||||
flags |= FOLL_NOWAIT;
|
||||
foll |= FOLL_NOWAIT;
|
||||
vmaddr = __gfn_to_hva_memslot(slot, gfn);
|
||||
|
||||
try_again:
|
||||
pfn = __kvm_faultin_pfn(slot, gfn, flags, &writable, &page);
|
||||
pfn = __kvm_faultin_pfn(slot, gfn, foll, &writable, &page);
|
||||
|
||||
/* Access outside memory, inject addressing exception */
|
||||
if (is_noslot_pfn(pfn))
|
||||
@@ -4905,7 +4905,7 @@ try_again:
|
||||
return 0;
|
||||
vcpu->stat.pfault_sync++;
|
||||
/* Could not setup async pfault, try again synchronously */
|
||||
flags &= ~FOLL_NOWAIT;
|
||||
foll &= ~FOLL_NOWAIT;
|
||||
goto try_again;
|
||||
}
|
||||
/* Any other error */
|
||||
@@ -4925,7 +4925,7 @@ try_again:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, unsigned int flags)
|
||||
static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, unsigned int foll)
|
||||
{
|
||||
unsigned long gaddr_tmp;
|
||||
gfn_t gfn;
|
||||
@@ -4950,18 +4950,18 @@ static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, un
|
||||
}
|
||||
gfn = gpa_to_gfn(gaddr_tmp);
|
||||
}
|
||||
return __kvm_s390_handle_dat_fault(vcpu, gfn, gaddr, flags);
|
||||
return __kvm_s390_handle_dat_fault(vcpu, gfn, gaddr, foll);
|
||||
}
|
||||
|
||||
static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned int flags = 0;
|
||||
unsigned int foll = 0;
|
||||
unsigned long gaddr;
|
||||
int rc;
|
||||
|
||||
gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
|
||||
if (kvm_s390_cur_gmap_fault_is_write())
|
||||
flags = FAULT_FLAG_WRITE;
|
||||
foll = FOLL_WRITE;
|
||||
|
||||
switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
|
||||
case 0:
|
||||
@@ -5003,7 +5003,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
|
||||
send_sig(SIGSEGV, current, 0);
|
||||
if (rc != -ENXIO)
|
||||
break;
|
||||
flags = FAULT_FLAG_WRITE;
|
||||
foll = FOLL_WRITE;
|
||||
fallthrough;
|
||||
case PGM_PROTECTION:
|
||||
case PGM_SEGMENT_TRANSLATION:
|
||||
@@ -5013,7 +5013,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
|
||||
case PGM_REGION_SECOND_TRANS:
|
||||
case PGM_REGION_THIRD_TRANS:
|
||||
kvm_s390_assert_primary_as(vcpu);
|
||||
return vcpu_dat_fault_handler(vcpu, gaddr, flags);
|
||||
return vcpu_dat_fault_handler(vcpu, gaddr, foll);
|
||||
default:
|
||||
KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
|
||||
current->thread.gmap_int_code, current->thread.gmap_teid.val);
|
||||
|
||||
@@ -624,6 +624,17 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
|
||||
int cc, ret;
|
||||
u16 dummy;
|
||||
|
||||
/* Add the notifier only once. No races because we hold kvm->lock */
|
||||
if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
|
||||
/* The notifier will be unregistered when the VM is destroyed */
|
||||
kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
|
||||
ret = mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
|
||||
if (ret) {
|
||||
kvm->arch.pv.mmu_notifier.ops = NULL;
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = kvm_s390_pv_alloc_vm(kvm);
|
||||
if (ret)
|
||||
return ret;
|
||||
@@ -659,11 +670,6 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
|
||||
return -EIO;
|
||||
}
|
||||
kvm->arch.gmap->guest_handle = uvcb.guest_handle;
|
||||
/* Add the notifier only once. No races because we hold kvm->lock */
|
||||
if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
|
||||
kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
|
||||
mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -1250,10 +1250,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
device_set_wakeup_capable(&vu_dev->vdev.dev, true);

rc = register_virtio_device(&vu_dev->vdev);
if (rc)
if (rc) {
put_device(&vu_dev->vdev.dev);
return rc;
}
vu_dev->registered = 1;
return rc;
return 0;

error_init:
os_close_file(vu_dev->sock);

@@ -535,7 +535,7 @@ ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
cmsg->cmsg_type != SCM_RIGHTS)
return n;

memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len);
memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len - CMSG_LEN(0));
return n;
}
|
||||
|
||||
|
||||
@@ -20,8 +20,7 @@
|
||||
|
||||
void stack_protections(unsigned long address)
|
||||
{
|
||||
if (mprotect((void *) address, UM_THREAD_SIZE,
|
||||
PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
|
||||
if (mprotect((void *) address, UM_THREAD_SIZE, PROT_READ | PROT_WRITE) < 0)
|
||||
panic("protecting stack failed, errno = %d", errno);
|
||||
}
|
||||
|
||||
|
||||
@@ -562,6 +562,24 @@ enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
|
||||
|
||||
extern struct ghcb *boot_ghcb;
|
||||
|
||||
static inline void sev_evict_cache(void *va, int npages)
|
||||
{
|
||||
volatile u8 val __always_unused;
|
||||
u8 *bytes = va;
|
||||
int page_idx;
|
||||
|
||||
/*
|
||||
* For SEV guests, a read from the first/last cache-lines of a 4K page
|
||||
* using the guest key is sufficient to cause a flush of all cache-lines
|
||||
* associated with that 4K page without incurring all the overhead of a
|
||||
* full CLFLUSH sequence.
|
||||
*/
|
||||
for (page_idx = 0; page_idx < npages; page_idx++) {
|
||||
val = bytes[page_idx * PAGE_SIZE];
|
||||
val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
|
||||
}
|
||||
}
|
||||
|
||||
#else /* !CONFIG_AMD_MEM_ENCRYPT */
|
||||
|
||||
#define snp_vmpl 0
|
||||
@@ -605,6 +623,7 @@ static inline int snp_send_guest_request(struct snp_msg_desc *mdesc,
|
||||
static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
|
||||
static inline void __init snp_secure_tsc_prepare(void) { }
|
||||
static inline void __init snp_secure_tsc_init(void) { }
|
||||
static inline void sev_evict_cache(void *va, int npages) {}
|
||||
|
||||
#endif /* CONFIG_AMD_MEM_ENCRYPT */
|
||||
|
||||
@@ -619,24 +638,6 @@ int rmp_make_shared(u64 pfn, enum pg_level level);
|
||||
void snp_leak_pages(u64 pfn, unsigned int npages);
|
||||
void kdump_sev_callback(void);
|
||||
void snp_fixup_e820_tables(void);
|
||||
|
||||
static inline void sev_evict_cache(void *va, int npages)
|
||||
{
|
||||
volatile u8 val __always_unused;
|
||||
u8 *bytes = va;
|
||||
int page_idx;
|
||||
|
||||
/*
|
||||
* For SEV guests, a read from the first/last cache-lines of a 4K page
|
||||
* using the guest key is sufficient to cause a flush of all cache-lines
|
||||
* associated with that 4K page without incurring all the overhead of a
|
||||
* full CLFLUSH sequence.
|
||||
*/
|
||||
for (page_idx = 0; page_idx < npages; page_idx++) {
|
||||
val = bytes[page_idx * PAGE_SIZE];
|
||||
val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
|
||||
}
|
||||
}
|
||||
#else
|
||||
static inline bool snp_probe_rmptable_info(void) { return false; }
|
||||
static inline int snp_rmptable_init(void) { return -ENOSYS; }
|
||||
@@ -652,7 +653,6 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV
|
||||
static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
|
||||
static inline void kdump_sev_callback(void) { }
|
||||
static inline void snp_fixup_e820_tables(void) {}
|
||||
static inline void sev_evict_cache(void *va, int npages) {}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
@@ -4046,8 +4046,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
u64 cr8;
|
||||
|
||||
if (nested_svm_virtualize_tpr(vcpu) ||
|
||||
kvm_vcpu_apicv_active(vcpu))
|
||||
if (nested_svm_virtualize_tpr(vcpu))
|
||||
return;
|
||||
|
||||
cr8 = kvm_get_cr8(vcpu);
|
||||
|
||||
@@ -970,6 +970,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
|
||||
}
|
||||
|
||||
lock_sock(sk);
|
||||
if (ctx->write) {
|
||||
release_sock(sk);
|
||||
return -EBUSY;
|
||||
}
|
||||
ctx->write = true;
|
||||
|
||||
if (ctx->init && !ctx->more) {
|
||||
if (ctx->used) {
|
||||
err = -EINVAL;
|
||||
@@ -1019,6 +1025,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
|
||||
continue;
|
||||
}
|
||||
|
||||
ctx->merge = 0;
|
||||
|
||||
if (!af_alg_writable(sk)) {
|
||||
err = af_alg_wait_for_wmem(sk, msg->msg_flags);
|
||||
if (err)
|
||||
@@ -1058,7 +1066,6 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
|
||||
ctx->used += plen;
|
||||
copied += plen;
|
||||
size -= plen;
|
||||
ctx->merge = 0;
|
||||
} else {
|
||||
do {
|
||||
struct page *pg;
|
||||
@@ -1104,6 +1111,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
|
||||
|
||||
unlock:
|
||||
af_alg_data_wakeup(sk);
|
||||
ctx->write = false;
|
||||
release_sock(sk);
|
||||
|
||||
return copied ?: err;
|
||||
|
||||
@@ -1330,6 +1330,7 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
|
||||
lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
|
||||
else
|
||||
lim.max_write_zeroes_sectors = 0;
|
||||
lim.max_hw_wzeroes_unmap_sectors = 0;
|
||||
|
||||
if ((lim.discard_granularity >> SECTOR_SHIFT) >
|
||||
lim.max_hw_discard_sectors) {
|
||||
|
||||
@@ -1795,6 +1795,7 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
|
||||
u32 index)
|
||||
{
|
||||
zram_slot_lock(zram, index);
|
||||
zram_free_page(zram, index);
|
||||
zram_set_flag(zram, index, ZRAM_SAME);
|
||||
zram_set_handle(zram, index, fill);
|
||||
zram_slot_unlock(zram, index);
|
||||
@@ -1832,6 +1833,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
|
||||
kunmap_local(src);
|
||||
|
||||
zram_slot_lock(zram, index);
|
||||
zram_free_page(zram, index);
|
||||
zram_set_flag(zram, index, ZRAM_HUGE);
|
||||
zram_set_handle(zram, index, handle);
|
||||
zram_set_obj_size(zram, index, PAGE_SIZE);
|
||||
@@ -1855,11 +1857,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
|
||||
unsigned long element;
|
||||
bool same_filled;
|
||||
|
||||
/* First, free memory allocated to this slot (if any) */
|
||||
zram_slot_lock(zram, index);
|
||||
zram_free_page(zram, index);
|
||||
zram_slot_unlock(zram, index);
|
||||
|
||||
mem = kmap_local_page(page);
|
||||
same_filled = page_same_filled(mem, &element);
|
||||
kunmap_local(mem);
|
||||
@@ -1901,6 +1898,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
|
||||
zcomp_stream_put(zstrm);
|
||||
|
||||
zram_slot_lock(zram, index);
|
||||
zram_free_page(zram, index);
|
||||
zram_set_handle(zram, index, handle);
|
||||
zram_set_obj_size(zram, index, comp_len);
|
||||
zram_slot_unlock(zram, index);
|
||||
|
||||
@@ -303,6 +303,9 @@ void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
|
||||
pm_clk_destroy(dev);
|
||||
}
|
||||
|
||||
static struct device_node *cpg_mstp_pd_np __initdata = NULL;
|
||||
static struct generic_pm_domain *cpg_mstp_pd_genpd __initdata = NULL;
|
||||
|
||||
void __init cpg_mstp_add_clk_domain(struct device_node *np)
|
||||
{
|
||||
struct generic_pm_domain *pd;
|
||||
@@ -324,5 +327,20 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
|
||||
pd->detach_dev = cpg_mstp_detach_dev;
|
||||
pm_genpd_init(pd, &pm_domain_always_on_gov, false);
|
||||
|
||||
of_genpd_add_provider_simple(np, pd);
|
||||
cpg_mstp_pd_np = of_node_get(np);
|
||||
cpg_mstp_pd_genpd = pd;
|
||||
}
|
||||
|
||||
static int __init cpg_mstp_pd_init_provider(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
if (!cpg_mstp_pd_np)
|
||||
return -ENODEV;
|
||||
|
||||
error = of_genpd_add_provider_simple(cpg_mstp_pd_np, cpg_mstp_pd_genpd);
|
||||
|
||||
of_node_put(cpg_mstp_pd_np);
|
||||
return error;
|
||||
}
|
||||
postcore_initcall(cpg_mstp_pd_init_provider);
|
||||
|
||||
@@ -185,7 +185,7 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
|
||||
p &= (1 << cmp->p.width) - 1;
|
||||
|
||||
if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
|
||||
rate = (parent_rate / p) / m;
|
||||
rate = (parent_rate / (p + cmp->p.offset)) / m;
|
||||
else
|
||||
rate = (parent_rate >> p) / m;
|
||||
|
||||
|
||||
@@ -2430,7 +2430,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
|
||||
{
|
||||
int error;
|
||||
|
||||
__sev_platform_shutdown_locked(NULL);
|
||||
__sev_platform_shutdown_locked(&error);
|
||||
|
||||
if (sev_es_tmr) {
|
||||
/*
|
||||
|
||||
@@ -211,8 +211,8 @@ static int
dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 };
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) = { 0 };
enum dpll_clock_quality_level ql;
int ret;

@@ -221,7 +221,7 @@ dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack);
if (ret)
return ret;
for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX)
for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1)
if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql))
return -EMSGSIZE;

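The two changes above fix the same off-by-one: DPLL_CLOCK_QUALITY_LEVEL_MAX is itself a valid level, so both the bitmap size and the for_each_set_bit() bound need MAX + 1 bits, otherwise the highest quality level can never be declared or reported. A minimal generic illustration (assumed example, not dpll code):

/*
 * Generic illustration of the off-by-one fixed above: when the highest
 * enum value is itself valid, the bitmap and the iteration bound both
 * need MAX + 1 bits, otherwise the last level is silently dropped.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>

enum example_quality { EXQ_A, EXQ_B, EXQ_C, EXQ_MAX = EXQ_C };

static void report_levels(void (*report)(unsigned int))
{
	DECLARE_BITMAP(qls, EXQ_MAX + 1) = { 0 };
	unsigned int ql;

	set_bit(EXQ_MAX, qls);			/* valid: bit EXQ_MAX exists */
	for_each_set_bit(ql, qls, EXQ_MAX + 1)	/* bound includes EXQ_MAX */
		report(ql);
}
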
@@ -41,7 +41,7 @@
|
||||
/*
|
||||
* ABI version history is documented in linux/firewire-cdev.h.
|
||||
*/
|
||||
#define FW_CDEV_KERNEL_VERSION 5
|
||||
#define FW_CDEV_KERNEL_VERSION 6
|
||||
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
|
||||
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
|
||||
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
|
||||
|
||||
@@ -942,8 +942,9 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
|
||||
{
|
||||
struct acpi_device *adev = to_acpi_device_node(fwnode);
|
||||
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
|
||||
struct acpi_gpio_info info;
|
||||
struct acpi_gpio_info info = {};
|
||||
struct gpio_desc *desc;
|
||||
int ret;
|
||||
|
||||
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
|
||||
if (IS_ERR(desc))
|
||||
@@ -957,6 +958,12 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
|
||||
|
||||
acpi_gpio_update_gpiod_flags(dflags, &info);
|
||||
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
|
||||
|
||||
/* ACPI uses hundredths of milliseconds units */
|
||||
ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
return desc;
|
||||
}
|
||||
|
||||
@@ -992,7 +999,7 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
|
||||
int ret;
|
||||
|
||||
for (i = 0, idx = 0; idx <= index; i++) {
|
||||
struct acpi_gpio_info info;
|
||||
struct acpi_gpio_info info = {};
|
||||
struct gpio_desc *desc;
|
||||
|
||||
/* Ignore -EPROBE_DEFER, it only matters if idx matches */
|
||||
|
||||
@@ -317,6 +317,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
|
||||
.ignore_wake = "PNP0C50:00@8",
|
||||
},
|
||||
},
|
||||
{
|
||||
/*
|
||||
* Same as G1619-04. New model.
|
||||
*/
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"),
|
||||
},
|
||||
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
|
||||
.ignore_wake = "PNP0C50:00@8",
|
||||
},
|
||||
},
|
||||
{
|
||||
/*
|
||||
* Spurious wakeups from GPIO 11
|
||||
|
||||
@@ -250,16 +250,24 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
|
||||
|
||||
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
|
||||
{
|
||||
if (adev->kfd.dev)
|
||||
kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
|
||||
if (adev->kfd.dev) {
|
||||
if (adev->in_s0ix)
|
||||
kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
|
||||
else
|
||||
kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
|
||||
}
|
||||
}
|
||||
|
||||
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
|
||||
{
|
||||
int r = 0;
|
||||
|
||||
if (adev->kfd.dev)
|
||||
r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
|
||||
if (adev->kfd.dev) {
|
||||
if (adev->in_s0ix)
|
||||
r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
|
||||
else
|
||||
r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
@@ -426,7 +426,9 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
|
||||
int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
|
||||
void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
|
||||
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
|
||||
int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
|
||||
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
|
||||
int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
|
||||
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
|
||||
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
|
||||
bool retry_fault);
|
||||
@@ -516,11 +518,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
|
||||
{
|
||||
return false;
|
||||
|
||||
@@ -5136,7 +5136,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
|
||||
adev->in_suspend = true;
|
||||
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
if (!adev->in_s0ix && !adev->in_runpm)
|
||||
if (!adev->in_runpm)
|
||||
amdgpu_amdkfd_suspend_process(adev);
|
||||
amdgpu_virt_fini_data_exchange(adev);
|
||||
r = amdgpu_virt_request_full_gpu(adev, false);
|
||||
@@ -5156,10 +5156,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
|
||||
|
||||
amdgpu_device_ip_suspend_phase1(adev);
|
||||
|
||||
if (!adev->in_s0ix) {
|
||||
amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
|
||||
amdgpu_userq_suspend(adev);
|
||||
}
|
||||
amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
|
||||
amdgpu_userq_suspend(adev);
|
||||
|
||||
r = amdgpu_device_evict_resources(adev);
|
||||
if (r)
|
||||
@@ -5254,15 +5252,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (!adev->in_s0ix) {
|
||||
r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
|
||||
if (r)
|
||||
goto exit;
|
||||
r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
|
||||
if (r)
|
||||
goto exit;
|
||||
|
||||
r = amdgpu_userq_resume(adev);
|
||||
if (r)
|
||||
goto exit;
|
||||
}
|
||||
r = amdgpu_userq_resume(adev);
|
||||
if (r)
|
||||
goto exit;
|
||||
|
||||
r = amdgpu_device_ip_late_init(adev);
|
||||
if (r)
|
||||
@@ -5275,7 +5271,7 @@ exit:
|
||||
amdgpu_virt_init_data_exchange(adev);
|
||||
amdgpu_virt_release_full_gpu(adev, true);
|
||||
|
||||
if (!adev->in_s0ix && !r && !adev->in_runpm)
|
||||
if (!r && !adev->in_runpm)
|
||||
r = amdgpu_amdkfd_resume_process(adev);
|
||||
}
|
||||
|
||||
|
||||
@@ -1654,6 +1654,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
}
|
||||
}
|
||||
break;
|
||||
case IP_VERSION(11, 0, 1):
|
||||
case IP_VERSION(11, 0, 4):
|
||||
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
|
||||
adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
|
||||
if (adev->gfx.pfp_fw_version >= 102 &&
|
||||
adev->gfx.mec_fw_version >= 66 &&
|
||||
adev->mes.fw_version[0] >= 128) {
|
||||
adev->gfx.enable_cleaner_shader = true;
|
||||
r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
|
||||
if (r) {
|
||||
adev->gfx.enable_cleaner_shader = false;
|
||||
dev_err(adev->dev, "Failed to initialize cleaner shader\n");
|
||||
}
|
||||
}
|
||||
break;
|
||||
case IP_VERSION(11, 5, 0):
|
||||
case IP_VERSION(11, 5, 1):
|
||||
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
|
||||
|
||||
@@ -1550,6 +1550,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
|
||||
{
|
||||
struct kfd_node *node;
|
||||
int i, r;
|
||||
|
||||
if (!kfd->init_complete)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < kfd->num_nodes; i++) {
|
||||
node = kfd->nodes[i];
|
||||
r = node->dqm->ops.unhalt(node->dqm);
|
||||
if (r) {
|
||||
dev_err(kfd_device, "Error in starting scheduler\n");
|
||||
return r;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
|
||||
{
|
||||
struct kfd_node *node;
|
||||
@@ -1567,6 +1586,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
|
||||
return node->dqm->ops.halt(node->dqm);
|
||||
}
|
||||
|
||||
int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
|
||||
{
|
||||
struct kfd_node *node;
|
||||
int i, r;
|
||||
|
||||
if (!kfd->init_complete)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < kfd->num_nodes; i++) {
|
||||
node = kfd->nodes[i];
|
||||
r = node->dqm->ops.halt(node->dqm);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
|
||||
{
|
||||
struct kfd_node *node;
|
||||
|
||||
@@ -8717,7 +8717,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
|
||||
static void manage_dm_interrupts(struct amdgpu_device *adev,
|
||||
struct amdgpu_crtc *acrtc,
|
||||
struct dm_crtc_state *acrtc_state)
|
||||
{
|
||||
{ /*
|
||||
* We cannot be sure that the frontend index maps to the same
|
||||
* backend index - some even map to more than one.
|
||||
* So we have to go through the CRTC to find the right IRQ.
|
||||
*/
|
||||
int irq_type = amdgpu_display_crtc_idx_to_irq_type(
|
||||
adev,
|
||||
acrtc->crtc_id);
|
||||
struct drm_device *dev = adev_to_drm(adev);
|
||||
|
||||
struct drm_vblank_crtc_config config = {0};
|
||||
struct dc_crtc_timing *timing;
|
||||
int offdelay;
|
||||
@@ -8770,7 +8779,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
|
||||
|
||||
drm_crtc_vblank_on_config(&acrtc->base,
|
||||
&config);
|
||||
/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/
|
||||
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
|
||||
case IP_VERSION(3, 0, 0):
|
||||
case IP_VERSION(3, 0, 2):
|
||||
case IP_VERSION(3, 0, 3):
|
||||
case IP_VERSION(3, 2, 0):
|
||||
if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
|
||||
drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
|
||||
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
|
||||
if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
|
||||
drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
|
||||
#endif
|
||||
}
|
||||
|
||||
} else {
|
||||
/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/
|
||||
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
|
||||
case IP_VERSION(3, 0, 0):
|
||||
case IP_VERSION(3, 0, 2):
|
||||
case IP_VERSION(3, 0, 3):
|
||||
case IP_VERSION(3, 2, 0):
|
||||
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
|
||||
if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
|
||||
drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
|
||||
#endif
|
||||
if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
|
||||
drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
|
||||
}
|
||||
|
||||
drm_crtc_vblank_off(&acrtc->base);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2236,7 +2236,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
|
||||
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) {
|
||||
ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -2677,7 +2677,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
|
||||
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
|
||||
NULL, anx7625_intr_hpd_isr,
|
||||
IRQF_TRIGGER_FALLING |
|
||||
IRQF_ONESHOT,
|
||||
IRQF_ONESHOT | IRQF_NO_AUTOEN,
|
||||
"anx7625-intp", platform);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(dev, "fail to request irq\n");
|
||||
@@ -2746,8 +2746,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
|
||||
}
|
||||
|
||||
/* Add work function */
|
||||
if (platform->pdata.intp_irq)
|
||||
if (platform->pdata.intp_irq) {
|
||||
enable_irq(platform->pdata.intp_irq);
|
||||
queue_work(platform->workqueue, &platform->work);
|
||||
}
|
||||
|
||||
if (platform->pdata.audio_en)
|
||||
anx7625_register_audio(dev, platform);
|
||||
|
||||
@@ -1984,8 +1984,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
|
||||
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
|
||||
|
||||
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
|
||||
if (!mhdp_state->current_mode)
|
||||
return;
|
||||
if (!mhdp_state->current_mode) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
drm_mode_set_name(mhdp_state->current_mode);
|
||||
|
||||
|
||||
@@ -2432,8 +2432,6 @@ static const struct drm_gpuvm_ops lock_ops = {
|
||||
*
|
||||
* The expected usage is::
|
||||
*
|
||||
* .. code-block:: c
|
||||
*
|
||||
* vm_bind {
|
||||
* struct drm_exec exec;
|
||||
*
|
||||
|
||||
@@ -546,7 +546,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
|
||||
luminance_range->max_luminance,
|
||||
panel->vbt.backlight.pwm_freq_hz,
|
||||
intel_dp->edp_dpcd, ¤t_level, ¤t_mode,
|
||||
false);
|
||||
panel->backlight.edp.vesa.luminance_control_support);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
||||
@@ -117,6 +117,7 @@ enum xe_guc_action {
|
||||
XE_GUC_ACTION_ENTER_S_STATE = 0x501,
|
||||
XE_GUC_ACTION_EXIT_S_STATE = 0x502,
|
||||
XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
|
||||
XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
|
||||
XE_GUC_ACTION_SCHED_CONTEXT = 0x1000,
|
||||
XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
|
||||
XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
|
||||
|
||||
@@ -17,6 +17,7 @@
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
* | | | - `GuC Opt In Feature KLVs`_ |
* | | | - `GuC Scheduling Policies KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
@@ -152,6 +153,30 @@ enum {
#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_KEY 0x4003
#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u

/**
* DOC: GuC Scheduling Policies KLVs
*
* `GuC KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV.
*
* _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001
* Some platforms do not allow concurrent execution of RCS and CCS
* workloads from different address spaces. By default, the GuC prioritizes
* RCS submissions over CCS ones, which can lead to CCS workloads being
* significantly (or completely) starved of execution time. This KLV allows
* the driver to specify a quantum (in ms) and a ratio (percentage value
* between 0 and 100), and the GuC will prioritize the CCS for that
* percentage of each quantum. For example, specifying 100ms and 30% will
* make the GuC prioritize the CCS for 30ms of every 100ms.
* Note that this does not necessarily mean that RCS and CCS engines will
* only be active for their percentage of the quantum, as the restriction
* only kicks in if both classes are fully busy with non-compatible address
* spaces; i.e., if one engine is idle or running the same address space,
* a pending job on the other engine will still be submitted to the HW no
* matter what the ratio is.
*/
#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001
#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u

/**
* DOC: GuC VGT Policy KLVs
*

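Taking the documentation above literally, enabling the yield policy means sending one UPDATE_SCHEDULING_POLICIES_KLV H2G message whose payload is the KLV header followed by the two value dwords (duration in ms, then ratio in percent). A hedged sketch using the 100 ms / 30% example from the text, reusing helper names that appear later in this commit:

/*
 * Sketch only (assumes the xe GuC headers): payload layout for the
 * render/compute yield KLV described above. The 100/30 values are the
 * documentation's example, not a recommended tuning.
 */
static int send_rc_yield_example(struct xe_guc *guc)
{
	u32 data[] = {
		XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV,
		PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD),
		100,	/* quantum, in ms */
		30,	/* CCS priority share, in percent */
	};

	return xe_guc_ct_send_block(&guc->ct, data, ARRAY_SIZE(data));
}
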
@@ -311,12 +311,16 @@ int xe_device_sysfs_init(struct xe_device *xe)
|
||||
if (xe->info.platform == XE_BATTLEMAGE) {
|
||||
ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto cleanup;
|
||||
|
||||
ret = late_bind_create_files(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
|
||||
|
||||
cleanup:
|
||||
xe_device_sysfs_fini(xe);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -151,6 +151,16 @@ err_lrc:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __xe_exec_queue_fini(struct xe_exec_queue *q)
|
||||
{
|
||||
int i;
|
||||
|
||||
q->ops->fini(q);
|
||||
|
||||
for (i = 0; i < q->width; ++i)
|
||||
xe_lrc_put(q->lrc[i]);
|
||||
}
|
||||
|
||||
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
|
||||
u32 logical_mask, u16 width,
|
||||
struct xe_hw_engine *hwe, u32 flags,
|
||||
@@ -181,11 +191,13 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
|
||||
if (xe_exec_queue_uses_pxp(q)) {
|
||||
err = xe_pxp_exec_queue_add(xe->pxp, q);
|
||||
if (err)
|
||||
goto err_post_alloc;
|
||||
goto err_post_init;
|
||||
}
|
||||
|
||||
return q;
|
||||
|
||||
err_post_init:
|
||||
__xe_exec_queue_fini(q);
|
||||
err_post_alloc:
|
||||
__xe_exec_queue_free(q);
|
||||
return ERR_PTR(err);
|
||||
@@ -283,13 +295,11 @@ void xe_exec_queue_destroy(struct kref *ref)
|
||||
xe_exec_queue_put(eq);
|
||||
}
|
||||
|
||||
q->ops->fini(q);
|
||||
q->ops->destroy(q);
|
||||
}
|
||||
|
||||
void xe_exec_queue_fini(struct xe_exec_queue *q)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Before releasing our ref to lrc and xef, accumulate our run ticks
|
||||
* and wakeup any waiters.
|
||||
@@ -298,9 +308,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
|
||||
if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
|
||||
wake_up_var(&q->xef->exec_queue.pending_removal);
|
||||
|
||||
for (i = 0; i < q->width; ++i)
|
||||
xe_lrc_put(q->lrc[i]);
|
||||
|
||||
__xe_exec_queue_fini(q);
|
||||
__xe_exec_queue_free(q);
|
||||
}
|
||||
|
||||
|
||||
@@ -166,8 +166,14 @@ struct xe_exec_queue_ops {
|
||||
int (*init)(struct xe_exec_queue *q);
|
||||
/** @kill: Kill inflight submissions for backend */
|
||||
void (*kill)(struct xe_exec_queue *q);
|
||||
/** @fini: Fini exec queue for submission backend */
|
||||
/** @fini: Undoes the init() for submission backend */
|
||||
void (*fini)(struct xe_exec_queue *q);
|
||||
/**
|
||||
* @destroy: Destroy exec queue for submission backend. The backend
|
||||
* function must call xe_exec_queue_fini() (which will in turn call the
|
||||
* fini() backend function) to ensure the queue is properly cleaned up.
|
||||
*/
|
||||
void (*destroy)(struct xe_exec_queue *q);
|
||||
/** @set_priority: Set priority for exec queue */
|
||||
int (*set_priority)(struct xe_exec_queue *q,
|
||||
enum xe_exec_queue_priority priority);
|
||||
|
||||
@@ -385,10 +385,20 @@ err_free:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void execlist_exec_queue_fini_async(struct work_struct *w)
|
||||
static void execlist_exec_queue_fini(struct xe_exec_queue *q)
|
||||
{
|
||||
struct xe_execlist_exec_queue *exl = q->execlist;
|
||||
|
||||
drm_sched_entity_fini(&exl->entity);
|
||||
drm_sched_fini(&exl->sched);
|
||||
|
||||
kfree(exl);
|
||||
}
|
||||
|
||||
static void execlist_exec_queue_destroy_async(struct work_struct *w)
|
||||
{
|
||||
struct xe_execlist_exec_queue *ee =
|
||||
container_of(w, struct xe_execlist_exec_queue, fini_async);
|
||||
container_of(w, struct xe_execlist_exec_queue, destroy_async);
|
||||
struct xe_exec_queue *q = ee->q;
|
||||
struct xe_execlist_exec_queue *exl = q->execlist;
|
||||
struct xe_device *xe = gt_to_xe(q->gt);
|
||||
@@ -401,10 +411,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
|
||||
list_del(&exl->active_link);
|
||||
spin_unlock_irqrestore(&exl->port->lock, flags);
|
||||
|
||||
drm_sched_entity_fini(&exl->entity);
|
||||
drm_sched_fini(&exl->sched);
|
||||
kfree(exl);
|
||||
|
||||
xe_exec_queue_fini(q);
|
||||
}
|
||||
|
||||
@@ -413,10 +419,10 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
|
||||
/* NIY */
|
||||
}
|
||||
|
||||
static void execlist_exec_queue_fini(struct xe_exec_queue *q)
|
||||
static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
|
||||
{
|
||||
INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
|
||||
queue_work(system_unbound_wq, &q->execlist->fini_async);
|
||||
INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
|
||||
queue_work(system_unbound_wq, &q->execlist->destroy_async);
|
||||
}
|
||||
|
||||
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
|
||||
@@ -467,6 +473,7 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
|
||||
.init = execlist_exec_queue_init,
|
||||
.kill = execlist_exec_queue_kill,
|
||||
.fini = execlist_exec_queue_fini,
|
||||
.destroy = execlist_exec_queue_destroy,
|
||||
.set_priority = execlist_exec_queue_set_priority,
|
||||
.set_timeslice = execlist_exec_queue_set_timeslice,
|
||||
.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
|
||||
|
||||
@@ -42,7 +42,7 @@ struct xe_execlist_exec_queue {
|
||||
|
||||
bool has_run;
|
||||
|
||||
struct work_struct fini_async;
|
||||
struct work_struct destroy_async;
|
||||
|
||||
enum xe_exec_queue_priority active_priority;
|
||||
struct list_head active_link;
|
||||
|
||||
@@ -41,6 +41,7 @@
|
||||
#include "xe_gt_topology.h"
|
||||
#include "xe_guc_exec_queue_types.h"
|
||||
#include "xe_guc_pc.h"
|
||||
#include "xe_guc_submit.h"
|
||||
#include "xe_hw_fence.h"
|
||||
#include "xe_hw_engine_class_sysfs.h"
|
||||
#include "xe_irq.h"
|
||||
@@ -97,7 +98,7 @@ void xe_gt_sanitize(struct xe_gt *gt)
|
||||
* FIXME: if xe_uc_sanitize is called here, on TGL driver will not
|
||||
* reload
|
||||
*/
|
||||
gt->uc.guc.submission_state.enabled = false;
|
||||
xe_guc_submit_disable(>->uc.guc);
|
||||
}
|
||||
|
||||
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
|
||||
|
||||
@@ -1632,7 +1632,6 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
|
||||
u64 fair;
|
||||
|
||||
fair = div_u64(available, num_vfs);
|
||||
fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
|
||||
fair = ALIGN_DOWN(fair, alignment);
|
||||
#ifdef MAX_FAIR_LMEM
|
||||
fair = min_t(u64, MAX_FAIR_LMEM, fair);
|
||||
|
||||
@@ -880,9 +880,7 @@ int xe_guc_post_load_init(struct xe_guc *guc)
|
||||
return ret;
|
||||
}
|
||||
|
||||
guc->submission_state.enabled = true;
|
||||
|
||||
return 0;
|
||||
return xe_guc_submit_enable(guc);
|
||||
}
|
||||
|
||||
int xe_guc_reset(struct xe_guc *guc)
|
||||
@@ -1579,7 +1577,7 @@ void xe_guc_sanitize(struct xe_guc *guc)
|
||||
{
|
||||
xe_uc_fw_sanitize(&guc->fw);
|
||||
xe_guc_ct_disable(&guc->ct);
|
||||
guc->submission_state.enabled = false;
|
||||
xe_guc_submit_disable(guc);
|
||||
}
|
||||
|
||||
int xe_guc_reset_prepare(struct xe_guc *guc)
|
||||
|
||||
@@ -35,8 +35,8 @@ struct xe_guc_exec_queue {
|
||||
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
|
||||
/** @lr_tdr: long running TDR worker */
|
||||
struct work_struct lr_tdr;
|
||||
/** @fini_async: do final fini async from this worker */
|
||||
struct work_struct fini_async;
|
||||
/** @destroy_async: do final destroy async from this worker */
|
||||
struct work_struct destroy_async;
|
||||
/** @resume_time: time of last resume */
|
||||
u64 resume_time;
|
||||
/** @state: GuC specific state for this xe_exec_queue */
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
#include "xe_guc_ct.h"
|
||||
#include "xe_guc_exec_queue_types.h"
|
||||
#include "xe_guc_id_mgr.h"
|
||||
#include "xe_guc_klv_helpers.h"
|
||||
#include "xe_guc_submit_types.h"
|
||||
#include "xe_hw_engine.h"
|
||||
#include "xe_hw_fence.h"
|
||||
@@ -316,6 +317,71 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
|
||||
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
|
||||
}
|
||||
|
||||
/*
|
||||
* Given that we want to guarantee enough RCS throughput to avoid missing
|
||||
* frames, we set the yield policy to 20% of each 80ms interval.
|
||||
*/
|
||||
#define RC_YIELD_DURATION 80 /* in ms */
|
||||
#define RC_YIELD_RATIO 20 /* in percent */
|
||||
static u32 *emit_render_compute_yield_klv(u32 *emit)
|
||||
{
|
||||
*emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD);
|
||||
*emit++ = RC_YIELD_DURATION;
|
||||
*emit++ = RC_YIELD_RATIO;
|
||||
|
||||
return emit;
|
||||
}
|
||||
|
||||
#define SCHEDULING_POLICY_MAX_DWORDS 16
|
||||
static int guc_init_global_schedule_policy(struct xe_guc *guc)
|
||||
{
|
||||
u32 data[SCHEDULING_POLICY_MAX_DWORDS];
|
||||
u32 *emit = data;
|
||||
u32 count = 0;
|
||||
int ret;
|
||||
|
||||
if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
|
||||
return 0;
|
||||
|
||||
*emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
|
||||
|
||||
if (CCS_MASK(guc_to_gt(guc)))
|
||||
emit = emit_render_compute_yield_klv(emit);
|
||||
|
||||
count = emit - data;
|
||||
if (count > 1) {
|
||||
xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS);
|
||||
|
||||
ret = xe_guc_ct_send_block(&guc->ct, data, count);
|
||||
if (ret < 0) {
xe_gt_err(guc_to_gt(guc),
"failed to enable GuC scheduling policies: %pe\n",
ERR_PTR(ret));
return ret;
}
}

return 0;
}
|
||||
|
||||
int xe_guc_submit_enable(struct xe_guc *guc)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = guc_init_global_schedule_policy(guc);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
guc->submission_state.enabled = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void xe_guc_submit_disable(struct xe_guc *guc)
|
||||
{
|
||||
guc->submission_state.enabled = false;
|
||||
}
|
||||
|
||||
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
|
||||
{
|
||||
int i;
|
||||
@@ -1277,21 +1343,12 @@ rearm:
|
||||
return DRM_GPU_SCHED_STAT_NO_HANG;
|
||||
}
|
||||
|
||||
static void __guc_exec_queue_fini_async(struct work_struct *w)
|
||||
static void guc_exec_queue_fini(struct xe_exec_queue *q)
|
||||
{
|
||||
struct xe_guc_exec_queue *ge =
|
||||
container_of(w, struct xe_guc_exec_queue, fini_async);
|
||||
struct xe_exec_queue *q = ge->q;
|
||||
struct xe_guc_exec_queue *ge = q->guc;
|
||||
struct xe_guc *guc = exec_queue_to_guc(q);
|
||||
|
||||
xe_pm_runtime_get(guc_to_xe(guc));
|
||||
trace_xe_exec_queue_destroy(q);
|
||||
|
||||
release_guc_id(guc, q);
|
||||
if (xe_exec_queue_is_lr(q))
|
||||
cancel_work_sync(&ge->lr_tdr);
|
||||
/* Confirm no work left behind accessing device structures */
|
||||
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
|
||||
xe_sched_entity_fini(&ge->entity);
|
||||
xe_sched_fini(&ge->sched);
|
||||
|
||||
@@ -1300,25 +1357,43 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
|
||||
* (timeline name).
|
||||
*/
|
||||
kfree_rcu(ge, rcu);
|
||||
}
|
||||
|
||||
static void __guc_exec_queue_destroy_async(struct work_struct *w)
|
||||
{
|
||||
struct xe_guc_exec_queue *ge =
|
||||
container_of(w, struct xe_guc_exec_queue, destroy_async);
|
||||
struct xe_exec_queue *q = ge->q;
|
||||
struct xe_guc *guc = exec_queue_to_guc(q);
|
||||
|
||||
xe_pm_runtime_get(guc_to_xe(guc));
|
||||
trace_xe_exec_queue_destroy(q);
|
||||
|
||||
if (xe_exec_queue_is_lr(q))
|
||||
cancel_work_sync(&ge->lr_tdr);
|
||||
/* Confirm no work left behind accessing device structures */
|
||||
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
|
||||
|
||||
xe_exec_queue_fini(q);
|
||||
|
||||
xe_pm_runtime_put(guc_to_xe(guc));
|
||||
}
|
||||
|
||||
static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
|
||||
static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
|
||||
{
|
||||
struct xe_guc *guc = exec_queue_to_guc(q);
|
||||
struct xe_device *xe = guc_to_xe(guc);
|
||||
|
||||
INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
|
||||
INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async);
|
||||
|
||||
/* We must block on kernel engines so slabs are empty on driver unload */
|
||||
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
|
||||
__guc_exec_queue_fini_async(&q->guc->fini_async);
|
||||
__guc_exec_queue_destroy_async(&q->guc->destroy_async);
|
||||
else
|
||||
queue_work(xe->destroy_wq, &q->guc->fini_async);
|
||||
queue_work(xe->destroy_wq, &q->guc->destroy_async);
|
||||
}
|
||||
|
||||
static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
|
||||
static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q)
|
||||
{
|
||||
/*
|
||||
* Might be done from within the GPU scheduler, need to do async as we
|
||||
@@ -1327,7 +1402,7 @@ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
|
||||
* this we and don't really care when everything is fini'd, just that it
|
||||
* is.
|
||||
*/
|
||||
guc_exec_queue_fini_async(q);
|
||||
guc_exec_queue_destroy_async(q);
|
||||
}
|
||||
|
||||
static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
|
||||
@@ -1341,7 +1416,7 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
|
||||
if (exec_queue_registered(q))
|
||||
disable_scheduling_deregister(guc, q);
|
||||
else
|
||||
__guc_exec_queue_fini(guc, q);
|
||||
__guc_exec_queue_destroy(guc, q);
|
||||
}
|
||||
|
||||
static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
|
||||
@@ -1574,14 +1649,14 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
|
||||
#define STATIC_MSG_CLEANUP 0
|
||||
#define STATIC_MSG_SUSPEND 1
|
||||
#define STATIC_MSG_RESUME 2
|
||||
static void guc_exec_queue_fini(struct xe_exec_queue *q)
|
||||
static void guc_exec_queue_destroy(struct xe_exec_queue *q)
|
||||
{
|
||||
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
|
||||
|
||||
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
|
||||
guc_exec_queue_add_msg(q, msg, CLEANUP);
|
||||
else
|
||||
__guc_exec_queue_fini(exec_queue_to_guc(q), q);
|
||||
__guc_exec_queue_destroy(exec_queue_to_guc(q), q);
|
||||
}
|
||||
|
||||
static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
|
||||
@@ -1711,6 +1786,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
|
||||
.init = guc_exec_queue_init,
|
||||
.kill = guc_exec_queue_kill,
|
||||
.fini = guc_exec_queue_fini,
|
||||
.destroy = guc_exec_queue_destroy,
|
||||
.set_priority = guc_exec_queue_set_priority,
|
||||
.set_timeslice = guc_exec_queue_set_timeslice,
|
||||
.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
|
||||
@@ -1732,7 +1808,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
|
||||
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
|
||||
xe_exec_queue_put(q);
|
||||
else if (exec_queue_destroyed(q))
|
||||
__guc_exec_queue_fini(guc, q);
|
||||
__guc_exec_queue_destroy(guc, q);
|
||||
}
|
||||
if (q->guc->suspend_pending) {
|
||||
set_exec_queue_suspended(q);
|
||||
@@ -1989,7 +2065,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
|
||||
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
|
||||
xe_exec_queue_put(q);
|
||||
else
|
||||
__guc_exec_queue_fini(guc, q);
|
||||
__guc_exec_queue_destroy(guc, q);
|
||||
}
|
||||
|
||||
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
|
||||
|
||||
@@ -13,6 +13,8 @@ struct xe_exec_queue;
|
||||
struct xe_guc;
|
||||
|
||||
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
|
||||
int xe_guc_submit_enable(struct xe_guc *guc);
|
||||
void xe_guc_submit_disable(struct xe_guc *guc);
|
||||
|
||||
int xe_guc_submit_reset_prepare(struct xe_guc *guc);
|
||||
void xe_guc_submit_reset_wait(struct xe_guc *guc);
|
||||
|
||||
@@ -286,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
|
||||
*/
|
||||
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
|
||||
{
|
||||
u64 reg_val = 0, min, max;
|
||||
u32 reg_val = 0;
|
||||
struct xe_device *xe = hwmon->xe;
|
||||
struct xe_reg rapl_limit, pkg_power_sku;
|
||||
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
|
||||
@@ -294,7 +294,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
|
||||
mutex_lock(&hwmon->hwmon_lock);
|
||||
|
||||
if (hwmon->xe->info.has_mbx_power_limits) {
|
||||
xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)®_val);
|
||||
xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, ®_val);
|
||||
} else {
|
||||
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
|
||||
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
|
||||
@@ -304,19 +304,21 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
|
||||
/* Check if PL limits are disabled. */
|
||||
if (!(reg_val & PWR_LIM_EN)) {
|
||||
*value = PL_DISABLE;
|
||||
drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n",
|
||||
drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%08x\n",
|
||||
PWR_ATTR_TO_STR(attr), channel, reg_val);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
|
||||
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
|
||||
*value = mul_u32_u32(reg_val, SF_POWER) >> hwmon->scl_shift_power;
|
||||
|
||||
/* For platforms with mailbox power limit support clamping would be done by pcode. */
|
||||
if (!hwmon->xe->info.has_mbx_power_limits) {
|
||||
reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
|
||||
min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
|
||||
max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
|
||||
u64 pkg_pwr, min, max;
|
||||
|
||||
pkg_pwr = xe_mmio_read64_2x32(mmio, pkg_power_sku);
|
||||
min = REG_FIELD_GET(PKG_MIN_PWR, pkg_pwr);
|
||||
max = REG_FIELD_GET(PKG_MAX_PWR, pkg_pwr);
|
||||
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
|
||||
max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
|
||||
if (min && max)
|
||||
@@ -493,8 +495,8 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
|
||||
{
|
||||
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
|
||||
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
|
||||
u32 x, y, x_w = 2; /* 2 bits */
|
||||
u64 r, tau4, out;
|
||||
u32 reg_val, x, y, x_w = 2; /* 2 bits */
|
||||
u64 tau4, out;
|
||||
int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
|
||||
u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
|
||||
|
||||
@@ -505,23 +507,24 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
|
||||
mutex_lock(&hwmon->hwmon_lock);
|
||||
|
||||
if (hwmon->xe->info.has_mbx_power_limits) {
|
||||
ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
|
||||
ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, ®_val);
|
||||
if (ret) {
|
||||
drm_err(&hwmon->xe->drm,
|
||||
"power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n",
|
||||
channel, power_attr, r, ret);
|
||||
r = 0;
|
||||
"power interval read fail, ch %d, attr %d, val 0x%08x, ret %d\n",
|
||||
channel, power_attr, reg_val, ret);
|
||||
reg_val = 0;
|
||||
}
|
||||
} else {
|
||||
r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel));
|
||||
reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
|
||||
channel));
|
||||
}
|
||||
|
||||
mutex_unlock(&hwmon->hwmon_lock);
|
||||
|
||||
xe_pm_runtime_put(hwmon->xe);
|
||||
|
||||
x = REG_FIELD_GET(PWR_LIM_TIME_X, r);
|
||||
y = REG_FIELD_GET(PWR_LIM_TIME_Y, r);
|
||||
x = REG_FIELD_GET(PWR_LIM_TIME_X, reg_val);
|
||||
y = REG_FIELD_GET(PWR_LIM_TIME_Y, reg_val);
|
||||
|
||||
/*
|
||||
* tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
|
||||
|
||||
@@ -35,6 +35,10 @@ static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
|
||||
|
||||
static void xe_nvm_release_dev(struct device *dev)
|
||||
{
|
||||
struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
|
||||
struct intel_dg_nvm_dev *nvm = container_of(aux, struct intel_dg_nvm_dev, aux_dev);
|
||||
|
||||
kfree(nvm);
|
||||
}
|
||||
|
||||
static bool xe_nvm_non_posted_erase(struct xe_device *xe)
|
||||
@@ -162,6 +166,5 @@ void xe_nvm_fini(struct xe_device *xe)
|
||||
|
||||
auxiliary_device_delete(&nvm->aux_dev);
|
||||
auxiliary_device_uninit(&nvm->aux_dev);
|
||||
kfree(nvm);
|
||||
xe->nvm = NULL;
|
||||
}
|
||||
|
||||
@@ -44,16 +44,18 @@ int xe_tile_sysfs_init(struct xe_tile *tile)
|
||||
kt->tile = tile;
|
||||
|
||||
err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
|
||||
if (err) {
|
||||
kobject_put(&kt->base);
|
||||
return err;
|
||||
}
|
||||
if (err)
|
||||
goto err_object;
|
||||
|
||||
tile->sysfs = &kt->base;
|
||||
|
||||
err = xe_vram_freq_sysfs_init(tile);
|
||||
if (err)
|
||||
return err;
|
||||
goto err_object;
|
||||
|
||||
return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile);
|
||||
|
||||
err_object:
|
||||
kobject_put(&kt->base);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -240,8 +240,8 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
|
||||
|
||||
pfence = xe_preempt_fence_create(q, q->lr.context,
|
||||
++q->lr.seqno);
|
||||
if (!pfence) {
|
||||
err = -ENOMEM;
|
||||
if (IS_ERR(pfence)) {
|
||||
err = PTR_ERR(pfence);
|
||||
goto out_fini;
|
||||
}
|
||||
|
||||
|
||||
@@ -555,6 +555,7 @@ struct gcr3_tbl_info {
|
||||
};
|
||||
|
||||
struct amd_io_pgtable {
|
||||
seqcount_t seqcount; /* Protects root/mode update */
|
||||
struct io_pgtable pgtbl;
|
||||
int mode;
|
||||
u64 *root;
|
||||
|
||||
@@ -1455,12 +1455,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
|
||||
PCI_FUNC(e->devid));
|
||||
|
||||
devid = e->devid;
|
||||
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
|
||||
if (alias)
|
||||
if (alias) {
|
||||
for (dev_i = devid_start; dev_i <= devid; ++dev_i)
|
||||
pci_seg->alias_table[dev_i] = devid_to;
|
||||
set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
|
||||
}
|
||||
set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
|
||||
set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
|
||||
break;
|
||||
case IVHD_DEV_SPECIAL: {
|
||||
u8 handle, type;
|
||||
@@ -3067,7 +3067,8 @@ static int __init early_amd_iommu_init(void)
|
||||
|
||||
if (!boot_cpu_has(X86_FEATURE_CX16)) {
|
||||
pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
|
||||
return -EINVAL;
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/seqlock.h>
|
||||
|
||||
#include <asm/barrier.h>
|
||||
|
||||
@@ -130,8 +131,11 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
|
||||
|
||||
*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
|
||||
|
||||
write_seqcount_begin(&pgtable->seqcount);
|
||||
pgtable->root = pte;
|
||||
pgtable->mode += 1;
|
||||
write_seqcount_end(&pgtable->seqcount);
|
||||
|
||||
amd_iommu_update_and_flush_device_table(domain);
|
||||
|
||||
pte = NULL;
|
||||
@@ -153,6 +157,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
|
||||
{
|
||||
unsigned long last_addr = address + (page_size - 1);
|
||||
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
|
||||
unsigned int seqcount;
|
||||
int level, end_lvl;
|
||||
u64 *pte, *page;
|
||||
|
||||
@@ -170,8 +175,14 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
|
||||
}
|
||||
|
||||
|
||||
level = pgtable->mode - 1;
|
||||
pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
|
||||
do {
|
||||
seqcount = read_seqcount_begin(&pgtable->seqcount);
|
||||
|
||||
level = pgtable->mode - 1;
|
||||
pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
|
||||
} while (read_seqcount_retry(&pgtable->seqcount, seqcount));
|
||||
|
||||
|
||||
address = PAGE_SIZE_ALIGN(address, page_size);
|
||||
end_lvl = PAGE_SIZE_LEVEL(page_size);
|
||||
|
||||
@@ -249,6 +260,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
|
||||
unsigned long *page_size)
|
||||
{
|
||||
int level;
|
||||
unsigned int seqcount;
|
||||
u64 *pte;
|
||||
|
||||
*page_size = 0;
|
||||
@@ -256,8 +268,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
|
||||
if (address > PM_LEVEL_SIZE(pgtable->mode))
|
||||
return NULL;
|
||||
|
||||
level = pgtable->mode - 1;
|
||||
pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
|
||||
do {
|
||||
seqcount = read_seqcount_begin(&pgtable->seqcount);
|
||||
level = pgtable->mode - 1;
|
||||
pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
|
||||
} while (read_seqcount_retry(&pgtable->seqcount, seqcount));
|
||||
|
||||
*page_size = PTE_LEVEL_PAGE_SIZE(level);
|
||||
|
||||
while (level > 0) {
|
||||
@@ -541,6 +557,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
|
||||
if (!pgtable->root)
|
||||
return NULL;
|
||||
pgtable->mode = PAGE_MODE_3_LEVEL;
|
||||
seqcount_init(&pgtable->seqcount);
|
||||
|
||||
cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap;
|
||||
cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
|
||||
|
||||
@@ -1575,6 +1575,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
 	unsigned long lvl_pages = lvl_to_nr_pages(level);
 	struct dma_pte *pte = NULL;

+	if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
+		    !IS_ALIGNED(end_pfn + 1, lvl_pages)))
+		return;
+
 	while (start_pfn <= end_pfn) {
 		if (!pte)
 			pte = pfn_to_dma_pte(domain, start_pfn, &level,

@@ -1650,7 +1654,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			unsigned long pages_to_remove;

 			pteval |= DMA_PTE_LARGE_PAGE;
-			pages_to_remove = min_t(unsigned long, nr_pages,
+			pages_to_remove = min_t(unsigned long,
+						round_down(nr_pages, lvl_pages),
 						nr_pte_to_next_page(pte) * lvl_pages);
 			end_pfn = iov_pfn + pages_to_remove - 1;
 			switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
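A worked example of why the __domain_mapping() change pairs with the new WARN_ON() in switch_to_super_page(); the numbers are illustrative, not taken from the patch:

/*
 * With 4 KiB base pages and a 2 MiB super-page (largepage_lvl == 2),
 * lvl_pages == 512. If nr_pages == 700 and the PTE sits at the start of its
 * table, the old expression could yield pages_to_remove == 700, so
 * end_pfn + 1 would not be a multiple of 512 and switch_to_super_page()
 * would be asked to convert a partially covered super-page.
 * round_down(700, 512) == 512 keeps pages_to_remove a whole number of
 * super-pages, which is exactly the alignment the added WARN_ON() enforces.
 */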
@@ -612,6 +612,23 @@ static u64 get_iota_region_flag(struct s390_domain *domain)
 	}
 }

+static bool reg_ioat_propagate_error(int cc, u8 status)
+{
+	/*
+	 * If the device is in the error state the reset routine
+	 * will register the IOAT of the newly set domain on re-enable
+	 */
+	if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+		return false;
+	/*
+	 * If the device was removed treat registration as success
+	 * and let the subsequent error event trigger tear down.
+	 */
+	if (cc == ZPCI_CC_INVAL_HANDLE)
+		return false;
+	return cc != ZPCI_CC_OK;
+}
+
 static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
 				      struct iommu_domain *domain, u8 *status)
 {

@@ -696,7 +713,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,

 	/* If we fail now DMA remains blocked via blocking domain */
 	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
-	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+	if (reg_ioat_propagate_error(cc, status))
 		return -EIO;
 	zdev->dma_table = s390_domain->dma_table;
 	zdev_s390_domain_update(zdev, domain);

@@ -1032,7 +1049,8 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)

 	lockdep_assert_held(&zdev->dom_lock);

-	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
+	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+	    zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
 		return NULL;

 	s390_domain = to_s390_domain(zdev->s390_domain);

@@ -1123,12 +1141,7 @@ static int s390_attach_dev_identity(struct iommu_domain *domain,

 	/* If we fail now DMA remains blocked via blocking domain */
 	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
-
-	/*
-	 * If the device is undergoing error recovery the reset code
-	 * will re-establish the new domain.
-	 */
-	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+	if (reg_ioat_propagate_error(cc, status))
 		return -EIO;

 	zdev_s390_domain_update(zdev, domain);
@@ -133,7 +133,7 @@ struct journal_sector {
 	commit_id_t commit_id;
 };

-#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
+#define MAX_TAG_SIZE			255

 #define METADATA_PADDING_SECTORS	8
@@ -3813,8 +3813,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	struct raid_set *rs = ti->private;
 	unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);

-	limits->io_min = chunk_size_bytes;
-	limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+	if (chunk_size_bytes) {
+		limits->io_min = chunk_size_bytes;
+		limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+	}
 }

 static void raid_presuspend(struct dm_target *ti)
@@ -456,11 +456,15 @@ static void stripe_io_hints(struct dm_target *ti,
 			    struct queue_limits *limits)
 {
 	struct stripe_c *sc = ti->private;
-	unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+	unsigned int io_min, io_opt;

 	limits->chunk_sectors = sc->chunk_size;
-	limits->io_min = chunk_size;
-	limits->io_opt = chunk_size * sc->stripes;
+
+	if (!check_shl_overflow(sc->chunk_size, SECTOR_SHIFT, &io_min) &&
+	    !check_mul_overflow(io_min, sc->stripes, &io_opt)) {
+		limits->io_min = io_min;
+		limits->io_opt = io_opt;
+	}
 }

 static struct target_type stripe_target = {
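The stripe_io_hints() hunk above relies on the overflow.h helpers, which return true when the shift or multiply overflows and otherwise store the result through the destination pointer, so the limits are only updated when both steps are safe. A small hedged sketch with an illustrative function, not the dm-stripe code:

#include <linux/overflow.h>
#include <linux/printk.h>

static void io_hints_example(unsigned int chunk_sectors, unsigned int stripes)
{
	unsigned int io_min, io_opt;

	if (!check_shl_overflow(chunk_sectors, 9, &io_min) &&	/* sectors -> bytes */
	    !check_mul_overflow(io_min, stripes, &io_opt))
		pr_info("io_min=%u io_opt=%u\n", io_min, io_opt);
	else
		pr_info("keeping default limits, computation would overflow\n");
}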
@@ -73,6 +73,7 @@ static int linear_set_limits(struct mddev *mddev)
 	md_init_stacking_limits(&lim);
 	lim.max_hw_sectors = mddev->chunk_sectors;
 	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+	lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
 	lim.io_min = mddev->chunk_sectors << 9;
 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
 	if (err)
@@ -382,6 +382,7 @@ static int raid0_set_limits(struct mddev *mddev)
 	md_init_stacking_limits(&lim);
 	lim.max_hw_sectors = mddev->chunk_sectors;
 	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+	lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
 	lim.io_min = mddev->chunk_sectors << 9;
 	lim.io_opt = lim.io_min * mddev->raid_disks;
 	lim.chunk_sectors = mddev->chunk_sectors;
@@ -3211,6 +3211,7 @@ static int raid1_set_limits(struct mddev *mddev)

 	md_init_stacking_limits(&lim);
 	lim.max_write_zeroes_sectors = 0;
+	lim.max_hw_wzeroes_unmap_sectors = 0;
 	lim.features |= BLK_FEAT_ATOMIC_WRITES;
 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
 	if (err)
@@ -4008,6 +4008,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)

 	md_init_stacking_limits(&lim);
 	lim.max_write_zeroes_sectors = 0;
+	lim.max_hw_wzeroes_unmap_sectors = 0;
 	lim.io_min = mddev->chunk_sectors << 9;
 	lim.chunk_sectors = mddev->chunk_sectors;
 	lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
Some files were not shown because too many files have changed in this diff.