Merge tag 'irq-drivers-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq controller updates from Thomas Gleixner:
 "Update for interrupt chip drivers:

   - Convert the generic interrupt chip to lock guards to remove copy &
     pasta boilerplate code and gotos.

   - A new driver for the interrupt controller in the EcoNet EN751221
     MIPS SoC.

   - Extend the SG2042-MSI driver to support the new SG2044 SoC

   - Updates and cleanups for the (ancient) VT8500 driver

   - Improve the scalability of the ARM GICV4.1 ITS driver by utilizing
     node local copies of a VM's interrupt translation table when possible.
     This results in a 12% reduction of VM IPI latency in certain
     workloads.

   - The usual cleanups and improvements all over the place"

* tag 'irq-drivers-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  irqchip/irq-pruss-intc: Simplify chained interrupt handler setup
  irqchip/gic-v4.1: Use local 4_1 ITS to generate VSGI
  irqchip/econet-en751221: Switch to of_fwnode_handle()
  irqchip/irq-vt8500: Switch to irq_domain_create_*()
  irqchip/econet-en751221: Switch to irq_domain_create_linear()
  irqchip/irq-vt8500: Use fewer global variables and add error handling
  irqchip/irq-vt8500: Use a dedicated chained handler function
  irqchip/irq-vt8500: Don't require 8 interrupts from a chained controller
  irqchip/irq-vt8500: Drop redundant copy of the device node pointer
  irqchip/irq-vt8500: Split up ack/mask functions
  irqchip/sg2042-msi: Fix wrong type cast in sg2044_msi_irq_ack()
  irqchip/sg2042-msi: Add the Sophgo SG2044 MSI interrupt controller
  irqchip/sg2042-msi: Introduce configurable chipinfo for SG2042
  irqchip/sg2042-msi: Rename functions and data structures to be SG2042 agnostic
  dt-bindings: interrupt-controller: Add Sophgo SG2044 MSI controller
  genirq/generic-chip: Fix incorrect lock guard conversions
  genirq/generic-chip: Remove unused lock wrappers
  irqchip: Convert generic irqchip locking to guards
  gpio: mvebu: Convert generic irqchip locking to guard()
  ARM: orion/gpio:: Convert generic irqchip locking to guard()
  ...
This commit is contained in:
Linus Torvalds
2025-05-27 08:00:46 -07:00
28 changed files with 683 additions and 316 deletions

View File

@@ -0,0 +1,78 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/interrupt-controller/econet,en751221-intc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: EcoNet EN751221 Interrupt Controller
maintainers:
- Caleb James DeLisle <cjd@cjdns.fr>
description:
The EcoNet EN751221 Interrupt Controller is a simple interrupt controller
designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can
be routed to either VPE but not both, so to support per-CPU interrupts, a
secondary IRQ number is allocated to control masking/unmasking on VPE#1. For
lack of a better term we call these "shadow interrupts". The assignment of
shadow interrupts is defined by the SoC integrator when wiring the interrupt
lines, so they are configurable in the device tree.
allOf:
- $ref: /schemas/interrupt-controller.yaml#
properties:
compatible:
const: econet,en751221-intc
reg:
maxItems: 1
"#interrupt-cells":
const: 1
interrupt-controller: true
interrupts:
maxItems: 1
description: Interrupt line connecting this controller to its parent.
econet,shadow-interrupts:
$ref: /schemas/types.yaml#/definitions/uint32-matrix
description:
An array of interrupt number pairs where each pair represents a shadow
interrupt relationship. The first number in each pair is the primary IRQ,
and the second is its shadow IRQ used for VPE#1 control. For example,
<8 3> means IRQ 8 is shadowed by IRQ 3, so IRQ 3 cannot be mapped, but
when VPE#1 requests IRQ 8, it will manipulate the IRQ 3 mask bit.
minItems: 1
maxItems: 20
items:
items:
- description: primary per-CPU IRQ
- description: shadow IRQ number
required:
- compatible
- reg
- interrupt-controller
- "#interrupt-cells"
- interrupts
additionalProperties: false
examples:
- |
interrupt-controller@1fb40000 {
compatible = "econet,en751221-intc";
reg = <0x1fb40000 0x100>;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&cpuintc>;
interrupts = <2>;
econet,shadow-interrupts = <7 2>, <8 3>, <13 12>, <30 29>;
};
...

View File

@@ -18,7 +18,9 @@ allOf:
properties:
compatible:
const: sophgo,sg2042-msi
enum:
- sophgo,sg2042-msi
- sophgo,sg2044-msi
reg:
items:

View File

@@ -496,11 +496,10 @@ static void orion_gpio_unmask_irq(struct irq_data *d)
u32 reg_val;
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
reg_val = irq_reg_readl(gc, ct->regs.mask);
reg_val |= mask;
irq_reg_writel(gc, reg_val, ct->regs.mask);
irq_gc_unlock(gc);
}
static void orion_gpio_mask_irq(struct irq_data *d)
@@ -510,11 +509,10 @@ static void orion_gpio_mask_irq(struct irq_data *d)
u32 mask = d->mask;
u32 reg_val;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
reg_val = irq_reg_readl(gc, ct->regs.mask);
reg_val &= ~mask;
irq_reg_writel(gc, reg_val, ct->regs.mask);
irq_gc_unlock(gc);
}
void __init orion_gpio_init(int gpio_base, int ngpio,

View File

@@ -408,9 +408,8 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
struct mvebu_gpio_chip *mvchip = gc->private;
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
mvebu_gpio_write_edge_cause(mvchip, ~mask);
irq_gc_unlock(gc);
}
static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
@@ -420,10 +419,9 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv &= ~mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
}
static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
@@ -433,11 +431,10 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
mvebu_gpio_write_edge_cause(mvchip, ~mask);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
}
static void mvebu_gpio_level_irq_mask(struct irq_data *d)
@@ -447,10 +444,9 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv &= ~mask;
mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
}
static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
@@ -460,10 +456,9 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
}
/*****************************************************************************

View File

@@ -166,6 +166,11 @@ config DW_APB_ICTL
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN_HIERARCHY
config ECONET_EN751221_INTC
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
config FARADAY_FTINTC010
bool
select IRQ_DOMAIN

View File

@@ -10,6 +10,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o
obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o
obj-$(CONFIG_ECONET_EN751221_INTC) += irq-econet-en751221.o
obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o

View File

@@ -65,15 +65,13 @@ static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct al_fic *fic = gc->private;
enum al_fic_state new_state;
int ret = 0;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) &&
((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) {
pr_debug("fic doesn't support flow type %d\n", flow_type);
ret = -EINVAL;
goto err;
return -EINVAL;
}
new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ?
@@ -91,16 +89,10 @@ static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type)
if (fic->state == AL_FIC_UNCONFIGURED) {
al_fic_set_trigger(fic, gc, new_state);
} else if (fic->state != new_state) {
pr_debug("fic %s state already configured to %d\n",
fic->name, fic->state);
ret = -EINVAL;
goto err;
pr_debug("fic %s state already configured to %d\n", fic->name, fic->state);
return -EINVAL;
}
err:
irq_gc_unlock(gc);
return ret;
return 0;
}
static void al_fic_irq_handler(struct irq_desc *desc)

View File

@@ -78,9 +78,8 @@ static int aic_retrigger(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
/* Enable interrupt on AIC5 */
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
irq_gc_unlock(gc);
return 1;
}
@@ -106,30 +105,27 @@ static void aic_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR);
irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR);
irq_gc_unlock(gc);
}
static void aic_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR);
irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR);
irq_gc_unlock(gc);
}
static void aic_pm_shutdown(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
irq_gc_unlock(gc);
}
#else
#define aic_suspend NULL
@@ -175,10 +171,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
{
struct irq_domain_chip_generic *dgc = d->gc;
struct irq_chip_generic *gc;
unsigned long flags;
unsigned smr;
int idx;
int ret;
int idx, ret;
if (!dgc)
return -EINVAL;
@@ -194,11 +188,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
gc = dgc->gc[idx];
irq_gc_lock_irqsave(gc, flags);
guard(raw_spinlock_irq)(&gc->lock);
smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
irq_gc_unlock_irqrestore(gc, flags);
return ret;
}

View File

@@ -92,11 +92,10 @@ static void aic5_mask(struct irq_data *d)
* Disable interrupt on AIC5. We always take the lock of the
* first irq chip as all chips share the same registers.
*/
irq_gc_lock(bgc);
guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
gc->mask_cache &= ~d->mask;
irq_gc_unlock(bgc);
}
static void aic5_unmask(struct irq_data *d)
@@ -109,11 +108,10 @@ static void aic5_unmask(struct irq_data *d)
* Enable interrupt on AIC5. We always take the lock of the
* first irq chip as all chips share the same registers.
*/
irq_gc_lock(bgc);
guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(gc, 1, AT91_AIC5_IECR);
gc->mask_cache |= d->mask;
irq_gc_unlock(bgc);
}
static int aic5_retrigger(struct irq_data *d)
@@ -122,11 +120,9 @@ static int aic5_retrigger(struct irq_data *d)
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
/* Enable interrupt on AIC5 */
irq_gc_lock(bgc);
guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
irq_gc_unlock(bgc);
return 1;
}
@@ -137,14 +133,12 @@ static int aic5_set_type(struct irq_data *d, unsigned type)
unsigned int smr;
int ret;
irq_gc_lock(bgc);
guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
ret = aic_common_set_type(d, type, &smr);
if (!ret)
irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
irq_gc_unlock(bgc);
return ret;
}
@@ -166,7 +160,7 @@ static void aic5_suspend(struct irq_data *d)
smr_cache[i] = irq_reg_readl(bgc, AT91_AIC5_SMR);
}
irq_gc_lock(bgc);
guard(raw_spinlock)(&bgc->lock);
for (i = 0; i < dgc->irqs_per_chip; i++) {
mask = 1 << i;
if ((mask & gc->mask_cache) == (mask & gc->wake_active))
@@ -178,7 +172,6 @@ static void aic5_suspend(struct irq_data *d)
else
irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
}
irq_gc_unlock(bgc);
}
static void aic5_resume(struct irq_data *d)
@@ -190,7 +183,7 @@ static void aic5_resume(struct irq_data *d)
int i;
u32 mask;
irq_gc_lock(bgc);
guard(raw_spinlock)(&bgc->lock);
if (smr_cache) {
irq_reg_writel(bgc, 0xffffffff, AT91_AIC5_SPU);
@@ -214,7 +207,6 @@ static void aic5_resume(struct irq_data *d)
else
irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
}
irq_gc_unlock(bgc);
}
static void aic5_pm_shutdown(struct irq_data *d)
@@ -225,13 +217,12 @@ static void aic5_pm_shutdown(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
int i;
irq_gc_lock(bgc);
guard(raw_spinlock)(&bgc->lock);
for (i = 0; i < dgc->irqs_per_chip; i++) {
irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
irq_reg_writel(bgc, 1, AT91_AIC5_ICCR);
}
irq_gc_unlock(bgc);
}
#else
#define aic5_suspend NULL
@@ -277,7 +268,6 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
unsigned int *out_type)
{
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
unsigned long flags;
unsigned smr;
int ret;
@@ -289,13 +279,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
if (ret)
return ret;
irq_gc_lock_irqsave(bgc, flags);
guard(raw_spinlock_irq)(&bgc->lock);
irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
irq_gc_unlock_irqrestore(bgc, flags);
return ret;
}

View File

@@ -63,16 +63,15 @@ static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc)
for (idx = 0; idx < b->n_words; idx++) {
int base = idx * IRQS_PER_WORD;
struct irq_chip_generic *gc =
irq_get_domain_generic_chip(b->domain, base);
struct irq_chip_generic *gc;
unsigned long pending;
int hwirq;
irq_gc_lock(gc);
pending = irq_reg_readl(gc, b->stat_offset[idx]) &
gc->mask_cache &
data->irq_map_mask[idx];
irq_gc_unlock(gc);
gc = irq_get_domain_generic_chip(b->domain, base);
scoped_guard (raw_spinlock, &gc->lock) {
pending = irq_reg_readl(gc, b->stat_offset[idx]) & gc->mask_cache &
data->irq_map_mask[idx];
}
for_each_set_bit(hwirq, &pending, IRQS_PER_WORD)
generic_handle_domain_irq(b->domain, base + hwirq);
@@ -86,11 +85,9 @@ static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc)
struct bcm7120_l2_intc_data *b = gc->private;
struct irq_chip_type *ct = gc->chip_types;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
if (b->can_wake)
irq_reg_writel(gc, gc->mask_cache | gc->wake_active,
ct->regs.mask);
irq_gc_unlock(gc);
irq_reg_writel(gc, gc->mask_cache | gc->wake_active, ct->regs.mask);
}
static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
@@ -98,9 +95,8 @@ static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
struct irq_chip_type *ct = gc->chip_types;
/* Restore the saved mask */
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, gc->mask_cache, ct->regs.mask);
irq_gc_unlock(gc);
}
static int bcm7120_l2_intc_init_one(struct device_node *dn,

View File

@@ -97,9 +97,8 @@ static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
unsigned long flags;
irq_gc_lock_irqsave(gc, flags);
guard(raw_spinlock_irqsave)(&gc->lock);
/* Save the current mask */
if (save)
b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
@@ -109,7 +108,6 @@ static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save)
irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
}
irq_gc_unlock_irqrestore(gc, flags);
}
static void brcmstb_l2_intc_shutdown(struct irq_data *d)
@@ -127,9 +125,8 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
unsigned long flags;
irq_gc_lock_irqsave(gc, flags);
guard(raw_spinlock_irqsave)(&gc->lock);
if (ct->chip.irq_ack) {
/* Clear unmasked non-wakeup interrupts */
irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
@@ -139,7 +136,6 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
/* Restore the saved mask */
irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
irq_gc_unlock_irqrestore(gc, flags);
}
static int __init brcmstb_l2_intc_of_init(struct device_node *np,

View File

@@ -50,11 +50,10 @@ static void irq_ck_mask_set_bit(struct irq_data *d)
unsigned long ifr = ct->regs.mask - 8;
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
*ct->mask_cache |= mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
irq_reg_writel(gc, irq_reg_readl(gc, ifr) & ~mask, ifr);
irq_gc_unlock(gc);
}
static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base,

View File

@@ -101,10 +101,9 @@ static void dw_apb_ictl_resume(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
writel_relaxed(~0, gc->reg_base + ct->regs.enable);
writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
irq_gc_unlock(gc);
}
#else
#define dw_apb_ictl_resume NULL

View File

@@ -0,0 +1,310 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* EN751221 Interrupt Controller Driver.
*
* The EcoNet EN751221 Interrupt Controller is a simple interrupt controller
* designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can
* be routed to either VPE but not both, so to support per-CPU interrupts, a
* secondary IRQ number is allocated to control masking/unmasking on VPE#1. In
* this driver, these are called "shadow interrupts". The assignment of shadow
* interrupts is defined by the SoC integrator when wiring the interrupt lines,
* so they are configurable in the device tree.
*
* If an interrupt (say 30) needs per-CPU capability, the SoC integrator
* allocates another IRQ number (say 29) to be its shadow. The device tree
* reflects this by adding the pair <30 29> to the "econet,shadow-interrupts"
* property.
*
* When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29,
* telling the hardware to mask VPE#1's view of IRQ 30.
*
* Copyright (C) 2025 Caleb James DeLisle <cjd@cjdns.fr>
*/
#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#define IRQ_COUNT 40
#define NOT_PERCPU 0xff
#define IS_SHADOW 0xfe
#define REG_MASK0 0x04
#define REG_MASK1 0x50
#define REG_PENDING0 0x08
#define REG_PENDING1 0x54
/*
 * Driver-wide state for the (single) EN751221 interrupt controller.
 * Note: plain comment, not kernel-doc, because the struct is anonymous.
 *
 * @membase: Base address of the interrupt controller registers
 * @interrupt_shadows: Array of all interrupts, for each value,
 *    - NOT_PERCPU: This interrupt is not per-cpu, so it has no shadow
 *    - IS_SHADOW: This interrupt is a shadow of another per-cpu interrupt
 *    - else: This is a per-cpu interrupt whose shadow is the value
 */
static struct {
	void __iomem *membase;
	u8 interrupt_shadows[IRQ_COUNT];
} econet_intc __ro_after_init;

/* Serializes the read-modify-write register updates done by econet_wreg(). */
static DEFINE_RAW_SPINLOCK(irq_lock);
/*
 * Read-modify-write an INTC register: only the bits in @mask are changed
 * to the corresponding bits of @val, all other bits are preserved.
 *
 * IRQs must be disabled by the caller: the lock is a plain raw_spinlock
 * guard (no irqsave), so taking an interrupt while holding it could
 * deadlock if the interrupt path also calls econet_wreg().
 */
static void econet_wreg(u32 reg, u32 val, u32 mask)
{
	u32 v;

	guard(raw_spinlock)(&irq_lock);

	v = ioread32(econet_intc.membase + reg);
	v &= ~mask;
	v |= val & mask;
	iowrite32(v, econet_intc.membase + reg);
}
/*
 * Mask (@unmask == false) or unmask (@unmask == true) hardware interrupt
 * @hwirq for the calling CPU.
 *
 * IRQs must be disabled by the caller (see econet_wreg()).
 */
static void econet_chmask(u32 hwirq, bool unmask)
{
	u32 reg, mask;
	u8 shadow;

	/*
	 * If the IRQ is a shadow, it should never be manipulated directly.
	 * It should only be masked/unmasked as a result of the "real" per-cpu
	 * irq being manipulated by a thread running on VPE#1.
	 * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is what we mask.
	 * This is single processor only, so smp_processor_id() never exceeds 1.
	 */
	shadow = econet_intc.interrupt_shadows[hwirq];
	if (WARN_ON_ONCE(shadow == IS_SHADOW))
		return;
	else if (shadow != NOT_PERCPU && smp_processor_id() == 1)
		hwirq = shadow;

	/* IRQs 0-31 live in MASK0, IRQs 32-39 in MASK1. */
	if (hwirq >= 32) {
		reg = REG_MASK1;
		mask = BIT(hwirq - 32);
	} else {
		reg = REG_MASK0;
		mask = BIT(hwirq);
	}

	econet_wreg(reg, unmask ? mask : 0, mask);
}
/* irq_chip .irq_mask / .irq_mask_ack callback. IRQs must be disabled. */
static void econet_intc_mask(struct irq_data *d)
{
	econet_chmask(d->hwirq, false);
}
/* irq_chip .irq_unmask callback. IRQs must be disabled. */
static void econet_intc_unmask(struct irq_data *d)
{
	econet_chmask(d->hwirq, true);
}
/* Mask every interrupt in both mask registers. */
static void econet_mask_all(void)
{
	/* IRQs are generally disabled during init, but guarding here makes it non-obligatory. */
	guard(irqsave)();
	econet_wreg(REG_MASK0, 0, ~0);
	econet_wreg(REG_MASK1, 0, ~0);
}
/*
 * Dispatch every pending interrupt in @pending through domain @d.
 * @offset is added to each bit number to form the hwirq (0 for the
 * PENDING0 word, 32 for the PENDING1 word). Bits are handled from the
 * highest set bit down.
 */
static void econet_intc_handle_pending(struct irq_domain *d, u32 pending, u32 offset)
{
	u32 remaining = pending;

	while (remaining) {
		int bit = fls(remaining) - 1;

		generic_handle_domain_irq(d, bit + offset);
		remaining &= ~BIT(bit);
	}
}
/*
 * Chained handler for the parent (CPU) interrupt line: read both pending
 * registers and dispatch every set bit through the IRQ domain stashed in
 * the handler data.
 */
static void econet_intc_from_parent(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_domain *domain;
	u32 pending0, pending1;

	chained_irq_enter(chip, desc);

	pending0 = ioread32(econet_intc.membase + REG_PENDING0);
	pending1 = ioread32(econet_intc.membase + REG_PENDING1);

	if (unlikely(!(pending0 | pending1))) {
		/* Parent fired but no bit is pending: count it as spurious. */
		spurious_interrupt();
	} else {
		domain = irq_desc_get_handler_data(desc);
		econet_intc_handle_pending(domain, pending0, 0);
		econet_intc_handle_pending(domain, pending1, 32);
	}

	chained_irq_exit(chip, desc);
}
static const struct irq_chip econet_irq_chip;
/*
 * irq_domain .map callback: validate @hwirq and install the flow handler.
 * Shadow IRQs are rejected (they are only ever manipulated indirectly via
 * their per-CPU target); per-CPU IRQs (those with a shadow assigned) get
 * percpu-devid handling, everything else is handled as a level interrupt.
 */
static int econet_intc_map(struct irq_domain *d, u32 irq, irq_hw_number_t hwirq)
{
	int ret;

	if (hwirq >= IRQ_COUNT) {
		pr_err("%s: hwirq %lu out of range\n", __func__, hwirq);
		return -EINVAL;
	} else if (econet_intc.interrupt_shadows[hwirq] == IS_SHADOW) {
		pr_err("%s: can't map hwirq %lu, it is a shadow interrupt\n", __func__, hwirq);
		return -EINVAL;
	}

	if (econet_intc.interrupt_shadows[hwirq] == NOT_PERCPU) {
		irq_set_chip_and_handler(irq, &econet_irq_chip, handle_level_irq);
	} else {
		irq_set_chip_and_handler(irq, &econet_irq_chip, handle_percpu_devid_irq);
		/* Failure here is non-fatal: the mapping itself still works. */
		ret = irq_set_percpu_devid(irq);
		if (ret)
			pr_warn("%s: Failed irq_set_percpu_devid for %u: %d\n", d->name, irq, ret);
	}

	irq_set_chip_data(irq, NULL);
	return 0;
}
/*
 * .irq_mask_ack reuses the mask op — no separate ack operation is visible
 * in this driver, masking appears to be sufficient to acknowledge.
 */
static const struct irq_chip econet_irq_chip = {
	.name = "en751221-intc",
	.irq_unmask = econet_intc_unmask,
	.irq_mask = econet_intc_mask,
	.irq_mask_ack = econet_intc_mask,
};

/* One-cell DT interrupt specifier: the cell is the hwirq number. */
static const struct irq_domain_ops econet_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = econet_intc_map
};
/*
 * Parse the optional "econet,shadow-interrupts" property of @node into
 * econet_intc.interrupt_shadows[]: every entry defaults to NOT_PERCPU;
 * each valid <target shadow> pair marks the shadow IRQ as IS_SHADOW and
 * records the shadow number at the target's index. Malformed pairs are
 * reported and skipped; a missing or odd-length property leaves all
 * interrupts non-per-CPU.
 *
 * Return: 0 on success (including "property absent"), -ENOMEM on
 * allocation failure, -EINVAL if the property cannot be read.
 */
static int __init get_shadow_interrupts(struct device_node *node)
{
	const char *field = "econet,shadow-interrupts";
	int num_shadows;

	num_shadows = of_property_count_u32_elems(node, field);

	memset(econet_intc.interrupt_shadows, NOT_PERCPU,
	       sizeof(econet_intc.interrupt_shadows));

	if (num_shadows <= 0) {
		return 0;
	} else if (num_shadows % 2) {
		pr_err("%pOF: %s count is odd, ignoring\n", node, field);
		return 0;
	}

	u32 *shadows __free(kfree) = kmalloc_array(num_shadows, sizeof(u32), GFP_KERNEL);
	if (!shadows)
		return -ENOMEM;

	if (of_property_read_u32_array(node, field, shadows, num_shadows)) {
		pr_err("%pOF: Failed to read %s\n", node, field);
		return -EINVAL;
	}

	for (int i = 0; i < num_shadows; i += 2) {
		u32 shadow = shadows[i + 1];
		u32 target = shadows[i];

		/*
		 * Both values index interrupt_shadows[IRQ_COUNT], so the
		 * bound check must be >= IRQ_COUNT. The original used '>'
		 * for the shadow, which let shadow == IRQ_COUNT slip
		 * through and write one element past the end of the array.
		 */
		if (shadow >= IRQ_COUNT) {
			pr_err("%pOF: %s[%d] shadow(%d) out of range\n",
			       node, field, i + 1, shadow);
			continue;
		}
		if (target >= IRQ_COUNT) {
			pr_err("%pOF: %s[%d] target(%d) out of range\n", node, field, i, target);
			continue;
		}
		if (econet_intc.interrupt_shadows[target] != NOT_PERCPU) {
			pr_err("%pOF: %s[%d] target(%d) already has a shadow\n",
			       node, field, i, target);
			continue;
		}
		if (econet_intc.interrupt_shadows[shadow] != NOT_PERCPU) {
			pr_err("%pOF: %s[%d] shadow(%d) already has a target\n",
			       node, field, i + 1, shadow);
			continue;
		}
		econet_intc.interrupt_shadows[target] = shadow;
		econet_intc.interrupt_shadows[shadow] = IS_SHADOW;
	}

	return 0;
}
/*
 * Probe the controller from its device tree node: parse shadow-interrupt
 * pairs, map the parent interrupt, claim and remap the register block,
 * mask everything, create the linear IRQ domain and finally install the
 * chained handler. Failures unwind the acquired resources in reverse
 * order through the goto chain.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __init econet_intc_of_init(struct device_node *node, struct device_node *parent)
{
	struct irq_domain *domain;
	struct resource res;
	int ret, irq;

	ret = get_shadow_interrupts(node);
	if (ret)
		return ret;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		pr_err("%pOF: DT: Failed to get IRQ from 'interrupts'\n", node);
		return -EINVAL;
	}

	if (of_address_to_resource(node, 0, &res)) {
		pr_err("%pOF: DT: Failed to get 'reg'\n", node);
		ret = -EINVAL;
		goto err_dispose_mapping;
	}

	if (!request_mem_region(res.start, resource_size(&res), res.name)) {
		pr_err("%pOF: Failed to request memory\n", node);
		ret = -EBUSY;
		goto err_dispose_mapping;
	}

	econet_intc.membase = ioremap(res.start, resource_size(&res));
	if (!econet_intc.membase) {
		pr_err("%pOF: Failed to remap membase\n", node);
		ret = -ENOMEM;
		goto err_release;
	}

	/* Start fully masked; interrupts are unmasked individually on request. */
	econet_mask_all();

	domain = irq_domain_create_linear(of_fwnode_handle(node), IRQ_COUNT,
					  &econet_domain_ops, NULL);
	if (!domain) {
		pr_err("%pOF: Failed to add irqdomain\n", node);
		ret = -ENOMEM;
		goto err_unmap;
	}

	irq_set_chained_handler_and_data(irq, econet_intc_from_parent, domain);

	return 0;

err_unmap:
	iounmap(econet_intc.membase);
err_release:
	release_mem_region(res.start, resource_size(&res));
err_dispose_mapping:
	irq_dispose_mapping(irq);
	return ret;
}
IRQCHIP_DECLARE(econet_en751221_intc, "econet,en751221-intc", econet_intc_of_init);

View File

@@ -125,6 +125,8 @@ struct its_node {
int vlpi_redist_offset;
};
static DEFINE_PER_CPU(struct its_node *, local_4_1_its);
#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
@@ -2778,6 +2780,7 @@ static u64 inherit_vpe_l1_table_from_its(void)
}
val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
*this_cpu_ptr(&local_4_1_its) = its;
return val;
}
@@ -2815,6 +2818,7 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
*this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu);
return val;
}
@@ -4180,7 +4184,7 @@ static struct irq_chip its_vpe_irq_chip = {
static struct its_node *find_4_1_its(void)
{
static struct its_node *its = NULL;
struct its_node *its = *this_cpu_ptr(&local_4_1_its);
if (!its) {
list_for_each_entry(its, &its_nodes, entry) {

View File

@@ -52,11 +52,10 @@ static void ingenic_tcu_gc_unmask_enable_reg(struct irq_data *d)
struct regmap *map = gc->private;
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
regmap_write(map, ct->regs.ack, mask);
regmap_write(map, ct->regs.enable, mask);
*ct->mask_cache |= mask;
irq_gc_unlock(gc);
}
static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d)
@@ -66,10 +65,9 @@ static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d)
struct regmap *map = gc->private;
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
regmap_write(map, ct->regs.disable, mask);
*ct->mask_cache &= ~mask;
irq_gc_unlock(gc);
}
static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d)
@@ -79,10 +77,9 @@ static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d)
struct regmap *map = gc->private;
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
regmap_write(map, ct->regs.ack, mask);
regmap_write(map, ct->regs.disable, mask);
irq_gc_unlock(gc);
}
static int __init ingenic_tcu_irq_init(struct device_node *np,

View File

@@ -71,14 +71,12 @@ static unsigned int lan966x_oic_irq_startup(struct irq_data *data)
struct lan966x_oic_chip_regs *chip_regs = gc->private;
u32 map;
irq_gc_lock(gc);
/* Map the source interrupt to the destination */
map = irq_reg_readl(gc, chip_regs->reg_off_map);
map |= data->mask;
irq_reg_writel(gc, map, chip_regs->reg_off_map);
irq_gc_unlock(gc);
scoped_guard (raw_spinlock, &gc->lock) {
/* Map the source interrupt to the destination */
map = irq_reg_readl(gc, chip_regs->reg_off_map);
map |= data->mask;
irq_reg_writel(gc, map, chip_regs->reg_off_map);
}
ct->chip.irq_ack(data);
ct->chip.irq_unmask(data);
@@ -95,14 +93,12 @@ static void lan966x_oic_irq_shutdown(struct irq_data *data)
ct->chip.irq_mask(data);
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
/* Unmap the interrupt */
map = irq_reg_readl(gc, chip_regs->reg_off_map);
map &= ~data->mask;
irq_reg_writel(gc, map, chip_regs->reg_off_map);
irq_gc_unlock(gc);
}
static int lan966x_oic_irq_set_type(struct irq_data *data,

View File

@@ -116,9 +116,8 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
u32 mask = data->mask;
unsigned long flags;
irq_gc_lock_irqsave(gc, flags);
guard(raw_spinlock)(&gc->lock);
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
@@ -137,10 +136,8 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
break;
default:
irq_gc_unlock_irqrestore(gc, flags);
return -EINVAL;
}
irq_gc_unlock_irqrestore(gc, flags);
irqd_set_trigger_type(data, type);
return 0;
@@ -157,10 +154,9 @@ static void liointc_suspend(struct irq_chip_generic *gc)
static void liointc_resume(struct irq_chip_generic *gc)
{
struct liointc_priv *priv = gc->private;
unsigned long flags;
int i;
irq_gc_lock_irqsave(gc, flags);
guard(raw_spinlock_irqsave)(&gc->lock);
/* Disable all at first */
writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE);
/* Restore map cache */
@@ -170,7 +166,6 @@ static void liointc_resume(struct irq_chip_generic *gc)
writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE);
/* Restore mask cache */
writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
irq_gc_unlock_irqrestore(gc, flags);
}
static int parent_irq[LIOINTC_NUM_PARENT];

View File

@@ -83,7 +83,7 @@ static void ocelot_irq_unmask(struct irq_data *data)
unsigned int mask = data->mask;
u32 val;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
/*
* Clear sticky bits for edge mode interrupts.
* Serval has only one trigger register replication, but the adjacent
@@ -97,7 +97,6 @@ static void ocelot_irq_unmask(struct irq_data *data)
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, mask, p->reg_off_ena_set);
irq_gc_unlock(gc);
}
static void ocelot_irq_handler(struct irq_desc *desc)

View File

@@ -581,8 +581,7 @@ static int pruss_intc_probe(struct platform_device *pdev)
host_data->intc = intc;
host_data->host_irq = i;
irq_set_handler_data(irq, host_data);
irq_set_chained_handler(irq, pruss_intc_irq_handler);
irq_set_chained_handler_and_data(irq, pruss_intc_irq_handler, host_data);
}
return 0;

View File

@@ -19,21 +19,36 @@
#include "irq-msi-lib.h"
#define SG2042_MAX_MSI_VECTOR 32
struct sg2042_msi_chipdata {
void __iomem *reg_clr; // clear reg, see TRM, 10.1.33, GP_INTR0_CLR
phys_addr_t doorbell_addr; // see TRM, 10.1.32, GP_INTR0_SET
u32 irq_first; // The vector number that MSIs starts
u32 num_irqs; // The number of vectors for MSIs
DECLARE_BITMAP(msi_map, SG2042_MAX_MSI_VECTOR);
struct mutex msi_map_lock; // lock for msi_map
struct sg204x_msi_chip_info {
const struct irq_chip *irqchip;
const struct msi_parent_ops *parent_ops;
};
static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_req)
/**
* struct sg204x_msi_chipdata - chip data for the SG204x MSI IRQ controller
* @reg_clr: clear reg, see TRM, 10.1.33, GP_INTR0_CLR
* @doorbell_addr: see TRM, 10.1.32, GP_INTR0_SET
 * @irq_first: First vector number where MSIs start
* @num_irqs: Number of vectors for MSIs
* @msi_map: mapping for allocated MSI vectors.
* @msi_map_lock: Lock for msi_map
 * @chip_info: chip-specific information
*/
struct sg204x_msi_chipdata {
void __iomem *reg_clr;
phys_addr_t doorbell_addr;
u32 irq_first;
u32 num_irqs;
unsigned long *msi_map;
struct mutex msi_map_lock;
const struct sg204x_msi_chip_info *chip_info;
};
static int sg204x_msi_allocate_hwirq(struct sg204x_msi_chipdata *data, int num_req)
{
int first;
@@ -43,7 +58,7 @@ static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_r
return first >= 0 ? first : -ENOSPC;
}
static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, int num_req)
static void sg204x_msi_free_hwirq(struct sg204x_msi_chipdata *data, int hwirq, int num_req)
{
guard(mutex)(&data->msi_map_lock);
bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req));
@@ -51,7 +66,7 @@ static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, i
static void sg2042_msi_irq_ack(struct irq_data *d)
{
struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
int bit_off = d->hwirq;
writel(1 << bit_off, data->reg_clr);
@@ -61,7 +76,7 @@ static void sg2042_msi_irq_ack(struct irq_data *d)
static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
msg->address_hi = upper_32_bits(data->doorbell_addr);
msg->address_lo = lower_32_bits(data->doorbell_addr);
@@ -79,9 +94,38 @@ static const struct irq_chip sg2042_msi_middle_irq_chip = {
.irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg,
};
static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq)
static void sg2044_msi_irq_ack(struct irq_data *d)
{
struct sg2042_msi_chipdata *data = domain->host_data;
struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
writel(0, (u32 __iomem *)data->reg_clr + d->hwirq);
irq_chip_ack_parent(d);
}
static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
phys_addr_t doorbell = data->doorbell_addr + 4 * (d->hwirq / 32);
msg->address_lo = lower_32_bits(doorbell);
msg->address_hi = upper_32_bits(doorbell);
msg->data = d->hwirq % 32;
}
static struct irq_chip sg2044_msi_middle_irq_chip = {
.name = "SG2044 MSI",
.irq_ack = sg2044_msi_irq_ack,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
.irq_compose_msi_msg = sg2044_msi_irq_compose_msi_msg,
};
static int sg204x_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq)
{
struct sg204x_msi_chipdata *data = domain->host_data;
struct irq_fwspec fwspec;
struct irq_data *d;
int ret;
@@ -99,47 +143,45 @@ static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned in
return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}
static int sg2042_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
static int sg204x_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
struct sg2042_msi_chipdata *data = domain->host_data;
struct sg204x_msi_chipdata *data = domain->host_data;
int hwirq, err, i;
hwirq = sg2042_msi_allocate_hwirq(data, nr_irqs);
hwirq = sg204x_msi_allocate_hwirq(data, nr_irqs);
if (hwirq < 0)
return hwirq;
for (i = 0; i < nr_irqs; i++) {
err = sg2042_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
err = sg204x_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
if (err)
goto err_hwirq;
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
&sg2042_msi_middle_irq_chip, data);
data->chip_info->irqchip, data);
}
return 0;
err_hwirq:
sg2042_msi_free_hwirq(data, hwirq, nr_irqs);
sg204x_msi_free_hwirq(data, hwirq, nr_irqs);
irq_domain_free_irqs_parent(domain, virq, i);
return err;
}
static void sg2042_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq,
static void sg204x_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
sg2042_msi_free_hwirq(data, d->hwirq, nr_irqs);
sg204x_msi_free_hwirq(data, d->hwirq, nr_irqs);
}
static const struct irq_domain_ops sg2042_msi_middle_domain_ops = {
.alloc = sg2042_msi_middle_domain_alloc,
.free = sg2042_msi_middle_domain_free,
static const struct irq_domain_ops sg204x_msi_middle_domain_ops = {
.alloc = sg204x_msi_middle_domain_alloc,
.free = sg204x_msi_middle_domain_free,
.select = msi_lib_irq_domain_select,
};
@@ -158,14 +200,30 @@ static const struct msi_parent_ops sg2042_msi_parent_ops = {
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data,
#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS)
#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
MSI_FLAG_PCI_MSIX)
static const struct msi_parent_ops sg2044_msi_parent_ops = {
.required_flags = SG2044_MSI_FLAGS_REQUIRED,
.supported_flags = SG2044_MSI_FLAGS_SUPPORTED,
.chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PCI_MSI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.prefix = "SG2044-",
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int sg204x_msi_init_domains(struct sg204x_msi_chipdata *data,
struct irq_domain *plic_domain, struct device *dev)
{
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct irq_domain *middle_domain;
middle_domain = irq_domain_create_hierarchy(plic_domain, 0, data->num_irqs, fwnode,
&sg2042_msi_middle_domain_ops, data);
&sg204x_msi_middle_domain_ops, data);
if (!middle_domain) {
pr_err("Failed to create the MSI middle domain\n");
return -ENOMEM;
@@ -174,24 +232,29 @@ static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data,
irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
middle_domain->msi_parent_ops = &sg2042_msi_parent_ops;
middle_domain->msi_parent_ops = data->chip_info->parent_ops;
return 0;
}
static int sg2042_msi_probe(struct platform_device *pdev)
{
struct fwnode_reference_args args = { };
struct sg2042_msi_chipdata *data;
struct sg204x_msi_chipdata *data;
struct device *dev = &pdev->dev;
struct irq_domain *plic_domain;
struct resource *res;
int ret;
data = devm_kzalloc(dev, sizeof(struct sg2042_msi_chipdata), GFP_KERNEL);
data = devm_kzalloc(dev, sizeof(struct sg204x_msi_chipdata), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->chip_info = device_get_match_data(&pdev->dev);
if (!data->chip_info) {
dev_err(&pdev->dev, "Failed to get irqchip\n");
return -EINVAL;
}
data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr");
if (IS_ERR(data->reg_clr)) {
dev_err(dev, "Failed to map clear register\n");
@@ -232,11 +295,28 @@ static int sg2042_msi_probe(struct platform_device *pdev)
mutex_init(&data->msi_map_lock);
return sg2042_msi_init_domains(data, plic_domain, dev);
data->msi_map = devm_bitmap_zalloc(&pdev->dev, data->num_irqs, GFP_KERNEL);
if (!data->msi_map) {
dev_err(&pdev->dev, "Unable to allocate msi mapping\n");
return -ENOMEM;
}
return sg204x_msi_init_domains(data, plic_domain, dev);
}
static const struct sg204x_msi_chip_info sg2042_chip_info = {
.irqchip = &sg2042_msi_middle_irq_chip,
.parent_ops = &sg2042_msi_parent_ops,
};
static const struct sg204x_msi_chip_info sg2044_chip_info = {
.irqchip = &sg2044_msi_middle_irq_chip,
.parent_ops = &sg2044_msi_parent_ops,
};
static const struct of_device_id sg2042_msi_of_match[] = {
{ .compatible = "sophgo,sg2042-msi" },
{ .compatible = "sophgo,sg2042-msi", .data = &sg2042_chip_info },
{ .compatible = "sophgo,sg2044-msi", .data = &sg2044_chip_info },
{ }
};

View File

@@ -169,22 +169,18 @@ static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
u32 rtsr, ftsr;
int err;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
if (err)
goto unlock;
return err;
irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
unlock:
irq_gc_unlock(gc);
return err;
return 0;
}
static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data,
@@ -217,18 +213,16 @@ static void stm32_irq_suspend(struct irq_chip_generic *gc)
{
struct stm32_exti_chip_data *chip_data = gc->private;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
stm32_chip_suspend(chip_data, gc->wake_active);
irq_gc_unlock(gc);
}
static void stm32_irq_resume(struct irq_chip_generic *gc)
{
struct stm32_exti_chip_data *chip_data = gc->private;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
stm32_chip_resume(chip_data, gc->mask_cache);
irq_gc_unlock(gc);
}
static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
@@ -265,11 +259,8 @@ static void stm32_irq_ack(struct irq_data *d)
struct stm32_exti_chip_data *chip_data = gc->private;
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
irq_gc_unlock(gc);
}
static struct

View File

@@ -111,7 +111,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
unsigned int src_type;
unsigned int i;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
switch (flow_type & IRQF_TRIGGER_MASK) {
case IRQ_TYPE_EDGE_FALLING:
@@ -128,9 +128,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
break;
default:
irq_gc_unlock(gc);
pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
data->irq);
pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", data->irq);
return -EBADR;
}
@@ -145,9 +143,6 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
src_type_reg |= src_type;
sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
irq_gc_unlock(gc);
return IRQ_SET_MASK_OK;
}

View File

@@ -41,11 +41,9 @@ static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg)
static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
uint32_t im, mod, pol;
uint32_t mod, pol, im = data->mask;
im = data->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im;
pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im;
@@ -67,9 +65,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
case IRQ_TYPE_EDGE_RISING:
break;
default:
irq_gc_unlock(gc);
pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
__func__, data->irq);
pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", __func__, data->irq);
return -EBADR;
}
@@ -79,9 +75,6 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod);
ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol);
ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im);
irq_gc_unlock(gc);
return IRQ_SET_MASK_OK;
}

View File

@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
@@ -63,29 +64,28 @@ struct vt8500_irq_data {
struct irq_domain *domain; /* Domain for this controller */
};
/* Global variable for accessing io-mem addresses */
static struct vt8500_irq_data intc[VT8500_INTC_MAX];
static u32 active_cnt = 0;
/* Primary interrupt controller data */
static struct vt8500_irq_data *primary_intc;
static void vt8500_irq_ack(struct irq_data *d)
{
struct vt8500_irq_data *priv = d->domain->host_data;
void __iomem *base = priv->base;
void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4);
u32 status = (1 << (d->hwirq & 0x1f));
writel(status, stat_reg);
}
static void vt8500_irq_mask(struct irq_data *d)
{
struct vt8500_irq_data *priv = d->domain->host_data;
void __iomem *base = priv->base;
void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4);
u8 edge, dctr;
u32 status;
u8 dctr;
edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE;
if (edge) {
status = readl(stat_reg);
status |= (1 << (d->hwirq & 0x1f));
writel(status, stat_reg);
} else {
dctr = readb(base + VT8500_ICDC + d->hwirq);
dctr &= ~VT8500_INT_ENABLE;
writeb(dctr, base + VT8500_ICDC + d->hwirq);
}
dctr = readb(base + VT8500_ICDC + d->hwirq);
dctr &= ~VT8500_INT_ENABLE;
writeb(dctr, base + VT8500_ICDC + d->hwirq);
}
static void vt8500_irq_unmask(struct irq_data *d)
@@ -130,11 +130,11 @@ static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
}
static struct irq_chip vt8500_irq_chip = {
.name = "vt8500",
.irq_ack = vt8500_irq_mask,
.irq_mask = vt8500_irq_mask,
.irq_unmask = vt8500_irq_unmask,
.irq_set_type = vt8500_irq_set_type,
.name = "vt8500",
.irq_ack = vt8500_irq_ack,
.irq_mask = vt8500_irq_mask,
.irq_unmask = vt8500_irq_unmask,
.irq_set_type = vt8500_irq_set_type,
};
static void __init vt8500_init_irq_hw(void __iomem *base)
@@ -163,82 +163,89 @@ static const struct irq_domain_ops vt8500_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
static inline void vt8500_handle_irq_common(struct vt8500_irq_data *intc)
{
unsigned long irqnr = readl_relaxed(intc->base) & 0x3F;
unsigned long stat;
/*
* Highest Priority register default = 63, so check that this
* is a real interrupt by checking the status register
*/
if (irqnr == 63) {
stat = readl_relaxed(intc->base + VT8500_ICIS + 4);
if (!(stat & BIT(31)))
return;
}
generic_handle_domain_irq(intc->domain, irqnr);
}
static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
{
u32 stat, i;
int irqnr;
void __iomem *base;
vt8500_handle_irq_common(primary_intc);
}
/* Loop through each active controller */
for (i=0; i<active_cnt; i++) {
base = intc[i].base;
irqnr = readl_relaxed(base) & 0x3F;
/*
Highest Priority register default = 63, so check that this
is a real interrupt by checking the status register
*/
if (irqnr == 63) {
stat = readl_relaxed(base + VT8500_ICIS + 4);
if (!(stat & BIT(31)))
continue;
}
static void vt8500_handle_irq_chained(struct irq_desc *desc)
{
struct irq_domain *d = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
struct vt8500_irq_data *intc = d->host_data;
generic_handle_domain_irq(intc[i].domain, irqnr);
}
chained_irq_enter(chip, desc);
vt8500_handle_irq_common(intc);
chained_irq_exit(chip, desc);
}
static int __init vt8500_irq_init(struct device_node *node,
struct device_node *parent)
{
int irq, i;
struct device_node *np = node;
struct vt8500_irq_data *intc;
int irq, i, ret = 0;
if (active_cnt == VT8500_INTC_MAX) {
pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n",
__func__);
goto out;
}
intc = kzalloc(sizeof(*intc), GFP_KERNEL);
if (!intc)
return -ENOMEM;
intc[active_cnt].base = of_iomap(np, 0);
intc[active_cnt].domain = irq_domain_add_linear(node, 64,
&vt8500_irq_domain_ops, &intc[active_cnt]);
if (!intc[active_cnt].base) {
intc->base = of_iomap(node, 0);
if (!intc->base) {
pr_err("%s: Unable to map IO memory\n", __func__);
goto out;
ret = -ENOMEM;
goto err_free;
}
if (!intc[active_cnt].domain) {
intc->domain = irq_domain_create_linear(of_fwnode_handle(node), 64,
&vt8500_irq_domain_ops, intc);
if (!intc->domain) {
pr_err("%s: Unable to add irq domain!\n", __func__);
goto out;
ret = -ENOMEM;
goto err_unmap;
}
set_handle_irq(vt8500_handle_irq);
vt8500_init_irq_hw(intc[active_cnt].base);
vt8500_init_irq_hw(intc->base);
pr_info("vt8500-irq: Added interrupt controller\n");
active_cnt++;
/* check if this is a slaved controller */
if (of_irq_count(np) != 0) {
/* check that we have the correct number of interrupts */
if (of_irq_count(np) != 8) {
pr_err("%s: Incorrect IRQ map for slaved controller\n",
__func__);
return -EINVAL;
}
for (i = 0; i < 8; i++) {
irq = irq_of_parse_and_map(np, i);
enable_irq(irq);
/* check if this is a chained controller */
if (of_irq_count(node) != 0) {
for (i = 0; i < of_irq_count(node); i++) {
irq = irq_of_parse_and_map(node, i);
irq_set_chained_handler_and_data(irq, vt8500_handle_irq_chained,
intc);
}
pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
} else {
primary_intc = intc;
set_handle_irq(vt8500_handle_irq);
}
out:
return 0;
err_unmap:
iounmap(intc->base);
err_free:
kfree(intc);
return ret;
}
IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init);

View File

@@ -257,10 +257,9 @@ static void pmu_irq_handler(struct irq_desc *desc)
* So, let's structure the code so that the window is as small as
* possible.
*/
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
done &= readl_relaxed(base + PMC_IRQ_CAUSE);
writel_relaxed(done, base + PMC_IRQ_CAUSE);
irq_gc_unlock(gc);
}
static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)

View File

@@ -1221,31 +1221,6 @@ static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
#ifdef CONFIG_SMP
static inline void irq_gc_lock(struct irq_chip_generic *gc)
{
raw_spin_lock(&gc->lock);
}
static inline void irq_gc_unlock(struct irq_chip_generic *gc)
{
raw_spin_unlock(&gc->lock);
}
#else
static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
/*
* The irqsave variants are for usage in non interrupt code. Do not use
* them in irq_chip callbacks. Use irq_gc_lock() instead.
*/
#define irq_gc_lock_irqsave(gc, flags) \
raw_spin_lock_irqsave(&(gc)->lock, flags)
#define irq_gc_unlock_irqrestore(gc, flags) \
raw_spin_unlock_irqrestore(&(gc)->lock, flags)
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{

View File

@@ -40,10 +40,9 @@ void irq_gc_mask_disable_reg(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, mask, ct->regs.disable);
*ct->mask_cache &= ~mask;
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg);
@@ -60,10 +59,9 @@ void irq_gc_mask_set_bit(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
*ct->mask_cache |= mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
@@ -80,10 +78,9 @@ void irq_gc_mask_clr_bit(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
@@ -100,10 +97,9 @@ void irq_gc_unmask_enable_reg(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, mask, ct->regs.enable);
*ct->mask_cache |= mask;
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg);
@@ -117,9 +113,8 @@ void irq_gc_ack_set_bit(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, mask, ct->regs.ack);
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
@@ -133,9 +128,8 @@ void irq_gc_ack_clr_bit(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = ~d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, mask, ct->regs.ack);
irq_gc_unlock(gc);
}
/**
@@ -156,11 +150,10 @@ void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, mask, ct->regs.disable);
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, mask, ct->regs.ack);
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_disable_and_ack_set);
@@ -174,9 +167,8 @@ void irq_gc_eoi(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, mask, ct->regs.eoi);
irq_gc_unlock(gc);
}
/**
@@ -196,12 +188,11 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on)
if (!(mask & gc->wake_enabled))
return -EINVAL;
irq_gc_lock(gc);
guard(raw_spinlock)(&gc->lock);
if (on)
gc->wake_active |= mask;
else
gc->wake_active &= ~mask;
irq_gc_unlock(gc);
return 0;
}
EXPORT_SYMBOL_GPL(irq_gc_set_wake);
@@ -288,7 +279,6 @@ int irq_domain_alloc_generic_chips(struct irq_domain *d,
{
struct irq_domain_chip_generic *dgc;
struct irq_chip_generic *gc;
unsigned long flags;
int numchips, i;
size_t dgc_sz;
size_t gc_sz;
@@ -340,9 +330,8 @@ int irq_domain_alloc_generic_chips(struct irq_domain *d,
goto err;
}
raw_spin_lock_irqsave(&gc_lock, flags);
list_add_tail(&gc->list, &gc_list);
raw_spin_unlock_irqrestore(&gc_lock, flags);
scoped_guard (raw_spinlock_irqsave, &gc_lock)
list_add_tail(&gc->list, &gc_list);
/* Calc pointer to the next generic chip */
tmp += gc_sz;
}
@@ -459,7 +448,6 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
struct irq_chip *chip;
unsigned long flags;
int idx;
gc = __irq_get_domain_generic_chip(d, hw_irq);
@@ -479,9 +467,8 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
/* We only init the cache for the first mapping of a generic chip */
if (!gc->installed) {
raw_spin_lock_irqsave(&gc->lock, flags);
guard(raw_spinlock_irqsave)(&gc->lock);
irq_gc_init_mask_cache(gc, dgc->gc_flags);
raw_spin_unlock_irqrestore(&gc->lock, flags);
}
/* Mark the interrupt as installed */
@@ -548,9 +535,8 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
struct irq_chip *chip = &ct->chip;
unsigned int i;
raw_spin_lock(&gc_lock);
list_add_tail(&gc->list, &gc_list);
raw_spin_unlock(&gc_lock);
scoped_guard (raw_spinlock, &gc_lock)
list_add_tail(&gc->list, &gc_list);
irq_gc_init_mask_cache(gc, flags);
@@ -616,9 +602,8 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
{
unsigned int i, virq;
raw_spin_lock(&gc_lock);
list_del(&gc->list);
raw_spin_unlock(&gc_lock);
scoped_guard (raw_spinlock, &gc_lock)
list_del(&gc->list);
for (i = 0; msk; msk >>= 1, i++) {
if (!(msk & 0x01))