From e749741b765b91eca907609b2e88c7f77604f148 Mon Sep 17 00:00:00 2001 From: Mingzheng Xing Date: Tue, 3 Jun 2025 17:42:12 +0800 Subject: [PATCH] riscv: upgrade to 6.6.0-95.0.0 - RISC-V kernel upgrade to 6.6.0-95.0.0 - Hwprobe related backport - Add ACPI NUMA support for RISC-V - Backport RISC-V external interrupt controller support for ACPI Signed-off-by: Mingzheng Xing --- 0001-riscv-kernel.patch | 8698 +++++++++++++++++++++++++++++++++++++-- kernel.spec | 8 +- 2 files changed, 8466 insertions(+), 240 deletions(-) diff --git a/0001-riscv-kernel.patch b/0001-riscv-kernel.patch index f04a5ac7..c617216a 100644 --- a/0001-riscv-kernel.patch +++ b/0001-riscv-kernel.patch @@ -1,10 +1,21 @@ -From 96d87d109c0c54f7d7471963566c4c6c8e571505 Mon Sep 17 00:00:00 2001 +From 49038ba56dd966a797a843073321cea51616391f Mon Sep 17 00:00:00 2001 From: Mingzheng Xing -Date: Wed, 7 May 2025 11:28:20 +0800 +Date: Sat, 7 Jun 2025 12:46:05 +0800 Subject: [PATCH] riscv kernel Signed-off-by: Mingzheng Xing --- + Documentation/arch/index.rst | 2 +- + Documentation/{ => arch}/riscv/acpi.rst | 0 + .../{ => arch}/riscv/boot-image-header.rst | 0 + Documentation/{ => arch}/riscv/boot.rst | 0 + Documentation/{ => arch}/riscv/features.rst | 0 + Documentation/arch/riscv/hwprobe.rst | 271 + + Documentation/{ => arch}/riscv/index.rst | 0 + .../{ => arch}/riscv/patch-acceptance.rst | 0 + Documentation/{ => arch}/riscv/uabi.rst | 0 + Documentation/{ => arch}/riscv/vector.rst | 0 + Documentation/{ => arch}/riscv/vm-layout.rst | 0 .../hwlock/xuantie,th1520-hwspinlock.yaml | 34 + .../bindings/iio/adc/thead,th1520-adc.yaml | 52 + .../bindings/iio/adc/xuantie,th1520-adc.yaml | 52 + @@ -31,16 +42,31 @@ Signed-off-by: Mingzheng Xing .../bindings/usb/xuantie,th1520-usb.yaml | 76 + .../bindings/watchdog/xuantie,th1520-wdt.yaml | 19 + .../membarrier-sync-core/arch-support.txt | 18 +- + .../maintainer/maintainer-entry-profile.rst | 2 +- + Documentation/process/index.rst | 2 +- + Documentation/riscv/hwprobe.rst | 98 - Documentation/scheduler/index.rst | 1 + Documentation/scheduler/membarrier.rst | 39 + - MAINTAINERS | 18 + + .../it_IT/riscv/patch-acceptance.rst | 2 +- + .../translations/zh_CN/arch/index.rst | 2 +- + .../{ => arch}/riscv/boot-image-header.rst | 4 +- + .../zh_CN/{ => arch}/riscv/index.rst | 4 +- + .../{ => arch}/riscv/patch-acceptance.rst | 4 +- + .../zh_CN/{ => arch}/riscv/vm-layout.rst | 4 +- + .../maintainer/maintainer-entry-profile.rst | 2 +- + MAINTAINERS | 20 +- + arch/arm64/Kconfig | 1 - arch/arm64/include/asm/tlb.h | 5 +- + arch/arm64/kernel/pci.c | 191 - + arch/ia64/Kconfig | 1 - + arch/loongarch/Kconfig | 1 - arch/loongarch/include/asm/pgalloc.h | 1 + arch/mips/include/asm/pgalloc.h | 1 + - arch/riscv/Kconfig | 41 +- + arch/riscv/Kconfig | 99 +- arch/riscv/Kconfig.socs | 49 + - arch/riscv/Makefile | 19 +- - arch/riscv/Makefile.isa | 18 + + arch/riscv/Kconfig.vendor | 19 + + arch/riscv/Makefile | 23 +- + arch/riscv/Makefile.isa | 15 + arch/riscv/boot/dts/Makefile | 2 + arch/riscv/boot/dts/sophgo/Makefile | 7 + .../riscv/boot/dts/sophgo/mango-2sockets.dtsi | 699 + @@ -80,37 +106,95 @@ Signed-off-by: Mingzheng Xing arch/riscv/configs/openeuler_defconfig | 1942 +- arch/riscv/configs/sg2042_defconfig | 9 + arch/riscv/configs/th1520_defconfig | 470 + - arch/riscv/include/asm/barrier.h | 22 + + arch/riscv/errata/andes/errata.c | 13 +- + arch/riscv/errata/sifive/errata.c | 3 + + arch/riscv/errata/thead/errata.c | 3 + + arch/riscv/include/asm/acpi.h | 15 +- + arch/riscv/include/asm/atomic.h | 17 +- + 
arch/riscv/include/asm/barrier.h | 58 +- + arch/riscv/include/asm/cmpxchg.h | 496 +- arch/riscv/include/asm/compat.h | 1 - - arch/riscv/include/asm/errata_list.h | 32 +- - arch/riscv/include/asm/hwcap.h | 1 + - arch/riscv/include/asm/io.h | 4 + + arch/riscv/include/asm/cpufeature-macros.h | 66 + + arch/riscv/include/asm/cpufeature.h | 69 + + arch/riscv/include/asm/csr.h | 4 + + arch/riscv/include/asm/elf.h | 2 +- + arch/riscv/include/asm/errata_list.h | 45 +- + arch/riscv/include/asm/fence.h | 10 +- + arch/riscv/include/asm/hwcap.h | 141 +- + arch/riscv/include/asm/hwprobe.h | 26 +- + arch/riscv/include/asm/insn-def.h | 4 + + arch/riscv/include/asm/io.h | 12 +- + arch/riscv/include/asm/irq.h | 55 + arch/riscv/include/asm/kvm_aia_aplic.h | 58 - arch/riscv/include/asm/kvm_aia_imsic.h | 38 - arch/riscv/include/asm/membarrier.h | 19 + + arch/riscv/include/asm/mmio.h | 5 +- + arch/riscv/include/asm/mmiowb.h | 2 +- arch/riscv/include/asm/pgalloc.h | 53 +- arch/riscv/include/asm/pgtable-64.h | 14 +- - arch/riscv/include/asm/pgtable.h | 9 +- + arch/riscv/include/asm/pgtable.h | 21 +- + arch/riscv/include/asm/processor.h | 6 + arch/riscv/include/asm/sbi.h | 9 + arch/riscv/include/asm/sparsemem.h | 2 +- - arch/riscv/include/asm/switch_to.h | 15 + + arch/riscv/include/asm/suspend.h | 5 +- + arch/riscv/include/asm/switch_to.h | 17 +- arch/riscv/include/asm/sync_core.h | 29 + arch/riscv/include/asm/tlb.h | 18 + - arch/riscv/kernel/cpufeature.c | 1 + + arch/riscv/include/asm/vdso/processor.h | 8 +- + arch/riscv/include/asm/vector.h | 12 +- + arch/riscv/include/asm/vendor_extensions.h | 103 + + .../include/asm/vendor_extensions/andes.h | 19 + + arch/riscv/include/asm/vendorid_list.h | 2 +- + arch/riscv/include/uapi/asm/hwprobe.h | 52 +- + arch/riscv/kernel/Makefile | 4 + + arch/riscv/kernel/acpi.c | 36 +- + arch/riscv/kernel/acpi_numa.c | 130 + + arch/riscv/kernel/alternative.c | 2 +- + arch/riscv/kernel/cpufeature.c | 579 +- arch/riscv/kernel/module.c | 83 +- arch/riscv/kernel/process.c | 3 + arch/riscv/kernel/sbi-ipi.c | 46 +- - arch/riscv/kernel/suspend.c | 44 + - arch/riscv/kernel/vector.c | 3 +- - arch/riscv/kvm/aia.c | 35 +- + arch/riscv/kernel/setup.c | 8 +- + arch/riscv/kernel/smpboot.c | 4 +- + arch/riscv/kernel/suspend.c | 100 +- + arch/riscv/kernel/sys_hwprobe.c | 349 + + arch/riscv/kernel/sys_riscv.c | 267 - + arch/riscv/kernel/vdso/hwprobe.c | 86 +- + arch/riscv/kernel/vector.c | 8 +- + arch/riscv/kernel/vendor_extensions.c | 56 + + arch/riscv/kernel/vendor_extensions/Makefile | 3 + + arch/riscv/kernel/vendor_extensions/andes.c | 18 + + arch/riscv/kvm/aia.c | 37 +- arch/riscv/kvm/aia_aplic.c | 2 +- arch/riscv/kvm/aia_device.c | 2 +- arch/riscv/kvm/aia_imsic.c | 2 +- + arch/riscv/kvm/main.c | 2 +- + arch/riscv/kvm/tlb.c | 2 +- + arch/riscv/kvm/vcpu_fp.c | 2 +- + arch/riscv/kvm/vcpu_onereg.c | 2 +- + arch/riscv/kvm/vcpu_vector.c | 2 +- arch/riscv/mm/dma-noncoherent.c | 9 +- arch/riscv/mm/pgtable.c | 2 + arch/riscv/mm/tlbflush.c | 31 + + arch/sw_64/Kconfig | 1 - arch/x86/include/asm/hw_irq.h | 2 - arch/x86/mm/pgtable.c | 3 + + drivers/acpi/Makefile | 2 +- + drivers/acpi/acpi_lpss.c | 15 +- + drivers/acpi/bus.c | 4 + + drivers/acpi/internal.h | 8 + + drivers/acpi/mipi-disco-img.c | 292 + + drivers/acpi/numa/Kconfig | 5 +- + drivers/acpi/numa/srat.c | 34 +- + drivers/acpi/pci_link.c | 2 + + drivers/acpi/riscv/Makefile | 2 +- + drivers/acpi/riscv/init.c | 13 + + drivers/acpi/riscv/init.h | 4 + + drivers/acpi/riscv/irq.c | 335 + + drivers/acpi/scan.c | 151 +- + drivers/acpi/thermal.c | 56 +- + 
drivers/acpi/utils.c | 138 +- + drivers/base/arch_numa.c | 2 +- drivers/base/platform-msi.c | 149 +- drivers/char/ipmi/ipmi_si_hardcode.c | 26 +- drivers/char/ipmi/ipmi_si_intf.c | 3 +- @@ -150,9 +234,11 @@ Signed-off-by: Mingzheng Xing drivers/clk/xuantie/gate/vosys-gate.c | 111 + drivers/clk/xuantie/gate/vpsys-gate.c | 99 + drivers/clk/xuantie/gate/xuantie-gate.c | 114 + + drivers/clocksource/timer-riscv.c | 2 +- drivers/cpufreq/Kconfig | 9 + drivers/cpufreq/Makefile | 1 + drivers/cpufreq/th1520-cpufreq.c | 584 + + drivers/cpuidle/cpuidle-riscv-sbi.c | 49 +- .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 106 +- drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 11 + drivers/dma/mv_xor_v2.c | 8 +- @@ -823,15 +909,16 @@ Signed-off-by: Mingzheng Xing drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 5 +- drivers/irqchip/Kconfig | 25 + drivers/irqchip/Makefile | 3 + - drivers/irqchip/irq-riscv-aplic-direct.c | 323 + - drivers/irqchip/irq-riscv-aplic-main.c | 211 + - drivers/irqchip/irq-riscv-aplic-main.h | 52 + - drivers/irqchip/irq-riscv-aplic-msi.c | 278 + - drivers/irqchip/irq-riscv-imsic-early.c | 201 + - drivers/irqchip/irq-riscv-imsic-platform.c | 375 + - drivers/irqchip/irq-riscv-imsic-state.c | 865 + + drivers/irqchip/irq-riscv-aplic-direct.c | 329 + + drivers/irqchip/irq-riscv-aplic-main.c | 234 + + drivers/irqchip/irq-riscv-aplic-main.h | 53 + + drivers/irqchip/irq-riscv-aplic-msi.c | 285 + + drivers/irqchip/irq-riscv-imsic-early.c | 263 + + drivers/irqchip/irq-riscv-imsic-platform.c | 395 + + drivers/irqchip/irq-riscv-imsic-state.c | 891 + drivers/irqchip/irq-riscv-imsic-state.h | 108 + - drivers/irqchip/irq-riscv-intc.c | 45 +- + drivers/irqchip/irq-riscv-intc.c | 152 +- + drivers/irqchip/irq-sifive-plic.c | 365 +- drivers/mailbox/Kconfig | 8 + drivers/mailbox/Makefile | 2 + drivers/mailbox/bcm-flexrm-mailbox.c | 8 +- @@ -996,8 +1083,11 @@ Signed-off-by: Mingzheng Xing .../controller/cadence/pcie-cadence-sophgo.c | 973 + .../controller/cadence/pcie-cadence-sophgo.h | 17 + drivers/pci/msi/msi.c | 97 +- + drivers/pci/pci-acpi.c | 182 + drivers/pci/pcie/portdrv.c | 2 +- + drivers/perf/Kconfig | 14 + drivers/perf/arm_smmuv3_pmu.c | 4 +- + drivers/perf/riscv_pmu_sbi.c | 44 +- drivers/phy/Kconfig | 1 + drivers/phy/Makefile | 3 +- drivers/phy/synopsys/Kconfig | 13 + @@ -1011,6 +1101,7 @@ Signed-off-by: Mingzheng Xing drivers/pinctrl/sophgo/pinctrl-mango.c | 453 + drivers/pinctrl/sophgo/pinctrl-sophgo.c | 292 + drivers/pinctrl/sophgo/pinctrl-sophgo.h | 70 + + .../platform/surface/surface_acpi_notify.c | 14 +- drivers/pwm/Kconfig | 11 + drivers/pwm/Makefile | 2 + drivers/pwm/pwm-sophgo.c | 276 + @@ -1259,6 +1350,8 @@ Signed-off-by: Mingzheng Xing drivers/watchdog/Makefile | 1 + drivers/watchdog/dw_wdt.c | 13 +- drivers/watchdog/th1520_wdt.c | 393 + + include/acpi/acpi_bus.h | 37 +- + include/acpi/actbl3.h | 18 +- include/asm-generic/pgalloc.h | 7 +- include/drm/bridge/dw_hdmi.h | 5 + .../dt-bindings/clock/sophgo-mango-clock.h | 165 + @@ -1278,13 +1371,14 @@ Signed-off-by: Mingzheng Xing .../dt-bindings/reset/xuantie,th1520-reset.h | 28 + .../dt-bindings/soc/th1520_system_status.h | 38 + .../dt-bindings/soc/xuantie,th1520-iopmp.h | 41 + + include/linux/acpi.h | 15 + include/linux/cpuhotplug.h | 2 + include/linux/cpumask.h | 17 + include/linux/find.h | 27 + include/linux/firmware/xuantie/ipc.h | 167 + include/linux/firmware/xuantie/th1520_event.h | 35 + include/linux/irqchip/riscv-aplic.h | 145 + - include/linux/irqchip/riscv-imsic.h | 87 + + include/linux/irqchip/riscv-imsic.h | 96 + 
include/linux/irqdomain.h | 17 + include/linux/irqdomain_defs.h | 2 + include/linux/mlx4/device.h | 2 +- @@ -1349,7 +1443,22 @@ Signed-off-by: Mingzheng Xing .../riscv/thead/c900-legacy/microarch.json | 80 + .../arch/riscv/thead/th1520-ddr/metrics.json | 713 + .../thead/th1520-ddr/uncore-ddr-pmu.json | 1550 ++ - 1344 files changed, 571537 insertions(+), 848 deletions(-) + .../testing/selftests/riscv/hwprobe/Makefile | 9 +- + tools/testing/selftests/riscv/hwprobe/cbo.c | 228 + + .../testing/selftests/riscv/hwprobe/hwprobe.c | 64 +- + .../testing/selftests/riscv/hwprobe/hwprobe.h | 15 + + .../selftests/riscv/vector/vstate_prctl.c | 10 +- + 1443 files changed, 575915 insertions(+), 2354 deletions(-) + rename Documentation/{ => arch}/riscv/acpi.rst (100%) + rename Documentation/{ => arch}/riscv/boot-image-header.rst (100%) + rename Documentation/{ => arch}/riscv/boot.rst (100%) + rename Documentation/{ => arch}/riscv/features.rst (100%) + create mode 100644 Documentation/arch/riscv/hwprobe.rst + rename Documentation/{ => arch}/riscv/index.rst (100%) + rename Documentation/{ => arch}/riscv/patch-acceptance.rst (100%) + rename Documentation/{ => arch}/riscv/uabi.rst (100%) + rename Documentation/{ => arch}/riscv/vector.rst (100%) + rename Documentation/{ => arch}/riscv/vm-layout.rst (100%) create mode 100644 Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml create mode 100644 Documentation/devicetree/bindings/iio/adc/thead,th1520-adc.yaml create mode 100644 Documentation/devicetree/bindings/iio/adc/xuantie,th1520-adc.yaml @@ -1371,7 +1480,13 @@ Signed-off-by: Mingzheng Xing create mode 100644 Documentation/devicetree/bindings/spi/xuantie,th1520-spi.yaml create mode 100644 Documentation/devicetree/bindings/usb/xuantie,th1520-usb.yaml create mode 100644 Documentation/devicetree/bindings/watchdog/xuantie,th1520-wdt.yaml + delete mode 100644 Documentation/riscv/hwprobe.rst create mode 100644 Documentation/scheduler/membarrier.rst + rename Documentation/translations/zh_CN/{ => arch}/riscv/boot-image-header.rst (96%) + rename Documentation/translations/zh_CN/{ => arch}/riscv/index.rst (79%) + rename Documentation/translations/zh_CN/{ => arch}/riscv/patch-acceptance.rst (93%) + rename Documentation/translations/zh_CN/{ => arch}/riscv/vm-layout.rst (98%) + create mode 100644 arch/riscv/Kconfig.vendor create mode 100644 arch/riscv/Makefile.isa create mode 100644 arch/riscv/boot/dts/sophgo/Makefile create mode 100644 arch/riscv/boot/dts/sophgo/mango-2sockets.dtsi @@ -1404,9 +1519,21 @@ Signed-off-by: Mingzheng Xing create mode 100644 arch/riscv/configs/k1_defconfig create mode 100644 arch/riscv/configs/sg2042_defconfig create mode 100644 arch/riscv/configs/th1520_defconfig + create mode 100644 arch/riscv/include/asm/cpufeature-macros.h delete mode 100644 arch/riscv/include/asm/kvm_aia_aplic.h delete mode 100644 arch/riscv/include/asm/kvm_aia_imsic.h create mode 100644 arch/riscv/include/asm/sync_core.h + create mode 100644 arch/riscv/include/asm/vendor_extensions.h + create mode 100644 arch/riscv/include/asm/vendor_extensions/andes.h + create mode 100644 arch/riscv/kernel/acpi_numa.c + create mode 100644 arch/riscv/kernel/sys_hwprobe.c + create mode 100644 arch/riscv/kernel/vendor_extensions.c + create mode 100644 arch/riscv/kernel/vendor_extensions/Makefile + create mode 100644 arch/riscv/kernel/vendor_extensions/andes.c + create mode 100644 drivers/acpi/mipi-disco-img.c + create mode 100644 drivers/acpi/riscv/init.c + create mode 100644 drivers/acpi/riscv/init.h + create mode 
100644 drivers/acpi/riscv/irq.c create mode 100644 drivers/clk/sophgo/Makefile create mode 100644 drivers/clk/sophgo/clk-dummy.c create mode 100644 drivers/clk/sophgo/clk-mango.c @@ -2492,7 +2619,335 @@ Signed-off-by: Mingzheng Xing create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json create mode 100644 tools/perf/pmu-events/arch/riscv/thead/th1520-ddr/metrics.json create mode 100644 tools/perf/pmu-events/arch/riscv/thead/th1520-ddr/uncore-ddr-pmu.json + create mode 100644 tools/testing/selftests/riscv/hwprobe/cbo.c + create mode 100644 tools/testing/selftests/riscv/hwprobe/hwprobe.h +diff --git a/Documentation/arch/index.rst b/Documentation/arch/index.rst +index 84b80255b851..f4794117e56b 100644 +--- a/Documentation/arch/index.rst ++++ b/Documentation/arch/index.rst +@@ -20,7 +20,7 @@ implementation. + openrisc/index + parisc/index + ../powerpc/index +- ../riscv/index ++ riscv/index + s390/index + sh/index + sparc/index +diff --git a/Documentation/riscv/acpi.rst b/Documentation/arch/riscv/acpi.rst +similarity index 100% +rename from Documentation/riscv/acpi.rst +rename to Documentation/arch/riscv/acpi.rst +diff --git a/Documentation/riscv/boot-image-header.rst b/Documentation/arch/riscv/boot-image-header.rst +similarity index 100% +rename from Documentation/riscv/boot-image-header.rst +rename to Documentation/arch/riscv/boot-image-header.rst +diff --git a/Documentation/riscv/boot.rst b/Documentation/arch/riscv/boot.rst +similarity index 100% +rename from Documentation/riscv/boot.rst +rename to Documentation/arch/riscv/boot.rst +diff --git a/Documentation/riscv/features.rst b/Documentation/arch/riscv/features.rst +similarity index 100% +rename from Documentation/riscv/features.rst +rename to Documentation/arch/riscv/features.rst +diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst +new file mode 100644 +index 000000000000..971370894bfd +--- /dev/null ++++ b/Documentation/arch/riscv/hwprobe.rst +@@ -0,0 +1,271 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++RISC-V Hardware Probing Interface ++--------------------------------- ++ ++The RISC-V hardware probing interface is based around a single syscall, which ++is defined in :: ++ ++ struct riscv_hwprobe { ++ __s64 key; ++ __u64 value; ++ }; ++ ++ long sys_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, cpu_set_t *cpus, ++ unsigned int flags); ++ ++The arguments are split into three groups: an array of key-value pairs, a CPU ++set, and some flags. The key-value pairs are supplied with a count. Userspace ++must prepopulate the key field for each element, and the kernel will fill in the ++value if the key is recognized. If a key is unknown to the kernel, its key field ++will be cleared to -1, and its value set to 0. The CPU set is defined by ++CPU_SET(3) with size ``cpusetsize`` bytes. For value-like keys (eg. vendor, ++arch, impl), the returned value will only be valid if all CPUs in the given set ++have the same value. Otherwise -1 will be returned. For boolean-like keys, the ++value returned will be a logical AND of the values for the specified CPUs. ++Usermode can supply NULL for ``cpus`` and 0 for ``cpusetsize`` as a shortcut for ++all online CPUs. The currently supported flags are: ++ ++* :c:macro:`RISCV_HWPROBE_WHICH_CPUS`: This flag basically reverses the behavior ++ of sys_riscv_hwprobe(). 
Instead of populating the values of keys for a given ++ set of CPUs, the values of each key are given and the set of CPUs is reduced ++ by sys_riscv_hwprobe() to only those which match each of the key-value pairs. ++ How matching is done depends on the key type. For value-like keys, matching ++ means to be the exact same as the value. For boolean-like keys, matching ++ means the result of a logical AND of the pair's value with the CPU's value is ++ exactly the same as the pair's value. Additionally, when ``cpus`` is an empty ++ set, then it is initialized to all online CPUs which fit within it, i.e. the ++ CPU set returned is the reduction of all the online CPUs which can be ++ represented with a CPU set of size ``cpusetsize``. ++ ++All other flags are reserved for future compatibility and must be zero. ++ ++On success 0 is returned, on failure a negative error code is returned. ++ ++The following keys are defined: ++ ++* :c:macro:`RISCV_HWPROBE_KEY_MVENDORID`: Contains the value of ``mvendorid``, ++ as defined by the RISC-V privileged architecture specification. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_MARCHID`: Contains the value of ``marchid``, as ++ defined by the RISC-V privileged architecture specification. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_MIMPLID`: Contains the value of ``mimplid``, as ++ defined by the RISC-V privileged architecture specification. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_BASE_BEHAVIOR`: A bitmask containing the base ++ user-visible behavior that this kernel supports. The following base user ABIs ++ are defined: ++ ++ * :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: Support for rv32ima or ++ rv64ima, as defined by version 2.2 of the user ISA and version 1.10 of the ++ privileged ISA, with the following known exceptions (more exceptions may be ++ added, but only if it can be demonstrated that the user ABI is not broken): ++ ++ * The ``fence.i`` instruction cannot be directly executed by userspace ++ programs (it may still be executed in userspace via a ++ kernel-controlled mechanism such as the vDSO). ++ ++* :c:macro:`RISCV_HWPROBE_KEY_IMA_EXT_0`: A bitmask containing the extensions ++ that are compatible with the :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: ++ base system behavior. ++ ++ * :c:macro:`RISCV_HWPROBE_IMA_FD`: The F and D extensions are supported, as ++ defined by commit cd20cee ("FMIN/FMAX now implement ++ minimumNumber/maximumNumber, not minNum/maxNum") of the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_IMA_C`: The C extension is supported, as defined ++ by version 2.2 of the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_IMA_V`: The V extension is supported, as defined by ++ version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBA`: The Zba address generation extension is ++ supported, as defined in version 1.0 of the Bit-Manipulation ISA ++ extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBB`: The Zbb extension is supported, as defined ++ in version 1.0 of the Bit-Manipulation ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBS`: The Zbs extension is supported, as defined ++ in version 1.0 of the Bit-Manipulation ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZICBOZ`: The Zicboz extension is supported, as ++ ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBC` The Zbc extension is supported, as defined ++ in version 1.0 of the Bit-Manipulation ISA extensions. 
++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBKB` The Zbkb extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBKC` The Zbkc extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBKX` The Zbkx extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKND` The Zknd extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKNE` The Zkne extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKNH` The Zknh extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKSED` The Zksed extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKSH` The Zksh extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKT` The Zkt extension is supported, as defined ++ in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVBB`: The Zvbb extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVBC`: The Zvbc extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKB`: The Zvkb extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKG`: The Zvkg extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKNED`: The Zvkned extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKNHA`: The Zvknha extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKNHB`: The Zvknhb extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKSED`: The Zvksed extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKSH`: The Zvksh extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKT`: The Zvkt extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZFH`: The Zfh extension version 1.0 is supported ++ as defined in the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZFHMIN`: The Zfhmin extension version 1.0 is ++ supported as defined in the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZIHINTNTL`: The Zihintntl extension version 1.0 ++ is supported as defined in the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVFH`: The Zvfh extension is supported as ++ defined in the RISC-V Vector manual starting from commit e2ccd0548d6c ++ ("Remove draft warnings from Zvfh[min]"). 
++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVFHMIN`: The Zvfhmin extension is supported as ++ defined in the RISC-V Vector manual starting from commit e2ccd0548d6c ++ ("Remove draft warnings from Zvfh[min]"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZFA`: The Zfa extension is supported as ++ defined in the RISC-V ISA manual starting from commit 056b6ff467c7 ++ ("Zfa is ratified"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZTSO`: The Ztso extension is supported as ++ defined in the RISC-V ISA manual starting from commit 5618fb5a216b ++ ("Ztso is now ratified.") ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZACAS`: The Zacas extension is supported as ++ defined in the Atomic Compare-and-Swap (CAS) instructions manual starting ++ from commit 5059e0ca641c ("update to ratified"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZICOND`: The Zicond extension is supported as ++ defined in the RISC-V Integer Conditional (Zicond) operations extension ++ manual starting from commit 95cf1f9 ("Add changes requested by Ved ++ during signoff") ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZIHINTPAUSE`: The Zihintpause extension is ++ supported as defined in the RISC-V ISA manual starting from commit ++ d8ab5c78c207 ("Zihintpause is ratified"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE32X`: The Vector sub-extension Zve32x is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE32F`: The Vector sub-extension Zve32f is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE64X`: The Vector sub-extension Zve64x is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE64F`: The Vector sub-extension Zve64f is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE64D`: The Vector sub-extension Zve64d is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZIMOP`: The Zimop May-Be-Operations extension is ++ supported as defined in the RISC-V ISA manual starting from commit ++ 58220614a5f ("Zimop is ratified/1.0"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCA`: The Zca extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCB`: The Zcb extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCD`: The Zcd extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCF`: The Zcf extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCMOP`: The Zcmop May-Be-Operations extension is ++ supported as defined in the RISC-V ISA manual starting from commit ++ c732a4f39a4 ("Zcmop is ratified/1.0"). 
++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZAWRS`: The Zawrs extension is supported as ++ ratified in commit 98918c844281 ("Merge pull request #1217 from ++ riscv/zawrs") of riscv-isa-manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_SUPM`: The Supm extension is supported as ++ defined in version 1.0 of the RISC-V Pointer Masking extensions. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance ++ information about the selected set of processors. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNKNOWN`: The performance of misaligned ++ accesses is unknown. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_EMULATED`: Misaligned accesses are ++ emulated via software, either in or below the kernel. These accesses are ++ always extremely slow. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower ++ than equivalent byte accesses. Misaligned accesses may be supported ++ directly in hardware, or trapped and emulated by software. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster ++ than equivalent byte accesses. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are ++ not supported at all and will generate a misaligned address fault. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE`: An unsigned int which ++ represents the size of the Zicboz block in bytes. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS`: An unsigned long which ++ represent the highest userspace virtual address usable. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_TIME_CSR_FREQ`: Frequency (in Hz) of `time CSR`. +diff --git a/Documentation/riscv/index.rst b/Documentation/arch/riscv/index.rst +similarity index 100% +rename from Documentation/riscv/index.rst +rename to Documentation/arch/riscv/index.rst +diff --git a/Documentation/riscv/patch-acceptance.rst b/Documentation/arch/riscv/patch-acceptance.rst +similarity index 100% +rename from Documentation/riscv/patch-acceptance.rst +rename to Documentation/arch/riscv/patch-acceptance.rst +diff --git a/Documentation/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst +similarity index 100% +rename from Documentation/riscv/uabi.rst +rename to Documentation/arch/riscv/uabi.rst +diff --git a/Documentation/riscv/vector.rst b/Documentation/arch/riscv/vector.rst +similarity index 100% +rename from Documentation/riscv/vector.rst +rename to Documentation/arch/riscv/vector.rst +diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/arch/riscv/vm-layout.rst +similarity index 100% +rename from Documentation/riscv/vm-layout.rst +rename to Documentation/arch/riscv/vm-layout.rst diff --git a/Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml b/Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml new file mode 100644 index 000000000000..8d36beae9676 @@ -4383,6 +4838,136 @@ index 23260ca44946..76597adfb7d5 100644 | s390: | ok | | sh: | TODO | | sparc: | TODO | +diff --git a/Documentation/maintainer/maintainer-entry-profile.rst b/Documentation/maintainer/maintainer-entry-profile.rst +index 6b64072d4bf2..7ad4bfc2cc03 100644 +--- a/Documentation/maintainer/maintainer-entry-profile.rst ++++ b/Documentation/maintainer/maintainer-entry-profile.rst +@@ -101,7 +101,7 @@ to do something different in the near future. 
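For illustration only (this snippet is not part of the patch): a minimal userspace sketch of calling the probing interface documented in hwprobe.rst above. It assumes a riscv64 toolchain whose kernel headers provide <asm/hwprobe.h> and __NR_riscv_hwprobe; some libc versions also ship a __riscv_hwprobe() wrapper, but the raw syscall is used here:

  /* Query mvendorid and the IMA extension bitmask across all online CPUs. */
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <asm/hwprobe.h>

  int main(void)
  {
          struct riscv_hwprobe pairs[] = {
                  { .key = RISCV_HWPROBE_KEY_MVENDORID },
                  { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
          };
          /* NULL cpus and 0 cpusetsize mean "all online CPUs"; flags must be 0. */
          long ret = syscall(__NR_riscv_hwprobe, pairs,
                             sizeof(pairs) / sizeof(pairs[0]), 0, NULL, 0);

          if (ret < 0) {
                  perror("riscv_hwprobe");
                  return 1;
          }

          /* Any key the kernel did not recognize has been reset to -1. */
          printf("mvendorid: 0x%llx\n", (unsigned long long)pairs[0].value);
          if (pairs[1].key != -1 && (pairs[1].value & RISCV_HWPROBE_IMA_V))
                  printf("V extension usable on all online CPUs\n");
          return 0;
  }

The same call can be restricted to a subset of harts by passing a CPU_SET(3)-built cpu_set_t together with its size in bytes, or inverted with the RISCV_HWPROBE_WHICH_CPUS flag described above, in which case the kernel prunes the supplied CPU set down to the harts that match the prepopulated key/value pairs.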
+ + ../doc-guide/maintainer-profile + ../nvdimm/maintainer-entry-profile +- ../riscv/patch-acceptance ++ ../arch/riscv/patch-acceptance + ../driver-api/media/maintainer-entry-profile + ../driver-api/vfio-pci-device-specific-driver-acceptance + ../nvme/feature-and-quirk-policy +diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst +index b501cd977053..db09a81d474b 100644 +--- a/Documentation/process/index.rst ++++ b/Documentation/process/index.rst +@@ -71,7 +71,7 @@ lack of a better place. + volatile-considered-harmful + botching-up-ioctls + clang-format +- ../riscv/patch-acceptance ++ ../arch/riscv/patch-acceptance + ../core-api/unaligned-memory-access + + .. only:: subproject and html +diff --git a/Documentation/riscv/hwprobe.rst b/Documentation/riscv/hwprobe.rst +deleted file mode 100644 +index a52996b22f75..000000000000 +--- a/Documentation/riscv/hwprobe.rst ++++ /dev/null +@@ -1,98 +0,0 @@ +-.. SPDX-License-Identifier: GPL-2.0 +- +-RISC-V Hardware Probing Interface +---------------------------------- +- +-The RISC-V hardware probing interface is based around a single syscall, which +-is defined in :: +- +- struct riscv_hwprobe { +- __s64 key; +- __u64 value; +- }; +- +- long sys_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, cpu_set_t *cpus, +- unsigned int flags); +- +-The arguments are split into three groups: an array of key-value pairs, a CPU +-set, and some flags. The key-value pairs are supplied with a count. Userspace +-must prepopulate the key field for each element, and the kernel will fill in the +-value if the key is recognized. If a key is unknown to the kernel, its key field +-will be cleared to -1, and its value set to 0. The CPU set is defined by +-CPU_SET(3). For value-like keys (eg. vendor/arch/impl), the returned value will +-be only be valid if all CPUs in the given set have the same value. Otherwise -1 +-will be returned. For boolean-like keys, the value returned will be a logical +-AND of the values for the specified CPUs. Usermode can supply NULL for cpus and +-0 for cpu_count as a shortcut for all online CPUs. There are currently no flags, +-this value must be zero for future compatibility. +- +-On success 0 is returned, on failure a negative error code is returned. +- +-The following keys are defined: +- +-* :c:macro:`RISCV_HWPROBE_KEY_MVENDORID`: Contains the value of ``mvendorid``, +- as defined by the RISC-V privileged architecture specification. +- +-* :c:macro:`RISCV_HWPROBE_KEY_MARCHID`: Contains the value of ``marchid``, as +- defined by the RISC-V privileged architecture specification. +- +-* :c:macro:`RISCV_HWPROBE_KEY_MIMPLID`: Contains the value of ``mimplid``, as +- defined by the RISC-V privileged architecture specification. +- +-* :c:macro:`RISCV_HWPROBE_KEY_BASE_BEHAVIOR`: A bitmask containing the base +- user-visible behavior that this kernel supports. The following base user ABIs +- are defined: +- +- * :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: Support for rv32ima or +- rv64ima, as defined by version 2.2 of the user ISA and version 1.10 of the +- privileged ISA, with the following known exceptions (more exceptions may be +- added, but only if it can be demonstrated that the user ABI is not broken): +- +- * The ``fence.i`` instruction cannot be directly executed by userspace +- programs (it may still be executed in userspace via a +- kernel-controlled mechanism such as the vDSO). 
+- +-* :c:macro:`RISCV_HWPROBE_KEY_IMA_EXT_0`: A bitmask containing the extensions +- that are compatible with the :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: +- base system behavior. +- +- * :c:macro:`RISCV_HWPROBE_IMA_FD`: The F and D extensions are supported, as +- defined by commit cd20cee ("FMIN/FMAX now implement +- minimumNumber/maximumNumber, not minNum/maxNum") of the RISC-V ISA manual. +- +- * :c:macro:`RISCV_HWPROBE_IMA_C`: The C extension is supported, as defined +- by version 2.2 of the RISC-V ISA manual. +- +- * :c:macro:`RISCV_HWPROBE_IMA_V`: The V extension is supported, as defined by +- version 1.0 of the RISC-V Vector extension manual. +- +- * :c:macro:`RISCV_HWPROBE_EXT_ZBA`: The Zba address generation extension is +- supported, as defined in version 1.0 of the Bit-Manipulation ISA +- extensions. +- +- * :c:macro:`RISCV_HWPROBE_EXT_ZBB`: The Zbb extension is supported, as defined +- in version 1.0 of the Bit-Manipulation ISA extensions. +- +- * :c:macro:`RISCV_HWPROBE_EXT_ZBS`: The Zbs extension is supported, as defined +- in version 1.0 of the Bit-Manipulation ISA extensions. +- +-* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance +- information about the selected set of processors. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNKNOWN`: The performance of misaligned +- accesses is unknown. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_EMULATED`: Misaligned accesses are +- emulated via software, either in or below the kernel. These accesses are +- always extremely slow. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower +- than equivalent byte accesses. Misaligned accesses may be supported +- directly in hardware, or trapped and emulated by software. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster +- than equivalent byte accesses. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are +- not supported at all and will generate a misaligned address fault. diff --git a/Documentation/scheduler/index.rst b/Documentation/scheduler/index.rst index 3170747226f6..43bd8a145b7a 100644 --- a/Documentation/scheduler/index.rst @@ -4440,11 +5025,115 @@ index 000000000000..2387804b1c63 + +The barrier matches a full barrier in the proximity of the membarrier system call +entry, cf. membarrier_{private,global}_expedited(). +diff --git a/Documentation/translations/it_IT/riscv/patch-acceptance.rst b/Documentation/translations/it_IT/riscv/patch-acceptance.rst +index edf67252b3fb..2d7afb1f6959 100644 +--- a/Documentation/translations/it_IT/riscv/patch-acceptance.rst ++++ b/Documentation/translations/it_IT/riscv/patch-acceptance.rst +@@ -1,6 +1,6 @@ + .. 
include:: ../disclaimer-ita.rst + +-:Original: :doc:`../../../riscv/patch-acceptance` ++:Original: :doc:`../../../arch/riscv/patch-acceptance` + :Translator: Federico Vaga + + arch/riscv linee guida alla manutenzione per gli sviluppatori +diff --git a/Documentation/translations/zh_CN/arch/index.rst b/Documentation/translations/zh_CN/arch/index.rst +index e3d273d7d599..c4c2e16f629c 100644 +--- a/Documentation/translations/zh_CN/arch/index.rst ++++ b/Documentation/translations/zh_CN/arch/index.rst +@@ -10,7 +10,7 @@ + + mips/index + arm64/index +- ../riscv/index ++ ../arch/riscv/index + openrisc/index + parisc/index + loongarch/index +diff --git a/Documentation/translations/zh_CN/riscv/boot-image-header.rst b/Documentation/translations/zh_CN/arch/riscv/boot-image-header.rst +similarity index 96% +rename from Documentation/translations/zh_CN/riscv/boot-image-header.rst +rename to Documentation/translations/zh_CN/arch/riscv/boot-image-header.rst +index 0234c28a7114..779b5172fe24 100644 +--- a/Documentation/translations/zh_CN/riscv/boot-image-header.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/boot-image-header.rst +@@ -1,6 +1,6 @@ +-.. include:: ../disclaimer-zh_CN.rst ++.. include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/boot-image-header.rst ++:Original: Documentation/arch/riscv/boot-image-header.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/riscv/index.rst b/Documentation/translations/zh_CN/arch/riscv/index.rst +similarity index 79% +rename from Documentation/translations/zh_CN/riscv/index.rst +rename to Documentation/translations/zh_CN/arch/riscv/index.rst +index 131e405aa857..3b041c116169 100644 +--- a/Documentation/translations/zh_CN/riscv/index.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/index.rst +@@ -1,8 +1,8 @@ + .. SPDX-License-Identifier: GPL-2.0 + +-.. include:: ../disclaimer-zh_CN.rst ++.. include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/index.rst ++:Original: Documentation/arch/riscv/index.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/riscv/patch-acceptance.rst b/Documentation/translations/zh_CN/arch/riscv/patch-acceptance.rst +similarity index 93% +rename from Documentation/translations/zh_CN/riscv/patch-acceptance.rst +rename to Documentation/translations/zh_CN/arch/riscv/patch-acceptance.rst +index d180d24717bf..c8eb230ca8ee 100644 +--- a/Documentation/translations/zh_CN/riscv/patch-acceptance.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/patch-acceptance.rst +@@ -1,8 +1,8 @@ + .. SPDX-License-Identifier: GPL-2.0 + +-.. include:: ../disclaimer-zh_CN.rst ++.. include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/patch-acceptance.rst ++:Original: Documentation/arch/riscv/patch-acceptance.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/riscv/vm-layout.rst b/Documentation/translations/zh_CN/arch/riscv/vm-layout.rst +similarity index 98% +rename from Documentation/translations/zh_CN/riscv/vm-layout.rst +rename to Documentation/translations/zh_CN/arch/riscv/vm-layout.rst +index 91884e2dfff8..4b9f4dcf6c19 100644 +--- a/Documentation/translations/zh_CN/riscv/vm-layout.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/vm-layout.rst +@@ -1,7 +1,7 @@ + .. SPDX-License-Identifier: GPL-2.0 +-.. include:: ../disclaimer-zh_CN.rst ++.. 
include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/vm-layout.rst ++:Original: Documentation/arch/riscv/vm-layout.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst b/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst +index a1ee99c4786e..0f5acfb1012e 100644 +--- a/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst ++++ b/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst +@@ -89,4 +89,4 @@ + + ../doc-guide/maintainer-profile + ../../../nvdimm/maintainer-entry-profile +- ../../../riscv/patch-acceptance ++ ../../../arch/riscv/patch-acceptance diff --git a/MAINTAINERS b/MAINTAINERS -index c6a3ac61989c..75b4a363fcec 100644 +index 28b2a80f6a8c..6478bca3869d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS -@@ -13781,7 +13781,9 @@ M: Mathieu Desnoyers +@@ -13803,7 +13803,9 @@ M: Mathieu Desnoyers M: "Paul E. McKenney" L: linux-kernel@vger.kernel.org S: Supported @@ -4454,7 +5143,7 @@ index c6a3ac61989c..75b4a363fcec 100644 F: include/uapi/linux/membarrier.h F: kernel/sched/membarrier.c -@@ -18523,6 +18525,20 @@ S: Maintained +@@ -18552,6 +18554,20 @@ S: Maintained F: drivers/mtd/nand/raw/r852.c F: drivers/mtd/nand/raw/r852.h @@ -4475,7 +5164,16 @@ index c6a3ac61989c..75b4a363fcec 100644 RISC-V ARCHITECTURE M: Paul Walmsley M: Palmer Dabbelt -@@ -18594,6 +18610,8 @@ M: Fu Wei +@@ -18560,7 +18576,7 @@ L: linux-riscv@lists.infradead.org + S: Supported + Q: https://patchwork.kernel.org/project/linux-riscv/list/ + C: irc://irc.libera.chat/riscv +-P: Documentation/riscv/patch-acceptance.rst ++P: Documentation/arch/riscv/patch-acceptance.rst + T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git + F: arch/riscv/ + N: riscv +@@ -18623,6 +18639,8 @@ M: Fu Wei L: linux-riscv@lists.infradead.org S: Maintained F: arch/riscv/boot/dts/thead/ @@ -4484,6 +5182,18 @@ index c6a3ac61989c..75b4a363fcec 100644 RNBD BLOCK DRIVERS M: Md. Haris Iqbal +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 6d9def7a9c02..c0d0e959ef62 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -1592,7 +1592,6 @@ config ARM64_BOOTPARAM_HOTPLUG_CPU0 + config NUMA + bool "NUMA Memory Allocation and Scheduler Support" + select GENERIC_ARCH_NUMA +- select ACPI_NUMA if ACPI + select OF_NUMA + select HAVE_SETUP_PER_CPU_AREA + select NEED_PER_CPU_EMBED_FIRST_CHUNK diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 2c29239d05c3..846c563689a8 100644 --- a/arch/arm64/include/asm/tlb.h @@ -4500,6 +5210,237 @@ index 2c29239d05c3..846c563689a8 100644 } #endif +diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c +index f872c57e9909..fd9a7bed83ce 100644 +--- a/arch/arm64/kernel/pci.c ++++ b/arch/arm64/kernel/pci.c +@@ -6,28 +6,7 @@ + * Copyright (C) 2014 ARM Ltd. + */ + +-#include +-#include +-#include +-#include +-#include + #include +-#include +-#include +-#include +- +-#ifdef CONFIG_ACPI +-/* +- * Try to assign the IRQ number when probing a new device +- */ +-int pcibios_alloc_irq(struct pci_dev *dev) +-{ +- if (!acpi_disabled) +- acpi_pci_irq_enable(dev); +- +- return 0; +-} +-#endif + + /* + * raw_pci_read/write - Platform-specific PCI config space access. 
+@@ -61,173 +40,3 @@ int pcibus_to_node(struct pci_bus *bus) + EXPORT_SYMBOL(pcibus_to_node); + + #endif +- +-#ifdef CONFIG_ACPI +- +-struct acpi_pci_generic_root_info { +- struct acpi_pci_root_info common; +- struct pci_config_window *cfg; /* config space mapping */ +-}; +- +-int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +-{ +- struct pci_config_window *cfg = bus->sysdata; +- struct acpi_device *adev = to_acpi_device(cfg->parent); +- struct acpi_pci_root *root = acpi_driver_data(adev); +- +- return root->segment; +-} +- +-int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) +-{ +- struct pci_config_window *cfg; +- struct acpi_device *adev; +- struct device *bus_dev; +- +- if (acpi_disabled) +- return 0; +- +- cfg = bridge->bus->sysdata; +- +- /* +- * On Hyper-V there is no corresponding ACPI device for a root bridge, +- * therefore ->parent is set as NULL by the driver. And set 'adev' as +- * NULL in this case because there is no proper ACPI device. +- */ +- if (!cfg->parent) +- adev = NULL; +- else +- adev = to_acpi_device(cfg->parent); +- +- bus_dev = &bridge->bus->dev; +- +- ACPI_COMPANION_SET(&bridge->dev, adev); +- set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev))); +- +- return 0; +-} +- +-static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci) +-{ +- struct resource_entry *entry, *tmp; +- int status; +- +- status = acpi_pci_probe_root_resources(ci); +- resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { +- if (!(entry->res->flags & IORESOURCE_WINDOW)) +- resource_list_destroy_entry(entry); +- } +- return status; +-} +- +-/* +- * Lookup the bus range for the domain in MCFG, and set up config space +- * mapping. +- */ +-static struct pci_config_window * +-pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) +-{ +- struct device *dev = &root->device->dev; +- struct resource *bus_res = &root->secondary; +- u16 seg = root->segment; +- const struct pci_ecam_ops *ecam_ops; +- struct resource cfgres; +- struct acpi_device *adev; +- struct pci_config_window *cfg; +- int ret; +- +- ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops); +- if (ret) { +- dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); +- return NULL; +- } +- +- adev = acpi_resource_consumer(&cfgres); +- if (adev) +- dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres, +- dev_name(&adev->dev)); +- else +- dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n", +- &cfgres); +- +- cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); +- if (IS_ERR(cfg)) { +- dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, +- PTR_ERR(cfg)); +- return NULL; +- } +- +- return cfg; +-} +- +-/* release_info: free resources allocated by init_info */ +-static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci) +-{ +- struct acpi_pci_generic_root_info *ri; +- +- ri = container_of(ci, struct acpi_pci_generic_root_info, common); +- pci_ecam_free(ri->cfg); +- kfree(ci->ops); +- kfree(ri); +-} +- +-/* Interface called from ACPI code to setup PCI host controller */ +-struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +-{ +- struct acpi_pci_generic_root_info *ri; +- struct pci_bus *bus, *child; +- struct acpi_pci_root_ops *root_ops; +- struct pci_host_bridge *host; +- +- ri = kzalloc(sizeof(*ri), GFP_KERNEL); +- if (!ri) +- return NULL; +- +- root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); +- if (!root_ops) { +- kfree(ri); +- return NULL; +- } +- +- ri->cfg = pci_acpi_setup_ecam_mapping(root); +- if (!ri->cfg) { +- 
kfree(ri); +- kfree(root_ops); +- return NULL; +- } +- +- root_ops->release_info = pci_acpi_generic_release_info; +- root_ops->prepare_resources = pci_acpi_root_prepare_resources; +- root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops; +- bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg); +- if (!bus) +- return NULL; +- +- /* If we must preserve the resource configuration, claim now */ +- host = pci_find_host_bridge(bus); +- if (host->preserve_config) +- pci_bus_claim_resources(bus); +- +- /* +- * Assign whatever was left unassigned. If we didn't claim above, +- * this will reassign everything. +- */ +- pci_assign_unassigned_root_bus_resources(bus); +- +- list_for_each_entry(child, &bus->children, node) +- pcie_bus_configure_settings(child); +- +- return bus; +-} +- +-void pcibios_add_bus(struct pci_bus *bus) +-{ +- acpi_pci_add_bus(bus); +-} +- +-void pcibios_remove_bus(struct pci_bus *bus) +-{ +- acpi_pci_remove_bus(bus); +-} +- +-#endif +diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig +index 53faa122b0f4..88182df75060 100644 +--- a/arch/ia64/Kconfig ++++ b/arch/ia64/Kconfig +@@ -16,7 +16,6 @@ config IA64 + select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_MIGHT_HAVE_PC_SERIO + select ACPI +- select ACPI_NUMA if NUMA + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_SUPPORTS_ACPI +diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig +index 8739e15c137b..4ce4b491edcd 100644 +--- a/arch/loongarch/Kconfig ++++ b/arch/loongarch/Kconfig +@@ -468,7 +468,6 @@ config NR_CPUS + config NUMA + bool "NUMA Support" + select SMP +- select ACPI_NUMA if ACPI + help + Say Y to compile the kernel with NUMA (Non-Uniform Memory Access) + support. This option improves performance on systems with more diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index c9f9895f237d..a7b9c9e73593 100644 --- a/arch/loongarch/include/asm/pgalloc.h @@ -4525,11 +5466,22 @@ index 40e40a7eb94a..f4440edcd8fe 100644 pud_init(pud); diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig -index 3be10e723b2c..7b9125dd1209 100644 +index 3be10e723b2c..81c26ac55153 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig -@@ -28,18 +28,22 @@ config RISCV +@@ -13,6 +13,7 @@ config 32BIT + config RISCV + def_bool y + select ACPI_GENERIC_GSI if ACPI ++ select ACPI_MCFG if (ACPI && PCI) + select ACPI_REDUCED_HARDWARE_ONLY if ACPI + select ARCH_DMA_DEFAULT_COHERENT + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION +@@ -26,20 +27,25 @@ config RISCV + select ARCH_HAS_FORTIFY_SOURCE + select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE ++ select ARCH_HAS_HW_PTE_YOUNG select ARCH_HAS_KCOV select ARCH_HAS_MEMBARRIER_CALLBACKS + select ARCH_HAS_MEMBARRIER_SYNC_CORE @@ -4551,7 +5503,7 @@ index 3be10e723b2c..7b9125dd1209 100644 select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT select ARCH_STACKWALK -@@ -64,7 +68,7 @@ config RISCV +@@ -64,7 +70,7 @@ config RISCV select CLINT_TIMER if !MMU select CLONE_BACKWARDS select COMMON_CLK @@ -4560,7 +5512,7 @@ index 3be10e723b2c..7b9125dd1209 100644 select EDAC_SUPPORT select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE) select GENERIC_ARCH_TOPOLOGY -@@ -119,6 +123,7 @@ config RISCV +@@ -119,6 +125,7 @@ config RISCV select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION select HAVE_EBPF_JIT if MMU @@ -4568,7 +5520,7 @@ index 
3be10e723b2c..7b9125dd1209 100644 select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_ERROR_INJECTION select HAVE_GCC_PLUGINS -@@ -147,6 +152,7 @@ config RISCV +@@ -147,14 +154,18 @@ config RISCV select IRQ_FORCED_THREADING select KASAN_VMALLOC if KASAN select LOCK_MM_AND_FIND_VMA @@ -4576,8 +5528,10 @@ index 3be10e723b2c..7b9125dd1209 100644 select MODULES_USE_ELF_RELA if MODULES select MODULE_SECTIONS if MODULES select OF -@@ -155,6 +161,8 @@ config RISCV + select OF_EARLY_FLATTREE + select OF_IRQ select PCI_DOMAINS_GENERIC if PCI ++ select PCI_ECAM if (ACPI && PCI) select PCI_MSI if PCI select RISCV_ALTERNATIVE if !XIP_KERNEL + select RISCV_APLIC @@ -4585,7 +5539,7 @@ index 3be10e723b2c..7b9125dd1209 100644 select RISCV_INTC select RISCV_TIMER if RISCV_SBI select SIFIVE_PLIC -@@ -223,6 +231,20 @@ config KASAN_SHADOW_OFFSET +@@ -223,6 +234,20 @@ config KASAN_SHADOW_OFFSET default 0xdfffffff00000000 if 64BIT default 0xffffffff if 32BIT @@ -4606,7 +5560,7 @@ index 3be10e723b2c..7b9125dd1209 100644 config ARCH_FLATMEM_ENABLE def_bool !NUMA -@@ -281,6 +303,7 @@ config RISCV_DMA_NONCOHERENT +@@ -281,6 +306,7 @@ config RISCV_DMA_NONCOHERENT select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB @@ -4614,7 +5568,15 @@ index 3be10e723b2c..7b9125dd1209 100644 config RISCV_NONSTANDARD_CACHE_OPS bool -@@ -507,7 +530,7 @@ config RISCV_ISA_V +@@ -298,6 +324,7 @@ config AS_HAS_OPTION_ARCH + + source "arch/riscv/Kconfig.socs" + source "arch/riscv/Kconfig.errata" ++source "arch/riscv/Kconfig.vendor" + + menu "Platform type" + +@@ -507,7 +534,7 @@ config RISCV_ISA_V depends on TOOLCHAIN_HAS_V depends on FPU select DYNAMIC_SIGFRAME @@ -4623,7 +5585,75 @@ index 3be10e723b2c..7b9125dd1209 100644 help Say N here if you want to disable all vector related procedure in the kernel. -@@ -697,6 +720,20 @@ config ARCH_SUPPORTS_KEXEC_PURGATORY +@@ -525,6 +552,53 @@ config RISCV_ISA_V_DEFAULT_ENABLE + + If you don't know what to do here, say Y. + ++config RISCV_ISA_ZAWRS ++ bool "Zawrs extension support for more efficient busy waiting" ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ The Zawrs extension defines instructions to be used in polling loops ++ which allow a hart to enter a low-power state or to trap to the ++ hypervisor while waiting on a store to a memory location. Enable the ++ use of these instructions in the kernel when the Zawrs extension is ++ detected at boot. ++ ++config TOOLCHAIN_HAS_ZABHA ++ bool ++ default y ++ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zabha) ++ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zabha) ++ depends on AS_HAS_OPTION_ARCH ++ ++config RISCV_ISA_ZABHA ++ bool "Zabha extension support for atomic byte/halfword operations" ++ depends on TOOLCHAIN_HAS_ZABHA ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ Enable the use of the Zabha ISA-extension to implement kernel ++ byte/halfword atomic memory operations when it is detected at boot. ++ ++ If you don't know what to do here, say Y. 
++ ++config TOOLCHAIN_HAS_ZACAS ++ bool ++ default y ++ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zacas) ++ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zacas) ++ depends on AS_HAS_OPTION_ARCH ++ ++config RISCV_ISA_ZACAS ++ bool "Zacas extension support for atomic CAS" ++ depends on TOOLCHAIN_HAS_ZACAS ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ Enable the use of the Zacas ISA-extension to implement kernel atomic ++ cmpxchg operations when it is detected at boot. ++ ++ If you don't know what to do here, say Y. ++ + config TOOLCHAIN_HAS_ZBB + bool + default y +@@ -579,13 +653,6 @@ config RISCV_ISA_ZICBOZ + + If you don't know what to do here, say Y. + +-config TOOLCHAIN_HAS_ZIHINTPAUSE +- bool +- default y +- depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zihintpause) +- depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause) +- depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600 +- + config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI + def_bool y + # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc +@@ -697,6 +764,20 @@ config ARCH_SUPPORTS_KEXEC_PURGATORY config ARCH_SUPPORTS_CRASH_DUMP def_bool y @@ -4716,11 +5746,36 @@ index 30fd6a512828..d468306a1a5c 100644 +endif + endmenu # "SoC selection" +diff --git a/arch/riscv/Kconfig.vendor b/arch/riscv/Kconfig.vendor +new file mode 100644 +index 000000000000..6f1cdd32ed29 +--- /dev/null ++++ b/arch/riscv/Kconfig.vendor +@@ -0,0 +1,19 @@ ++menu "Vendor extensions" ++ ++config RISCV_ISA_VENDOR_EXT ++ bool ++ ++menu "Andes" ++config RISCV_ISA_VENDOR_EXT_ANDES ++ bool "Andes vendor extension support" ++ select RISCV_ISA_VENDOR_EXT ++ default y ++ help ++ Say N here if you want to disable all Andes vendor extension ++ support. This will cause any Andes vendor extensions that are ++ requested by hardware probing to be ignored. ++ ++ If you don't know what to do here, say Y. ++endmenu ++ ++endmenu diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile -index b43a6bb7e4dc..c33a055a06f3 100644 +index b43a6bb7e4dc..30099b367479 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile -@@ -54,22 +54,7 @@ endif +@@ -54,22 +54,13 @@ endif endif endif @@ -4737,14 +5792,19 @@ index b43a6bb7e4dc..c33a055a06f3 100644 -else -riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei -endif -- ++include $(srctree)/arch/riscv/Makefile.isa ++ ++# Check if the toolchain supports Zacas ++riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZACAS) := $(riscv-march-y)_zacas + -# Check if the toolchain supports Zihintpause extension -riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause -+include $(srctree)/arch/riscv/Makefile.isa ++# Check if the toolchain supports Zabha ++riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZABHA) := $(riscv-march-y)_zabha # Remove F,D,V from isa string for all. 
Keep extensions between "fd" and "v" by # matching non-v and non-multi-letter extensions out with the filter ([^v_]*) -@@ -152,7 +137,7 @@ ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy) +@@ -152,7 +143,7 @@ ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy) KBUILD_IMAGE := $(boot)/loader.bin else ifeq ($(CONFIG_EFI_ZBOOT),) @@ -4755,10 +5815,10 @@ index b43a6bb7e4dc..c33a055a06f3 100644 endif diff --git a/arch/riscv/Makefile.isa b/arch/riscv/Makefile.isa new file mode 100644 -index 000000000000..322a83958b96 +index 000000000000..279f24f32763 --- /dev/null +++ b/arch/riscv/Makefile.isa -@@ -0,0 +1,18 @@ +@@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# ISA string setting @@ -4774,9 +5834,6 @@ index 000000000000..322a83958b96 +else +riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei +endif -+ -+# Check if the toolchain supports Zihintpause extension -+riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile index f60a280abb15..513e9f338637 100644 --- a/arch/riscv/boot/dts/Makefile @@ -23685,22 +24742,266 @@ index 000000000000..10d67d6ff963 +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_V4L_MEM2MEM_DRIVERS=y +diff --git a/arch/riscv/errata/andes/errata.c b/arch/riscv/errata/andes/errata.c +index 17a904869724..fc1a34faa5f3 100644 +--- a/arch/riscv/errata/andes/errata.c ++++ b/arch/riscv/errata/andes/errata.c +@@ -17,10 +17,11 @@ + #include + #include + #include ++#include + +-#define ANDESTECH_AX45MP_MARCHID 0x8000000000008a45UL +-#define ANDESTECH_AX45MP_MIMPID 0x500UL +-#define ANDESTECH_SBI_EXT_ANDES 0x0900031E ++#define ANDES_AX45MP_MARCHID 0x8000000000008a45UL ++#define ANDES_AX45MP_MIMPID 0x500UL ++#define ANDES_SBI_EXT_ANDES 0x0900031E + + #define ANDES_SBI_EXT_IOCP_SW_WORKAROUND 1 + +@@ -32,7 +33,7 @@ static long ax45mp_iocp_sw_workaround(void) + * ANDES_SBI_EXT_IOCP_SW_WORKAROUND SBI EXT checks if the IOCP is missing and + * cache is controllable only then CMO will be applied to the platform. + */ +- ret = sbi_ecall(ANDESTECH_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND, ++ ret = sbi_ecall(ANDES_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND, + 0, 0, 0, 0, 0, 0); + + return ret.error ? 
0 : ret.value; +@@ -50,7 +51,7 @@ static void errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigne + + done = true; + +- if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID) ++ if (arch_id != ANDES_AX45MP_MARCHID || impid != ANDES_AX45MP_MIMPID) + return; + + if (!ax45mp_iocp_sw_workaround()) +@@ -65,6 +66,8 @@ void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct al + unsigned long archid, unsigned long impid, + unsigned int stage) + { ++ BUILD_BUG_ON(ERRATA_ANDES_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ + if (stage == RISCV_ALTERNATIVES_BOOT) + errata_probe_iocp(stage, archid, impid); + +diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c +index 3d9a32d791f7..b68b023115c2 100644 +--- a/arch/riscv/errata/sifive/errata.c ++++ b/arch/riscv/errata/sifive/errata.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + struct errata_info_t { + char name[32]; +@@ -91,6 +92,8 @@ void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + u32 cpu_apply_errata = 0; + u32 tmp; + ++ BUILD_BUG_ON(ERRATA_SIFIVE_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ + if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) + return; + +diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c +index 0554ed4bf087..928d8f7fe288 100644 +--- a/arch/riscv/errata/thead/errata.c ++++ b/arch/riscv/errata/thead/errata.c +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + + static bool errata_probe_pbmt(unsigned int stage, + unsigned long arch_id, unsigned long impid) +@@ -95,6 +96,8 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + u32 tmp; + void *oldptr, *altptr; + ++ BUILD_BUG_ON(ERRATA_THEAD_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ + for (alt = begin; alt < end; alt++) { + if (alt->vendor_id != THEAD_VENDOR_ID) + continue; +diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h +index d5604d2073bc..cff06b058fce 100644 +--- a/arch/riscv/include/asm/acpi.h ++++ b/arch/riscv/include/asm/acpi.h +@@ -61,11 +61,14 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { } + + void acpi_init_rintc_map(void); + struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu); +-u32 get_acpi_id_for_cpu(int cpu); ++static inline u32 get_acpi_id_for_cpu(int cpu) ++{ ++ return acpi_cpu_get_madt_rintc(cpu)->uid; ++} ++ + int acpi_get_riscv_isa(struct acpi_table_header *table, + unsigned int cpu, const char **isa); + +-static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } + #else + static inline void acpi_init_rintc_map(void) { } + static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu) +@@ -81,4 +84,12 @@ static inline int acpi_get_riscv_isa(struct acpi_table_header *table, + + #endif /* CONFIG_ACPI */ + ++#ifdef CONFIG_ACPI_NUMA ++int acpi_numa_get_nid(unsigned int cpu); ++void acpi_map_cpus_to_nodes(void); ++#else ++static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } ++static inline void acpi_map_cpus_to_nodes(void) { } ++#endif /* CONFIG_ACPI_NUMA */ ++ + #endif /*_ASM_ACPI_H*/ +diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h +index f5dfef6c2153..0e0522e588ca 100644 +--- a/arch/riscv/include/asm/atomic.h ++++ b/arch/riscv/include/asm/atomic.h +@@ -17,7 +17,6 @@ + #endif + + #include +-#include + + #define __atomic_acquire_fence() \ + __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory") +@@ -207,7 
+206,7 @@ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int + " add %[rc], %[p], %[a]\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : [a]"r" (a), [u]"r" (u) +@@ -228,7 +227,7 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, + " add %[rc], %[p], %[a]\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : [a]"r" (a), [u]"r" (u) +@@ -248,7 +247,7 @@ static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v) + " addi %[rc], %[p], 1\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -268,7 +267,7 @@ static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v) + " addi %[rc], %[p], -1\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -288,7 +287,7 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v) + " bltz %[rc], 1f\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -310,7 +309,7 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v) + " addi %[rc], %[p], 1\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -331,7 +330,7 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v) + " addi %[rc], %[p], -1\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -352,7 +351,7 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) + " bltz %[rc], 1f\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h -index 110752594228..2b1f98b7e9bf 100644 +index 110752594228..feebe8e02ae0 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h -@@ -29,12 +29,22 @@ +@@ -11,13 +11,13 @@ + #define _ASM_RISCV_BARRIER_H + + #ifndef __ASSEMBLY__ ++#include ++#include + + #define nop() __asm__ __volatile__ ("nop") + #define __nops(n) ".rept " #n "\nnop\n.endr\n" + #define nops(n) __asm__ __volatile__ (__nops(n)) + +-#define RISCV_FENCE(p, s) \ +- __asm__ __volatile__ ("fence " #p "," #s : : : "memory") + + /* These barriers need to enforce ordering on both devices or memory. 
*/ + #define mb() RISCV_FENCE(iorw,iorw) +@@ -29,21 +29,6 @@ #define __smp_rmb() RISCV_FENCE(r,r) #define __smp_wmb() RISCV_FENCE(w,w) +-#define __smp_store_release(p, v) \ +-do { \ +- compiletime_assert_atomic_type(*p); \ +- RISCV_FENCE(rw,w); \ +- WRITE_ONCE(*p, v); \ +-} while (0) +- +-#define __smp_load_acquire(p) \ +-({ \ +- typeof(*p) ___p1 = READ_ONCE(*p); \ +- compiletime_assert_atomic_type(*p); \ +- RISCV_FENCE(r,rw); \ +- ___p1; \ +-}) +- + /* + * This is a very specific barrier: it's currently only used in two places in + * the kernel, both in the scheduler. See include/linux/spinlock.h for the two +@@ -71,6 +56,45 @@ do { \ + */ + #define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) + +#ifdef CONFIG_ARCH_SOPHGO - #define __smp_store_release(p, v) \ - do { \ - compiletime_assert_atomic_type(*p); \ - RISCV_FENCE(rw,w); \ - WRITE_ONCE(*p, v); \ ++#define __smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ RISCV_FENCE(rw,w); \ ++ WRITE_ONCE(*p, v); \ + RISCV_FENCE(w,rw); \ - } while (0) ++} while (0) +#else +#define __smp_store_release(p, v) \ +do { \ @@ -23709,28 +25010,604 @@ index 110752594228..2b1f98b7e9bf 100644 + WRITE_ONCE(*p, v); \ +} while (0) +#endif - - #define __smp_load_acquire(p) \ - ({ \ -@@ -44,6 +54,18 @@ do { \ - ___p1; \ - }) - -+#define smp_cond_load_acquire(ptr, cond_expr) ({ \ ++ ++#define __smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = READ_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ RISCV_FENCE(r, rw); \ ++ ___p1; \ ++}) ++ ++#ifdef CONFIG_RISCV_ISA_ZAWRS ++#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ + typeof(ptr) __PTR = (ptr); \ + __unqual_scalar_typeof(*ptr) VAL; \ + for (;;) { \ -+ VAL = __smp_load_acquire(__PTR); \ ++ VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ -+ cpu_relax(); \ ++ __cmpwait_relaxed(ptr, VAL); \ + } \ + (typeof(*ptr))VAL; \ +}) ++#endif + - /* - * This is a very specific barrier: it's currently only used in two places in - * the kernel, both in the scheduler. 
See include/linux/spinlock.h for the two + #include + + #endif /* __ASSEMBLY__ */ +diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h +index 2f4726d3cfcc..1f4cd12e4664 100644 +--- a/arch/riscv/include/asm/cmpxchg.h ++++ b/arch/riscv/include/asm/cmpxchg.h +@@ -8,143 +8,87 @@ + + #include + +-#include ++#include + #include ++#include ++#include ++#include + +-#define __xchg_relaxed(ptr, new, size) \ ++#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \ + ({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- " amoswap.w %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- " amoswap.d %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ ++ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ ++ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ ++ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ ++ << __s; \ ++ ulong __newx = (ulong)(n) << __s; \ ++ ulong __retx; \ ++ ulong __rc; \ ++ \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ "0: lr.w %0, %2\n" \ ++ " and %1, %0, %z4\n" \ ++ " or %1, %1, %z3\n" \ ++ " sc.w" sc_sfx " %1, %1, %2\n" \ ++ " bnez %1, 0b\n" \ ++ append \ ++ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ ++ : "rJ" (__newx), "rJ" (~__mask) \ ++ : "memory"); \ ++ \ ++ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ + }) + +-#define arch_xchg_relaxed(ptr, x) \ ++#define __arch_xchg(sfx, prepend, append, r, p, n) \ + ({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __xchg_relaxed((ptr), \ +- _x_, sizeof(*(ptr))); \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ " amoswap" sfx " %0, %2, %1\n" \ ++ append \ ++ : "=r" (r), "+A" (*(p)) \ ++ : "r" (n) \ ++ : "memory"); \ + }) + +-#define __xchg_acquire(ptr, new, size) \ ++#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend, \ ++ sc_append, swap_append) \ + ({ \ + __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ ++ __typeof__(*(__ptr)) __new = (new); \ ++ __typeof__(*(__ptr)) __ret; \ ++ \ ++ switch (sizeof(*__ptr)) { \ ++ case 1: \ ++ case 2: \ ++ __arch_xchg_masked(sc_sfx, prepend, sc_append, \ ++ __ret, __ptr, __new); \ ++ break; \ + case 4: \ +- __asm__ __volatile__ ( \ +- " amoswap.w %0, %2, %1\n" \ +- RISCV_ACQUIRE_BARRIER \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ ++ __arch_xchg(".w" swap_sfx, prepend, swap_append, \ ++ __ret, __ptr, __new); \ + break; \ + case 8: \ +- __asm__ __volatile__ ( \ +- " amoswap.d %0, %2, %1\n" \ +- RISCV_ACQUIRE_BARRIER \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ ++ __arch_xchg(".d" swap_sfx, prepend, swap_append, \ ++ __ret, __ptr, __new); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +- __ret; \ ++ (__typeof__(*(__ptr)))__ret; \ + }) + +-#define arch_xchg_acquire(ptr, x) \ +-({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __xchg_acquire((ptr), \ +- _x_, sizeof(*(ptr))); \ +-}) ++#define arch_xchg_relaxed(ptr, x) \ ++ _arch_xchg(ptr, x, "", "", "", "", "") + +-#define __xchg_release(ptr, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ 
( \ +- RISCV_RELEASE_BARRIER \ +- " amoswap.w %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- " amoswap.d %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) ++#define arch_xchg_acquire(ptr, x) \ ++ _arch_xchg(ptr, x, "", "", "", \ ++ RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER) + + #define arch_xchg_release(ptr, x) \ +-({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __xchg_release((ptr), \ +- _x_, sizeof(*(ptr))); \ +-}) +- +-#define __arch_xchg(ptr, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- " amoswap.w.aqrl %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- " amoswap.d.aqrl %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) ++ _arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "") + + #define arch_xchg(ptr, x) \ +-({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \ +-}) ++ _arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "") + + #define xchg32(ptr, x) \ + ({ \ +@@ -163,190 +107,128 @@ + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + */ +-#define __cmpxchg_relaxed(ptr, old, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) + +-#define arch_cmpxchg_relaxed(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ = (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ ++#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \ ++({ \ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ ++ IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ ++ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA) && \ ++ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ ++ r = o; \ ++ \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ " amocas" cas_sfx " %0, %z2, %1\n" \ ++ append \ ++ : "+&r" (r), "+A" (*(p)) \ ++ : "rJ" (n) \ ++ : "memory"); \ ++ } else { \ ++ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ ++ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ ++ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ ++ << __s; \ ++ ulong __newx = (ulong)(n) << __s; \ ++ ulong __oldx = (ulong)(o) << __s; \ ++ ulong __retx; \ ++ ulong __rc; \ ++ \ ++ __asm__ __volatile__ ( \ 
++ prepend \ ++ "0: lr.w %0, %2\n" \ ++ " and %1, %0, %z5\n" \ ++ " bne %1, %z3, 1f\n" \ ++ " and %1, %0, %z6\n" \ ++ " or %1, %1, %z4\n" \ ++ " sc.w" sc_sfx " %1, %1, %2\n" \ ++ " bnez %1, 0b\n" \ ++ append \ ++ "1:\n" \ ++ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ ++ : "rJ" ((long)__oldx), "rJ" (__newx), \ ++ "rJ" (__mask), "rJ" (~__mask) \ ++ : "memory"); \ ++ \ ++ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ ++ } \ + }) + +-#define __cmpxchg_acquire(ptr, old, new, size) \ ++#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \ + ({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ +- case 4: \ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ ++ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ ++ r = o; \ ++ \ + __asm__ __volatile__ ( \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- RISCV_ACQUIRE_BARRIER \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ ++ prepend \ ++ " amocas" sc_cas_sfx " %0, %z2, %1\n" \ ++ append \ ++ : "+&r" (r), "+A" (*(p)) \ ++ : "rJ" (n) \ + : "memory"); \ +- break; \ +- case 8: \ ++ } else { \ ++ register unsigned int __rc; \ ++ \ + __asm__ __volatile__ ( \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d %1, %z4, %2\n" \ ++ prepend \ ++ "0: lr" lr_sfx " %0, %2\n" \ ++ " bne %0, %z3, 1f\n" \ ++ " sc" sc_cas_sfx " %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ +- RISCV_ACQUIRE_BARRIER \ ++ append \ + "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ ++ : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ ++ : "rJ" (co o), "rJ" (n) \ + : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ + } \ +- __ret; \ + }) + +-#define arch_cmpxchg_acquire(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ = (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ +-}) +- +-#define __cmpxchg_release(ptr, old, new, size) \ ++#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append) \ + ({ \ + __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ +- : "memory"); \ ++ __typeof__(*(__ptr)) __old = (old); \ ++ __typeof__(*(__ptr)) __new = (new); \ ++ __typeof__(*(__ptr)) __ret; \ ++ \ ++ switch (sizeof(*__ptr)) { \ ++ case 1: \ ++ __arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \ ++ prepend, append, \ ++ __ret, __ptr, __old, __new); \ + break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ +- : "memory"); \ ++ case 2: \ ++ __arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \ ++ prepend, append, \ ++ __ret, __ptr, __old, __new); \ + break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) +- +-#define arch_cmpxchg_release(ptr, o, n) \ +-({ \ +- 
__typeof__(*(ptr)) _o_ = (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg_release((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ +-}) +- +-#define __cmpxchg(ptr, old, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ + case 4: \ +- __asm__ __volatile__ ( \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w.rl %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- " fence rw, rw\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ +- : "memory"); \ ++ __arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append, \ ++ __ret, __ptr, (long), __old, __new); \ + break; \ + case 8: \ +- __asm__ __volatile__ ( \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d.rl %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- " fence rw, rw\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ +- : "memory"); \ ++ __arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append, \ ++ __ret, __ptr, /**/, __old, __new); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +- __ret; \ ++ (__typeof__(*(__ptr)))__ret; \ + }) + ++#define arch_cmpxchg_relaxed(ptr, o, n) \ ++ _arch_cmpxchg((ptr), (o), (n), "", "", "") ++ ++#define arch_cmpxchg_acquire(ptr, o, n) \ ++ _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER) ++ ++#define arch_cmpxchg_release(ptr, o, n) \ ++ _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "") ++ + #define arch_cmpxchg(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ = (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ +-}) ++ _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n") + + #define arch_cmpxchg_local(ptr, o, n) \ +- (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) ++ arch_cmpxchg_relaxed((ptr), (o), (n)) + + #define arch_cmpxchg64(ptr, o, n) \ + ({ \ +@@ -360,4 +242,82 @@ + arch_cmpxchg_relaxed((ptr), (o), (n)); \ + }) + ++#define arch_cmpxchg64_relaxed(ptr, o, n) \ ++({ \ ++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ ++ arch_cmpxchg_relaxed((ptr), (o), (n)); \ ++}) ++ ++#define arch_cmpxchg64_acquire(ptr, o, n) \ ++({ \ ++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ ++ arch_cmpxchg_acquire((ptr), (o), (n)); \ ++}) ++ ++#define arch_cmpxchg64_release(ptr, o, n) \ ++({ \ ++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ ++ arch_cmpxchg_release((ptr), (o), (n)); \ ++}) ++ ++#ifdef CONFIG_RISCV_ISA_ZAWRS ++/* ++ * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to ++ * @val we expect it to still terminate within a "reasonable" amount of time ++ * for an implementation-specific other reason, a pending, locally-enabled ++ * interrupt, or because it has been configured to raise an illegal ++ * instruction exception. ++ */ ++static __always_inline void __cmpwait(volatile void *ptr, ++ unsigned long val, ++ int size) ++{ ++ unsigned long tmp; ++ ++ asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop", ++ 0, RISCV_ISA_EXT_ZAWRS, 1) ++ : : : : no_zawrs); ++ ++ switch (size) { ++ case 1: ++ fallthrough; ++ case 2: ++ /* RISC-V doesn't have lr instructions on byte and half-word. 
*/ ++ goto no_zawrs; ++ case 4: ++ asm volatile( ++ " lr.w %0, %1\n" ++ " xor %0, %0, %2\n" ++ " bnez %0, 1f\n" ++ ZAWRS_WRS_NTO "\n" ++ "1:" ++ : "=&r" (tmp), "+A" (*(u32 *)ptr) ++ : "r" (val)); ++ break; ++#if __riscv_xlen == 64 ++ case 8: ++ asm volatile( ++ " lr.d %0, %1\n" ++ " xor %0, %0, %2\n" ++ " bnez %0, 1f\n" ++ ZAWRS_WRS_NTO "\n" ++ "1:" ++ : "=&r" (tmp), "+A" (*(u64 *)ptr) ++ : "r" (val)); ++ break; ++#endif ++ default: ++ BUILD_BUG(); ++ } ++ ++ return; ++ ++no_zawrs: ++ asm volatile(RISCV_PAUSE : : : "memory"); ++} ++ ++#define __cmpwait_relaxed(ptr, val) \ ++ __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr))) ++#endif ++ + #endif /* _ASM_RISCV_CMPXCHG_H */ diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h index 2ac955b51148..6b79287baecc 100644 --- a/arch/riscv/include/asm/compat.h @@ -23743,10 +25620,229 @@ index 2ac955b51148..6b79287baecc 100644 #include static inline int is_compat_task(void) +diff --git a/arch/riscv/include/asm/cpufeature-macros.h b/arch/riscv/include/asm/cpufeature-macros.h +new file mode 100644 +index 000000000000..a8103edbf51f +--- /dev/null ++++ b/arch/riscv/include/asm/cpufeature-macros.h +@@ -0,0 +1,66 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright 2022-2024 Rivos, Inc ++ */ ++ ++#ifndef _ASM_CPUFEATURE_MACROS_H ++#define _ASM_CPUFEATURE_MACROS_H ++ ++#include ++#include ++ ++#define STANDARD_EXT 0 ++ ++bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit); ++#define riscv_isa_extension_available(isa_bitmap, ext) \ ++ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) ++ ++static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1) ++ : ++ : [vendor] "i" (vendor), [ext] "i" (ext) ++ : ++ : l_no); ++ ++ return true; ++l_no: ++ return false; ++} ++ ++static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1) ++ : ++ : [vendor] "i" (vendor), [ext] "i" (ext) ++ : ++ : l_yes); ++ ++ return false; ++l_yes: ++ return true; ++} ++ ++static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_unlikely(STANDARD_EXT, ext); ++ ++ return __riscv_isa_extension_available(NULL, ext); ++} ++ ++static __always_inline bool riscv_has_extension_likely(const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_likely(STANDARD_EXT, ext); ++ ++ return __riscv_isa_extension_available(NULL, ext); ++} ++ ++#endif /* _ASM_CPUFEATURE_MACROS_H */ +diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h +index d0345bd659c9..c8346dc0bed8 100644 +--- a/arch/riscv/include/asm/cpufeature.h ++++ b/arch/riscv/include/asm/cpufeature.h +@@ -7,7 +7,12 @@ + #define _ASM_CPUFEATURE_H + + #include ++#include ++#include ++#include ++#include + #include ++#include + + /* + * These are probed via a device_initcall(), via either the SBI or directly +@@ -31,5 +36,69 @@ DECLARE_PER_CPU(long, misaligned_access_speed); + extern struct riscv_isainfo hart_isa[NR_CPUS]; + + void check_unaligned_access(int 
cpu); ++void __init riscv_user_isa_enable(void); ++ ++#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \ ++ .name = #_name, \ ++ .property = #_name, \ ++ .id = _id, \ ++ .subset_ext_ids = _subset_exts, \ ++ .subset_ext_size = _subset_exts_size, \ ++ .validate = _validate \ ++} ++ ++#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, NULL) ++ ++#define __RISCV_ISA_EXT_DATA_VALIDATE(_name, _id, _validate) \ ++ _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, _validate) ++ ++/* Used to declare pure "lasso" extension (Zk for instance) */ ++#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \ ++ _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \ ++ ARRAY_SIZE(_bundled_exts), NULL) ++ ++/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */ ++#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \ ++ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), NULL) ++#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ ++ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate) ++ ++unsigned long riscv_get_elf_hwcap(void); ++ ++struct riscv_isa_ext_data { ++ const unsigned int id; ++ const char *name; ++ const char *property; ++ const unsigned int *subset_ext_ids; ++ const unsigned int subset_ext_size; ++ int (*validate)(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap); ++}; ++ ++extern const struct riscv_isa_ext_data riscv_isa_ext[]; ++extern const size_t riscv_isa_ext_count; ++extern bool riscv_isa_fallback; ++ ++unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); ++static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_likely(STANDARD_EXT, ext)) ++ return true; ++ ++ return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); ++} ++ ++static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_unlikely(STANDARD_EXT, ext)) ++ return true; ++ ++ return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); ++} + + #endif +diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h +index 777cb8299551..f5e928b3bff4 100644 +--- a/arch/riscv/include/asm/csr.h ++++ b/arch/riscv/include/asm/csr.h +@@ -194,6 +194,7 @@ + /* xENVCFG flags */ + #define ENVCFG_STCE (_AC(1, ULL) << 63) + #define ENVCFG_PBMTE (_AC(1, ULL) << 62) ++#define ENVCFG_ADUE (_AC(1, ULL) << 61) + #define ENVCFG_CBZE (_AC(1, UL) << 7) + #define ENVCFG_CBCFE (_AC(1, UL) << 6) + #define ENVCFG_CBIE_SHIFT 4 +@@ -275,6 +276,7 @@ + #define CSR_SIE 0x104 + #define CSR_STVEC 0x105 + #define CSR_SCOUNTEREN 0x106 ++#define CSR_SENVCFG 0x10a + #define CSR_SSCRATCH 0x140 + #define CSR_SEPC 0x141 + #define CSR_SCAUSE 0x142 +@@ -397,6 +399,7 @@ + # define CSR_STATUS CSR_MSTATUS + # define CSR_IE CSR_MIE + # define CSR_TVEC CSR_MTVEC ++# define CSR_ENVCFG CSR_MENVCFG + # define CSR_SCRATCH CSR_MSCRATCH + # define CSR_EPC CSR_MEPC + # define CSR_CAUSE CSR_MCAUSE +@@ -421,6 +424,7 @@ + # define CSR_STATUS CSR_SSTATUS + # define CSR_IE CSR_SIE + # define CSR_TVEC CSR_STVEC ++# define CSR_ENVCFG CSR_SENVCFG + # define CSR_SCRATCH 
CSR_SSCRATCH + # define CSR_EPC CSR_SEPC + # define CSR_CAUSE CSR_SCAUSE +diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h +index b3b2dfbdf945..06c236bfab53 100644 +--- a/arch/riscv/include/asm/elf.h ++++ b/arch/riscv/include/asm/elf.h +@@ -14,7 +14,7 @@ + #include + #include + #include +-#include ++#include + + /* + * These are used to set parameters in the core dumps. diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h -index d3f3c237adad..c7fd111e3a50 100644 +index d3f3c237adad..5469341b60ce 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h +@@ -12,8 +12,8 @@ + #include + + #ifdef CONFIG_ERRATA_ANDES +-#define ERRATA_ANDESTECH_NO_IOCP 0 +-#define ERRATA_ANDESTECH_NUMBER 1 ++#define ERRATA_ANDES_NO_IOCP 0 ++#define ERRATA_ANDES_NUMBER 1 + #endif + + #ifdef CONFIG_ERRATA_SIFIVE @@ -128,9 +128,12 @@ asm volatile(ALTERNATIVE( \ * 0000000 11001 00000 000 00000 0001011 */ @@ -23761,7 +25857,7 @@ index d3f3c237adad..c7fd111e3a50 100644 #define ALT_CMO_OP(_op, _start, _size, _cachesize) \ asm volatile(ALTERNATIVE_2( \ -@@ -157,6 +160,33 @@ asm volatile(ALTERNATIVE_2( \ +@@ -157,18 +160,36 @@ asm volatile(ALTERNATIVE_2( \ "r"((unsigned long)(_start) + (_size)) \ : "a0") @@ -23795,22 +25891,290 @@ index d3f3c237adad..c7fd111e3a50 100644 #define THEAD_C9XX_RV_IRQ_PMU 17 #define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5 +-#define ALT_SBI_PMU_OVERFLOW(__ovl) \ +-asm volatile(ALTERNATIVE( \ +- "csrr %0, " __stringify(CSR_SSCOUNTOVF), \ +- "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \ +- THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \ +- CONFIG_ERRATA_THEAD_PMU) \ +- : "=r" (__ovl) : \ +- : "memory") +- + #endif /* __ASSEMBLY__ */ + + #endif +diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h +index 2b443a3a487f..6bcd80325dfc 100644 +--- a/arch/riscv/include/asm/fence.h ++++ b/arch/riscv/include/asm/fence.h +@@ -1,12 +1,18 @@ + #ifndef _ASM_RISCV_FENCE_H + #define _ASM_RISCV_FENCE_H + ++#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n" ++#define RISCV_FENCE(p, s) \ ++ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); }) ++ + #ifdef CONFIG_SMP +-#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n" +-#define RISCV_RELEASE_BARRIER "\tfence rw, w\n" ++#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw) ++#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w) ++#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw) + #else + #define RISCV_ACQUIRE_BARRIER + #define RISCV_RELEASE_BARRIER ++#define RISCV_FULL_BARRIER + #endif + + #endif /* _ASM_RISCV_FENCE_H */ diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h -index f4157034efa9..5e8a89b38091 100644 +index f4157034efa9..869da082252a 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h -@@ -58,6 +58,7 @@ +@@ -8,25 +8,16 @@ + #ifndef _ASM_RISCV_HWCAP_H + #define _ASM_RISCV_HWCAP_H + +-#include +-#include +-#include + #include + + #define RISCV_ISA_EXT_a ('a' - 'a') +-#define RISCV_ISA_EXT_b ('b' - 'a') + #define RISCV_ISA_EXT_c ('c' - 'a') + #define RISCV_ISA_EXT_d ('d' - 'a') + #define RISCV_ISA_EXT_f ('f' - 'a') + #define RISCV_ISA_EXT_h ('h' - 'a') + #define RISCV_ISA_EXT_i ('i' - 'a') +-#define RISCV_ISA_EXT_j ('j' - 'a') +-#define RISCV_ISA_EXT_k ('k' - 'a') + #define RISCV_ISA_EXT_m ('m' - 'a') +-#define RISCV_ISA_EXT_p ('p' - 'a') + #define RISCV_ISA_EXT_q ('q' - 'a') +-#define RISCV_ISA_EXT_s ('s' - 'a') +-#define RISCV_ISA_EXT_u ('u' - 'a') + #define RISCV_ISA_EXT_v ('v' 
- 'a') + + /* +@@ -58,85 +49,69 @@ #define RISCV_ISA_EXT_ZICSR 40 #define RISCV_ISA_EXT_ZIFENCEI 41 #define RISCV_ISA_EXT_ZIHPM 42 +- +-#define RISCV_ISA_EXT_MAX 64 +#define RISCV_ISA_EXT_SMSTATEEN 43 ++#define RISCV_ISA_EXT_ZICOND 44 ++#define RISCV_ISA_EXT_ZBC 45 ++#define RISCV_ISA_EXT_ZBKB 46 ++#define RISCV_ISA_EXT_ZBKC 47 ++#define RISCV_ISA_EXT_ZBKX 48 ++#define RISCV_ISA_EXT_ZKND 49 ++#define RISCV_ISA_EXT_ZKNE 50 ++#define RISCV_ISA_EXT_ZKNH 51 ++#define RISCV_ISA_EXT_ZKR 52 ++#define RISCV_ISA_EXT_ZKSED 53 ++#define RISCV_ISA_EXT_ZKSH 54 ++#define RISCV_ISA_EXT_ZKT 55 ++#define RISCV_ISA_EXT_ZVBB 56 ++#define RISCV_ISA_EXT_ZVBC 57 ++#define RISCV_ISA_EXT_ZVKB 58 ++#define RISCV_ISA_EXT_ZVKG 59 ++#define RISCV_ISA_EXT_ZVKNED 60 ++#define RISCV_ISA_EXT_ZVKNHA 61 ++#define RISCV_ISA_EXT_ZVKNHB 62 ++#define RISCV_ISA_EXT_ZVKSED 63 ++#define RISCV_ISA_EXT_ZVKSH 64 ++#define RISCV_ISA_EXT_ZVKT 65 ++#define RISCV_ISA_EXT_ZFH 66 ++#define RISCV_ISA_EXT_ZFHMIN 67 ++#define RISCV_ISA_EXT_ZIHINTNTL 68 ++#define RISCV_ISA_EXT_ZVFH 69 ++#define RISCV_ISA_EXT_ZVFHMIN 70 ++#define RISCV_ISA_EXT_ZFA 71 ++#define RISCV_ISA_EXT_ZTSO 72 ++#define RISCV_ISA_EXT_ZACAS 73 ++#define RISCV_ISA_EXT_ZVE32X 74 ++#define RISCV_ISA_EXT_ZVE32F 75 ++#define RISCV_ISA_EXT_ZVE64X 76 ++#define RISCV_ISA_EXT_ZVE64F 77 ++#define RISCV_ISA_EXT_ZVE64D 78 ++#define RISCV_ISA_EXT_ZIMOP 79 ++#define RISCV_ISA_EXT_ZCA 80 ++#define RISCV_ISA_EXT_ZCB 81 ++#define RISCV_ISA_EXT_ZCD 82 ++#define RISCV_ISA_EXT_ZCF 83 ++#define RISCV_ISA_EXT_ZCMOP 84 ++#define RISCV_ISA_EXT_ZAWRS 85 ++#define RISCV_ISA_EXT_SVVPTC 86 ++#define RISCV_ISA_EXT_SMMPM 87 ++#define RISCV_ISA_EXT_SMNPM 88 ++#define RISCV_ISA_EXT_SSNPM 89 ++#define RISCV_ISA_EXT_ZABHA 90 ++#define RISCV_ISA_EXT_ZICCRSE 91 ++#define RISCV_ISA_EXT_SVADE 92 ++#define RISCV_ISA_EXT_SVADU 93 ++ ++#define RISCV_ISA_EXT_XLINUXENVCFG 127 ++ ++#define RISCV_ISA_EXT_MAX 128 ++#define RISCV_ISA_EXT_INVALID U32_MAX + + #ifdef CONFIG_RISCV_M_MODE + #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA ++#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SMNPM + #else + #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA +-#endif +- +-#ifndef __ASSEMBLY__ +- +-#include +- +-unsigned long riscv_get_elf_hwcap(void); +- +-struct riscv_isa_ext_data { +- const unsigned int id; +- const char *name; +- const char *property; +-}; +- +-extern const struct riscv_isa_ext_data riscv_isa_ext[]; +-extern const size_t riscv_isa_ext_count; +-extern bool riscv_isa_fallback; +- +-unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); +- +-#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext) +- +-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); +-#define riscv_isa_extension_available(isa_bitmap, ext) \ +- __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) +- +-static __always_inline bool +-riscv_has_extension_likely(const unsigned long ext) +-{ +- compiletime_assert(ext < RISCV_ISA_EXT_MAX, +- "ext must be < RISCV_ISA_EXT_MAX"); +- +- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { +- asm goto( +- ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1) +- : +- : [ext] "i" (ext) +- : +- : l_no); +- } else { +- if (!__riscv_isa_extension_available(NULL, ext)) +- goto l_no; +- } +- +- return true; +-l_no: +- return false; +-} +- +-static __always_inline bool +-riscv_has_extension_unlikely(const unsigned long ext) +-{ +- compiletime_assert(ext < RISCV_ISA_EXT_MAX, +- "ext must be < RISCV_ISA_EXT_MAX"); +- +- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { +- asm 
goto( +- ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1) +- : +- : [ext] "i" (ext) +- : +- : l_yes); +- } else { +- if (__riscv_isa_extension_available(NULL, ext)) +- goto l_yes; +- } +- +- return false; +-l_yes: +- return true; +-} +- ++#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SSNPM + #endif + + #endif /* _ASM_RISCV_HWCAP_H */ +diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h +index 7cad513538d8..ef01c182af2b 100644 +--- a/arch/riscv/include/asm/hwprobe.h ++++ b/arch/riscv/include/asm/hwprobe.h +@@ -8,11 +8,35 @@ + + #include + +-#define RISCV_HWPROBE_MAX_KEY 5 ++#define RISCV_HWPROBE_MAX_KEY 8 + + static inline bool riscv_hwprobe_key_is_valid(__s64 key) + { + return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY; + } - #define RISCV_ISA_EXT_MAX 64 ++static inline bool hwprobe_key_is_bitmask(__s64 key) ++{ ++ switch (key) { ++ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: ++ case RISCV_HWPROBE_KEY_IMA_EXT_0: ++ case RISCV_HWPROBE_KEY_CPUPERF_0: ++ return true; ++ } ++ ++ return false; ++} ++ ++static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair, ++ struct riscv_hwprobe *other_pair) ++{ ++ if (pair->key != other_pair->key) ++ return false; ++ ++ if (hwprobe_key_is_bitmask(pair->key)) ++ return (pair->value & other_pair->value) == other_pair->value; ++ ++ return pair->value == other_pair->value; ++} ++ + #endif +diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h +index 6960beb75f32..cbd51bfdf527 100644 +--- a/arch/riscv/include/asm/insn-def.h ++++ b/arch/riscv/include/asm/insn-def.h +@@ -196,4 +196,8 @@ + INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ + RS1(base), SIMM12(4)) ++#define RISCV_PAUSE ".4byte 0x100000f" ++#define ZAWRS_WRS_NTO ".4byte 0x00d00073" ++#define ZAWRS_WRS_STO ".4byte 0x01d00073" ++ + #endif /* __ASM_INSN_DEF_H */ diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h -index 42497d487a17..bbdc3c7ed6ca 100644 +index 42497d487a17..8118363494e0 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h +@@ -47,10 +47,10 @@ + * sufficient to ensure this works sanely on controllers that support I/O + * writes. + */ +-#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory"); +-#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory"); +-#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory"); +-#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory"); ++#define __io_pbr() RISCV_FENCE(io, i) ++#define __io_par(v) RISCV_FENCE(i, ior) ++#define __io_pbw() RISCV_FENCE(iow, o) ++#define __io_paw() RISCV_FENCE(o, io) + + /* + * Accesses from a single hart to a single I/O address must be ordered. 
This @@ -140,4 +140,8 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) #endif @@ -23820,6 +26184,74 @@ index 42497d487a17..bbdc3c7ed6ca 100644 + ioremap_prot((addr), (size), _PAGE_IOREMAP_WC) + #endif /* _ASM_RISCV_IO_H */ +diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h +index 8e10a94430a2..7e9a84a005ed 100644 +--- a/arch/riscv/include/asm/irq.h ++++ b/arch/riscv/include/asm/irq.h +@@ -12,8 +12,63 @@ + + #include + ++#define INVALID_CONTEXT UINT_MAX ++ + void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void)); + + struct fwnode_handle *riscv_get_intc_hwnode(void); + ++#ifdef CONFIG_ACPI ++ ++enum riscv_irqchip_type { ++ ACPI_RISCV_IRQCHIP_INTC = 0x00, ++ ACPI_RISCV_IRQCHIP_IMSIC = 0x01, ++ ACPI_RISCV_IRQCHIP_PLIC = 0x02, ++ ACPI_RISCV_IRQCHIP_APLIC = 0x03, ++}; ++ ++int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, ++ u32 *id, u32 *nr_irqs, u32 *nr_idcs); ++struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi); ++unsigned long acpi_rintc_index_to_hartid(u32 index); ++unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx); ++unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id); ++unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx); ++int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res); ++ ++#else ++static inline int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, ++ u32 *id, u32 *nr_irqs, u32 *nr_idcs) ++{ ++ return 0; ++} ++ ++static inline unsigned long acpi_rintc_index_to_hartid(u32 index) ++{ ++ return INVALID_HARTID; ++} ++ ++static inline unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, ++ unsigned int ctxt_idx) ++{ ++ return INVALID_HARTID; ++} ++ ++static inline unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id) ++{ ++ return INVALID_CONTEXT; ++} ++ ++static inline unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) ++{ ++ return INVALID_CONTEXT; ++} ++ ++static inline int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) ++{ ++ return 0; ++} ++ ++#endif /* CONFIG_ACPI */ ++ + #endif /* _ASM_RISCV_IRQ_H */ diff --git a/arch/riscv/include/asm/kvm_aia_aplic.h b/arch/riscv/include/asm/kvm_aia_aplic.h deleted file mode 100644 index 6dd1a4809ec1..000000000000 @@ -23958,6 +26390,42 @@ index 6c016ebb5020..47b240d0d596 100644 * Matches a full barrier in the proximity of the membarrier * system call entry. */ +diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h +index 4c58ee7f95ec..06cadfd7a237 100644 +--- a/arch/riscv/include/asm/mmio.h ++++ b/arch/riscv/include/asm/mmio.h +@@ -12,6 +12,7 @@ + #define _ASM_RISCV_MMIO_H + + #include ++#include + #include + + /* Generic IO read/write. These perform native-endian accesses. */ +@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) + * doesn't define any ordering between the memory space and the I/O space. 
+ */ + #define __io_br() do {} while (0) +-#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); }) +-#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); }) ++#define __io_ar(v) RISCV_FENCE(i, ir) ++#define __io_bw() RISCV_FENCE(w, o) + #define __io_aw() mmiowb_set_pending() + + #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) +diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h +index 0b2333e71fdc..52ce4a399d9b 100644 +--- a/arch/riscv/include/asm/mmiowb.h ++++ b/arch/riscv/include/asm/mmiowb.h +@@ -7,7 +7,7 @@ + * "o,w" is sufficient to ensure that all writes to the device have completed + * before the write to the spinlock is allowed to commit. + */ +-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory"); ++#define mmiowb() RISCV_FENCE(o, w) + + #include + #include diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index d169a4f41a2e..deaf971253a2 100644 --- a/arch/riscv/include/asm/pgalloc.h @@ -24068,10 +26536,18 @@ index 3272ca7a5270..b99bd66107a6 100644 static inline u64 riscv_page_mtmask(void) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h -index e58315cedfd3..d094015802ef 100644 +index e58315cedfd3..e93155c2c200 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h -@@ -205,7 +205,8 @@ extern struct pt_alloc_ops pt_ops __initdata; +@@ -117,6 +117,7 @@ + #include + #include + #include ++#include + + #define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT) + +@@ -205,7 +206,8 @@ extern struct pt_alloc_ops pt_ops __initdata; #define PAGE_TABLE __pgprot(_PAGE_TABLE) @@ -24081,7 +26557,25 @@ index e58315cedfd3..d094015802ef 100644 #define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP) extern pgd_t swapper_pg_dir[]; -@@ -663,6 +664,12 @@ static inline int pmd_write(pmd_t pmd) +@@ -620,6 +622,17 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) + return __pgprot(prot); + } + ++/* ++ * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By ++ * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in ++ * DT. ++ */ ++#define arch_has_hw_pte_young arch_has_hw_pte_young ++static inline bool arch_has_hw_pte_young(void) ++{ ++ return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU); ++} ++ + /* + * THP functions + */ +@@ -663,6 +676,12 @@ static inline int pmd_write(pmd_t pmd) return pte_write(pmd_pte(pmd)); } @@ -24094,6 +26588,23 @@ index e58315cedfd3..d094015802ef 100644 static inline int pmd_dirty(pmd_t pmd) { return pte_dirty(pmd_pte(pmd)); +diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h +index 4f6af8c6cfa0..0de0e2e29a82 100644 +--- a/arch/riscv/include/asm/processor.h ++++ b/arch/riscv/include/asm/processor.h +@@ -57,6 +57,12 @@ + + #define STACK_TOP DEFAULT_MAP_WINDOW + ++#ifdef CONFIG_MMU ++#define user_max_virt_addr() arch_get_mmap_end(ULONG_MAX, 0, 0) ++#else ++#define user_max_virt_addr() 0 ++#endif /* CONFIG_MMU */ ++ + /* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. 
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 3ed853b8a8c8..8d261a317175 100644 --- a/arch/riscv/include/asm/sbi.h @@ -24134,10 +26645,40 @@ index 2f901a410586..87ab782be702 100644 #else #define MAX_PHYSMEM_BITS 32 #endif /* CONFIG_64BIT */ +diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h +index 02f87867389a..4ffb022b097f 100644 +--- a/arch/riscv/include/asm/suspend.h ++++ b/arch/riscv/include/asm/suspend.h +@@ -13,7 +13,7 @@ struct suspend_context { + /* Saved and restored by low-level functions */ + struct pt_regs regs; + /* Saved and restored by high-level functions */ +- unsigned long scratch; ++ unsigned long envcfg; + unsigned long tvec; + unsigned long ie; + #ifdef CONFIG_MMU +@@ -55,4 +55,7 @@ int hibernate_resume_nonboot_cpu_disable(void); + asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp, + unsigned long cpu_resume); + asmlinkage int hibernate_core_restore_code(void); ++bool riscv_sbi_hsm_is_supported(void); ++bool riscv_sbi_suspend_state_is_valid(u32 state); ++int riscv_sbi_hart_suspend(u32 state); + #endif diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h -index a727be723c56..1da3f54d52f0 100644 +index a727be723c56..7508f3ec8063 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h +@@ -9,7 +9,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include @@ -63,6 +63,21 @@ static __always_inline bool has_fpu(void) return riscv_has_extension_likely(RISCV_ISA_EXT_f) || riscv_has_extension_likely(RISCV_ISA_EXT_d); @@ -24224,18 +26765,1334 @@ index 50b63b5c15bd..1f6c38420d8e 100644 #define tlb_flush tlb_flush #include +diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h +index 96b65a5396df..8f383f05a290 100644 +--- a/arch/riscv/include/asm/vdso/processor.h ++++ b/arch/riscv/include/asm/vdso/processor.h +@@ -5,6 +5,7 @@ + #ifndef __ASSEMBLY__ + + #include ++#include + + static inline void cpu_relax(void) + { +@@ -14,16 +15,11 @@ static inline void cpu_relax(void) + __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); + #endif + +-#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE + /* + * Reduce instruction retirement. + * This assumes the PC changes. 
+ */ +- __asm__ __volatile__ ("pause"); +-#else +- /* Encoding of the pause instruction */ +- __asm__ __volatile__ (".4byte 0x100000F"); +-#endif ++ __asm__ __volatile__ (RISCV_PAUSE); + barrier(); + } + +diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h +index c5ee07b3df07..be77ae870829 100644 +--- a/arch/riscv/include/asm/vector.h ++++ b/arch/riscv/include/asm/vector.h +@@ -15,7 +15,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + +@@ -25,7 +25,7 @@ bool riscv_v_first_use_handler(struct pt_regs *regs); + + static __always_inline bool has_vector(void) + { +- return riscv_has_extension_unlikely(RISCV_ISA_EXT_v); ++ return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X); + } + + static inline void __riscv_v_vstate_clean(struct pt_regs *regs) +@@ -79,7 +79,7 @@ static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src + { + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvl x0, %2, %1\n\t" + ".option pop\n\t" + "csrw " __stringify(CSR_VSTART) ", %0\n\t" +@@ -97,7 +97,7 @@ static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to, + __vstate_csr_save(save_to); + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vse8.v v0, (%1)\n\t" + "add %1, %1, %0\n\t" +@@ -119,7 +119,7 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_ + riscv_v_enable(); + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vle8.v v0, (%1)\n\t" + "add %1, %1, %0\n\t" +@@ -141,7 +141,7 @@ static inline void __riscv_v_vstate_discard(void) + riscv_v_enable(); + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vmv.v.i v0, -1\n\t" + "vmv.v.i v8, -1\n\t" +diff --git a/arch/riscv/include/asm/vendor_extensions.h b/arch/riscv/include/asm/vendor_extensions.h +new file mode 100644 +index 000000000000..0517ce38c5be +--- /dev/null ++++ b/arch/riscv/include/asm/vendor_extensions.h +@@ -0,0 +1,103 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright 2024 Rivos, Inc ++ */ ++ ++#ifndef _ASM_VENDOR_EXTENSIONS_H ++#define _ASM_VENDOR_EXTENSIONS_H ++ ++#include ++ ++#include ++ ++/* ++ * The extension keys of each vendor must be strictly less than this value. ++ */ ++#define RISCV_ISA_VENDOR_EXT_MAX 32 ++ ++struct riscv_isavendorinfo { ++ DECLARE_BITMAP(isa, RISCV_ISA_VENDOR_EXT_MAX); ++}; ++ ++struct riscv_isa_vendor_ext_data_list { ++ bool is_initialized; ++ const size_t ext_data_count; ++ const struct riscv_isa_ext_data *ext_data; ++ struct riscv_isavendorinfo per_hart_isa_bitmap[NR_CPUS]; ++ struct riscv_isavendorinfo all_harts_isa_bitmap; ++}; ++ ++extern struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[]; ++ ++extern const size_t riscv_isa_vendor_ext_list_size; ++ ++/* ++ * The alternatives need some way of distinguishing between vendor extensions ++ * and errata. Incrementing all of the vendor extension keys so they are at ++ * least 0x8000 accomplishes that. 
++ */ ++#define RISCV_VENDOR_EXT_ALTERNATIVES_BASE 0x8000 ++ ++#define VENDOR_EXT_ALL_CPUS -1 ++ ++bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit); ++#define riscv_cpu_isa_vendor_extension_available(cpu, vendor, ext) \ ++ __riscv_isa_vendor_extension_available(cpu, vendor, RISCV_ISA_VENDOR_EXT_##ext) ++#define riscv_isa_vendor_extension_available(vendor, ext) \ ++ __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, \ ++ RISCV_ISA_VENDOR_EXT_##ext) ++ ++static __always_inline bool riscv_has_vendor_extension_likely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_likely(vendor, ++ ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ ++ return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext); ++} ++ ++static __always_inline bool riscv_has_vendor_extension_unlikely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_unlikely(vendor, ++ ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ ++ return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext); ++} ++ ++static __always_inline bool riscv_cpu_has_vendor_extension_likely(const unsigned long vendor, ++ int cpu, const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_likely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) ++ return true; ++ ++ return __riscv_isa_vendor_extension_available(cpu, vendor, ext); ++} ++ ++static __always_inline bool riscv_cpu_has_vendor_extension_unlikely(const unsigned long vendor, ++ int cpu, ++ const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_unlikely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) ++ return true; ++ ++ return __riscv_isa_vendor_extension_available(cpu, vendor, ext); ++} ++ ++#endif /* _ASM_VENDOR_EXTENSIONS_H */ +diff --git a/arch/riscv/include/asm/vendor_extensions/andes.h b/arch/riscv/include/asm/vendor_extensions/andes.h +new file mode 100644 +index 000000000000..7bb2fc43438f +--- /dev/null ++++ b/arch/riscv/include/asm/vendor_extensions/andes.h +@@ -0,0 +1,19 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H ++#define _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H ++ ++#include ++ ++#include ++ ++#define RISCV_ISA_VENDOR_EXT_XANDESPMU 0 ++ ++/* ++ * Extension keys should be strictly less than max. ++ * It is safe to increment this when necessary. 
++ */ ++#define RISCV_ISA_VENDOR_EXT_MAX_ANDES 32 ++ ++extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_andes; ++ ++#endif +diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h +index e55407ace0c3..2f2bb0c84f9a 100644 +--- a/arch/riscv/include/asm/vendorid_list.h ++++ b/arch/riscv/include/asm/vendorid_list.h +@@ -5,7 +5,7 @@ + #ifndef ASM_VENDOR_LIST_H + #define ASM_VENDOR_LIST_H + +-#define ANDESTECH_VENDOR_ID 0x31e ++#define ANDES_VENDOR_ID 0x31e + #define SIFIVE_VENDOR_ID 0x489 + #define THEAD_VENDOR_ID 0x5b7 + +diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h +index 006bfb48343d..6fdaefa62e14 100644 +--- a/arch/riscv/include/uapi/asm/hwprobe.h ++++ b/arch/riscv/include/uapi/asm/hwprobe.h +@@ -10,7 +10,7 @@ + + /* + * Interface for probing hardware capabilities from userspace, see +- * Documentation/riscv/hwprobe.rst for more information. ++ * Documentation/arch/riscv/hwprobe.rst for more information. + */ + struct riscv_hwprobe { + __s64 key; +@@ -29,6 +29,50 @@ struct riscv_hwprobe { + #define RISCV_HWPROBE_EXT_ZBA (1 << 3) + #define RISCV_HWPROBE_EXT_ZBB (1 << 4) + #define RISCV_HWPROBE_EXT_ZBS (1 << 5) ++#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6) ++#define RISCV_HWPROBE_EXT_ZBC (1 << 7) ++#define RISCV_HWPROBE_EXT_ZBKB (1 << 8) ++#define RISCV_HWPROBE_EXT_ZBKC (1 << 9) ++#define RISCV_HWPROBE_EXT_ZBKX (1 << 10) ++#define RISCV_HWPROBE_EXT_ZKND (1 << 11) ++#define RISCV_HWPROBE_EXT_ZKNE (1 << 12) ++#define RISCV_HWPROBE_EXT_ZKNH (1 << 13) ++#define RISCV_HWPROBE_EXT_ZKSED (1 << 14) ++#define RISCV_HWPROBE_EXT_ZKSH (1 << 15) ++#define RISCV_HWPROBE_EXT_ZKT (1 << 16) ++#define RISCV_HWPROBE_EXT_ZVBB (1 << 17) ++#define RISCV_HWPROBE_EXT_ZVBC (1 << 18) ++#define RISCV_HWPROBE_EXT_ZVKB (1 << 19) ++#define RISCV_HWPROBE_EXT_ZVKG (1 << 20) ++#define RISCV_HWPROBE_EXT_ZVKNED (1 << 21) ++#define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22) ++#define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23) ++#define RISCV_HWPROBE_EXT_ZVKSED (1 << 24) ++#define RISCV_HWPROBE_EXT_ZVKSH (1 << 25) ++#define RISCV_HWPROBE_EXT_ZVKT (1 << 26) ++#define RISCV_HWPROBE_EXT_ZFH (1 << 27) ++#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28) ++#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29) ++#define RISCV_HWPROBE_EXT_ZVFH (1 << 30) ++#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31) ++#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32) ++#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33) ++#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34) ++#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35) ++#define RISCV_HWPROBE_EXT_ZIHINTPAUSE (1ULL << 36) ++#define RISCV_HWPROBE_EXT_ZVE32X (1ULL << 37) ++#define RISCV_HWPROBE_EXT_ZVE32F (1ULL << 38) ++#define RISCV_HWPROBE_EXT_ZVE64X (1ULL << 39) ++#define RISCV_HWPROBE_EXT_ZVE64F (1ULL << 40) ++#define RISCV_HWPROBE_EXT_ZVE64D (1ULL << 41) ++#define RISCV_HWPROBE_EXT_ZIMOP (1ULL << 42) ++#define RISCV_HWPROBE_EXT_ZCA (1ULL << 43) ++#define RISCV_HWPROBE_EXT_ZCB (1ULL << 44) ++#define RISCV_HWPROBE_EXT_ZCD (1ULL << 45) ++#define RISCV_HWPROBE_EXT_ZCF (1ULL << 46) ++#define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47) ++#define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48) ++#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49) + #define RISCV_HWPROBE_KEY_CPUPERF_0 5 + #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) + #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0) +@@ -36,6 +80,12 @@ struct riscv_hwprobe { + #define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0) + #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0) + #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0) 
++#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6 ++#define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7 ++#define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8 + /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */ + ++/* Flags */ ++#define RISCV_HWPROBE_WHICH_CPUS (1 << 0) ++ + #endif +diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile +index a2499fcc1cf3..70d04e1875f0 100644 +--- a/arch/riscv/kernel/Makefile ++++ b/arch/riscv/kernel/Makefile +@@ -52,12 +52,15 @@ obj-y += setup.o + obj-y += signal.o + obj-y += syscall_table.o + obj-y += sys_riscv.o ++obj-y += sys_hwprobe.o + obj-y += time.o + obj-y += traps.o + obj-y += riscv_ksyms.o + obj-y += stacktrace.o + obj-y += cacheinfo.o + obj-y += patch.o ++obj-y += vendor_extensions.o ++obj-y += vendor_extensions/ + obj-y += probes/ + obj-$(CONFIG_MMU) += vdso.o vdso/ + +@@ -104,3 +107,4 @@ obj-$(CONFIG_COMPAT) += compat_vdso/ + obj-$(CONFIG_64BIT) += pi/ + obj-$(CONFIG_ACPI) += acpi.o + obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o ++obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o +diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c +index 07a43843368d..84879fcfbab3 100644 +--- a/arch/riscv/kernel/acpi.c ++++ b/arch/riscv/kernel/acpi.c +@@ -190,11 +190,6 @@ struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu) + return &cpu_madt_rintc[cpu]; + } + +-u32 get_acpi_id_for_cpu(int cpu) +-{ +- return acpi_cpu_get_madt_rintc(cpu)->uid; +-} +- + /* + * __acpi_map_table() will be called before paging_init(), so early_ioremap() + * or early_memremap() should be called here to for ACPI table mapping. +@@ -223,29 +218,26 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) + #ifdef CONFIG_PCI + + /* +- * These interfaces are defined just to enable building ACPI core. +- * TODO: Update it with actual implementation when external interrupt +- * controller support is added in RISC-V ACPI. ++ * raw_pci_read/write - Platform-specific PCI config space access. + */ +-int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, +- int reg, int len, u32 *val) ++int raw_pci_read(unsigned int domain, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 *val) + { +- return PCIBIOS_DEVICE_NOT_FOUND; +-} ++ struct pci_bus *b = pci_find_bus(domain, bus); + +-int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, +- int reg, int len, u32 val) +-{ +- return PCIBIOS_DEVICE_NOT_FOUND; ++ if (!b) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ return b->ops->read(b, devfn, reg, len, val); + } + +-int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) ++int raw_pci_write(unsigned int domain, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 val) + { +- return -1; +-} ++ struct pci_bus *b = pci_find_bus(domain, bus); + +-struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +-{ +- return NULL; ++ if (!b) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ return b->ops->write(b, devfn, reg, len, val); + } ++ + #endif /* CONFIG_PCI */ +diff --git a/arch/riscv/kernel/acpi_numa.c b/arch/riscv/kernel/acpi_numa.c +new file mode 100644 +index 000000000000..1a97cbdafd01 +--- /dev/null ++++ b/arch/riscv/kernel/acpi_numa.c +@@ -0,0 +1,130 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * ACPI 6.6 based NUMA setup for RISCV ++ * Lots of code was borrowed from arch/arm64/kernel/acpi_numa.c ++ * ++ * Copyright 2004 Andi Kleen, SuSE Labs. ++ * Copyright (C) 2013-2016, Linaro Ltd. ++ * Author: Hanjun Guo ++ * Copyright (C) 2024 Intel Corporation. 
++ * ++ * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs. ++ * ++ * Called from acpi_numa_init while reading the SRAT and SLIT tables. ++ * Assumes all memory regions belonging to a single proximity domain ++ * are in one chunk. Holes between them will be included in the node. ++ */ ++ ++#define pr_fmt(fmt) "ACPI: NUMA: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE }; ++ ++int __init acpi_numa_get_nid(unsigned int cpu) ++{ ++ return acpi_early_node_map[cpu]; ++} ++ ++static inline int get_cpu_for_acpi_id(u32 uid) ++{ ++ int cpu; ++ ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) ++ if (uid == get_acpi_id_for_cpu(cpu)) ++ return cpu; ++ ++ return -EINVAL; ++} ++ ++static int __init acpi_parse_rintc_pxm(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_srat_rintc_affinity *pa; ++ int cpu, pxm, node; ++ ++ if (srat_disabled()) ++ return -EINVAL; ++ pa = (struct acpi_srat_rintc_affinity *)header; ++ if (!pa) ++ return -EINVAL; ++ ++ if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED)) ++ return 0; ++ ++ pxm = pa->proximity_domain; ++ node = pxm_to_node(pxm); ++ ++ /* ++ * If we can't map the UID to a logical cpu this ++ * means that the UID is not part of possible cpus ++ * so we do not need a NUMA mapping for it, skip ++ * the SRAT entry and keep parsing. ++ */ ++ cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid); ++ if (cpu < 0) ++ return 0; ++ ++ acpi_early_node_map[cpu] = node; ++ pr_info("SRAT: PXM %d -> HARTID 0x%lx -> Node %d\n", pxm, ++ cpuid_to_hartid_map(cpu), node); ++ ++ return 0; ++} ++ ++void __init acpi_map_cpus_to_nodes(void) ++{ ++ int i; ++ ++ /* ++ * In ACPI, SMP and CPU NUMA information is provided in separate ++ * static tables, namely the MADT and the SRAT. ++ * ++ * Thus, it is simpler to first create the cpu logical map through ++ * an MADT walk and then map the logical cpus to their node ids ++ * as separate steps. 
++ */ ++ acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat), ++ ACPI_SRAT_TYPE_RINTC_AFFINITY, acpi_parse_rintc_pxm, 0); ++ ++ for (i = 0; i < nr_cpu_ids; i++) ++ early_map_cpu_to_node(i, acpi_numa_get_nid(i)); ++} ++ ++/* Callback for Proximity Domain -> logical node ID mapping */ ++void __init acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) ++{ ++ int pxm, node; ++ ++ if (srat_disabled()) ++ return; ++ ++ if (pa->header.length < sizeof(struct acpi_srat_rintc_affinity)) { ++ pr_err("SRAT: Invalid SRAT header length: %d\n", pa->header.length); ++ bad_srat(); ++ return; ++ } ++ ++ if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED)) ++ return; ++ ++ pxm = pa->proximity_domain; ++ node = acpi_map_pxm_to_node(pxm); ++ ++ if (node == NUMA_NO_NODE) { ++ pr_err("SRAT: Too many proximity domains %d\n", pxm); ++ bad_srat(); ++ return; ++ } ++ ++ node_set(node, numa_nodes_parsed); ++} +diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c +index 319a1da0358b..0128b161bfda 100644 +--- a/arch/riscv/kernel/alternative.c ++++ b/arch/riscv/kernel/alternative.c +@@ -43,7 +43,7 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info + + switch (cpu_mfr_info->vendor_id) { + #ifdef CONFIG_ERRATA_ANDES +- case ANDESTECH_VENDOR_ID: ++ case ANDES_VENDOR_ID: + cpu_mfr_info->patch_func = andes_errata_patch_func; + break; + #endif diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c -index bb5fb2b820a2..6fdb7d166a41 100644 +index bb5fb2b820a2..820f579e4581 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c -@@ -176,6 +176,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + + #include "copy-unaligned.h" + +@@ -32,6 +33,8 @@ + #define MISALIGNED_BUFFER_SIZE 0x4000 + #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80) + ++static bool any_cpu_has_zicboz; ++ + unsigned long elf_hwcap __read_mostly; + + /* Host ISA bitmap */ +@@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(riscv_isa_extension_base); + * + * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used. + */ +-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) ++bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit) + { + const unsigned long *bmap = (isa_bitmap) ? 
isa_bitmap : riscv_isa; + +@@ -80,37 +83,204 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) + } + EXPORT_SYMBOL_GPL(__riscv_isa_extension_available); + +-static bool riscv_isa_extension_check(int id) ++static int riscv_ext_zicbom_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) + { +- switch (id) { +- case RISCV_ISA_EXT_ZICBOM: +- if (!riscv_cbom_block_size) { +- pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n"); +- return false; +- } else if (!is_power_of_2(riscv_cbom_block_size)) { +- pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n"); +- return false; +- } +- return true; +- case RISCV_ISA_EXT_ZICBOZ: +- if (!riscv_cboz_block_size) { +- pr_err("Zicboz detected in ISA string, but no cboz-block-size found\n"); +- return false; +- } else if (!is_power_of_2(riscv_cboz_block_size)) { +- pr_err("cboz-block-size present, but is not a power-of-2\n"); +- return false; +- } +- return true; ++ if (!riscv_cbom_block_size) { ++ pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n"); ++ return -EINVAL; ++ } ++ if (!is_power_of_2(riscv_cbom_block_size)) { ++ pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n"); ++ return -EINVAL; + } ++ return 0; ++} + +- return true; ++static int riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (!riscv_cboz_block_size) { ++ pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n"); ++ return -EINVAL; ++ } ++ if (!is_power_of_2(riscv_cboz_block_size)) { ++ pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n"); ++ return -EINVAL; ++ } ++ any_cpu_has_zicboz = true; ++ return 0; + } + +-#define __RISCV_ISA_EXT_DATA(_name, _id) { \ +- .name = #_name, \ +- .property = #_name, \ +- .id = _id, \ ++static int riscv_ext_zca_depends(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA)) ++ return 0; ++ ++ return -EPROBE_DEFER; + } ++static int riscv_ext_zcd_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA) && ++ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_d)) ++ return 0; ++ ++ return -EPROBE_DEFER; ++} ++ ++static int riscv_ext_zcf_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (IS_ENABLED(CONFIG_64BIT)) ++ return -EINVAL; ++ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA) && ++ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_f)) ++ return 0; ++ ++ return -EPROBE_DEFER; ++} ++ ++static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ /* SVADE has already been detected, use SVADE only */ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_SVADE)) ++ return -EOPNOTSUPP; ++ ++ return 0; ++} ++ ++static const unsigned int riscv_zk_bundled_exts[] = { ++ RISCV_ISA_EXT_ZBKB, ++ RISCV_ISA_EXT_ZBKC, ++ RISCV_ISA_EXT_ZBKX, ++ RISCV_ISA_EXT_ZKND, ++ RISCV_ISA_EXT_ZKNE, ++ RISCV_ISA_EXT_ZKR, ++ RISCV_ISA_EXT_ZKT, ++}; ++ ++static const unsigned int riscv_zkn_bundled_exts[] = { ++ RISCV_ISA_EXT_ZBKB, ++ RISCV_ISA_EXT_ZBKC, ++ RISCV_ISA_EXT_ZBKX, ++ RISCV_ISA_EXT_ZKND, ++ RISCV_ISA_EXT_ZKNE, ++ RISCV_ISA_EXT_ZKNH, ++}; ++ ++static const 
unsigned int riscv_zks_bundled_exts[] = { ++ RISCV_ISA_EXT_ZBKB, ++ RISCV_ISA_EXT_ZBKC, ++ RISCV_ISA_EXT_ZKSED, ++ RISCV_ISA_EXT_ZKSH ++}; ++ ++#define RISCV_ISA_EXT_ZVKN \ ++ RISCV_ISA_EXT_ZVKNED, \ ++ RISCV_ISA_EXT_ZVKNHB, \ ++ RISCV_ISA_EXT_ZVKB, \ ++ RISCV_ISA_EXT_ZVKT ++ ++static const unsigned int riscv_zvkn_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKN ++}; ++ ++static const unsigned int riscv_zvknc_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKN, ++ RISCV_ISA_EXT_ZVBC ++}; ++ ++static const unsigned int riscv_zvkng_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKN, ++ RISCV_ISA_EXT_ZVKG ++}; ++ ++#define RISCV_ISA_EXT_ZVKS \ ++ RISCV_ISA_EXT_ZVKSED, \ ++ RISCV_ISA_EXT_ZVKSH, \ ++ RISCV_ISA_EXT_ZVKB, \ ++ RISCV_ISA_EXT_ZVKT ++ ++static const unsigned int riscv_zvks_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKS ++}; ++ ++static const unsigned int riscv_zvksc_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKS, ++ RISCV_ISA_EXT_ZVBC ++}; ++ ++static const unsigned int riscv_zvksg_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKS, ++ RISCV_ISA_EXT_ZVKG ++}; ++ ++static const unsigned int riscv_zvbb_exts[] = { ++ RISCV_ISA_EXT_ZVKB ++}; ++ ++#define RISCV_ISA_EXT_ZVE64F_IMPLY_LIST \ ++ RISCV_ISA_EXT_ZVE64X, \ ++ RISCV_ISA_EXT_ZVE32F, \ ++ RISCV_ISA_EXT_ZVE32X ++ ++#define RISCV_ISA_EXT_ZVE64D_IMPLY_LIST \ ++ RISCV_ISA_EXT_ZVE64F, \ ++ RISCV_ISA_EXT_ZVE64F_IMPLY_LIST ++ ++#define RISCV_ISA_EXT_V_IMPLY_LIST \ ++ RISCV_ISA_EXT_ZVE64D, \ ++ RISCV_ISA_EXT_ZVE64D_IMPLY_LIST ++ ++static const unsigned int riscv_zve32f_exts[] = { ++ RISCV_ISA_EXT_ZVE32X ++}; ++ ++static const unsigned int riscv_zve64f_exts[] = { ++ RISCV_ISA_EXT_ZVE64F_IMPLY_LIST ++}; ++ ++static const unsigned int riscv_zve64d_exts[] = { ++ RISCV_ISA_EXT_ZVE64D_IMPLY_LIST ++}; ++ ++static const unsigned int riscv_v_exts[] = { ++ RISCV_ISA_EXT_V_IMPLY_LIST ++}; ++ ++static const unsigned int riscv_zve64x_exts[] = { ++ RISCV_ISA_EXT_ZVE32X, ++ RISCV_ISA_EXT_ZVE64X ++}; ++ ++/* ++ * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V ++ * privileged ISA, the existence of the CSRs is implied by any extension which ++ * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the ++ * existence of the CSR, and treat it as a subset of those other extensions. ++ */ ++static const unsigned int riscv_xlinuxenvcfg_exts[] = { ++ RISCV_ISA_EXT_XLINUXENVCFG ++}; ++ ++/* ++ * Zc* spec states that: ++ * - C always implies Zca ++ * - C+F implies Zcf (RV32 only) ++ * - C+D implies Zcd ++ * ++ * These extensions will be enabled and then validated depending on the ++ * availability of F/D RV32. 
++ */ ++static const unsigned int riscv_c_exts[] = { ++ RISCV_ISA_EXT_ZCA, ++ RISCV_ISA_EXT_ZCF, ++ RISCV_ISA_EXT_ZCD, ++}; + + /* + * The canonical order of ISA extension names in the ISA string is defined in +@@ -158,36 +328,177 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { + __RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f), + __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d), + __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q), +- __RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c), +- __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b), +- __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k), +- __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j), +- __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p), +- __RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v), ++ __RISCV_ISA_EXT_SUPERSET(c, RISCV_ISA_EXT_c, riscv_c_exts), ++ __RISCV_ISA_EXT_SUPERSET(v, RISCV_ISA_EXT_v, riscv_v_exts), + __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h), +- __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM), +- __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ), ++ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts, ++ riscv_ext_zicbom_validate), ++ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, ++ riscv_ext_zicboz_validate), ++ __RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE), + __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR), ++ __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND), + __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR), + __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI), ++ __RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL), + __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), + __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM), ++ __RISCV_ISA_EXT_DATA(zimop, RISCV_ISA_EXT_ZIMOP), ++ __RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA), ++ __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), ++ __RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS), ++ __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), ++ __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH), ++ __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN), ++ __RISCV_ISA_EXT_DATA(zca, RISCV_ISA_EXT_ZCA), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcb, RISCV_ISA_EXT_ZCB, riscv_ext_zca_depends), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcd, RISCV_ISA_EXT_ZCD, riscv_ext_zcd_validate), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcf, RISCV_ISA_EXT_ZCF, riscv_ext_zcf_validate), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcmop, RISCV_ISA_EXT_ZCMOP, riscv_ext_zca_depends), + __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA), __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB), ++ __RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC), ++ __RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB), ++ __RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC), ++ __RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX), __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS), ++ __RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts), ++ __RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND), ++ __RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE), ++ __RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH), ++ __RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR), ++ __RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT), ++ __RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED), ++ __RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH), ++ __RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO), ++ __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts), ++ __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC), ++ __RISCV_ISA_EXT_SUPERSET(zve32f, RISCV_ISA_EXT_ZVE32F, riscv_zve32f_exts), ++ 
__RISCV_ISA_EXT_DATA(zve32x, RISCV_ISA_EXT_ZVE32X), ++ __RISCV_ISA_EXT_SUPERSET(zve64d, RISCV_ISA_EXT_ZVE64D, riscv_zve64d_exts), ++ __RISCV_ISA_EXT_SUPERSET(zve64f, RISCV_ISA_EXT_ZVE64F, riscv_zve64f_exts), ++ __RISCV_ISA_EXT_SUPERSET(zve64x, RISCV_ISA_EXT_ZVE64X, riscv_zve64x_exts), ++ __RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH), ++ __RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN), ++ __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB), ++ __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG), ++ __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts), ++ __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED), ++ __RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA), ++ __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB), ++ __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts), ++ __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED), ++ __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH), ++ __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT), __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA), ++ __RISCV_ISA_EXT_DATA(smmpm, RISCV_ISA_EXT_SMMPM), ++ __RISCV_ISA_EXT_SUPERSET(smnpm, RISCV_ISA_EXT_SMNPM, riscv_xlinuxenvcfg_exts), + __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN), __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA), __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), ++ __RISCV_ISA_EXT_SUPERSET(ssnpm, RISCV_ISA_EXT_SSNPM, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC), ++ __RISCV_ISA_EXT_DATA(svade, RISCV_ISA_EXT_SVADE), ++ __RISCV_ISA_EXT_DATA_VALIDATE(svadu, RISCV_ISA_EXT_SVADU, riscv_ext_svadu_validate), + __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), + __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT), + __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT), ++ __RISCV_ISA_EXT_DATA(svvptc, RISCV_ISA_EXT_SVVPTC), + }; + + const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext); + +-static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo, +- unsigned long *isa2hwcap, const char *isa) ++static void riscv_isa_set_ext(const struct riscv_isa_ext_data *ext, unsigned long *bitmap) ++{ ++ if (ext->id != RISCV_ISA_EXT_INVALID) ++ set_bit(ext->id, bitmap); ++ ++ for (int i = 0; i < ext->subset_ext_size; i++) { ++ if (ext->subset_ext_ids[i] != RISCV_ISA_EXT_INVALID) ++ set_bit(ext->subset_ext_ids[i], bitmap); ++ } ++} ++ ++static const struct riscv_isa_ext_data *riscv_get_isa_ext_data(unsigned int ext_id) ++{ ++ for (int i = 0; i < riscv_isa_ext_count; i++) { ++ if (riscv_isa_ext[i].id == ext_id) ++ return &riscv_isa_ext[i]; ++ } ++ ++ return NULL; ++} ++ ++/* ++ * "Resolve" a source ISA bitmap into one that matches kernel configuration as ++ * well as correct extension dependencies. Some extensions depends on specific ++ * kernel configuration to be usable (V needs CONFIG_RISCV_ISA_V for instance) ++ * and this function will actually validate all the extensions provided in ++ * source_isa into the resolved_isa based on extensions validate() callbacks. 
++ */ ++static void __init riscv_resolve_isa(unsigned long *source_isa, ++ unsigned long *resolved_isa, unsigned long *this_hwcap, ++ unsigned long *isa2hwcap) ++{ ++ bool loop; ++ const struct riscv_isa_ext_data *ext; ++ DECLARE_BITMAP(prev_resolved_isa, RISCV_ISA_EXT_MAX); ++ int max_loop_count = riscv_isa_ext_count, ret; ++ unsigned int bit; ++ ++ do { ++ loop = false; ++ if (max_loop_count-- < 0) { ++ pr_err("Failed to reach a stable ISA state\n"); ++ return; ++ } ++ bitmap_copy(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX); ++ for_each_set_bit(bit, source_isa, RISCV_ISA_EXT_MAX) { ++ ext = riscv_get_isa_ext_data(bit); ++ ++ if (ext && ext->validate) { ++ ret = ext->validate(ext, resolved_isa); ++ if (ret == -EPROBE_DEFER) { ++ loop = true; ++ continue; ++ } else if (ret) { ++ /* Disable the extension entirely */ ++ clear_bit(bit, source_isa); ++ continue; ++ } ++ } ++ ++ set_bit(bit, resolved_isa); ++ /* No need to keep it in source isa now that it is enabled */ ++ clear_bit(bit, source_isa); ++ ++ /* Single letter extensions get set in hwcap */ ++ if (bit < RISCV_ISA_EXT_BASE) ++ *this_hwcap |= isa2hwcap[bit]; ++ } ++ } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa))); ++} ++ ++static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap) ++{ ++ for (int i = 0; i < riscv_isa_ext_count; i++) { ++ const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i]; ++ ++ if ((name_end - name == strlen(ext->name)) && ++ !strncasecmp(name, ext->name, name_end - name)) { ++ riscv_isa_set_ext(ext, bitmap); ++ break; ++ } ++ } ++} ++ ++static void __init riscv_parse_isa_string(const char *isa, unsigned long *bitmap) + { + /* + * For all possible cpus, we have already validated in +@@ -200,15 +511,31 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + while (*isa) { + const char *ext = isa++; + const char *ext_end = isa; +- bool ext_long = false, ext_err = false; ++ bool ext_err = false; + + switch (*ext) { ++ case 'x': ++ case 'X': ++ if (acpi_disabled) ++ pr_warn_once("Vendor extensions are ignored in riscv,isa. Use riscv,isa-extensions instead."); ++ /* ++ * To skip an extension, we find its end. ++ * As multi-letter extensions must be split from other multi-letter ++ * extensions with an "_", the end of a multi-letter extension will ++ * either be the null character or the "_" at the start of the next ++ * multi-letter extension. ++ */ ++ for (; *isa && *isa != '_'; ++isa) ++ ; ++ ext_err = true; ++ break; + case 's': + /* +- * Workaround for invalid single-letter 's' & 'u'(QEMU). ++ * Workaround for invalid single-letter 's' & 'u' (QEMU). + * No need to set the bit in riscv_isa as 's' & 'u' are +- * not valid ISA extensions. It works until multi-letter +- * extension starting with "Su" appears. ++ * not valid ISA extensions. It works unless the first ++ * multi-letter extension in the ISA string begins with ++ * "Su" and is not prefixed with an underscore. + */ + if (ext[-1] != '_' && ext[1] == 'u') { + ++isa; +@@ -217,8 +544,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + } + fallthrough; + case 'S': +- case 'x': +- case 'X': + case 'z': + case 'Z': + /* +@@ -239,7 +564,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + * character itself while eliminating the extensions version number. + * A simple re-increment solves this problem. 
+ */ +- ext_long = true; + for (; *isa && *isa != '_'; ++isa) + if (unlikely(!isalnum(*isa))) + ext_err = true; +@@ -317,29 +641,10 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + if (*isa == '_') + ++isa; + +-#define SET_ISA_EXT_MAP(name, bit) \ +- do { \ +- if ((ext_end - ext == strlen(name)) && \ +- !strncasecmp(ext, name, strlen(name)) && \ +- riscv_isa_extension_check(bit)) \ +- set_bit(bit, isainfo->isa); \ +- } while (false) \ +- + if (unlikely(ext_err)) + continue; +- if (!ext_long) { +- int nr = tolower(*ext) - 'a'; + +- if (riscv_isa_extension_check(nr)) { +- *this_hwcap |= isa2hwcap[nr]; +- set_bit(nr, isainfo->isa); +- } +- } else { +- for (int i = 0; i < riscv_isa_ext_count; i++) +- SET_ISA_EXT_MAP(riscv_isa_ext[i].name, +- riscv_isa_ext[i].id); +- } +-#undef SET_ISA_EXT_MAP ++ match_isa_ext(ext, ext_end, bitmap); + } + } + +@@ -366,6 +671,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + for_each_possible_cpu(cpu) { + struct riscv_isainfo *isainfo = &hart_isa[cpu]; + unsigned long this_hwcap = 0; ++ DECLARE_BITMAP(source_isa, RISCV_ISA_EXT_MAX) = { 0 }; + + if (acpi_disabled) { + node = of_cpu_device_node_get(cpu); +@@ -388,7 +694,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + } + } + +- riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa); ++ riscv_parse_isa_string(isa, source_isa); + + /* + * These ones were as they were part of the base ISA when the +@@ -396,10 +702,10 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + * unconditionally where `i` is in riscv,isa on DT systems. + */ + if (acpi_disabled) { +- set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa); +- set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa); +- set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa); +- set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa); ++ set_bit(RISCV_ISA_EXT_ZICSR, source_isa); ++ set_bit(RISCV_ISA_EXT_ZIFENCEI, source_isa); ++ set_bit(RISCV_ISA_EXT_ZICNTR, source_isa); ++ set_bit(RISCV_ISA_EXT_ZIHPM, source_isa); + } + + /* +@@ -412,9 +718,11 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + */ + if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) { + this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v]; +- clear_bit(RISCV_ISA_EXT_v, isainfo->isa); ++ clear_bit(RISCV_ISA_EXT_v, source_isa); + } + ++ riscv_resolve_isa(source_isa, isainfo->isa, &this_hwcap, isa2hwcap); ++ + /* + * All "okay" hart should have same isa. Set HWCAP based on + * common capabilities of every "okay" hart, in case they don't +@@ -435,6 +743,61 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + acpi_put_table((struct acpi_table_header *)rhct); + } + ++static void __init riscv_fill_cpu_vendor_ext(struct device_node *cpu_node, int cpu) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return; ++ ++ for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) { ++ struct riscv_isa_vendor_ext_data_list *ext_list = riscv_isa_vendor_ext_list[i]; ++ ++ for (int j = 0; j < ext_list->ext_data_count; j++) { ++ const struct riscv_isa_ext_data ext = ext_list->ext_data[j]; ++ struct riscv_isavendorinfo *isavendorinfo = &ext_list->per_hart_isa_bitmap[cpu]; ++ ++ if (of_property_match_string(cpu_node, "riscv,isa-extensions", ++ ext.property) < 0) ++ continue; ++ ++ /* ++ * Assume that subset extensions are all members of the ++ * same vendor. 
++ */ ++ if (ext.subset_ext_size) ++ for (int k = 0; k < ext.subset_ext_size; k++) ++ set_bit(ext.subset_ext_ids[k], isavendorinfo->isa); ++ ++ set_bit(ext.id, isavendorinfo->isa); ++ } ++ } ++} ++ ++/* ++ * Populate all_harts_isa_bitmap for each vendor with all of the extensions that ++ * are shared across CPUs for that vendor. ++ */ ++static void __init riscv_fill_vendor_ext_list(int cpu) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return; ++ ++ for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) { ++ struct riscv_isa_vendor_ext_data_list *ext_list = riscv_isa_vendor_ext_list[i]; ++ ++ if (!ext_list->is_initialized) { ++ bitmap_copy(ext_list->all_harts_isa_bitmap.isa, ++ ext_list->per_hart_isa_bitmap[cpu].isa, ++ RISCV_ISA_VENDOR_EXT_MAX); ++ ext_list->is_initialized = true; ++ } else { ++ bitmap_and(ext_list->all_harts_isa_bitmap.isa, ++ ext_list->all_harts_isa_bitmap.isa, ++ ext_list->per_hart_isa_bitmap[cpu].isa, ++ RISCV_ISA_VENDOR_EXT_MAX); ++ } ++ } ++} ++ + static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + { + unsigned int cpu; +@@ -443,6 +806,7 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + unsigned long this_hwcap = 0; + struct device_node *cpu_node; + struct riscv_isainfo *isainfo = &hart_isa[cpu]; ++ DECLARE_BITMAP(source_isa, RISCV_ISA_EXT_MAX) = { 0 }; + + cpu_node = of_cpu_device_node_get(cpu); + if (!cpu_node) { +@@ -456,20 +820,18 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + } + + for (int i = 0; i < riscv_isa_ext_count; i++) { +- if (of_property_match_string(cpu_node, "riscv,isa-extensions", +- riscv_isa_ext[i].property) < 0) +- continue; ++ const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i]; + +- if (!riscv_isa_extension_check(riscv_isa_ext[i].id)) ++ if (of_property_match_string(cpu_node, "riscv,isa-extensions", ++ ext->property) < 0) + continue; + +- /* Only single letter extensions get set in hwcap */ +- if (strnlen(riscv_isa_ext[i].name, 2) == 1) +- this_hwcap |= isa2hwcap[riscv_isa_ext[i].id]; +- +- set_bit(riscv_isa_ext[i].id, isainfo->isa); ++ riscv_isa_set_ext(ext, source_isa); + } + ++ riscv_resolve_isa(source_isa, isainfo->isa, &this_hwcap, isa2hwcap); ++ riscv_fill_cpu_vendor_ext(cpu_node, cpu); ++ + of_node_put(cpu_node); + + /* +@@ -485,6 +847,8 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); + else + bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); ++ ++ riscv_fill_vendor_ext_list(cpu); + } + + if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX)) +@@ -539,8 +903,14 @@ void __init riscv_fill_hwcap(void) + elf_hwcap &= ~COMPAT_HWCAP_ISA_F; + } + +- if (elf_hwcap & COMPAT_HWCAP_ISA_V) { ++ if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZVE32X)) { ++ /* ++ * This cannot fail when called on the boot hart ++ */ + riscv_v_setup_vsize(); ++ } ++ ++ if (elf_hwcap & COMPAT_HWCAP_ISA_V) { + /* + * ISA string in device tree might have 'v' flag, but + * CONFIG_RISCV_ISA_V is disabled in kernel. 
+@@ -668,7 +1038,7 @@ void check_unaligned_access(int cpu) + __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE)); + } + +-static int check_unaligned_access_boot_cpu(void) ++static int __init check_unaligned_access_boot_cpu(void) + { + check_unaligned_access(0); + return 0; +@@ -676,6 +1046,14 @@ static int check_unaligned_access_boot_cpu(void) + + arch_initcall(check_unaligned_access_boot_cpu); + ++void __init riscv_user_isa_enable(void) ++{ ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) ++ csr_set(CSR_ENVCFG, ENVCFG_CBZE); ++ else if (any_cpu_has_zicboz) ++ pr_warn("Zicboz disabled as it is unavailable on some harts\n"); ++} ++ + #ifdef CONFIG_RISCV_ALTERNATIVE + /* + * Alternative patch sites consider 48 bits when determining when to patch +@@ -716,28 +1094,45 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin, + { + struct alt_entry *alt; + void *oldptr, *altptr; +- u16 id, value; ++ u16 id, value, vendor; + + if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) + return; + + for (alt = begin; alt < end; alt++) { +- if (alt->vendor_id != 0) +- continue; +- + id = PATCH_ID_CPUFEATURE_ID(alt->patch_id); ++ vendor = PATCH_ID_CPUFEATURE_ID(alt->vendor_id); + +- if (id >= RISCV_ISA_EXT_MAX) { +- WARN(1, "This extension id:%d is not in ISA extension list", id); +- continue; +- } ++ /* ++ * Any alternative with a patch_id that is less than ++ * RISCV_ISA_EXT_MAX is interpreted as a standard extension. ++ * ++ * Any alternative with patch_id that is greater than or equal ++ * to RISCV_VENDOR_EXT_ALTERNATIVES_BASE is interpreted as a ++ * vendor extension. ++ */ ++ if (id < RISCV_ISA_EXT_MAX) { ++ /* ++ * This patch should be treated as errata so skip ++ * processing here. ++ */ ++ if (alt->vendor_id != 0) ++ continue; + +- if (!__riscv_isa_extension_available(NULL, id)) +- continue; ++ if (!__riscv_isa_extension_available(NULL, id)) ++ continue; + +- value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id); +- if (!riscv_cpufeature_patch_check(id, value)) ++ value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id); ++ if (!riscv_cpufeature_patch_check(id, value)) ++ continue; ++ } else if (id >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE) { ++ if (!__riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ++ id - RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) ++ continue; ++ } else { ++ WARN(1, "This extension id:%d is not in ISA extension list", id); + continue; ++ } + + oldptr = ALT_OLD_PTR(alt); + altptr = ALT_ALT_PTR(alt); diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index df4f6fec5d17..ced5a09abaaa 100644 --- a/arch/riscv/kernel/module.c @@ -24443,11 +28300,76 @@ index a4559695ce62..0e2e19352469 100644 + return 0; } +IRQCHIP_DECLARE(riscv_aclint_sswi, "riscv,aclint-sswi", aclint_sswi_probe); +diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c +index c2cdf812ebd0..d949fd3c0884 100644 +--- a/arch/riscv/kernel/setup.c ++++ b/arch/riscv/kernel/setup.c +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -299,17 +300,22 @@ void __init setup_arch(char **cmdline_p) + setup_smp(); + #endif + +- if (!acpi_disabled) ++ if (!acpi_disabled) { + acpi_init_rintc_map(); ++ acpi_map_cpus_to_nodes(); ++ } + + riscv_init_cbo_blocksizes(); + riscv_fill_hwcap(); + init_rt_signal_env(); + apply_boot_alternatives(); ++ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) && + riscv_isa_extension_available(NULL, ZICBOM)) + riscv_noncoherent_supported(); + riscv_set_dma_cache_alignment(); ++ ++ riscv_user_isa_enable(); + } + + bool 
arch_cpu_is_hotpluggable(int cpu) +diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c +index 1b8da4e40a4d..3f37eec7a790 100644 +--- a/arch/riscv/kernel/smpboot.c ++++ b/arch/riscv/kernel/smpboot.c +@@ -25,6 +25,8 @@ + #include + #include + #include ++ ++#include + #include + #include + #include +@@ -105,7 +107,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un + if (hart == cpuid_to_hartid_map(0)) { + BUG_ON(found_boot_cpu); + found_boot_cpu = true; +- early_map_cpu_to_node(0, acpi_numa_get_nid(cpu_count)); + return 0; + } + +@@ -115,7 +116,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un + } + + cpuid_to_hartid_map(cpu_count) = hart; +- early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count)); + cpu_count++; + + return 0; diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c -index 3c89b8ec69c4..239509367e42 100644 +index 3c89b8ec69c4..9a8a0dc035b2 100644 --- a/arch/riscv/kernel/suspend.c +++ b/arch/riscv/kernel/suspend.c -@@ -4,8 +4,12 @@ +@@ -4,13 +4,18 @@ * Copyright (c) 2022 Ventana Micro Systems Inc. */ @@ -24460,7 +28382,25 @@ index 3c89b8ec69c4..239509367e42 100644 #include void suspend_save_csrs(struct suspend_context *context) -@@ -85,3 +89,43 @@ int cpu_suspend(unsigned long arg, + { +- context->scratch = csr_read(CSR_SCRATCH); ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) ++ context->envcfg = csr_read(CSR_ENVCFG); + context->tvec = csr_read(CSR_TVEC); + context->ie = csr_read(CSR_IE); + +@@ -31,7 +36,9 @@ void suspend_save_csrs(struct suspend_context *context) + + void suspend_restore_csrs(struct suspend_context *context) + { +- csr_write(CSR_SCRATCH, context->scratch); ++ csr_write(CSR_SCRATCH, 0); ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) ++ csr_write(CSR_ENVCFG, context->envcfg); + csr_write(CSR_TVEC, context->tvec); + csr_write(CSR_IE, context->ie); + +@@ -85,3 +92,92 @@ int cpu_suspend(unsigned long arg, return rc; } @@ -24503,9 +28443,817 @@ index 3c89b8ec69c4..239509367e42 100644 +} + +arch_initcall(sbi_system_suspend_init); ++ ++static int sbi_suspend_finisher(unsigned long suspend_type, ++ unsigned long resume_addr, ++ unsigned long opaque) ++{ ++ struct sbiret ret; ++ ++ ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, ++ suspend_type, resume_addr, opaque, 0, 0, 0); ++ ++ return (ret.error) ? 
sbi_err_map_linux_errno(ret.error) : 0; ++} ++ ++int riscv_sbi_hart_suspend(u32 state) ++{ ++ if (state & SBI_HSM_SUSP_NON_RET_BIT) ++ return cpu_suspend(state, sbi_suspend_finisher); ++ else ++ return sbi_suspend_finisher(state, 0, 0); ++} ++ ++bool riscv_sbi_suspend_state_is_valid(u32 state) ++{ ++ if (state > SBI_HSM_SUSPEND_RET_DEFAULT && ++ state < SBI_HSM_SUSPEND_RET_PLATFORM) ++ return false; ++ ++ if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT && ++ state < SBI_HSM_SUSPEND_NON_RET_PLATFORM) ++ return false; ++ ++ return true; ++} ++ ++bool riscv_sbi_hsm_is_supported(void) ++{ ++ /* ++ * The SBI HSM suspend function is only available when: ++ * 1) SBI version is 0.3 or higher ++ * 2) SBI HSM extension is available ++ */ ++ if (sbi_spec_version < sbi_mk_version(0, 3) || ++ !sbi_probe_extension(SBI_EXT_HSM)) { ++ pr_info("HSM suspend not available\n"); ++ return false; ++ } ++ ++ return true; ++} +#endif /* CONFIG_RISCV_SBI */ +diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c +new file mode 100644 +index 000000000000..052a41f53dc2 +--- /dev/null ++++ b/arch/riscv/kernel/sys_hwprobe.c +@@ -0,0 +1,349 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * The hwprobe interface, for allowing userspace to probe to see which features ++ * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for ++ * more details. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++static void hwprobe_arch_id(struct riscv_hwprobe *pair, ++ const struct cpumask *cpus) ++{ ++ u64 id = -1ULL; ++ bool first = true; ++ int cpu; ++ ++ for_each_cpu(cpu, cpus) { ++ u64 cpu_id; ++ ++ switch (pair->key) { ++ case RISCV_HWPROBE_KEY_MVENDORID: ++ cpu_id = riscv_cached_mvendorid(cpu); ++ break; ++ case RISCV_HWPROBE_KEY_MIMPID: ++ cpu_id = riscv_cached_mimpid(cpu); ++ break; ++ case RISCV_HWPROBE_KEY_MARCHID: ++ cpu_id = riscv_cached_marchid(cpu); ++ break; ++ } ++ ++ if (first) { ++ id = cpu_id; ++ first = false; ++ } ++ ++ /* ++ * If there's a mismatch for the given set, return -1 in the ++ * value. ++ */ ++ if (id != cpu_id) { ++ id = -1ULL; ++ break; ++ } ++ } ++ ++ pair->value = id; ++} ++ ++static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, ++ const struct cpumask *cpus) ++{ ++ int cpu; ++ u64 missing = 0; ++ ++ pair->value = 0; ++ if (has_fpu()) ++ pair->value |= RISCV_HWPROBE_IMA_FD; ++ ++ if (riscv_isa_extension_available(NULL, c)) ++ pair->value |= RISCV_HWPROBE_IMA_C; ++ ++ if (has_vector()) ++ pair->value |= RISCV_HWPROBE_IMA_V; ++ ++ /* ++ * Loop through and record extensions that 1) anyone has, and 2) anyone ++ * doesn't have. ++ */ ++ for_each_cpu(cpu, cpus) { ++ struct riscv_isainfo *isainfo = &hart_isa[cpu]; ++ ++#define EXT_KEY(ext) \ ++ do { \ ++ if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \ ++ pair->value |= RISCV_HWPROBE_EXT_##ext; \ ++ else \ ++ missing |= RISCV_HWPROBE_EXT_##ext; \ ++ } while (false) ++ ++ /* ++ * Only use EXT_KEY() for extensions which can be exposed to userspace, ++ * regardless of the kernel's configuration, as no other checks, besides ++ * presence in the hart_isa bitmap, are made. 
++ */ ++ EXT_KEY(ZACAS); ++ EXT_KEY(ZAWRS); ++ EXT_KEY(ZBA); ++ EXT_KEY(ZBB); ++ EXT_KEY(ZBC); ++ EXT_KEY(ZBKB); ++ EXT_KEY(ZBKC); ++ EXT_KEY(ZBKX); ++ EXT_KEY(ZBS); ++ EXT_KEY(ZCA); ++ EXT_KEY(ZCB); ++ EXT_KEY(ZCMOP); ++ EXT_KEY(ZICBOZ); ++ EXT_KEY(ZICOND); ++ EXT_KEY(ZIHINTNTL); ++ EXT_KEY(ZIHINTPAUSE); ++ EXT_KEY(ZIMOP); ++ EXT_KEY(ZKND); ++ EXT_KEY(ZKNE); ++ EXT_KEY(ZKNH); ++ EXT_KEY(ZKSED); ++ EXT_KEY(ZKSH); ++ EXT_KEY(ZKT); ++ EXT_KEY(ZTSO); ++ ++ if (has_vector()) { ++ EXT_KEY(ZVBB); ++ EXT_KEY(ZVBC); ++ EXT_KEY(ZVE32F); ++ EXT_KEY(ZVE32X); ++ EXT_KEY(ZVE64D); ++ EXT_KEY(ZVE64F); ++ EXT_KEY(ZVE64X); ++ EXT_KEY(ZVFH); ++ EXT_KEY(ZVFHMIN); ++ EXT_KEY(ZVKB); ++ EXT_KEY(ZVKG); ++ EXT_KEY(ZVKNED); ++ EXT_KEY(ZVKNHA); ++ EXT_KEY(ZVKNHB); ++ EXT_KEY(ZVKSED); ++ EXT_KEY(ZVKSH); ++ EXT_KEY(ZVKT); ++ } ++ ++ if (has_fpu()) { ++ EXT_KEY(ZCD); ++ EXT_KEY(ZCF); ++ EXT_KEY(ZFA); ++ EXT_KEY(ZFH); ++ EXT_KEY(ZFHMIN); ++ } ++ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) ++ EXT_KEY(SUPM); ++#undef EXT_KEY ++ } ++ ++ /* Now turn off reporting features if any CPU is missing it. */ ++ pair->value &= ~missing; ++} ++ ++static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext) ++{ ++ struct riscv_hwprobe pair; ++ ++ hwprobe_isa_ext0(&pair, cpus); ++ return (pair.value & ext); ++} ++ ++static u64 hwprobe_misaligned(const struct cpumask *cpus) ++{ ++ int cpu; ++ u64 perf = -1ULL; ++ ++ for_each_cpu(cpu, cpus) { ++ int this_perf = per_cpu(misaligned_access_speed, cpu); ++ ++ if (perf == -1ULL) ++ perf = this_perf; ++ ++ if (perf != this_perf) { ++ perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; ++ break; ++ } ++ } ++ ++ if (perf == -1ULL) ++ return RISCV_HWPROBE_MISALIGNED_UNKNOWN; ++ ++ return perf; ++} ++ ++static void hwprobe_one_pair(struct riscv_hwprobe *pair, ++ const struct cpumask *cpus) ++{ ++ switch (pair->key) { ++ case RISCV_HWPROBE_KEY_MVENDORID: ++ case RISCV_HWPROBE_KEY_MARCHID: ++ case RISCV_HWPROBE_KEY_MIMPID: ++ hwprobe_arch_id(pair, cpus); ++ break; ++ /* ++ * The kernel already assumes that the base single-letter ISA ++ * extensions are supported on all harts, and only supports the ++ * IMA base, so just cheat a bit here and tell that to ++ * userspace. ++ */ ++ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: ++ pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; ++ break; ++ ++ case RISCV_HWPROBE_KEY_IMA_EXT_0: ++ hwprobe_isa_ext0(pair, cpus); ++ break; ++ ++ case RISCV_HWPROBE_KEY_CPUPERF_0: ++ pair->value = hwprobe_misaligned(cpus); ++ break; ++ ++ case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: ++ pair->value = 0; ++ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) ++ pair->value = riscv_cboz_block_size; ++ break; ++ case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS: ++ pair->value = user_max_virt_addr(); ++ break; ++ ++ case RISCV_HWPROBE_KEY_TIME_CSR_FREQ: ++ pair->value = riscv_timebase; ++ break; ++ ++ /* ++ * For forward compatibility, unknown keys don't fail the whole ++ * call, but get their element key set to -1 and value set to 0 ++ * indicating they're unrecognized. ++ */ ++ default: ++ pair->key = -1; ++ pair->value = 0; ++ break; ++ } ++} ++ ++static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, ++ size_t pair_count, size_t cpusetsize, ++ unsigned long __user *cpus_user, ++ unsigned int flags) ++{ ++ size_t out; ++ int ret; ++ cpumask_t cpus; ++ ++ /* Check the reserved flags. */ ++ if (flags != 0) ++ return -EINVAL; ++ ++ /* ++ * The interface supports taking in a CPU mask, and returns values that ++ * are consistent across that mask. 
Allow userspace to specify NULL and ++ * 0 as a shortcut to all online CPUs. ++ */ ++ cpumask_clear(&cpus); ++ if (!cpusetsize && !cpus_user) { ++ cpumask_copy(&cpus, cpu_online_mask); ++ } else { ++ if (cpusetsize > cpumask_size()) ++ cpusetsize = cpumask_size(); ++ ++ ret = copy_from_user(&cpus, cpus_user, cpusetsize); ++ if (ret) ++ return -EFAULT; ++ ++ /* ++ * Userspace must provide at least one online CPU, without that ++ * there's no way to define what is supported. ++ */ ++ cpumask_and(&cpus, &cpus, cpu_online_mask); ++ if (cpumask_empty(&cpus)) ++ return -EINVAL; ++ } ++ ++ for (out = 0; out < pair_count; out++, pairs++) { ++ struct riscv_hwprobe pair; ++ ++ if (get_user(pair.key, &pairs->key)) ++ return -EFAULT; ++ ++ pair.value = 0; ++ hwprobe_one_pair(&pair, &cpus); ++ ret = put_user(pair.key, &pairs->key); ++ if (ret == 0) ++ ret = put_user(pair.value, &pairs->value); ++ ++ if (ret) ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++#ifdef CONFIG_MMU ++ ++static int __init init_hwprobe_vdso_data(void) ++{ ++ struct vdso_data *vd = __arch_get_k_vdso_data(); ++ struct arch_vdso_data *avd = &vd->arch_data; ++ u64 id_bitsmash = 0; ++ struct riscv_hwprobe pair; ++ int key; ++ ++ /* ++ * Initialize vDSO data with the answers for the "all CPUs" case, to ++ * save a syscall in the common case. ++ */ ++ for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { ++ pair.key = key; ++ hwprobe_one_pair(&pair, cpu_online_mask); ++ ++ WARN_ON_ONCE(pair.key < 0); ++ ++ avd->all_cpu_hwprobe_values[key] = pair.value; ++ /* ++ * Smash together the vendor, arch, and impl IDs to see if ++ * they're all 0 or any negative. ++ */ ++ if (key <= RISCV_HWPROBE_KEY_MIMPID) ++ id_bitsmash |= pair.value; ++ } ++ ++ /* ++ * If the arch, vendor, and implementation ID are all the same across ++ * all harts, then assume all CPUs are the same, and allow the vDSO to ++ * answer queries for arbitrary masks. However if all values are 0 (not ++ * populated) or any value returns -1 (varies across CPUs), then the ++ * vDSO should defer to the kernel for exotic cpu masks. ++ */ ++ avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; ++ return 0; ++} ++ ++arch_initcall_sync(init_hwprobe_vdso_data); ++ ++#endif /* CONFIG_MMU */ ++ ++SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, ++ size_t, pair_count, size_t, cpusetsize, unsigned long __user *, ++ cpus, unsigned int, flags) ++{ ++ return do_riscv_hwprobe(pairs, pair_count, cpusetsize, ++ cpus, flags); ++} +diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c +index 473159b5f303..f1c1416a9f1e 100644 +--- a/arch/riscv/kernel/sys_riscv.c ++++ b/arch/riscv/kernel/sys_riscv.c +@@ -7,15 +7,7 @@ + + #include + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include + #include +-#include + + static long riscv_sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, +@@ -77,265 +69,6 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, + return 0; + } + +-/* +- * The hwprobe interface, for allowing userspace to probe to see which features +- * are supported by the hardware. See Documentation/riscv/hwprobe.rst for more +- * details. 
+- */ +-static void hwprobe_arch_id(struct riscv_hwprobe *pair, +- const struct cpumask *cpus) +-{ +- u64 id = -1ULL; +- bool first = true; +- int cpu; +- +- for_each_cpu(cpu, cpus) { +- u64 cpu_id; +- +- switch (pair->key) { +- case RISCV_HWPROBE_KEY_MVENDORID: +- cpu_id = riscv_cached_mvendorid(cpu); +- break; +- case RISCV_HWPROBE_KEY_MIMPID: +- cpu_id = riscv_cached_mimpid(cpu); +- break; +- case RISCV_HWPROBE_KEY_MARCHID: +- cpu_id = riscv_cached_marchid(cpu); +- break; +- } +- +- if (first) { +- id = cpu_id; +- first = false; +- } +- +- /* +- * If there's a mismatch for the given set, return -1 in the +- * value. +- */ +- if (id != cpu_id) { +- id = -1ULL; +- break; +- } +- } +- +- pair->value = id; +-} +- +-static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, +- const struct cpumask *cpus) +-{ +- int cpu; +- u64 missing = 0; +- +- pair->value = 0; +- if (has_fpu()) +- pair->value |= RISCV_HWPROBE_IMA_FD; +- +- if (riscv_isa_extension_available(NULL, c)) +- pair->value |= RISCV_HWPROBE_IMA_C; +- +- if (has_vector()) +- pair->value |= RISCV_HWPROBE_IMA_V; +- +- /* +- * Loop through and record extensions that 1) anyone has, and 2) anyone +- * doesn't have. +- */ +- for_each_cpu(cpu, cpus) { +- struct riscv_isainfo *isainfo = &hart_isa[cpu]; +- +- if (riscv_isa_extension_available(isainfo->isa, ZBA)) +- pair->value |= RISCV_HWPROBE_EXT_ZBA; +- else +- missing |= RISCV_HWPROBE_EXT_ZBA; +- +- if (riscv_isa_extension_available(isainfo->isa, ZBB)) +- pair->value |= RISCV_HWPROBE_EXT_ZBB; +- else +- missing |= RISCV_HWPROBE_EXT_ZBB; +- +- if (riscv_isa_extension_available(isainfo->isa, ZBS)) +- pair->value |= RISCV_HWPROBE_EXT_ZBS; +- else +- missing |= RISCV_HWPROBE_EXT_ZBS; +- } +- +- /* Now turn off reporting features if any CPU is missing it. */ +- pair->value &= ~missing; +-} +- +-static u64 hwprobe_misaligned(const struct cpumask *cpus) +-{ +- int cpu; +- u64 perf = -1ULL; +- +- for_each_cpu(cpu, cpus) { +- int this_perf = per_cpu(misaligned_access_speed, cpu); +- +- if (perf == -1ULL) +- perf = this_perf; +- +- if (perf != this_perf) { +- perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; +- break; +- } +- } +- +- if (perf == -1ULL) +- return RISCV_HWPROBE_MISALIGNED_UNKNOWN; +- +- return perf; +-} +- +-static void hwprobe_one_pair(struct riscv_hwprobe *pair, +- const struct cpumask *cpus) +-{ +- switch (pair->key) { +- case RISCV_HWPROBE_KEY_MVENDORID: +- case RISCV_HWPROBE_KEY_MARCHID: +- case RISCV_HWPROBE_KEY_MIMPID: +- hwprobe_arch_id(pair, cpus); +- break; +- /* +- * The kernel already assumes that the base single-letter ISA +- * extensions are supported on all harts, and only supports the +- * IMA base, so just cheat a bit here and tell that to +- * userspace. +- */ +- case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: +- pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; +- break; +- +- case RISCV_HWPROBE_KEY_IMA_EXT_0: +- hwprobe_isa_ext0(pair, cpus); +- break; +- +- case RISCV_HWPROBE_KEY_CPUPERF_0: +- pair->value = hwprobe_misaligned(cpus); +- break; +- +- /* +- * For forward compatibility, unknown keys don't fail the whole +- * call, but get their element key set to -1 and value set to 0 +- * indicating they're unrecognized. +- */ +- default: +- pair->key = -1; +- pair->value = 0; +- break; +- } +-} +- +-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, +- size_t pair_count, size_t cpu_count, +- unsigned long __user *cpus_user, +- unsigned int flags) +-{ +- size_t out; +- int ret; +- cpumask_t cpus; +- +- /* Check the reserved flags. 
*/ +- if (flags != 0) +- return -EINVAL; +- +- /* +- * The interface supports taking in a CPU mask, and returns values that +- * are consistent across that mask. Allow userspace to specify NULL and +- * 0 as a shortcut to all online CPUs. +- */ +- cpumask_clear(&cpus); +- if (!cpu_count && !cpus_user) { +- cpumask_copy(&cpus, cpu_online_mask); +- } else { +- if (cpu_count > cpumask_size()) +- cpu_count = cpumask_size(); +- +- ret = copy_from_user(&cpus, cpus_user, cpu_count); +- if (ret) +- return -EFAULT; +- +- /* +- * Userspace must provide at least one online CPU, without that +- * there's no way to define what is supported. +- */ +- cpumask_and(&cpus, &cpus, cpu_online_mask); +- if (cpumask_empty(&cpus)) +- return -EINVAL; +- } +- +- for (out = 0; out < pair_count; out++, pairs++) { +- struct riscv_hwprobe pair; +- +- if (get_user(pair.key, &pairs->key)) +- return -EFAULT; +- +- pair.value = 0; +- hwprobe_one_pair(&pair, &cpus); +- ret = put_user(pair.key, &pairs->key); +- if (ret == 0) +- ret = put_user(pair.value, &pairs->value); +- +- if (ret) +- return -EFAULT; +- } +- +- return 0; +-} +- +-#ifdef CONFIG_MMU +- +-static int __init init_hwprobe_vdso_data(void) +-{ +- struct vdso_data *vd = __arch_get_k_vdso_data(); +- struct arch_vdso_data *avd = &vd->arch_data; +- u64 id_bitsmash = 0; +- struct riscv_hwprobe pair; +- int key; +- +- /* +- * Initialize vDSO data with the answers for the "all CPUs" case, to +- * save a syscall in the common case. +- */ +- for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { +- pair.key = key; +- hwprobe_one_pair(&pair, cpu_online_mask); +- +- WARN_ON_ONCE(pair.key < 0); +- +- avd->all_cpu_hwprobe_values[key] = pair.value; +- /* +- * Smash together the vendor, arch, and impl IDs to see if +- * they're all 0 or any negative. +- */ +- if (key <= RISCV_HWPROBE_KEY_MIMPID) +- id_bitsmash |= pair.value; +- } +- +- /* +- * If the arch, vendor, and implementation ID are all the same across +- * all harts, then assume all CPUs are the same, and allow the vDSO to +- * answer queries for arbitrary masks. However if all values are 0 (not +- * populated) or any value returns -1 (varies across CPUs), then the +- * vDSO should defer to the kernel for exotic cpu masks. +- */ +- avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; +- return 0; +-} +- +-arch_initcall_sync(init_hwprobe_vdso_data); +- +-#endif /* CONFIG_MMU */ +- +-SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, +- size_t, pair_count, size_t, cpu_count, unsigned long __user *, +- cpus, unsigned int, flags) +-{ +- return do_riscv_hwprobe(pairs, pair_count, cpu_count, +- cpus, flags); +-} +- + /* Not defined using SYSCALL_DEFINE0 to avoid error injection */ + asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused) + { +diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c +index cadf725ef798..1e926e4b5881 100644 +--- a/arch/riscv/kernel/vdso/hwprobe.c ++++ b/arch/riscv/kernel/vdso/hwprobe.c +@@ -3,26 +3,22 @@ + * Copyright 2023 Rivos, Inc + */ + ++#include + #include + #include + #include + + extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, ++ size_t cpusetsize, unsigned long *cpus, + unsigned int flags); + +-/* Add a prototype to avoid -Wmissing-prototypes warning. 
*/ +-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, +- unsigned int flags); +- +-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, +- unsigned int flags) ++static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags) + { + const struct vdso_data *vd = __arch_get_vdso_data(); + const struct arch_vdso_data *avd = &vd->arch_data; +- bool all_cpus = !cpu_count && !cpus; ++ bool all_cpus = !cpusetsize && !cpus; + struct riscv_hwprobe *p = pairs; + struct riscv_hwprobe *end = pairs + pair_count; + +@@ -33,7 +29,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, + * masks. + */ + if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus)) +- return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags); ++ return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); + + /* This is something we can handle, fill out the pairs. */ + while (p < end) { +@@ -50,3 +46,71 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, + + return 0; + } ++ ++static int riscv_vdso_get_cpus(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags) ++{ ++ const struct vdso_data *vd = __arch_get_vdso_data(); ++ const struct arch_vdso_data *avd = &vd->arch_data; ++ struct riscv_hwprobe *p = pairs; ++ struct riscv_hwprobe *end = pairs + pair_count; ++ unsigned char *c = (unsigned char *)cpus; ++ bool empty_cpus = true; ++ bool clear_all = false; ++ int i; ++ ++ if (!cpusetsize || !cpus) ++ return -EINVAL; ++ ++ for (i = 0; i < cpusetsize; i++) { ++ if (c[i]) { ++ empty_cpus = false; ++ break; ++ } ++ } ++ ++ if (empty_cpus || flags != RISCV_HWPROBE_WHICH_CPUS || !avd->homogeneous_cpus) ++ return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); ++ ++ while (p < end) { ++ if (riscv_hwprobe_key_is_valid(p->key)) { ++ struct riscv_hwprobe t = { ++ .key = p->key, ++ .value = avd->all_cpu_hwprobe_values[p->key], ++ }; ++ ++ if (!riscv_hwprobe_pair_cmp(&t, p)) ++ clear_all = true; ++ } else { ++ clear_all = true; ++ p->key = -1; ++ p->value = 0; ++ } ++ p++; ++ } ++ ++ if (clear_all) { ++ for (i = 0; i < cpusetsize; i++) ++ c[i] = 0; ++ } ++ ++ return 0; ++} ++ ++/* Add a prototype to avoid -Wmissing-prototypes warning. 
*/ ++int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags); ++ ++int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags) ++{ ++ if (flags & RISCV_HWPROBE_WHICH_CPUS) ++ return riscv_vdso_get_cpus(pairs, pair_count, cpusetsize, ++ cpus, flags); ++ ++ return riscv_vdso_get_values(pairs, pair_count, cpusetsize, ++ cpus, flags); ++} diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c -index 81886fc36ed6..c05ac070379f 100644 +index 81886fc36ed6..a0d1841c4388 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -83,7 +83,8 @@ static bool insn_is_vector(u32 insn_buf) @@ -24518,8 +29266,116 @@ index 81886fc36ed6..c05ac070379f 100644 datap = kzalloc(riscv_v_vsize, GFP_KERNEL); if (!datap) return -ENOMEM; +@@ -136,8 +137,11 @@ bool riscv_v_first_use_handler(struct pt_regs *regs) + u32 __user *epc = (u32 __user *)regs->epc; + u32 insn = (u32)regs->badaddr; + ++ if (!has_vector()) ++ return false; ++ + /* Do not handle if V is not supported, or disabled */ +- if (!(ELF_HWCAP & COMPAT_HWCAP_ISA_V)) ++ if (!riscv_v_vstate_ctrl_user_allowed()) + return false; + + /* If V has been enabled then it is not the first-use trap */ +diff --git a/arch/riscv/kernel/vendor_extensions.c b/arch/riscv/kernel/vendor_extensions.c +new file mode 100644 +index 000000000000..aeb8839d2f8a +--- /dev/null ++++ b/arch/riscv/kernel/vendor_extensions.c +@@ -0,0 +1,56 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright 2024 Rivos, Inc ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[] = { ++#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES ++ &riscv_isa_vendor_ext_list_andes, ++#endif ++}; ++ ++const size_t riscv_isa_vendor_ext_list_size = ARRAY_SIZE(riscv_isa_vendor_ext_list); ++ ++/** ++ * __riscv_isa_vendor_extension_available() - Check whether given vendor ++ * extension is available or not. ++ * ++ * @cpu: check if extension is available on this cpu ++ * @vendor: vendor that the extension is a member of ++ * @bit: bit position of the desired extension ++ * Return: true or false ++ * ++ * NOTE: When cpu is -1, will check if extension is available on all cpus ++ */ ++bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit) ++{ ++ struct riscv_isavendorinfo *bmap; ++ struct riscv_isavendorinfo *cpu_bmap; ++ ++ switch (vendor) { ++ #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES ++ case ANDES_VENDOR_ID: ++ bmap = &riscv_isa_vendor_ext_list_andes.all_harts_isa_bitmap; ++ cpu_bmap = &riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap[cpu]; ++ break; ++ #endif ++ default: ++ return false; ++ } ++ ++ if (cpu != -1) ++ bmap = &cpu_bmap[cpu]; ++ ++ if (bit >= RISCV_ISA_VENDOR_EXT_MAX) ++ return false; ++ ++ return test_bit(bit, bmap->isa) ? 
true : false; ++} ++EXPORT_SYMBOL_GPL(__riscv_isa_vendor_extension_available); +diff --git a/arch/riscv/kernel/vendor_extensions/Makefile b/arch/riscv/kernel/vendor_extensions/Makefile +new file mode 100644 +index 000000000000..6a61aed944f1 +--- /dev/null ++++ b/arch/riscv/kernel/vendor_extensions/Makefile +@@ -0,0 +1,3 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_ANDES) += andes.o +diff --git a/arch/riscv/kernel/vendor_extensions/andes.c b/arch/riscv/kernel/vendor_extensions/andes.c +new file mode 100644 +index 000000000000..4d8dfc974f00 +--- /dev/null ++++ b/arch/riscv/kernel/vendor_extensions/andes.c +@@ -0,0 +1,18 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++/* All Andes vendor extensions supported in Linux */ ++const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = { ++ __RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_VENDOR_EXT_XANDESPMU), ++}; ++ ++struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_andes = { ++ .ext_data_count = ARRAY_SIZE(riscv_isa_vendor_ext_andes), ++ .ext_data = riscv_isa_vendor_ext_andes, ++}; diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c -index 74bb27440527..29f0d1e299b8 100644 +index 74bb27440527..596209f1a6ff 100644 --- a/arch/riscv/kvm/aia.c +++ b/arch/riscv/kvm/aia.c @@ -10,12 +10,12 @@ @@ -24531,8 +29387,9 @@ index 74bb27440527..29f0d1e299b8 100644 #include #include #include - #include +-#include -#include ++#include struct aia_hgei_control { raw_spinlock_t lock; @@ -24655,6 +29512,71 @@ index c1585444f856..a8085cd8215e 100644 #define IMSIC_MAX_EIX (IMSIC_MAX_ID / BITS_PER_TYPE(u64)) +diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c +index 48ae0d4b3932..225a435d9c9a 100644 +--- a/arch/riscv/kvm/main.c ++++ b/arch/riscv/kvm/main.c +@@ -11,7 +11,7 @@ + #include + #include + #include +-#include ++#include + #include + + long kvm_arch_dev_ioctl(struct file *filp, +diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c +index 44bc324aeeb0..23c0e82b5103 100644 +--- a/arch/riscv/kvm/tlb.c ++++ b/arch/riscv/kvm/tlb.c +@@ -12,7 +12,7 @@ + #include + #include + #include +-#include ++#include + #include + + #define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL) +diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c +index 08ba48a395aa..030904d82b58 100644 +--- a/arch/riscv/kvm/vcpu_fp.c ++++ b/arch/riscv/kvm/vcpu_fp.c +@@ -11,7 +11,7 @@ + #include + #include + #include +-#include ++#include + + #ifdef CONFIG_FPU + void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) +diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c +index d520b25d8561..9e7e755163a9 100644 +--- a/arch/riscv/kvm/vcpu_onereg.c ++++ b/arch/riscv/kvm/vcpu_onereg.c +@@ -13,7 +13,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + +diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c +index b430cbb69521..b339a2682f25 100644 +--- a/arch/riscv/kvm/vcpu_vector.c ++++ b/arch/riscv/kvm/vcpu_vector.c +@@ -11,7 +11,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index a77342eb3489..32031a7d96d4 100644 --- a/arch/riscv/mm/dma-noncoherent.c @@ -24779,6 +29701,18 @@ index 324e8cd9b502..a9f4af9f7f3f 100644 for (i = 0; i < nr_ptes_in_range; ++i) { local_flush_tlb_page_asid(start, asid); start += stride; +diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig 
+index 67b08985ff6f..0a6355bc94ec 100644 +--- a/arch/sw_64/Kconfig ++++ b/arch/sw_64/Kconfig +@@ -418,7 +418,6 @@ source "kernel/livepatch/Kconfig" + config NUMA + bool "NUMA Support" + depends on SMP && !FLATMEM +- select ACPI_NUMA if ACPI + select OF_NUMA + help + Say Y to compile the kernel to support NUMA (Non-Uniform Memory diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 551829884734..dcfaa3812306 100644 --- a/arch/x86/include/asm/hw_irq.h @@ -24806,6 +29740,1419 @@ index 8e1ef5345b7a..a67bb8f982bd 100644 paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); paravirt_tlb_remove_table(tlb, virt_to_page(pud)); } +diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile +index eaa09bf52f17..d367e649714f 100644 +--- a/drivers/acpi/Makefile ++++ b/drivers/acpi/Makefile +@@ -37,7 +37,7 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o + # ACPI Bus and Device Drivers + # + acpi-y += bus.o glue.o +-acpi-y += scan.o ++acpi-y += scan.o mipi-disco-img.o + acpi-y += resource.o + acpi-y += acpi_processor.o + acpi-y += processor_core.o +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c +index 98a2ab3b6844..1a418424d250 100644 +--- a/drivers/acpi/acpi_lpss.c ++++ b/drivers/acpi/acpi_lpss.c +@@ -579,25 +579,26 @@ static struct device *acpi_lpss_find_device(const char *hid, const char *uid) + static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle) + { + struct acpi_handle_list dep_devices; +- acpi_status status; ++ bool ret = false; + int i; + + if (!acpi_has_method(adev->handle, "_DEP")) + return false; + +- status = acpi_evaluate_reference(adev->handle, "_DEP", NULL, +- &dep_devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(adev->handle, "_DEP", NULL, &dep_devices)) { + dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n"); + return false; + } + + for (i = 0; i < dep_devices.count; i++) { +- if (dep_devices.handles[i] == handle) +- return true; ++ if (dep_devices.handles[i] == handle) { ++ ret = true; ++ break; ++ } + } + +- return false; ++ acpi_handle_list_free(&dep_devices); ++ return ret; + } + + static void acpi_lpss_link_consumer(struct device *dev1, +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c +index e0800d1f8ff7..5ae0f1aa57ce 100644 +--- a/drivers/acpi/bus.c ++++ b/drivers/acpi/bus.c +@@ -1179,6 +1179,9 @@ static int __init acpi_bus_init_irq(void) + message = "SWPIC"; + break; + #endif ++ case ACPI_IRQ_MODEL_RINTC: ++ message = "RINTC"; ++ break; + default: + pr_info("Unknown interrupt routing model\n"); + return -ENODEV; +@@ -1435,6 +1438,7 @@ static int __init acpi_init(void) + acpi_hest_init(); + acpi_ghes_init(); + acpi_arm_init(); ++ acpi_riscv_init(); + acpi_scan_init(); + acpi_ec_init(); + acpi_debugfs_init(); +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index 0592aebe0c39..510b2ee3c71c 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -274,4 +274,12 @@ void acpi_init_lpit(void); + static inline void acpi_init_lpit(void) { } + #endif + ++/*-------------------------------------------------------------------------- ++ ACPI _CRS CSI-2 and MIPI DisCo for Imaging ++ -------------------------------------------------------------------------- */ ++ ++void acpi_mipi_check_crs_csi2(acpi_handle handle); ++void acpi_mipi_scan_crs_csi2(void); ++void acpi_mipi_crs_csi2_cleanup(void); ++ + #endif /* _ACPI_INTERNAL_H_ */ +diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c +new file mode 100644 +index 000000000000..91281c8cb4f2 +--- /dev/null ++++ 
b/drivers/acpi/mipi-disco-img.c +@@ -0,0 +1,292 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * MIPI DisCo for Imaging support. ++ * ++ * Copyright (C) 2023 Intel Corporation ++ * ++ * Support MIPI DisCo for Imaging by parsing ACPI _CRS CSI-2 records defined in ++ * Section 6.4.3.8.2.4 "Camera Serial Interface (CSI-2) Connection Resource ++ * Descriptor" of ACPI 6.5. ++ * ++ * The implementation looks for the information in the ACPI namespace (CSI-2 ++ * resource descriptors in _CRS) and constructs software nodes compatible with ++ * Documentation/firmware-guide/acpi/dsd/graph.rst to represent the CSI-2 ++ * connection graph. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "internal.h" ++ ++static LIST_HEAD(acpi_mipi_crs_csi2_list); ++ ++static void acpi_mipi_data_tag(acpi_handle handle, void *context) ++{ ++} ++ ++/* Connection data extracted from one _CRS CSI-2 resource descriptor. */ ++struct crs_csi2_connection { ++ struct list_head entry; ++ struct acpi_resource_csi2_serialbus csi2_data; ++ acpi_handle remote_handle; ++ char remote_name[]; ++}; ++ ++/* Data extracted from _CRS CSI-2 resource descriptors for one device. */ ++struct crs_csi2 { ++ struct list_head entry; ++ acpi_handle handle; ++ struct acpi_device_software_nodes *swnodes; ++ struct list_head connections; ++ u32 port_count; ++}; ++ ++struct csi2_resources_walk_data { ++ acpi_handle handle; ++ struct list_head connections; ++}; ++ ++static acpi_status parse_csi2_resource(struct acpi_resource *res, void *context) ++{ ++ struct csi2_resources_walk_data *crwd = context; ++ struct acpi_resource_csi2_serialbus *csi2_res; ++ struct acpi_resource_source *csi2_res_src; ++ u16 csi2_res_src_length; ++ struct crs_csi2_connection *conn; ++ acpi_handle remote_handle; ++ ++ if (res->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) ++ return AE_OK; ++ ++ csi2_res = &res->data.csi2_serial_bus; ++ ++ if (csi2_res->type != ACPI_RESOURCE_SERIAL_TYPE_CSI2) ++ return AE_OK; ++ ++ csi2_res_src = &csi2_res->resource_source; ++ if (ACPI_FAILURE(acpi_get_handle(NULL, csi2_res_src->string_ptr, ++ &remote_handle))) { ++ acpi_handle_debug(crwd->handle, ++ "unable to find resource source\n"); ++ return AE_OK; ++ } ++ csi2_res_src_length = csi2_res_src->string_length; ++ if (!csi2_res_src_length) { ++ acpi_handle_debug(crwd->handle, ++ "invalid resource source string length\n"); ++ return AE_OK; ++ } ++ ++ conn = kmalloc(struct_size(conn, remote_name, csi2_res_src_length + 1), ++ GFP_KERNEL); ++ if (!conn) ++ return AE_OK; ++ ++ conn->csi2_data = *csi2_res; ++ strscpy(conn->remote_name, csi2_res_src->string_ptr, csi2_res_src_length); ++ conn->csi2_data.resource_source.string_ptr = conn->remote_name; ++ conn->remote_handle = remote_handle; ++ ++ list_add(&conn->entry, &crwd->connections); ++ ++ return AE_OK; ++} ++ ++static struct crs_csi2 *acpi_mipi_add_crs_csi2(acpi_handle handle, ++ struct list_head *list) ++{ ++ struct crs_csi2 *csi2; ++ ++ csi2 = kzalloc(sizeof(*csi2), GFP_KERNEL); ++ if (!csi2) ++ return NULL; ++ ++ csi2->handle = handle; ++ INIT_LIST_HEAD(&csi2->connections); ++ csi2->port_count = 1; ++ ++ if (ACPI_FAILURE(acpi_attach_data(handle, acpi_mipi_data_tag, csi2))) { ++ kfree(csi2); ++ return NULL; ++ } ++ ++ list_add(&csi2->entry, list); ++ ++ return csi2; ++} ++ ++static struct crs_csi2 *acpi_mipi_get_crs_csi2(acpi_handle handle) ++{ ++ struct crs_csi2 *csi2; ++ ++ if (ACPI_FAILURE(acpi_get_data_full(handle, acpi_mipi_data_tag, ++ (void **)&csi2, NULL))) ++ return NULL; ++ 
++ return csi2; ++} ++ ++static void csi_csr2_release_connections(struct list_head *list) ++{ ++ struct crs_csi2_connection *conn, *conn_tmp; ++ ++ list_for_each_entry_safe(conn, conn_tmp, list, entry) { ++ list_del(&conn->entry); ++ kfree(conn); ++ } ++} ++ ++static void acpi_mipi_del_crs_csi2(struct crs_csi2 *csi2) ++{ ++ list_del(&csi2->entry); ++ acpi_detach_data(csi2->handle, acpi_mipi_data_tag); ++ kfree(csi2->swnodes); ++ csi_csr2_release_connections(&csi2->connections); ++ kfree(csi2); ++} ++ ++/** ++ * acpi_mipi_check_crs_csi2 - Look for CSI-2 resources in _CRS ++ * @handle: Device object handle to evaluate _CRS for. ++ * ++ * Find all CSI-2 resource descriptors in the given device's _CRS ++ * and collect them into a list. ++ */ ++void acpi_mipi_check_crs_csi2(acpi_handle handle) ++{ ++ struct csi2_resources_walk_data crwd = { ++ .handle = handle, ++ .connections = LIST_HEAD_INIT(crwd.connections), ++ }; ++ struct crs_csi2 *csi2; ++ ++ /* ++ * Avoid allocating _CRS CSI-2 objects for devices without any CSI-2 ++ * resource descriptions in _CRS to reduce overhead. ++ */ ++ acpi_walk_resources(handle, METHOD_NAME__CRS, parse_csi2_resource, &crwd); ++ if (list_empty(&crwd.connections)) ++ return; ++ ++ /* ++ * Create a _CRS CSI-2 entry to store the extracted connection ++ * information and add it to the global list. ++ */ ++ csi2 = acpi_mipi_add_crs_csi2(handle, &acpi_mipi_crs_csi2_list); ++ if (!csi2) { ++ csi_csr2_release_connections(&crwd.connections); ++ return; /* Nothing really can be done about this. */ ++ } ++ ++ list_replace(&crwd.connections, &csi2->connections); ++} ++ ++#define NO_CSI2_PORT (UINT_MAX - 1) ++ ++static void alloc_crs_csi2_swnodes(struct crs_csi2 *csi2) ++{ ++ size_t port_count = csi2->port_count; ++ struct acpi_device_software_nodes *swnodes; ++ size_t alloc_size; ++ unsigned int i; ++ ++ /* ++ * Allocate memory for ports, node pointers (number of nodes + ++ * 1 (guardian), nodes (root + number of ports * 2 (because for ++ * every port there is an endpoint)). ++ */ ++ if (check_mul_overflow(sizeof(*swnodes->ports) + ++ sizeof(*swnodes->nodes) * 2 + ++ sizeof(*swnodes->nodeptrs) * 2, ++ port_count, &alloc_size) || ++ check_add_overflow(sizeof(*swnodes) + ++ sizeof(*swnodes->nodes) + ++ sizeof(*swnodes->nodeptrs) * 2, ++ alloc_size, &alloc_size)) { ++ acpi_handle_info(csi2->handle, ++ "too many _CRS CSI-2 resource handles (%zu)", ++ port_count); ++ return; ++ } ++ ++ swnodes = kmalloc(alloc_size, GFP_KERNEL); ++ if (!swnodes) ++ return; ++ ++ swnodes->ports = (struct acpi_device_software_node_port *)(swnodes + 1); ++ swnodes->nodes = (struct software_node *)(swnodes->ports + port_count); ++ swnodes->nodeptrs = (const struct software_node **)(swnodes->nodes + 1 + ++ 2 * port_count); ++ swnodes->num_ports = port_count; ++ ++ for (i = 0; i < 2 * port_count + 1; i++) ++ swnodes->nodeptrs[i] = &swnodes->nodes[i]; ++ ++ swnodes->nodeptrs[i] = NULL; ++ ++ for (i = 0; i < port_count; i++) ++ swnodes->ports[i].port_nr = NO_CSI2_PORT; ++ ++ csi2->swnodes = swnodes; ++} ++ ++/** ++ * acpi_mipi_scan_crs_csi2 - Create ACPI _CRS CSI-2 software nodes ++ * ++ * Note that this function must be called before any struct acpi_device objects ++ * are bound to any ACPI drivers or scan handlers, so it cannot assume the ++ * existence of struct acpi_device objects for every device present in the ACPI ++ * namespace. ++ * ++ * acpi_scan_lock in scan.c must be held when calling this function. 
++ */ ++void acpi_mipi_scan_crs_csi2(void) ++{ ++ struct crs_csi2 *csi2; ++ LIST_HEAD(aux_list); ++ ++ /* Count references to each ACPI handle in the CSI-2 connection graph. */ ++ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) { ++ struct crs_csi2_connection *conn; ++ ++ list_for_each_entry(conn, &csi2->connections, entry) { ++ struct crs_csi2 *remote_csi2; ++ ++ csi2->port_count++; ++ ++ remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle); ++ if (remote_csi2) { ++ remote_csi2->port_count++; ++ continue; ++ } ++ /* ++ * The remote endpoint has no _CRS CSI-2 list entry yet, ++ * so create one for it and add it to the list. ++ */ ++ acpi_mipi_add_crs_csi2(conn->remote_handle, &aux_list); ++ } ++ } ++ list_splice(&aux_list, &acpi_mipi_crs_csi2_list); ++ ++ /* Allocate software nodes for representing the CSI-2 information. */ ++ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) ++ alloc_crs_csi2_swnodes(csi2); ++} ++ ++/** ++ * acpi_mipi_crs_csi2_cleanup - Free _CRS CSI-2 temporary data ++ */ ++void acpi_mipi_crs_csi2_cleanup(void) ++{ ++ struct crs_csi2 *csi2, *csi2_tmp; ++ ++ list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry) ++ acpi_mipi_del_crs_csi2(csi2); ++} +diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig +index 67d1f40bfa9f..f33194d1e43f 100644 +--- a/drivers/acpi/numa/Kconfig ++++ b/drivers/acpi/numa/Kconfig +@@ -1,9 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 + config ACPI_NUMA +- bool "NUMA support" +- depends on NUMA +- depends on (X86 || IA64 || ARM64 || LOONGARCH || SW64) +- default y if IA64 || ARM64 ++ def_bool NUMA && !X86 + + config ACPI_HMAT + bool "ACPI Heterogeneous Memory Attribute Table Support" +diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c +index aad85ccae2e0..e146e1b46806 100644 +--- a/drivers/acpi/numa/srat.c ++++ b/drivers/acpi/numa/srat.c +@@ -165,6 +165,19 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) + } + } + break; ++ ++ case ACPI_SRAT_TYPE_RINTC_AFFINITY: ++ { ++ struct acpi_srat_rintc_affinity *p = ++ (struct acpi_srat_rintc_affinity *)header; ++ pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n", ++ p->acpi_processor_uid, ++ p->proximity_domain, ++ (p->flags & ACPI_SRAT_RINTC_ENABLED) ? ++ "enabled" : "disabled"); ++ } ++ break; ++ + default: + pr_warn("Found unsupported SRAT entry (type = 0x%x)\n", + header->type); +@@ -206,7 +219,7 @@ int __init srat_disabled(void) + return acpi_numa < 0; + } + +-#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) ++#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) || defined(CONFIG_RISCV) + /* + * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for + * I/O localities since SRAT does not list them. 
I/O localities are +@@ -466,6 +479,21 @@ acpi_parse_memory_affinity(union acpi_subtable_headers * header, + return 0; + } + ++static int __init ++acpi_parse_rintc_affinity(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_srat_rintc_affinity *rintc_affinity; ++ ++ rintc_affinity = (struct acpi_srat_rintc_affinity *)header; ++ acpi_table_print_srat_entry(&header->common); ++ ++ /* let architecture-dependent part to do it */ ++ acpi_numa_rintc_affinity_init(rintc_affinity); ++ ++ return 0; ++} ++ + static int __init acpi_parse_srat(struct acpi_table_header *table) + { + struct acpi_table_srat *srat = (struct acpi_table_srat *)table; +@@ -501,7 +529,7 @@ int __init acpi_numa_init(void) + + /* SRAT: System Resource Affinity Table */ + if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { +- struct acpi_subtable_proc srat_proc[4]; ++ struct acpi_subtable_proc srat_proc[5]; + + memset(srat_proc, 0, sizeof(srat_proc)); + srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY; +@@ -512,6 +540,8 @@ int __init acpi_numa_init(void) + srat_proc[2].handler = acpi_parse_gicc_affinity; + srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY; + srat_proc[3].handler = acpi_parse_gi_affinity; ++ srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY; ++ srat_proc[4].handler = acpi_parse_rintc_affinity; + + acpi_table_parse_entries_array(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), +diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c +index aa1038b8aec4..b727db968f33 100644 +--- a/drivers/acpi/pci_link.c ++++ b/drivers/acpi/pci_link.c +@@ -748,6 +748,8 @@ static int acpi_pci_link_add(struct acpi_device *device, + if (result) + kfree(link); + ++ acpi_dev_clear_dependencies(device); ++ + return result < 0 ? result : 1; + } + +diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile +index 8b3b126e0b94..05d593d4bbb0 100644 +--- a/drivers/acpi/riscv/Makefile ++++ b/drivers/acpi/riscv/Makefile +@@ -1,2 +1,2 @@ + # SPDX-License-Identifier: GPL-2.0-only +-obj-y += rhct.o ++obj-y += rhct.o init.o irq.o +diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c +new file mode 100644 +index 000000000000..5ef97905a727 +--- /dev/null ++++ b/drivers/acpi/riscv/init.c +@@ -0,0 +1,13 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2023-2024, Ventana Micro Systems Inc ++ * Author: Sunil V L ++ */ ++ ++#include ++#include "init.h" ++ ++void __init acpi_riscv_init(void) ++{ ++ riscv_acpi_init_gsi_mapping(); ++} +diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h +new file mode 100644 +index 000000000000..0b9a07e4031f +--- /dev/null ++++ b/drivers/acpi/riscv/init.h +@@ -0,0 +1,4 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++#include ++ ++void __init riscv_acpi_init_gsi_mapping(void); +diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c +new file mode 100644 +index 000000000000..cced960c2aef +--- /dev/null ++++ b/drivers/acpi/riscv/irq.c +@@ -0,0 +1,335 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2023-2024, Ventana Micro Systems Inc ++ * Author: Sunil V L ++ */ ++ ++#include ++#include ++#include ++ ++#include "init.h" ++ ++struct riscv_ext_intc_list { ++ acpi_handle handle; ++ u32 gsi_base; ++ u32 nr_irqs; ++ u32 nr_idcs; ++ u32 id; ++ u32 type; ++ struct list_head list; ++}; ++ ++struct acpi_irq_dep_ctx { ++ int rc; ++ unsigned int index; ++ acpi_handle handle; ++}; ++ ++LIST_HEAD(ext_intc_list); ++ ++static int irqchip_cmp_func(const void *in0, const void *in1) ++{ ++ struct 
acpi_probe_entry *elem0 = (struct acpi_probe_entry *)in0; ++ struct acpi_probe_entry *elem1 = (struct acpi_probe_entry *)in1; ++ ++ return (elem0->type > elem1->type) - (elem0->type < elem1->type); ++} ++ ++/* ++ * On RISC-V, RINTC structures in MADT should be probed before any other ++ * interrupt controller structures and IMSIC before APLIC. The interrupt ++ * controller subtypes in MADT of ACPI spec for RISC-V are defined in ++ * the incremental order like RINTC(24)->IMSIC(25)->APLIC(26)->PLIC(27). ++ * Hence, simply sorting the subtypes in incremental order will ++ * establish the required order. ++ */ ++void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) ++{ ++ struct acpi_probe_entry *ape = ap_head; ++ ++ if (nr == 1 || !ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) ++ return; ++ sort(ape, nr, sizeof(*ape), irqchip_cmp_func, NULL); ++} ++ ++static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct list_head *i, *tmp; ++ ++ list_for_each_safe(i, tmp, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (gsi_base == ext_intc_element->gsi_base) { ++ ext_intc_element->handle = handle; ++ return AE_OK; ++ } ++ } ++ ++ return AE_NOT_FOUND; ++} ++ ++int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, ++ u32 *id, u32 *nr_irqs, u32 *nr_idcs) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct list_head *i; ++ ++ list_for_each(i, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (ext_intc_element->handle == ACPI_HANDLE_FWNODE(fwnode)) { ++ *gsi_base = ext_intc_element->gsi_base; ++ *id = ext_intc_element->id; ++ *nr_irqs = ext_intc_element->nr_irqs; ++ if (nr_idcs) ++ *nr_idcs = ext_intc_element->nr_idcs; ++ ++ return 0; ++ } ++ } ++ ++ return -ENODEV; ++} ++ ++struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct acpi_device *adev; ++ struct list_head *i; ++ ++ list_for_each(i, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (gsi >= ext_intc_element->gsi_base && ++ gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) { ++ adev = acpi_fetch_acpi_dev(ext_intc_element->handle); ++ if (!adev) ++ return NULL; ++ ++ return acpi_fwnode_handle(adev); ++ } ++ } ++ ++ return NULL; ++} ++ ++static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs, ++ u32 id, u32 type) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ ++ ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL); ++ if (!ext_intc_element) ++ return -ENOMEM; ++ ++ ext_intc_element->gsi_base = gsi_base; ++ ext_intc_element->nr_irqs = nr_irqs; ++ ext_intc_element->nr_idcs = nr_idcs; ++ ext_intc_element->id = id; ++ list_add_tail(&ext_intc_element->list, &ext_intc_list); ++ return 0; ++} ++ ++static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level, ++ void *context, void **return_value) ++{ ++ acpi_status status; ++ u64 gbase; ++ ++ if (!acpi_has_method(handle, "_GSB")) { ++ acpi_handle_err(handle, "_GSB method not found\n"); ++ return AE_ERROR; ++ } ++ ++ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase); ++ if (ACPI_FAILURE(status)) { ++ acpi_handle_err(handle, "failed to evaluate _GSB method\n"); ++ return status; ++ } ++ ++ status = riscv_acpi_update_gsi_handle((u32)gbase, handle); ++ if (ACPI_FAILURE(status)) { ++ 
acpi_handle_err(handle, "failed to find the GSI mapping entry\n"); ++ return status; ++ } ++ ++ return AE_OK; ++} ++ ++static int __init riscv_acpi_aplic_parse_madt(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_aplic *aplic = (struct acpi_madt_aplic *)header; ++ ++ return riscv_acpi_register_ext_intc(aplic->gsi_base, aplic->num_sources, aplic->num_idcs, ++ aplic->id, ACPI_RISCV_IRQCHIP_APLIC); ++} ++ ++static int __init riscv_acpi_plic_parse_madt(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_plic *plic = (struct acpi_madt_plic *)header; ++ ++ return riscv_acpi_register_ext_intc(plic->gsi_base, plic->num_irqs, 0, ++ plic->id, ACPI_RISCV_IRQCHIP_PLIC); ++} ++ ++void __init riscv_acpi_init_gsi_mapping(void) ++{ ++ /* There can be either PLIC or APLIC */ ++ if (acpi_table_parse_madt(ACPI_MADT_TYPE_PLIC, riscv_acpi_plic_parse_madt, 0) > 0) { ++ acpi_get_devices("RSCV0001", riscv_acpi_create_gsi_map, NULL, NULL); ++ return; ++ } ++ ++ if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0) ++ acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL); ++} ++ ++static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct list_head *i; ++ ++ list_for_each(i, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (gsi >= ext_intc_element->gsi_base && ++ gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) ++ return ext_intc_element->handle; ++ } ++ ++ return NULL; ++} ++ ++static acpi_status riscv_acpi_irq_get_parent(struct acpi_resource *ares, void *context) ++{ ++ struct acpi_irq_dep_ctx *ctx = context; ++ struct acpi_resource_irq *irq; ++ struct acpi_resource_extended_irq *eirq; ++ ++ switch (ares->type) { ++ case ACPI_RESOURCE_TYPE_IRQ: ++ irq = &ares->data.irq; ++ if (ctx->index >= irq->interrupt_count) { ++ ctx->index -= irq->interrupt_count; ++ return AE_OK; ++ } ++ ctx->handle = riscv_acpi_get_gsi_handle(irq->interrupts[ctx->index]); ++ return AE_CTRL_TERMINATE; ++ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: ++ eirq = &ares->data.extended_irq; ++ if (eirq->producer_consumer == ACPI_PRODUCER) ++ return AE_OK; ++ ++ if (ctx->index >= eirq->interrupt_count) { ++ ctx->index -= eirq->interrupt_count; ++ return AE_OK; ++ } ++ ++ /* Support GSIs only */ ++ if (eirq->resource_source.string_length) ++ return AE_OK; ++ ++ ctx->handle = riscv_acpi_get_gsi_handle(eirq->interrupts[ctx->index]); ++ return AE_CTRL_TERMINATE; ++ } ++ ++ return AE_OK; ++} ++ ++static int riscv_acpi_irq_get_dep(acpi_handle handle, unsigned int index, acpi_handle *gsi_handle) ++{ ++ struct acpi_irq_dep_ctx ctx = {-EINVAL, index, NULL}; ++ ++ if (!gsi_handle) ++ return 0; ++ ++ acpi_walk_resources(handle, METHOD_NAME__CRS, riscv_acpi_irq_get_parent, &ctx); ++ *gsi_handle = ctx.handle; ++ if (*gsi_handle) ++ return 1; ++ ++ return 0; ++} ++ ++static u32 riscv_acpi_add_prt_dep(acpi_handle handle) ++{ ++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; ++ struct acpi_pci_routing_table *entry; ++ struct acpi_handle_list dep_devices; ++ acpi_handle gsi_handle; ++ acpi_handle link_handle; ++ acpi_status status; ++ u32 count = 0; ++ ++ status = acpi_get_irq_routing_table(handle, &buffer); ++ if (ACPI_FAILURE(status)) { ++ acpi_handle_err(handle, "failed to get IRQ routing table\n"); ++ kfree(buffer.pointer); ++ return 0; ++ } ++ ++ entry = buffer.pointer; ++ while (entry && (entry->length > 0)) { ++ if 
(entry->source[0]) { ++ acpi_get_handle(handle, entry->source, &link_handle); ++ dep_devices.count = 1; ++ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); ++ if (!dep_devices.handles) { ++ acpi_handle_err(handle, "failed to allocate memory\n"); ++ continue; ++ } ++ ++ dep_devices.handles[0] = link_handle; ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ } else { ++ gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index); ++ dep_devices.count = 1; ++ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); ++ if (!dep_devices.handles) { ++ acpi_handle_err(handle, "failed to allocate memory\n"); ++ continue; ++ } ++ ++ dep_devices.handles[0] = gsi_handle; ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ } ++ ++ entry = (struct acpi_pci_routing_table *) ++ ((unsigned long)entry + entry->length); ++ } ++ ++ kfree(buffer.pointer); ++ return count; ++} ++ ++static u32 riscv_acpi_add_irq_dep(acpi_handle handle) ++{ ++ struct acpi_handle_list dep_devices; ++ acpi_handle gsi_handle; ++ u32 count = 0; ++ int i; ++ ++ for (i = 0; ++ riscv_acpi_irq_get_dep(handle, i, &gsi_handle); ++ i++) { ++ dep_devices.count = 1; ++ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); ++ if (!dep_devices.handles) { ++ acpi_handle_err(handle, "failed to allocate memory\n"); ++ continue; ++ } ++ ++ dep_devices.handles[0] = gsi_handle; ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ } ++ ++ return count; ++} ++ ++u32 arch_acpi_add_auto_dep(acpi_handle handle) ++{ ++ if (acpi_has_method(handle, "_PRT")) ++ return riscv_acpi_add_prt_dep(handle); ++ ++ return riscv_acpi_add_irq_dep(handle); ++} +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 9a40052d31f3..84dcd3b5ce83 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -870,6 +870,9 @@ static const char * const acpi_honor_dep_ids[] = { + "INTC1059", /* IVSC (TGL) driver must be loaded to allow i2c access to camera sensors */ + "INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */ + "INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */ ++ "RSCV0001", /* RISC-V PLIC */ ++ "RSCV0002", /* RISC-V APLIC */ ++ "PNP0C0F", /* PCI Link Device */ + NULL + }; + +@@ -2034,54 +2037,18 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val) + mutex_unlock(&acpi_scan_lock); + } + +-static void acpi_scan_init_hotplug(struct acpi_device *adev) +-{ +- struct acpi_hardware_id *hwid; +- +- if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) { +- acpi_dock_add(adev); +- return; +- } +- list_for_each_entry(hwid, &adev->pnp.ids, list) { +- struct acpi_scan_handler *handler; +- +- handler = acpi_scan_match_handler(hwid->id, NULL); +- if (handler) { +- adev->flags.hotplug_notify = true; +- break; +- } +- } +-} +- +-static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) ++int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices) + { +- struct acpi_handle_list dep_devices; +- acpi_status status; + u32 count; + int i; + +- /* +- * Check for _HID here to avoid deferring the enumeration of: +- * 1. PCI devices. +- * 2. ACPI nodes describing USB ports. +- * Still, checking for _HID catches more then just these cases ... 
+- */ +- if (!check_dep || !acpi_has_method(handle, "_DEP") || +- !acpi_has_method(handle, "_HID")) +- return 0; +- +- status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); +- if (ACPI_FAILURE(status)) { +- acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); +- return 0; +- } +- +- for (count = 0, i = 0; i < dep_devices.count; i++) { ++ for (count = 0, i = 0; i < dep_devices->count; i++) { + struct acpi_device_info *info; + struct acpi_dep_data *dep; + bool skip, honor_dep; ++ acpi_status status; + +- status = acpi_get_object_info(dep_devices.handles[i], &info); ++ status = acpi_get_object_info(dep_devices->handles[i], &info); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(handle, "Error reading _DEP device info\n"); + continue; +@@ -2100,19 +2067,79 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) + + count++; + +- dep->supplier = dep_devices.handles[i]; ++ dep->supplier = dep_devices->handles[i]; + dep->consumer = handle; + dep->honor_dep = honor_dep; + + mutex_lock(&acpi_dep_list_lock); +- list_add_tail(&dep->node , &acpi_dep_list); ++ list_add_tail(&dep->node, &acpi_dep_list); + mutex_unlock(&acpi_dep_list_lock); + } + ++ acpi_handle_list_free(dep_devices); + return count; + } + +-static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, ++static void acpi_scan_init_hotplug(struct acpi_device *adev) ++{ ++ struct acpi_hardware_id *hwid; ++ ++ if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) { ++ acpi_dock_add(adev); ++ return; ++ } ++ list_for_each_entry(hwid, &adev->pnp.ids, list) { ++ struct acpi_scan_handler *handler; ++ ++ handler = acpi_scan_match_handler(hwid->id, NULL); ++ if (handler) { ++ adev->flags.hotplug_notify = true; ++ break; ++ } ++ } ++} ++ ++u32 __weak arch_acpi_add_auto_dep(acpi_handle handle) { return 0; } ++ ++static u32 acpi_scan_check_dep(acpi_handle handle) ++{ ++ struct acpi_handle_list dep_devices; ++ u32 count = 0; ++ ++ /* ++ * Some architectures like RISC-V need to add dependencies for ++ * all devices which use GSI to the interrupt controller so that ++ * interrupt controller is probed before any of those devices. ++ * Instead of mandating _DEP on all the devices, detect the ++ * dependency and add automatically. ++ */ ++ count += arch_acpi_add_auto_dep(handle); ++ ++ /* ++ * Check for _HID here to avoid deferring the enumeration of: ++ * 1. PCI devices. ++ * 2. ACPI nodes describing USB ports. ++ * Still, checking for _HID catches more then just these cases ... ++ */ ++ if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID")) ++ return count; ++ ++ if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) { ++ acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); ++ return count; ++ } ++ ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ return count; ++} ++ ++static acpi_status acpi_scan_check_crs_csi2_cb(acpi_handle handle, u32 a, void *b, void **c) ++{ ++ acpi_mipi_check_crs_csi2(handle); ++ return AE_OK; ++} ++ ++static acpi_status acpi_bus_check_add(acpi_handle handle, bool first_pass, + struct acpi_device **adev_p) + { + struct acpi_device *device = acpi_fetch_acpi_dev(handle); +@@ -2130,9 +2157,25 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, + if (acpi_device_should_be_hidden(handle)) + return AE_OK; + +- /* Bail out if there are dependencies. 
*/ +- if (acpi_scan_check_dep(handle, check_dep) > 0) +- return AE_CTRL_DEPTH; ++ if (first_pass) { ++ acpi_mipi_check_crs_csi2(handle); ++ ++ /* Bail out if there are dependencies. */ ++ if (acpi_scan_check_dep(handle) > 0) { ++ /* ++ * The entire CSI-2 connection graph needs to be ++ * extracted before any drivers or scan handlers ++ * are bound to struct device objects, so scan ++ * _CRS CSI-2 resource descriptors for all ++ * devices below the current handle. ++ */ ++ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ++ ACPI_UINT32_MAX, ++ acpi_scan_check_crs_csi2_cb, ++ NULL, NULL, NULL); ++ return AE_CTRL_DEPTH; ++ } ++ } + + fallthrough; + case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ +@@ -2155,10 +2198,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, + } + + /* +- * If check_dep is true at this point, the device has no dependencies, ++ * If first_pass is true at this point, the device has no dependencies, + * or the creation of the device object would have been postponed above. + */ +- acpi_add_single_object(&device, handle, type, !check_dep); ++ acpi_add_single_object(&device, handle, type, !first_pass); + if (!device) + return AE_CTRL_DEPTH; + +@@ -2581,12 +2624,21 @@ int acpi_bus_scan(acpi_handle handle) + if (!device) + return -ENODEV; + ++ /* ++ * Allocate ACPI _CRS CSI-2 software nodes using information extracted ++ * from the _CRS CSI-2 resource descriptors during the ACPI namespace ++ * walk above. ++ */ ++ acpi_mipi_scan_crs_csi2(); ++ + acpi_bus_attach(device, (void *)true); + + /* Pass 2: Enumerate all of the remaining devices. */ + + acpi_scan_postponed(); + ++ acpi_mipi_crs_csi2_cleanup(); ++ + return 0; + } + EXPORT_SYMBOL(acpi_bus_scan); +@@ -2735,6 +2787,8 @@ static int __init acpi_match_madt(union acpi_subtable_headers *header, + return 0; + } + ++void __weak arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) { } ++ + int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) + { + int count = 0; +@@ -2743,6 +2797,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) + return 0; + + mutex_lock(&acpi_probe_mutex); ++ arch_sort_irqchip_probe(ap_head, nr); + for (ape = ap_head; nr; ape++, nr--) { + if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) { + acpi_probe_count = 0; +diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c +index 8263508415a8..9b5a1c786230 100644 +--- a/drivers/acpi/thermal.c ++++ b/drivers/acpi/thermal.c +@@ -297,9 +297,8 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + } + if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.trip.valid) { + memset(&devices, 0, sizeof(struct acpi_handle_list)); +- status = acpi_evaluate_reference(tz->device->handle, "_PSL", +- NULL, &devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(tz->device->handle, "_PSL", ++ NULL, &devices)) { + acpi_handle_info(tz->device->handle, + "Invalid passive threshold\n"); + tz->trips.passive.trip.valid = false; +@@ -307,10 +306,10 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + tz->trips.passive.trip.valid = true; + } + +- if (memcmp(&tz->trips.passive.devices, &devices, +- sizeof(struct acpi_handle_list))) { +- memcpy(&tz->trips.passive.devices, &devices, +- sizeof(struct acpi_handle_list)); ++ if (acpi_handle_list_equal(&tz->trips.passive.devices, &devices)) { ++ acpi_handle_list_free(&devices); ++ } else { ++ acpi_handle_list_replace(&tz->trips.passive.devices, &devices); + 
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); + } + } +@@ -362,9 +361,8 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + name[2] = 'L'; + if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].trip.valid) { + memset(&devices, 0, sizeof(struct acpi_handle_list)); +- status = acpi_evaluate_reference(tz->device->handle, +- name, NULL, &devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(tz->device->handle, ++ name, NULL, &devices)) { + acpi_handle_info(tz->device->handle, + "Invalid active%d threshold\n", i); + tz->trips.active[i].trip.valid = false; +@@ -372,10 +370,10 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + tz->trips.active[i].trip.valid = true; + } + +- if (memcmp(&tz->trips.active[i].devices, &devices, +- sizeof(struct acpi_handle_list))) { +- memcpy(&tz->trips.active[i].devices, &devices, +- sizeof(struct acpi_handle_list)); ++ if (acpi_handle_list_equal(&tz->trips.active[i].devices, &devices)) { ++ acpi_handle_list_free(&devices); ++ } else { ++ acpi_handle_list_replace(&tz->trips.active[i].devices, &devices); + ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); + } + } +@@ -389,12 +387,14 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + + if (flag & ACPI_TRIPS_DEVICES) { + memset(&devices, 0, sizeof(devices)); +- status = acpi_evaluate_reference(tz->device->handle, "_TZD", +- NULL, &devices); +- if (ACPI_SUCCESS(status) && +- memcmp(&tz->devices, &devices, sizeof(devices))) { +- tz->devices = devices; +- ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); ++ if (acpi_evaluate_reference(tz->device->handle, "_TZD", ++ NULL, &devices)) { ++ if (acpi_handle_list_equal(&tz->devices, &devices)) { ++ acpi_handle_list_free(&devices); ++ } else { ++ acpi_handle_list_replace(&tz->devices, &devices); ++ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); ++ } + } + } + } +@@ -920,6 +920,18 @@ static void acpi_thermal_check_fn(struct work_struct *work) + mutex_unlock(&tz->thermal_check_lock); + } + ++static void acpi_thermal_free_thermal_zone(struct acpi_thermal *tz) ++{ ++ int i; ++ ++ acpi_handle_list_free(&tz->trips.passive.devices); ++ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) ++ acpi_handle_list_free(&tz->trips.active[i].devices); ++ acpi_handle_list_free(&tz->devices); ++ ++ kfree(tz); ++} ++ + static int acpi_thermal_add(struct acpi_device *device) + { + struct acpi_thermal *tz; +@@ -966,7 +978,7 @@ static int acpi_thermal_add(struct acpi_device *device) + flush_workqueue(acpi_thermal_pm_queue); + acpi_thermal_unregister_thermal_zone(tz); + free_memory: +- kfree(tz); ++ acpi_thermal_free_thermal_zone(tz); + + return result; + } +@@ -986,7 +998,7 @@ static void acpi_thermal_remove(struct acpi_device *device) + flush_workqueue(acpi_thermal_pm_queue); + acpi_thermal_unregister_thermal_zone(tz); + kfree(tz->trip_table); +- kfree(tz); ++ acpi_thermal_free_thermal_zone(tz); + } + + #ifdef CONFIG_PM_SLEEP +diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c +index 2ea14648a661..e84106a4ef59 100644 +--- a/drivers/acpi/utils.c ++++ b/drivers/acpi/utils.c +@@ -329,22 +329,18 @@ const char *acpi_get_subsystem_id(acpi_handle handle) + } + EXPORT_SYMBOL_GPL(acpi_get_subsystem_id); + +-acpi_status +-acpi_evaluate_reference(acpi_handle handle, +- acpi_string pathname, +- struct acpi_object_list *arguments, +- struct acpi_handle_list *list) ++bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, ++ struct acpi_object_list *arguments, ++ struct acpi_handle_list 
*list) + { +- acpi_status status = AE_OK; +- union acpi_object *package = NULL; +- union acpi_object *element = NULL; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; +- u32 i = 0; +- ++ union acpi_object *package; ++ acpi_status status; ++ bool ret = false; ++ u32 i; + +- if (!list) { +- return AE_BAD_PARAMETER; +- } ++ if (!list) ++ return false; + + /* Evaluate object. */ + +@@ -354,64 +350,106 @@ acpi_evaluate_reference(acpi_handle handle, + + package = buffer.pointer; + +- if ((buffer.length == 0) || !package) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- goto end; +- } +- if (package->type != ACPI_TYPE_PACKAGE) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- goto end; +- } +- if (!package->package.count) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- goto end; +- } ++ if (buffer.length == 0 || !package || ++ package->type != ACPI_TYPE_PACKAGE || !package->package.count) ++ goto err; + +- if (package->package.count > ACPI_MAX_HANDLES) { +- kfree(package); +- return AE_NO_MEMORY; +- } + list->count = package->package.count; ++ list->handles = kcalloc(list->count, sizeof(*list->handles), GFP_KERNEL); ++ if (!list->handles) ++ goto err_clear; + + /* Extract package data. */ + + for (i = 0; i < list->count; i++) { ++ union acpi_object *element = &(package->package.elements[i]); + +- element = &(package->package.elements[i]); ++ if (element->type != ACPI_TYPE_LOCAL_REFERENCE || ++ !element->reference.handle) ++ goto err_free; + +- if (element->type != ACPI_TYPE_LOCAL_REFERENCE) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- break; +- } +- +- if (!element->reference.handle) { +- status = AE_NULL_ENTRY; +- acpi_util_eval_error(handle, pathname, status); +- break; +- } + /* Get the acpi_handle. */ + + list->handles[i] = element->reference.handle; + acpi_handle_debug(list->handles[i], "Found in reference list\n"); + } + +- end: +- if (ACPI_FAILURE(status)) { +- list->count = 0; +- //kfree(list->handles); +- } ++ ret = true; + ++end: + kfree(buffer.pointer); + +- return status; ++ return ret; ++ ++err_free: ++ kfree(list->handles); ++ list->handles = NULL; ++ ++err_clear: ++ list->count = 0; ++ ++err: ++ acpi_util_eval_error(handle, pathname, status); ++ goto end; + } + + EXPORT_SYMBOL(acpi_evaluate_reference); + ++/** ++ * acpi_handle_list_equal - Check if two ACPI handle lists are the same ++ * @list1: First list to compare. ++ * @list2: Second list to compare. ++ * ++ * Return true if the given ACPI handle lists are of the same size and ++ * contain the same ACPI handles in the same order. Otherwise, return false. ++ */ ++bool acpi_handle_list_equal(struct acpi_handle_list *list1, ++ struct acpi_handle_list *list2) ++{ ++ return list1->count == list2->count && ++ !memcmp(list1->handles, list2->handles, ++ list1->count * sizeof(*list1->handles)); ++} ++EXPORT_SYMBOL_GPL(acpi_handle_list_equal); ++ ++/** ++ * acpi_handle_list_replace - Replace one ACPI handle list with another ++ * @dst: ACPI handle list to replace. ++ * @src: Source ACPI handle list. ++ * ++ * Free the handles table in @dst, move the handles table from @src to @dst, ++ * copy count from @src to @dst and clear @src. 
++ */ ++void acpi_handle_list_replace(struct acpi_handle_list *dst, ++ struct acpi_handle_list *src) ++{ ++ if (dst->count) ++ kfree(dst->handles); ++ ++ dst->count = src->count; ++ dst->handles = src->handles; ++ ++ src->handles = NULL; ++ src->count = 0; ++} ++EXPORT_SYMBOL_GPL(acpi_handle_list_replace); ++ ++/** ++ * acpi_handle_list_free - Free the handles table in an ACPI handle list ++ * @list: ACPI handle list to free. ++ * ++ * Free the handles table in @list and clear its count field. ++ */ ++void acpi_handle_list_free(struct acpi_handle_list *list) ++{ ++ if (!list->count) ++ return; ++ ++ kfree(list->handles); ++ list->count = 0; ++} ++EXPORT_SYMBOL_GPL(acpi_handle_list_free); ++ + acpi_status + acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld) + { +diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c +index 96281de7010d..7f3ea78722fa 100644 +--- a/drivers/base/arch_numa.c ++++ b/drivers/base/arch_numa.c +@@ -530,7 +530,7 @@ static int __init arch_acpi_numa_init(void) + + ret = acpi_numa_init(); + if (ret) { +- pr_info("Failed to initialise from firmware\n"); ++ pr_debug("Failed to initialise from firmware\n"); + return ret; + } + diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 3348d4db5f1b..0d01890160f3 100644 --- a/drivers/base/platform-msi.c @@ -34568,11 +40915,24 @@ index 000000000000..8bf7a18776f8 + + return clk; +} +diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c +index da3071b387eb..6d9e344e98f8 100644 +--- a/drivers/clocksource/timer-riscv.c ++++ b/drivers/clocksource/timer-riscv.c +@@ -24,7 +24,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig -index 6265e0b94f8b..8e355170ab07 100644 +index fd709abd3d0e..57864ac5686b 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig -@@ -365,5 +365,14 @@ config QORIQ_CPUFREQ +@@ -356,5 +356,14 @@ config QORIQ_CPUFREQ This adds the CPUFreq driver support for Freescale QorIQ SoCs which are capable of changing the CPU's frequency dynamically. 
@@ -34588,13 +40948,13 @@ index 6265e0b94f8b..8e355170ab07 100644 endif endmenu diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile -index 0950869f1809..df2bb1149b4a 100644 +index 46c3aa314f97..63f81fbda8ba 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile -@@ -111,3 +111,4 @@ obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o +@@ -110,3 +110,4 @@ obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o + obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o - obj-$(CONFIG_SW64_CPUFREQ) += sw64_cpufreq.o - obj-$(CONFIG_SW64_CPUFREQ_DEBUGFS) += sw64_cpufreq_debugfs.o + obj-$(CONFIG_SW64_CPUFREQ) += sunway-cpufreq.o +obj-$(CONFIG_RISCV_XUANTIE_TH1520_CPUFREQ) += th1520-cpufreq.o diff --git a/drivers/cpufreq/th1520-cpufreq.c b/drivers/cpufreq/th1520-cpufreq.c new file mode 100644 @@ -35186,6 +41546,103 @@ index 000000000000..6e4186808c96 +MODULE_AUTHOR("fugang.duan "); +MODULE_DESCRIPTION("XuanTie TH1520 cpufreq driver"); +MODULE_LICENSE("GPL"); +diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c +index 71d433bb0ce6..50d128a4b343 100644 +--- a/drivers/cpuidle/cpuidle-riscv-sbi.c ++++ b/drivers/cpuidle/cpuidle-riscv-sbi.c +@@ -74,26 +74,6 @@ static inline bool sbi_is_domain_state_available(void) + return data->available; + } + +-static int sbi_suspend_finisher(unsigned long suspend_type, +- unsigned long resume_addr, +- unsigned long opaque) +-{ +- struct sbiret ret; +- +- ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, +- suspend_type, resume_addr, opaque, 0, 0, 0); +- +- return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0; +-} +- +-static int sbi_suspend(u32 state) +-{ +- if (state & SBI_HSM_SUSP_NON_RET_BIT) +- return cpu_suspend(state, sbi_suspend_finisher); +- else +- return sbi_suspend_finisher(state, 0, 0); +-} +- + static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) + { +@@ -101,9 +81,9 @@ static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev, + u32 state = states[idx]; + + if (state & SBI_HSM_SUSP_NON_RET_BIT) +- return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state); ++ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state); + else +- return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend, ++ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend, + idx, state); + } + +@@ -134,7 +114,7 @@ static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev, + else + state = states[idx]; + +- ret = sbi_suspend(state) ? -1 : idx; ++ ret = riscv_sbi_hart_suspend(state) ? 
-1 : idx; + + ct_cpuidle_exit(); + +@@ -207,17 +187,6 @@ static const struct of_device_id sbi_cpuidle_state_match[] = { + { }, + }; + +-static bool sbi_suspend_state_is_valid(u32 state) +-{ +- if (state > SBI_HSM_SUSPEND_RET_DEFAULT && +- state < SBI_HSM_SUSPEND_RET_PLATFORM) +- return false; +- if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT && +- state < SBI_HSM_SUSPEND_NON_RET_PLATFORM) +- return false; +- return true; +-} +- + static int sbi_dt_parse_state_node(struct device_node *np, u32 *state) + { + int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state); +@@ -227,7 +196,7 @@ static int sbi_dt_parse_state_node(struct device_node *np, u32 *state) + return err; + } + +- if (!sbi_suspend_state_is_valid(*state)) { ++ if (!riscv_sbi_suspend_state_is_valid(*state)) { + pr_warn("Invalid SBI suspend state %#x\n", *state); + return -EINVAL; + } +@@ -600,16 +569,8 @@ static int __init sbi_cpuidle_init(void) + int ret; + struct platform_device *pdev; + +- /* +- * The SBI HSM suspend function is only available when: +- * 1) SBI version is 0.3 or higher +- * 2) SBI HSM extension is available +- */ +- if ((sbi_spec_version < sbi_mk_version(0, 3)) || +- !sbi_probe_extension(SBI_EXT_HSM)) { +- pr_info("HSM suspend not available\n"); ++ if (!riscv_sbi_hsm_is_supported()) + return 0; +- } + + ret = platform_driver_register(&sbi_cpuidle_driver); + if (ret) diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index 72fb40de58b3..2235a15930bc 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -37038,7 +43495,7 @@ index b882b26ab500..d502bb36434b 100644 .acpi_match_table = pca953x_acpi_ids, }, diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig -index a5b92adb8aff..486d14f1495a 100644 +index d1cad875d2f7..191c700fde97 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -390,6 +390,10 @@ source "drivers/gpu/drm/sprd/Kconfig" @@ -37053,7 +43510,7 @@ index a5b92adb8aff..486d14f1495a 100644 tristate "DRM Support for Hyper-V synthetic video device" depends on DRM && PCI && MMU && HYPERV diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile -index 7ba2ec90c3f7..183e306f0c6e 100644 +index f93fd0ac8661..caff5405c5f4 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -184,6 +184,7 @@ obj-y += hisilicon/ @@ -37064,13 +43521,13 @@ index 7ba2ec90c3f7..183e306f0c6e 100644 obj-$(CONFIG_DRM_TVE200) += tve200/ obj-$(CONFIG_DRM_XEN) += xen/ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ -@@ -200,3 +201,4 @@ obj-$(CONFIG_DRM_SPRD) += sprd/ +@@ -199,3 +200,4 @@ obj-y += solomon/ + obj-$(CONFIG_DRM_SPRD) += sprd/ obj-y += loongson/ obj-$(CONFIG_DRM_PHYTIUM) += phytium/ - obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hygon/hydcu-fixup-header/ +obj-$(CONFIG_DRM_VERISILICON) += verisilicon/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -index 1f1198e7c93e..74d602dc575d 100644 +index 8136e49cb6d1..9a5b5dc210ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1109,6 +1109,8 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) @@ -344752,7 +351209,7 @@ index b3fffe7b5062..aa137ead5cc5 100644 tmp = pgprot_writecombine(tmp); else diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c -index 46ff9c75bb12..63a9b8d41b94 100644 +index 68b02e46c061..d7fa069480f9 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -187,7 
+187,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo, @@ -344764,7 +351221,7 @@ index 46ff9c75bb12..63a9b8d41b94 100644 res->bo = bo; man = ttm_manager_type(bo->bdev, place->mem_type); -@@ -670,17 +670,18 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, +@@ -674,17 +674,18 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, } else { iter_io->needs_unmap = true; memset(&iter_io->dmap, 0, sizeof(iter_io->dmap)); @@ -357994,16 +364451,17 @@ index f4697c1a39c0..51b46cab7a2f 100644 obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c new file mode 100644 -index 000000000000..4a3ffe856d6c +index 000000000000..7cd6b646774b --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-direct.c -@@ -0,0 +1,323 @@ +@@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + ++#include +#include +#include +#include @@ -358189,17 +364647,22 @@ index 000000000000..4a3ffe856d6c +} + +static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index, -+ u32 *parent_hwirq, unsigned long *parent_hartid) ++ u32 *parent_hwirq, unsigned long *parent_hartid, ++ struct aplic_priv *priv) +{ + struct of_phandle_args parent; ++ unsigned long hartid; + int rc; + -+ /* -+ * Currently, only OF fwnode is supported so extend this -+ * function for ACPI support. -+ */ -+ if (!is_of_node(dev->fwnode)) -+ return -EINVAL; ++ if (!is_of_node(dev->fwnode)) { ++ hartid = acpi_rintc_ext_parent_to_hartid(priv->acpi_aplic_id, index); ++ if (hartid == INVALID_HARTID) ++ return -ENODEV; ++ ++ *parent_hartid = hartid; ++ *parent_hwirq = RV_IRQ_EXT; ++ return 0; ++ } + + rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent); + if (rc) @@ -358237,7 +364700,7 @@ index 000000000000..4a3ffe856d6c + /* Setup per-CPU IDC and target CPU mask */ + current_cpu = get_cpu(); + for (i = 0; i < priv->nr_idcs; i++) { -+ rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid); ++ rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid, priv); + if (rc) { + dev_warn(dev, "parent irq for IDC%d not found\n", i); + continue; @@ -358323,18 +364786,20 @@ index 000000000000..4a3ffe856d6c +} diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c new file mode 100644 -index 000000000000..4ed7a1db7776 +index 000000000000..93e7c51f944a --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-main.c -@@ -0,0 +1,211 @@ +@@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + ++#include +#include +#include ++#include +#include +#include +#include @@ -358454,39 +364919,50 @@ index 000000000000..4ed7a1db7776 + writel(0, priv->regs + APLIC_DOMAINCFG); +} + ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id aplic_acpi_match[] = { ++ { "RSCV0002", 0 }, ++ {} ++}; ++MODULE_DEVICE_TABLE(acpi, aplic_acpi_match); ++ ++#endif ++ +int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs) +{ + struct device_node *np = to_of_node(dev->fwnode); + struct of_phandle_args parent; + int rc; + -+ /* -+ * Currently, only OF fwnode is supported so extend this -+ * function for ACPI support. 
-+ */ -+ if (!np) -+ return -EINVAL; -+ + /* Save device pointer and register base */ + priv->dev = dev; + priv->regs = regs; + -+ /* Find out number of interrupt sources */ -+ rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs); -+ if (rc) { -+ dev_err(dev, "failed to get number of interrupt sources\n"); -+ return rc; -+ } ++ if (np) { ++ /* Find out number of interrupt sources */ ++ rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs); ++ if (rc) { ++ dev_err(dev, "failed to get number of interrupt sources\n"); ++ return rc; ++ } + -+ /* -+ * Find out number of IDCs based on parent interrupts -+ * -+ * If "msi-parent" property is present then we ignore the -+ * APLIC IDCs which forces the APLIC driver to use MSI mode. -+ */ -+ if (!of_property_present(np, "msi-parent")) { -+ while (!of_irq_parse_one(np, priv->nr_idcs, &parent)) -+ priv->nr_idcs++; ++ /* ++ * Find out number of IDCs based on parent interrupts ++ * ++ * If "msi-parent" property is present then we ignore the ++ * APLIC IDCs which forces the APLIC driver to use MSI mode. ++ */ ++ if (!of_property_present(np, "msi-parent")) { ++ while (!of_irq_parse_one(np, priv->nr_idcs, &parent)) ++ priv->nr_idcs++; ++ } ++ } else { ++ rc = riscv_acpi_get_gsi_info(dev->fwnode, &priv->gsi_base, &priv->acpi_aplic_id, ++ &priv->nr_irqs, &priv->nr_idcs); ++ if (rc) { ++ dev_err(dev, "failed to find GSI mapping\n"); ++ return rc; ++ } + } + + /* Setup initial state APLIC interrupts */ @@ -358513,7 +364989,11 @@ index 000000000000..4ed7a1db7776 + * If msi-parent property is present then setup APLIC MSI + * mode otherwise setup APLIC direct mode. + */ -+ msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent"); ++ if (is_of_node(dev->fwnode)) ++ msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent"); ++ else ++ msi_mode = imsic_acpi_get_fwnode(NULL) ? 1 : 0; ++ + if (msi_mode) + rc = aplic_msi_setup(dev, regs); + else @@ -358522,6 +365002,11 @@ index 000000000000..4ed7a1db7776 + dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n", + msi_mode ? "MSI" : "direct"); + ++#ifdef CONFIG_ACPI ++ if (!acpi_disabled) ++ acpi_dev_clear_dependencies(ACPI_COMPANION(dev)); ++#endif ++ + return rc; +} + @@ -358534,16 +365019,17 @@ index 000000000000..4ed7a1db7776 + .driver = { + .name = "riscv-aplic", + .of_match_table = aplic_match, ++ .acpi_match_table = ACPI_PTR(aplic_acpi_match), + }, + .probe = aplic_probe, +}; +builtin_platform_driver(aplic_driver); diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h new file mode 100644 -index 000000000000..4393927d8c80 +index 000000000000..b0ad8cde69b1 --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-main.h -@@ -0,0 +1,52 @@ +@@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. @@ -358574,6 +365060,7 @@ index 000000000000..4393927d8c80 + u32 gsi_base; + u32 nr_irqs; + u32 nr_idcs; ++ u32 acpi_aplic_id; + void __iomem *regs; + struct aplic_msicfg msicfg; +}; @@ -358598,10 +365085,10 @@ index 000000000000..4393927d8c80 +#endif diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c new file mode 100644 -index 000000000000..c4a5b375a75f +index 000000000000..ad5c57c0ed06 --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-msi.c -@@ -0,0 +1,278 @@ +@@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. 
@@ -358779,6 +365266,7 @@ index 000000000000..c4a5b375a75f +int aplic_msi_setup(struct device *dev, void __iomem *regs) +{ + const struct imsic_global_config *imsic_global; ++ struct irq_domain *msi_domain; + struct aplic_priv *priv; + struct aplic_msicfg *mc; + phys_addr_t pa; @@ -358861,8 +365349,14 @@ index 000000000000..c4a5b375a75f + * IMSIC and the IMSIC MSI domains are created later through + * the platform driver probing so we set it explicitly here. + */ -+ if (is_of_node(dev->fwnode)) ++ if (is_of_node(dev->fwnode)) { + of_msi_configure(dev, to_of_node(dev->fwnode)); ++ } else { ++ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev), ++ DOMAIN_BUS_PLATFORM_MSI); ++ if (msi_domain) ++ dev_set_msi_domain(dev, msi_domain); ++ } + + if (!dev_get_msi_domain(dev)) + return -EPROBE_DEFER; @@ -358882,10 +365376,10 @@ index 000000000000..c4a5b375a75f +} diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c new file mode 100644 -index 000000000000..886418ec06cb +index 000000000000..d586c579713d --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-early.c -@@ -0,0 +1,201 @@ +@@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. @@ -358893,13 +365387,16 @@ index 000000000000..886418ec06cb + */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt ++#include +#include +#include +#include +#include +#include +#include ++#include +#include ++#include +#include +#include + @@ -359070,7 +365567,7 @@ index 000000000000..886418ec06cb + int rc; + + /* Setup IMSIC state */ -+ rc = imsic_setup_state(fwnode); ++ rc = imsic_setup_state(fwnode, NULL); + if (rc) { + pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc); + return rc; @@ -359087,12 +365584,71 @@ index 000000000000..886418ec06cb +} + +IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init); ++ ++#ifdef CONFIG_ACPI ++ ++static struct fwnode_handle *imsic_acpi_fwnode; ++ ++struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) ++{ ++ return imsic_acpi_fwnode; ++} ++ ++static int __init imsic_early_acpi_init(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)header; ++ int rc; ++ ++ imsic_acpi_fwnode = irq_domain_alloc_named_fwnode("imsic"); ++ if (!imsic_acpi_fwnode) { ++ pr_err("unable to allocate IMSIC FW node\n"); ++ return -ENOMEM; ++ } ++ ++ /* Setup IMSIC state */ ++ rc = imsic_setup_state(imsic_acpi_fwnode, imsic); ++ if (rc) { ++ pr_err("%pfwP: failed to setup state (error %d)\n", imsic_acpi_fwnode, rc); ++ return rc; ++ } ++ ++ /* Do early setup of IMSIC state and IPIs */ ++ rc = imsic_early_probe(imsic_acpi_fwnode); ++ if (rc) { ++ irq_domain_free_fwnode(imsic_acpi_fwnode); ++ imsic_acpi_fwnode = NULL; ++ return rc; ++ } ++ ++ rc = imsic_platform_acpi_probe(imsic_acpi_fwnode); ++ ++#ifdef CONFIG_PCI ++ if (!rc) ++ pci_msi_register_fwnode_provider(&imsic_acpi_get_fwnode); ++#endif ++ ++ if (rc) ++ pr_err("%pfwP: failed to register IMSIC for MSI functionality (error %d)\n", ++ imsic_acpi_fwnode, rc); ++ ++ /* ++ * Even if imsic_platform_acpi_probe() fails, the IPI part of IMSIC can ++ * continue to work. So, no need to return failure. This is similar to ++ * DT where IPI works but MSI probe fails for some reason. 
++ */ ++ return 0; ++} ++ ++IRQCHIP_ACPI_DECLARE(riscv_imsic, ACPI_MADT_TYPE_IMSIC, NULL, ++ 1, imsic_early_acpi_init); ++#endif diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c new file mode 100644 -index 000000000000..c5ec66e0bfd3 +index 000000000000..c708780e8760 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-platform.c -@@ -0,0 +1,375 @@ +@@ -0,0 +1,395 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. @@ -359100,6 +365656,7 @@ index 000000000000..c5ec66e0bfd3 + */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt ++#include +#include +#include +#include @@ -359443,18 +366000,37 @@ index 000000000000..c5ec66e0bfd3 + return 0; +} + -+static int imsic_platform_probe(struct platform_device *pdev) ++static int imsic_platform_probe_common(struct fwnode_handle *fwnode) +{ -+ struct device *dev = &pdev->dev; -+ -+ if (imsic && imsic->fwnode != dev->fwnode) { -+ dev_err(dev, "fwnode mismatch\n"); ++ if (imsic && imsic->fwnode != fwnode) { ++ pr_err("%pfwP: fwnode mismatch\n", fwnode); + return -ENODEV; + } + + return imsic_irqdomain_init(); +} + ++static int imsic_platform_dt_probe(struct platform_device *pdev) ++{ ++ return imsic_platform_probe_common(pdev->dev.fwnode); ++} ++ ++#ifdef CONFIG_ACPI ++ ++/* ++ * On ACPI based systems, PCI enumeration happens early during boot in ++ * acpi_scan_init(). PCI enumeration expects MSI domain setup before ++ * it calls pci_set_msi_domain(). Hence, unlike in DT where ++ * imsic-platform drive probe happens late during boot, ACPI based ++ * systems need to setup the MSI domain early. ++ */ ++int imsic_platform_acpi_probe(struct fwnode_handle *fwnode) ++{ ++ return imsic_platform_probe_common(fwnode); ++} ++ ++#endif ++ +static const struct of_device_id imsic_platform_match[] = { + { .compatible = "riscv,imsics" }, + {} @@ -359465,15 +366041,15 @@ index 000000000000..c5ec66e0bfd3 + .name = "riscv-imsic", + .of_match_table = imsic_platform_match, + }, -+ .probe = imsic_platform_probe, ++ .probe = imsic_platform_dt_probe, +}; +builtin_platform_driver(imsic_platform_driver); diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c new file mode 100644 -index 000000000000..5479f872e62b +index 000000000000..b97e6cd89ed7 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-state.c -@@ -0,0 +1,865 @@ +@@ -0,0 +1,891 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. 
@@ -359481,6 +366057,7 @@ index 000000000000..5479f872e62b + */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt ++#include +#include +#include +#include @@ -359986,18 +366563,90 @@ index 000000000000..5479f872e62b + return 0; +} + ++static int __init imsic_populate_global_dt(struct fwnode_handle *fwnode, ++ struct imsic_global_config *global, ++ u32 *nr_parent_irqs) ++{ ++ int rc; ++ ++ /* Find number of guest index bits in MSI address */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits", ++ &global->guest_index_bits); ++ if (rc) ++ global->guest_index_bits = 0; ++ ++ /* Find number of HART index bits */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits", ++ &global->hart_index_bits); ++ if (rc) { ++ /* Assume default value */ ++ global->hart_index_bits = __fls(*nr_parent_irqs); ++ if (BIT(global->hart_index_bits) < *nr_parent_irqs) ++ global->hart_index_bits++; ++ } ++ ++ /* Find number of group index bits */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits", ++ &global->group_index_bits); ++ if (rc) ++ global->group_index_bits = 0; ++ ++ /* ++ * Find first bit position of group index. ++ * If not specified assumed the default APLIC-IMSIC configuration. ++ */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift", ++ &global->group_index_shift); ++ if (rc) ++ global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2; ++ ++ /* Find number of interrupt identities */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids", ++ &global->nr_ids); ++ if (rc) { ++ pr_err("%pfwP: number of interrupt identities not found\n", fwnode); ++ return rc; ++ } ++ ++ /* Find number of guest interrupt identities */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids", ++ &global->nr_guest_ids); ++ if (rc) ++ global->nr_guest_ids = global->nr_ids; ++ ++ return 0; ++} ++ ++static int __init imsic_populate_global_acpi(struct fwnode_handle *fwnode, ++ struct imsic_global_config *global, ++ u32 *nr_parent_irqs, void *opaque) ++{ ++ struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)opaque; ++ ++ global->guest_index_bits = imsic->guest_index_bits; ++ global->hart_index_bits = imsic->hart_index_bits; ++ global->group_index_bits = imsic->group_index_bits; ++ global->group_index_shift = imsic->group_index_shift; ++ global->nr_ids = imsic->num_ids; ++ global->nr_guest_ids = imsic->num_guest_ids; ++ return 0; ++} ++ +static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode, + u32 index, unsigned long *hartid) +{ + struct of_phandle_args parent; + int rc; + -+ /* -+ * Currently, only OF fwnode is supported so extend this -+ * function for ACPI support. -+ */ -+ if (!is_of_node(fwnode)) -+ return -EINVAL; ++ if (!is_of_node(fwnode)) { ++ if (hartid) ++ *hartid = acpi_rintc_index_to_hartid(index); ++ ++ if (!hartid || (*hartid == INVALID_HARTID)) ++ return -EINVAL; ++ ++ return 0; ++ } + + rc = of_irq_parse_one(to_of_node(fwnode), index, &parent); + if (rc) @@ -360016,12 +366665,8 @@ index 000000000000..5479f872e62b +static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode, + u32 index, struct resource *res) +{ -+ /* -+ * Currently, only OF fwnode is supported so extend this -+ * function for ACPI support. 
-+ */ + if (!is_of_node(fwnode)) -+ return -EINVAL; ++ return acpi_rintc_get_imsic_mmio_info(index, res); + + return of_address_to_resource(to_of_node(fwnode), index, res); +} @@ -360029,20 +366674,14 @@ index 000000000000..5479f872e62b +static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode, + struct imsic_global_config *global, + u32 *nr_parent_irqs, -+ u32 *nr_mmios) ++ u32 *nr_mmios, ++ void *opaque) +{ + unsigned long hartid; + struct resource res; + int rc; + u32 i; + -+ /* -+ * Currently, only OF fwnode is supported so extend this -+ * function for ACPI support. -+ */ -+ if (!is_of_node(fwnode)) -+ return -EINVAL; -+ + *nr_parent_irqs = 0; + *nr_mmios = 0; + @@ -360054,50 +366693,13 @@ index 000000000000..5479f872e62b + return -EINVAL; + } + -+ /* Find number of guest index bits in MSI address */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits", -+ &global->guest_index_bits); -+ if (rc) -+ global->guest_index_bits = 0; -+ -+ /* Find number of HART index bits */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits", -+ &global->hart_index_bits); -+ if (rc) { -+ /* Assume default value */ -+ global->hart_index_bits = __fls(*nr_parent_irqs); -+ if (BIT(global->hart_index_bits) < *nr_parent_irqs) -+ global->hart_index_bits++; -+ } -+ -+ /* Find number of group index bits */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits", -+ &global->group_index_bits); -+ if (rc) -+ global->group_index_bits = 0; ++ if (is_of_node(fwnode)) ++ rc = imsic_populate_global_dt(fwnode, global, nr_parent_irqs); ++ else ++ rc = imsic_populate_global_acpi(fwnode, global, nr_parent_irqs, opaque); + -+ /* -+ * Find first bit position of group index. -+ * If not specified assumed the default APLIC-IMSIC configuration. 
-+ */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift", -+ &global->group_index_shift); + if (rc) -+ global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2; -+ -+ /* Find number of interrupt identities */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids", -+ &global->nr_ids); -+ if (rc) { -+ pr_err("%pfwP: number of interrupt identities not found\n", fwnode); + return rc; -+ } -+ -+ /* Find number of guest interrupt identities */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids", -+ &global->nr_guest_ids); -+ if (rc) -+ global->nr_guest_ids = global->nr_ids; + + /* Sanity check guest index bits */ + i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT; @@ -360164,7 +366766,7 @@ index 000000000000..5479f872e62b + return 0; +} + -+int __init imsic_setup_state(struct fwnode_handle *fwnode) ++int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque) +{ + u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0; + struct imsic_global_config *global; @@ -360205,7 +366807,7 @@ index 000000000000..5479f872e62b + } + + /* Parse IMSIC fwnode */ -+ rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios); ++ rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios, opaque); + if (rc) + goto out_free_local; + @@ -360341,7 +366943,7 @@ index 000000000000..5479f872e62b +} diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h new file mode 100644 -index 000000000000..5ae2f69b035b +index 000000000000..391e44280827 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-state.h @@ -0,0 +1,108 @@ @@ -360449,12 +367051,12 @@ index 000000000000..5ae2f69b035b + +void imsic_state_online(void); +void imsic_state_offline(void); -+int imsic_setup_state(struct fwnode_handle *fwnode); ++int imsic_setup_state(struct fwnode_handle *fwnode, void *opaque); +int imsic_irqdomain_init(void); + +#endif diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c -index 627beae9649a..4f3a12383a1e 100644 +index 627beae9649a..c9f6451e23b7 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -19,6 +19,8 @@ @@ -360555,6 +367157,685 @@ index 627beae9649a..4f3a12383a1e 100644 return 0; } +@@ -229,14 +250,119 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init); + + #ifdef CONFIG_ACPI + ++struct rintc_data { ++ union { ++ u32 ext_intc_id; ++ struct { ++ u32 context_id : 16, ++ reserved : 8, ++ aplic_plic_id : 8; ++ }; ++ }; ++ unsigned long hart_id; ++ u64 imsic_addr; ++ u32 imsic_size; ++}; ++ ++static u32 nr_rintc; ++static struct rintc_data **rintc_acpi_data; ++ ++#define for_each_matching_plic(_plic_id) \ ++ unsigned int _plic; \ ++ \ ++ for (_plic = 0; _plic < nr_rintc; _plic++) \ ++ if (rintc_acpi_data[_plic]->aplic_plic_id != _plic_id) \ ++ continue; \ ++ else ++ ++unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id) ++{ ++ unsigned int nctx = 0; ++ ++ for_each_matching_plic(plic_id) ++ nctx++; ++ ++ return nctx; ++} ++ ++static struct rintc_data *get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) ++{ ++ unsigned int ctxt = 0; ++ ++ for_each_matching_plic(plic_id) { ++ if (ctxt == ctxt_idx) ++ return rintc_acpi_data[_plic]; ++ ++ ctxt++; ++ } ++ ++ return NULL; ++} ++ ++unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx) ++{ ++ struct rintc_data *data = get_plic_context(plic_id, ctxt_idx); ++ ++ return data ? 
data->hart_id : INVALID_HARTID; ++} ++ ++unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) ++{ ++ struct rintc_data *data = get_plic_context(plic_id, ctxt_idx); ++ ++ return data ? data->context_id : INVALID_CONTEXT; ++} ++ ++unsigned long acpi_rintc_index_to_hartid(u32 index) ++{ ++ return index >= nr_rintc ? INVALID_HARTID : rintc_acpi_data[index]->hart_id; ++} ++ ++int acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) ++{ ++ if (index >= nr_rintc) ++ return -1; ++ ++ res->start = rintc_acpi_data[index]->imsic_addr; ++ res->end = res->start + rintc_acpi_data[index]->imsic_size - 1; ++ res->flags = IORESOURCE_MEM; ++ return 0; ++} ++ ++static int __init riscv_intc_acpi_match(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ return 0; ++} ++ + static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, + const unsigned long end) + { + struct acpi_madt_rintc *rintc; + struct fwnode_handle *fn; ++ int count; + int rc; + ++ if (!rintc_acpi_data) { ++ count = acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, riscv_intc_acpi_match, 0); ++ if (count <= 0) ++ return -EINVAL; ++ ++ rintc_acpi_data = kcalloc(count, sizeof(*rintc_acpi_data), GFP_KERNEL); ++ if (!rintc_acpi_data) ++ return -ENOMEM; ++ } ++ + rintc = (struct acpi_madt_rintc *)header; ++ rintc_acpi_data[nr_rintc] = kzalloc(sizeof(*rintc_acpi_data[0]), GFP_KERNEL); ++ if (!rintc_acpi_data[nr_rintc]) ++ return -ENOMEM; ++ ++ rintc_acpi_data[nr_rintc]->ext_intc_id = rintc->ext_intc_id; ++ rintc_acpi_data[nr_rintc]->hart_id = rintc->hart_id; ++ rintc_acpi_data[nr_rintc]->imsic_addr = rintc->imsic_addr; ++ rintc_acpi_data[nr_rintc]->imsic_size = rintc->imsic_size; ++ nr_rintc++; + + /* + * The ACPI MADT will have one INTC for each CPU (or HART) +@@ -256,6 +382,8 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, + rc = riscv_intc_init_common(fn, &riscv_intc_chip); + if (rc) + irq_domain_free_fwnode(fn); ++ else ++ acpi_set_irq_model(ACPI_IRQ_MODEL_RINTC, riscv_acpi_get_gsi_domain_id); + + return rc; + } +diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c +index 572899669154..9362314e8dd0 100644 +--- a/drivers/irqchip/irq-sifive-plic.c ++++ b/drivers/irqchip/irq-sifive-plic.c +@@ -3,7 +3,8 @@ + * Copyright (C) 2017 SiFive + * Copyright (C) 2018 Christoph Hellwig + */ +-#define pr_fmt(fmt) "plic: " fmt ++#define pr_fmt(fmt) "riscv-plic: " fmt ++#include + #include + #include + #include +@@ -64,12 +65,15 @@ + #define PLIC_QUIRK_EDGE_INTERRUPT 0 + + struct plic_priv { ++ struct fwnode_handle *fwnode; + struct cpumask lmask; + struct irq_domain *irqdomain; + void __iomem *regs; + unsigned long plic_quirks; + unsigned int nr_irqs; + unsigned long *prio_save; ++ u32 gsi_base; ++ int acpi_plic_id; + }; + + struct plic_handler { +@@ -85,7 +89,7 @@ struct plic_handler { + struct plic_priv *priv; + }; + static int plic_parent_irq __ro_after_init; +-static bool plic_cpuhp_setup_done __ro_after_init; ++static bool plic_global_setup_done __ro_after_init; + static DEFINE_PER_CPU(struct plic_handler, plic_handlers); + + static int plic_irq_set_type(struct irq_data *d, unsigned int type); +@@ -103,9 +107,11 @@ static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable) + + static void plic_toggle(struct plic_handler *handler, int hwirq, int enable) + { +- raw_spin_lock(&handler->enable_lock); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&handler->enable_lock, flags); + 
__plic_toggle(handler->enable_base, hwirq, enable); +- raw_spin_unlock(&handler->enable_lock); ++ raw_spin_unlock_irqrestore(&handler->enable_lock, flags); + } + + static inline void plic_irq_toggle(const struct cpumask *mask, +@@ -163,15 +169,12 @@ static int plic_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force) + { + unsigned int cpu; +- struct cpumask amask; + struct plic_priv *priv = irq_data_get_irq_chip_data(d); + +- cpumask_and(&amask, &priv->lmask, mask_val); +- + if (force) +- cpu = cpumask_first(&amask); ++ cpu = cpumask_first_and(&priv->lmask, mask_val); + else +- cpu = cpumask_any_and(&amask, cpu_online_mask); ++ cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask); + + if (cpu >= nr_cpu_ids) + return -EINVAL; +@@ -243,6 +246,7 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type) + static int plic_irq_suspend(void) + { + unsigned int i, cpu; ++ unsigned long flags; + u32 __iomem *reg; + struct plic_priv *priv; + +@@ -260,12 +264,12 @@ static int plic_irq_suspend(void) + if (!handler->present) + continue; + +- raw_spin_lock(&handler->enable_lock); ++ raw_spin_lock_irqsave(&handler->enable_lock, flags); + for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) { + reg = handler->enable_base + i * sizeof(u32); + handler->enable_save[i] = readl(reg); + } +- raw_spin_unlock(&handler->enable_lock); ++ raw_spin_unlock_irqrestore(&handler->enable_lock, flags); + } + + return 0; +@@ -274,6 +278,7 @@ static int plic_irq_suspend(void) + static void plic_irq_resume(void) + { + unsigned int i, index, cpu; ++ unsigned long flags; + u32 __iomem *reg; + struct plic_priv *priv; + +@@ -291,12 +296,12 @@ static void plic_irq_resume(void) + if (!handler->present) + continue; + +- raw_spin_lock(&handler->enable_lock); ++ raw_spin_lock_irqsave(&handler->enable_lock, flags); + for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) { + reg = handler->enable_base + i * sizeof(u32); + writel(handler->enable_save[i], reg); + } +- raw_spin_unlock(&handler->enable_lock); ++ raw_spin_unlock_irqrestore(&handler->enable_lock, flags); + } + } + +@@ -324,6 +329,10 @@ static int plic_irq_domain_translate(struct irq_domain *d, + { + struct plic_priv *priv = d->host_data; + ++ /* For DT, gsi_base is always zero. 
*/ ++ if (fwspec->param[0] >= priv->gsi_base) ++ fwspec->param[0] = fwspec->param[0] - priv->gsi_base; ++ + if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) + return irq_domain_translate_twocell(d, fwspec, hwirq, type); + +@@ -377,9 +386,10 @@ static void plic_handle_irq(struct irq_desc *desc) + while ((hwirq = readl(claim))) { + int err = generic_handle_domain_irq(handler->priv->irqdomain, + hwirq); +- if (unlikely(err)) +- pr_warn_ratelimited("can't find mapping for hwirq %lu\n", +- hwirq); ++ if (unlikely(err)) { ++ pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n", ++ handler->priv->fwnode, hwirq); ++ } + } + + chained_irq_exit(chip, desc); +@@ -407,71 +417,176 @@ static int plic_starting_cpu(unsigned int cpu) + enable_percpu_irq(plic_parent_irq, + irq_get_trigger_type(plic_parent_irq)); + else +- pr_warn("cpu%d: parent irq not available\n", cpu); ++ pr_warn("%pfwP: cpu%d: parent irq not available\n", ++ handler->priv->fwnode, cpu); + plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD); + + return 0; + } + +-static int __init __plic_init(struct device_node *node, +- struct device_node *parent, +- unsigned long plic_quirks) ++static const struct of_device_id plic_match[] = { ++ { .compatible = "sifive,plic-1.0.0" }, ++ { .compatible = "riscv,plic0" }, ++ { .compatible = "andestech,nceplic100", ++ .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) }, ++ { .compatible = "thead,c900-plic", ++ .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) }, ++ {} ++}; ++ ++#ifdef CONFIG_ACPI ++ ++static const struct acpi_device_id plic_acpi_match[] = { ++ { "RSCV0001", 0 }, ++ {} ++}; ++MODULE_DEVICE_TABLE(acpi, plic_acpi_match); ++ ++#endif ++static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode, ++ u32 *nr_irqs, u32 *nr_contexts, ++ u32 *gsi_base, u32 *id) + { +- int error = 0, nr_contexts, nr_handlers = 0, i; +- u32 nr_irqs; +- struct plic_priv *priv; +- struct plic_handler *handler; +- unsigned int cpu; ++ int rc; + +- priv = kzalloc(sizeof(*priv), GFP_KERNEL); +- if (!priv) +- return -ENOMEM; ++ if (!is_of_node(fwnode)) { ++ rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL); ++ if (rc) { ++ pr_err("%pfwP: failed to find GSI mapping\n", fwnode); ++ return rc; ++ } + +- priv->plic_quirks = plic_quirks; ++ *nr_contexts = acpi_rintc_get_plic_nr_contexts(*id); ++ if (WARN_ON(!*nr_contexts)) { ++ pr_err("%pfwP: no PLIC context available\n", fwnode); ++ return -EINVAL; ++ } + +- priv->regs = of_iomap(node, 0); +- if (WARN_ON(!priv->regs)) { +- error = -EIO; +- goto out_free_priv; ++ return 0; + } + +- error = -EINVAL; +- of_property_read_u32(node, "riscv,ndev", &nr_irqs); +- if (WARN_ON(!nr_irqs)) +- goto out_iounmap; ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs); ++ if (rc) { ++ pr_err("%pfwP: riscv,ndev property not available\n", fwnode); ++ return rc; ++ } + +- priv->nr_irqs = nr_irqs; ++ *nr_contexts = of_irq_count(to_of_node(fwnode)); ++ if (WARN_ON(!(*nr_contexts))) { ++ pr_err("%pfwP: no PLIC context available\n", fwnode); ++ return -EINVAL; ++ } + +- priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL); +- if (!priv->prio_save) +- goto out_free_priority_reg; ++ *gsi_base = 0; ++ *id = 0; + +- nr_contexts = of_irq_count(node); +- if (WARN_ON(!nr_contexts)) +- goto out_free_priority_reg; ++ return 0; ++} + +- error = -ENOMEM; +- priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1, +- &plic_irqdomain_ops, priv); +- if (WARN_ON(!priv->irqdomain)) +- goto out_free_priority_reg; ++static int 
plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context, ++ u32 *parent_hwirq, int *parent_cpu, u32 id) ++{ ++ struct of_phandle_args parent; ++ unsigned long hartid; ++ int rc; ++ ++ if (!is_of_node(fwnode)) { ++ hartid = acpi_rintc_ext_parent_to_hartid(id, context); ++ if (hartid == INVALID_HARTID) ++ return -EINVAL; ++ ++ *parent_cpu = riscv_hartid_to_cpuid(hartid); ++ *parent_hwirq = RV_IRQ_EXT; ++ return 0; ++ } + +- for (i = 0; i < nr_contexts; i++) { +- struct of_phandle_args parent; +- irq_hw_number_t hwirq; +- int cpu; +- unsigned long hartid; ++ rc = of_irq_parse_one(to_of_node(fwnode), context, &parent); ++ if (rc) ++ return rc; ++ ++ rc = riscv_of_parent_hartid(parent.np, &hartid); ++ if (rc) ++ return rc; ++ ++ *parent_hwirq = parent.args[0]; ++ *parent_cpu = riscv_hartid_to_cpuid(hartid); ++ return 0; ++} ++ ++static int plic_probe(struct fwnode_handle *fwnode) ++{ ++ int error = 0, nr_contexts, nr_handlers = 0, cpu, i; ++ unsigned long plic_quirks = 0; ++ struct plic_handler *handler; ++ u32 nr_irqs, parent_hwirq; ++ struct plic_priv *priv; ++ irq_hw_number_t hwirq; ++ void __iomem *regs; ++ int id, context_id; ++ u32 gsi_base; ++ ++ if (is_of_node(fwnode)) { ++ const struct of_device_id *id; + +- if (of_irq_parse_one(node, i, &parent)) { +- pr_err("failed to parse parent for context %d.\n", i); ++ id = of_match_node(plic_match, to_of_node(fwnode)); ++ if (id) ++ plic_quirks = (unsigned long)id->data; ++ ++ regs = of_iomap(to_of_node(fwnode), 0); ++ if (!regs) ++ return -ENOMEM; ++ } else { ++ regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0); ++ if (IS_ERR(regs)) ++ return PTR_ERR(regs); ++ } ++ ++ error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id); ++ if (error) ++ goto fail_free_regs; ++ ++ priv = kzalloc(sizeof(*priv), GFP_KERNEL); ++ if (!priv) { ++ error = -ENOMEM; ++ goto fail_free_regs; ++ } ++ ++ priv->fwnode = fwnode; ++ priv->plic_quirks = plic_quirks; ++ priv->nr_irqs = nr_irqs; ++ priv->regs = regs; ++ priv->gsi_base = gsi_base; ++ priv->acpi_plic_id = id; ++ ++ priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL); ++ if (!priv->prio_save) { ++ error = -ENOMEM; ++ goto fail_free_priv; ++ } ++ ++ for (i = 0; i < nr_contexts; i++) { ++ error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, ++ priv->acpi_plic_id); ++ if (error) { ++ pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i); + continue; + } + ++ if (is_of_node(fwnode)) { ++ context_id = i; ++ } else { ++ context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i); ++ if (context_id == INVALID_CONTEXT) { ++ pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i); ++ continue; ++ } ++ } ++ + /* + * Skip contexts other than external interrupts for our + * privilege level. + */ +- if (parent.args[0] != RV_IRQ_EXT) { ++ if (parent_hwirq != RV_IRQ_EXT) { + /* Disable S-mode enable bits if running in M-mode. 
*/ + if (IS_ENABLED(CONFIG_RISCV_M_MODE)) { + void __iomem *enable_base = priv->regs + +@@ -484,26 +599,11 @@ static int __init __plic_init(struct device_node *node, + continue; + } + +- error = riscv_of_parent_hartid(parent.np, &hartid); +- if (error < 0) { +- pr_warn("failed to parse hart ID for context %d.\n", i); +- continue; +- } +- +- cpu = riscv_hartid_to_cpuid(hartid); + if (cpu < 0) { +- pr_warn("Invalid cpuid for context %d\n", i); ++ pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i); + continue; + } + +- /* Find parent domain and register chained handler */ +- if (!plic_parent_irq && irq_find_host(parent.np)) { +- plic_parent_irq = irq_of_parse_and_map(node, i); +- if (plic_parent_irq) +- irq_set_chained_handler(plic_parent_irq, +- plic_handle_irq); +- } +- + /* + * When running in M-mode we need to ignore the S-mode handler. + * Here we assume it always comes later, but that might be a +@@ -511,7 +611,7 @@ static int __init __plic_init(struct device_node *node, + */ + handler = per_cpu_ptr(&plic_handlers, cpu); + if (handler->present) { +- pr_warn("handler already present for context %d.\n", i); ++ pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i); + plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD); + goto done; + } +@@ -519,16 +619,16 @@ static int __init __plic_init(struct device_node *node, + cpumask_set_cpu(cpu, &priv->lmask); + handler->present = true; + handler->hart_base = priv->regs + CONTEXT_BASE + +- i * CONTEXT_SIZE; ++ context_id * CONTEXT_SIZE; + raw_spin_lock_init(&handler->enable_lock); + handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE + +- i * CONTEXT_ENABLE_SIZE; ++ context_id * CONTEXT_ENABLE_SIZE; + handler->priv = priv; + +- handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), +- sizeof(*handler->enable_save), GFP_KERNEL); ++ handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), ++ sizeof(*handler->enable_save), GFP_KERNEL); + if (!handler->enable_save) +- goto out_free_enable_reg; ++ goto fail_cleanup_contexts; + done: + for (hwirq = 1; hwirq <= nr_irqs; hwirq++) { + plic_toggle(handler, hwirq, 0); +@@ -538,52 +638,101 @@ static int __init __plic_init(struct device_node *node, + nr_handlers++; + } + ++ priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1, ++ &plic_irqdomain_ops, priv); ++ if (WARN_ON(!priv->irqdomain)) ++ goto fail_cleanup_contexts; ++ + /* +- * We can have multiple PLIC instances so setup cpuhp state +- * and register syscore operations only when context handler +- * for current/boot CPU is present. ++ * We can have multiple PLIC instances so setup global state ++ * and register syscore operations only once after context ++ * handlers of all online CPUs are initialized. 
+ */ +- handler = this_cpu_ptr(&plic_handlers); +- if (handler->present && !plic_cpuhp_setup_done) { +- cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, +- "irqchip/sifive/plic:starting", +- plic_starting_cpu, plic_dying_cpu); +- register_syscore_ops(&plic_irq_syscore_ops); +- plic_cpuhp_setup_done = true; ++ if (!plic_global_setup_done) { ++ struct irq_domain *domain; ++ bool global_setup = true; ++ ++ for_each_online_cpu(cpu) { ++ handler = per_cpu_ptr(&plic_handlers, cpu); ++ if (!handler->present) { ++ global_setup = false; ++ break; ++ } ++ } ++ ++ if (global_setup) { ++ /* Find parent domain and register chained handler */ ++ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); ++ if (domain) ++ plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); ++ if (plic_parent_irq) ++ irq_set_chained_handler(plic_parent_irq, plic_handle_irq); ++ ++ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, ++ "irqchip/sifive/plic:starting", ++ plic_starting_cpu, plic_dying_cpu); ++ register_syscore_ops(&plic_irq_syscore_ops); ++ plic_global_setup_done = true; ++ } + } + +- pr_info("%pOFP: mapped %d interrupts with %d handlers for" +- " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts); ++#ifdef CONFIG_ACPI ++ if (!acpi_disabled) ++ acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev)); ++#endif ++ ++ pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n", ++ fwnode, nr_irqs, nr_handlers, nr_contexts); + return 0; + +-out_free_enable_reg: +- for_each_cpu(cpu, cpu_present_mask) { ++fail_cleanup_contexts: ++ for (i = 0; i < nr_contexts; i++) { ++ if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id)) ++ continue; ++ if (parent_hwirq != RV_IRQ_EXT || cpu < 0) ++ continue; ++ + handler = per_cpu_ptr(&plic_handlers, cpu); ++ handler->present = false; ++ handler->hart_base = NULL; ++ handler->enable_base = NULL; + kfree(handler->enable_save); ++ handler->enable_save = NULL; ++ handler->priv = NULL; + } +-out_free_priority_reg: +- kfree(priv->prio_save); +-out_iounmap: +- iounmap(priv->regs); +-out_free_priv: ++ bitmap_free(priv->prio_save); ++fail_free_priv: + kfree(priv); ++fail_free_regs: ++ iounmap(regs); + return error; + } + +-static int __init plic_init(struct device_node *node, +- struct device_node *parent) ++static int plic_platform_probe(struct platform_device *pdev) + { +- return __plic_init(node, parent, 0); ++ return plic_probe(pdev->dev.fwnode); + } + +-IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); +-IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ ++static struct platform_driver plic_driver = { ++ .driver = { ++ .name = "riscv-plic", ++ .of_match_table = plic_match, ++ .suppress_bind_attrs = true, ++ .acpi_match_table = ACPI_PTR(plic_acpi_match), ++ }, ++ .probe = plic_platform_probe, ++}; ++ ++static int __init plic_init(void) ++{ ++ return platform_driver_register(&plic_driver); ++} ++arch_initcall(plic_init); + +-static int __init plic_edge_init(struct device_node *node, +- struct device_node *parent) ++static int __init plic_early_probe(struct device_node *node, ++ struct device_node *parent) + { +- return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT)); ++ return plic_probe(&node->fwnode); + } + +-IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init); +-IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init); ++IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe); diff --git 
a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index bc2e265cb02d..a0186d27086f 100644 --- a/drivers/mailbox/Kconfig @@ -451675,6 +458956,203 @@ index 4eea161663b1..1222da5cdc89 100644 } } +diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c +index 05b7357bd258..7b6e191236ec 100644 +--- a/drivers/pci/pci-acpi.c ++++ b/drivers/pci/pci-acpi.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1518,3 +1519,184 @@ static int __init acpi_pci_init(void) + return 0; + } + arch_initcall(acpi_pci_init); ++ ++#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) ++ ++/* ++ * Try to assign the IRQ number when probing a new device ++ */ ++int pcibios_alloc_irq(struct pci_dev *dev) ++{ ++ if (!acpi_disabled) ++ acpi_pci_irq_enable(dev); ++ ++ return 0; ++} ++ ++struct acpi_pci_generic_root_info { ++ struct acpi_pci_root_info common; ++ struct pci_config_window *cfg; /* config space mapping */ ++}; ++ ++int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) ++{ ++ struct pci_config_window *cfg = bus->sysdata; ++ struct acpi_device *adev = to_acpi_device(cfg->parent); ++ struct acpi_pci_root *root = acpi_driver_data(adev); ++ ++ return root->segment; ++} ++ ++int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) ++{ ++ struct pci_config_window *cfg; ++ struct acpi_device *adev; ++ struct device *bus_dev; ++ ++ if (acpi_disabled) ++ return 0; ++ ++ cfg = bridge->bus->sysdata; ++ ++ /* ++ * On Hyper-V there is no corresponding ACPI device for a root bridge, ++ * therefore ->parent is set as NULL by the driver. And set 'adev' as ++ * NULL in this case because there is no proper ACPI device. ++ */ ++ if (!cfg->parent) ++ adev = NULL; ++ else ++ adev = to_acpi_device(cfg->parent); ++ ++ bus_dev = &bridge->bus->dev; ++ ++ ACPI_COMPANION_SET(&bridge->dev, adev); ++ set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev))); ++ ++ return 0; ++} ++ ++static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci) ++{ ++ struct resource_entry *entry, *tmp; ++ int status; ++ ++ status = acpi_pci_probe_root_resources(ci); ++ resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { ++ if (!(entry->res->flags & IORESOURCE_WINDOW)) ++ resource_list_destroy_entry(entry); ++ } ++ return status; ++} ++ ++/* ++ * Lookup the bus range for the domain in MCFG, and set up config space ++ * mapping. 
++ */ ++static struct pci_config_window * ++pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) ++{ ++ struct device *dev = &root->device->dev; ++ struct resource *bus_res = &root->secondary; ++ u16 seg = root->segment; ++ const struct pci_ecam_ops *ecam_ops; ++ struct resource cfgres; ++ struct acpi_device *adev; ++ struct pci_config_window *cfg; ++ int ret; ++ ++ ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops); ++ if (ret) { ++ dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); ++ return NULL; ++ } ++ ++ adev = acpi_resource_consumer(&cfgres); ++ if (adev) ++ dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres, ++ dev_name(&adev->dev)); ++ else ++ dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n", ++ &cfgres); ++ ++ cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); ++ if (IS_ERR(cfg)) { ++ dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, ++ PTR_ERR(cfg)); ++ return NULL; ++ } ++ ++ return cfg; ++} ++ ++/* release_info: free resources allocated by init_info */ ++static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci) ++{ ++ struct acpi_pci_generic_root_info *ri; ++ ++ ri = container_of(ci, struct acpi_pci_generic_root_info, common); ++ pci_ecam_free(ri->cfg); ++ kfree(ci->ops); ++ kfree(ri); ++} ++ ++/* Interface called from ACPI code to setup PCI host controller */ ++struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) ++{ ++ struct acpi_pci_generic_root_info *ri; ++ struct pci_bus *bus, *child; ++ struct acpi_pci_root_ops *root_ops; ++ struct pci_host_bridge *host; ++ ++ ri = kzalloc(sizeof(*ri), GFP_KERNEL); ++ if (!ri) ++ return NULL; ++ ++ root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); ++ if (!root_ops) { ++ kfree(ri); ++ return NULL; ++ } ++ ++ ri->cfg = pci_acpi_setup_ecam_mapping(root); ++ if (!ri->cfg) { ++ kfree(ri); ++ kfree(root_ops); ++ return NULL; ++ } ++ ++ root_ops->release_info = pci_acpi_generic_release_info; ++ root_ops->prepare_resources = pci_acpi_root_prepare_resources; ++ root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops; ++ bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg); ++ if (!bus) ++ return NULL; ++ ++ /* If we must preserve the resource configuration, claim now */ ++ host = pci_find_host_bridge(bus); ++ if (host->preserve_config) ++ pci_bus_claim_resources(bus); ++ ++ /* ++ * Assign whatever was left unassigned. If we didn't claim above, ++ * this will reassign everything. ++ */ ++ pci_assign_unassigned_root_bus_resources(bus); ++ ++ list_for_each_entry(child, &bus->children, node) ++ pcie_bus_configure_settings(child); ++ ++ return bus; ++} ++ ++void pcibios_add_bus(struct pci_bus *bus) ++{ ++ acpi_pci_add_bus(bus); ++} ++ ++void pcibios_remove_bus(struct pci_bus *bus) ++{ ++ acpi_pci_remove_bus(bus); ++} ++ ++#endif diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c index 46fad0d813b2..560b3a236d84 100644 --- a/drivers/pci/pcie/portdrv.c @@ -451688,6 +459166,31 @@ index 46fad0d813b2..560b3a236d84 100644 /* * If the user specified "pcie_ports=native", use the PCIe services regardless +diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig +index 7b7b15f9bb6f..9aaea6b3b8ac 100644 +--- a/drivers/perf/Kconfig ++++ b/drivers/perf/Kconfig +@@ -86,6 +86,20 @@ config RISCV_PMU_SBI + full perf feature support i.e. counter overflow, privilege mode + filtering, counter configuration. 
+ ++config ANDES_CUSTOM_PMU ++ bool "Andes custom PMU support" ++ depends on ARCH_RENESAS && RISCV_ALTERNATIVE && RISCV_PMU_SBI ++ default y ++ help ++ The Andes cores implement the PMU overflow extension very ++ similar to the standard Sscofpmf and Smcntrpmf extension. ++ ++ This will patch the overflow and pending CSRs and handle the ++ non-standard behaviour via the regular SBI PMU driver and ++ interface. ++ ++ If you don't know what to do here, say "Y". ++ + config ARM_PMU_ACPI + depends on ARM_PMU && ACPI + def_bool y diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 31e491e7f206..2946422539fb 100644 --- a/drivers/perf/arm_smmuv3_pmu.c @@ -451710,6 +459213,108 @@ index 31e491e7f206..2946422539fb 100644 if (ret) { dev_warn(dev, "failed to allocate MSIs\n"); return; +diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c +index 901da688ea3f..685938868681 100644 +--- a/drivers/perf/riscv_pmu_sbi.c ++++ b/drivers/perf/riscv_pmu_sbi.c +@@ -19,10 +19,36 @@ + #include + #include + #include ++#include + + #include + #include +-#include ++#include ++#include ++#include ++ ++#define ALT_SBI_PMU_OVERFLOW(__ovl) \ ++asm volatile(ALTERNATIVE_2( \ ++ "csrr %0, " __stringify(CSR_SSCOUNTOVF), \ ++ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \ ++ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \ ++ CONFIG_ERRATA_THEAD_PMU, \ ++ "csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF), \ ++ ANDES_VENDOR_ID, \ ++ RISCV_ISA_VENDOR_EXT_XANDESPMU + RISCV_VENDOR_EXT_ALTERNATIVES_BASE, \ ++ CONFIG_ANDES_CUSTOM_PMU) \ ++ : "=r" (__ovl) : \ ++ : "memory") ++ ++#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask) \ ++asm volatile(ALTERNATIVE( \ ++ "csrc " __stringify(CSR_IP) ", %0\n\t", \ ++ "csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t", \ ++ ANDES_VENDOR_ID, \ ++ RISCV_ISA_VENDOR_EXT_XANDESPMU + RISCV_VENDOR_EXT_ALTERNATIVES_BASE, \ ++ CONFIG_ANDES_CUSTOM_PMU) \ ++ : : "r"(__irq_mask) \ ++ : "memory") + + #define SYSCTL_NO_USER_ACCESS 0 + #define SYSCTL_USER_ACCESS 1 +@@ -61,6 +87,7 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS; + static union sbi_pmu_ctr_info *pmu_ctr_list; + static bool riscv_pmu_use_irq; + static unsigned int riscv_pmu_irq_num; ++static unsigned int riscv_pmu_irq_mask; + static unsigned int riscv_pmu_irq; + + /* Cache the available counters in a bitmask */ +@@ -694,7 +721,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) + + event = cpu_hw_evt->events[fidx]; + if (!event) { +- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); ++ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); + return IRQ_NONE; + } + +@@ -708,7 +735,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) + * Overflow interrupt pending bit should only be cleared after stopping + * all the counters to avoid any race condition. 
+ */ +- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); ++ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); + + /* No overflow bit is set */ + if (!overflow) +@@ -780,8 +807,7 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) + + if (riscv_pmu_use_irq) { + cpu_hw_evt->irq = riscv_pmu_irq; +- csr_clear(CSR_IP, BIT(riscv_pmu_irq_num)); +- csr_set(CSR_IE, BIT(riscv_pmu_irq_num)); ++ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); + enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE); + } + +@@ -792,7 +818,6 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node) + { + if (riscv_pmu_use_irq) { + disable_percpu_irq(riscv_pmu_irq); +- csr_clear(CSR_IE, BIT(riscv_pmu_irq_num)); + } + + /* Disable all counters access for user mode now */ +@@ -816,8 +841,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde + riscv_cached_mimpid(0) == 0) { + riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU; + riscv_pmu_use_irq = true; ++ } else if (riscv_has_vendor_extension_unlikely(ANDES_VENDOR_ID, ++ RISCV_ISA_VENDOR_EXT_XANDESPMU) && ++ IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) { ++ riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI; ++ riscv_pmu_use_irq = true; + } + ++ riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG); ++ + if (!riscv_pmu_use_irq) + return -EOPNOTSUPP; + diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index e4502958fd62..50f729360df1 100644 --- a/drivers/phy/Kconfig @@ -456796,11 +464401,48 @@ index 000000000000..f3a30f0275b1 +ssize_t pinmux_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size); +#endif +diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c +index 897cdd9c3aae..b8eaf6b9f8e1 100644 +--- a/drivers/platform/surface/surface_acpi_notify.c ++++ b/drivers/platform/surface/surface_acpi_notify.c +@@ -740,24 +740,26 @@ static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle) + { + struct acpi_handle_list dep_devices; + acpi_handle supplier = ACPI_HANDLE(&pdev->dev); +- acpi_status status; ++ bool ret = false; + int i; + + if (!acpi_has_method(handle, "_DEP")) + return false; + +- status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) { + san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n"); + return false; + } + + for (i = 0; i < dep_devices.count; i++) { +- if (dep_devices.handles[i] == supplier) +- return true; ++ if (dep_devices.handles[i] == supplier) { ++ ret = true; ++ break; ++ } + } + +- return false; ++ acpi_handle_list_free(&dep_devices); ++ return ret; + } + + static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl, diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig -index 8ebcddf91f7b..1aa6ad3947f8 100644 +index da57f4a2bde0..689b7dc4c768 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig -@@ -637,6 +637,17 @@ config PWM_TEGRA +@@ -649,6 +649,17 @@ config PWM_TEGRA To compile this driver as a module, choose M here: the module will be called pwm-tegra. 
@@ -456819,10 +464461,10 @@ index 8ebcddf91f7b..1aa6ad3947f8 100644 tristate "ECAP PWM support" depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile -index c822389c2a24..d18c8c54b5b3 100644 +index 5d5b64c25b7f..b825630e7de6 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile -@@ -50,6 +50,7 @@ obj-$(CONFIG_PWM_RZ_MTU3) += pwm-rz-mtu3.o +@@ -51,6 +51,7 @@ obj-$(CONFIG_PWM_RZ_MTU3) += pwm-rz-mtu3.o obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o obj-$(CONFIG_PWM_SL28CPLD) += pwm-sl28cpld.o @@ -456830,7 +464472,7 @@ index c822389c2a24..d18c8c54b5b3 100644 obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o obj-$(CONFIG_PWM_SPRD) += pwm-sprd.o obj-$(CONFIG_PWM_STI) += pwm-sti.o -@@ -59,6 +60,7 @@ obj-$(CONFIG_PWM_STMPE) += pwm-stmpe.o +@@ -60,6 +61,7 @@ obj-$(CONFIG_PWM_STMPE) += pwm-stmpe.o obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o obj-$(CONFIG_PWM_SUNPLUS) += pwm-sunplus.o obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o @@ -571759,7 +579401,7 @@ index fe1493d4bbe5..9422571d469d 100644 +obj-$(CONFIG_USB_DWC3_RTK) += dwc3-rtk.o +obj-$(CONFIG_USB_DWC3_XUANTIE) += dwc3-xuantie.o diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c -index 318ae24a41f4..ecf5c9afdc8b 100644 +index 30404461ef7d..7f1a85f471e5 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -42,6 +42,14 @@ @@ -571777,7 +579419,7 @@ index 318ae24a41f4..ecf5c9afdc8b 100644 /** * dwc3_get_dr_mode - Validates and sets dr_mode * @dwc: pointer to our context structure -@@ -1518,6 +1526,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) +@@ -1533,6 +1541,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) */ hird_threshold = 12; @@ -571787,7 +579429,7 @@ index 318ae24a41f4..ecf5c9afdc8b 100644 /* * default to a TXFIFO size large enough to fit 6 max packets. 
This * allows for systems with larger bus latencies to have some headroom -@@ -1525,11 +1536,16 @@ static void dwc3_get_properties(struct dwc3 *dwc) +@@ -1540,11 +1551,16 @@ static void dwc3_get_properties(struct dwc3 *dwc) */ tx_fifo_resize_max_num = 6; @@ -572581,6 +580223,113 @@ index 000000000000..80cb0b1e3d64 +MODULE_AUTHOR("Wei.Liu "); +MODULE_DESCRIPTION("PMIC Watchdog Driver for TH1520"); +MODULE_LICENSE("GPL"); +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h +index 7c49c8a35005..396909300897 100644 +--- a/include/acpi/acpi_bus.h ++++ b/include/acpi/acpi_bus.h +@@ -13,11 +13,9 @@ + #include + #include + +-/* TBD: Make dynamic */ +-#define ACPI_MAX_HANDLES 10 + struct acpi_handle_list { + u32 count; +- acpi_handle handles[ACPI_MAX_HANDLES]; ++ acpi_handle *handles; + }; + + /* acpi_utils.h */ +@@ -28,11 +26,14 @@ acpi_status + acpi_evaluate_integer(acpi_handle handle, + acpi_string pathname, + struct acpi_object_list *arguments, unsigned long long *data); +-acpi_status +-acpi_evaluate_reference(acpi_handle handle, +- acpi_string pathname, +- struct acpi_object_list *arguments, +- struct acpi_handle_list *list); ++bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, ++ struct acpi_object_list *arguments, ++ struct acpi_handle_list *list); ++bool acpi_handle_list_equal(struct acpi_handle_list *list1, ++ struct acpi_handle_list *list2); ++void acpi_handle_list_replace(struct acpi_handle_list *dst, ++ struct acpi_handle_list *src); ++void acpi_handle_list_free(struct acpi_handle_list *list); + acpi_status + acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code, + struct acpi_buffer *status_buf); +@@ -369,6 +370,24 @@ struct acpi_device_data { + + struct acpi_gpio_mapping; + ++struct acpi_device_software_node_port { ++ unsigned int port_nr; ++}; ++ ++/** ++ * struct acpi_device_software_nodes - Software nodes for an ACPI device ++ * @nodes: Software nodes for root as well as ports and endpoints. ++ * @nodeprts: Array of software node pointers, for (un)registering them. ++ * @ports: Information related to each port and endpoint within a port. ++ * @num_ports: The number of ports. 
++ */ ++struct acpi_device_software_nodes { ++ struct software_node *nodes; ++ const struct software_node **nodeptrs; ++ struct acpi_device_software_node_port *ports; ++ unsigned int num_ports; ++}; ++ + /* Device */ + struct acpi_device { + u32 pld_crc; +@@ -830,6 +849,8 @@ static inline void acpi_put_acpi_dev(struct acpi_device *adev) + + int acpi_wait_for_acpi_ipmi(void); + ++int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices); ++u32 arch_acpi_add_auto_dep(acpi_handle handle); + #else /* CONFIG_ACPI */ + + static inline int register_acpi_bus_type(void *bus) { return 0; } +diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h +index c080d579a546..e32149d605dc 100644 +--- a/include/acpi/actbl3.h ++++ b/include/acpi/actbl3.h +@@ -192,7 +192,8 @@ enum acpi_srat_type { + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */ + ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */ + ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY = 6, /* ACPI 6.4 */ +- ACPI_SRAT_TYPE_RESERVED = 7 /* 7 and greater are reserved */ ++ ACPI_SRAT_TYPE_RINTC_AFFINITY = 7, /* ACPI 6.6 */ ++ ACPI_SRAT_TYPE_RESERVED = 8 /* 8 and greater are reserved */ + }; + + /* +@@ -296,6 +297,21 @@ struct acpi_srat_generic_affinity { + #define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */ + #define ACPI_SRAT_ARCHITECTURAL_TRANSACTIONS (1<<1) /* ACPI 6.4 */ + ++/* 7: RINTC Affinity Structure(ACPI 6.6) */ ++ ++struct acpi_srat_rintc_affinity { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u32 proximity_domain; ++ u32 acpi_processor_uid; ++ u32 flags; ++ u32 clock_domain; ++}; ++ ++/* Flags for ACPI_SRAT_RINTC_AFFINITY */ ++ ++#define ACPI_SRAT_RINTC_ENABLED (1) /* 00: Use affinity structure */ ++ + /******************************************************************************* + * + * STAO - Status Override Table (_STA override) - ACPI 6.0 diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index c75d4a753849..879e5f8aa5e9 100644 --- a/include/asm-generic/pgalloc.h @@ -574403,6 +582152,53 @@ index 000000000000..c0370797443f +#define IOPMP_AUDIO1 29 + +#endif /* __DT_XUANTIE_TH1520_IOPMP_H__ */ +diff --git a/include/linux/acpi.h b/include/linux/acpi.h +index 83bb76c7d5a1..f3aa975327b6 100644 +--- a/include/linux/acpi.h ++++ b/include/linux/acpi.h +@@ -98,6 +98,7 @@ enum acpi_irq_model_id { + #ifdef CONFIG_SW64 + ACPI_IRQ_MODEL_SWPIC, + #endif ++ ACPI_IRQ_MODEL_RINTC, + ACPI_IRQ_MODEL_COUNT + }; + +@@ -291,6 +292,12 @@ acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } + + int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); + ++#ifdef CONFIG_RISCV ++void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa); ++#else ++static inline void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) { } ++#endif ++ + #ifndef PHYS_CPUID_INVALID + typedef u32 phys_cpuid_t; + #define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) +@@ -1348,6 +1355,8 @@ struct acpi_probe_entry { + kernel_ulong_t driver_data; + }; + ++void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr); ++ + #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \ + valid, data, fn) \ + static const struct acpi_probe_entry __acpi_probe_##name \ +@@ -1551,6 +1560,12 @@ void acpi_arm_init(void); + static inline void acpi_arm_init(void) { } + #endif + ++#ifdef CONFIG_RISCV ++void acpi_riscv_init(void); ++#else ++static inline void acpi_riscv_init(void) { } ++#endif ++ + #ifdef CONFIG_ACPI_PCC + void acpi_init_pcc(void); + 
#else diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 8f3b474f3a70..61c981c1ec9d 100644 --- a/include/linux/cpuhotplug.h @@ -574856,10 +582652,10 @@ index 000000000000..ec8f7df50583 +#endif diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h new file mode 100644 -index 000000000000..faf0b800b1b0 +index 000000000000..7494952c5518 --- /dev/null +++ b/include/linux/irqchip/riscv-imsic.h -@@ -0,0 +1,87 @@ +@@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. @@ -574870,6 +582666,8 @@ index 000000000000..faf0b800b1b0 + +#include +#include ++#include ++#include +#include + +#define IMSIC_MMIO_PAGE_SHIFT 12 @@ -574946,6 +582744,13 @@ index 000000000000..faf0b800b1b0 + +#endif + ++#ifdef CONFIG_ACPI ++int imsic_platform_acpi_probe(struct fwnode_handle *fwnode); ++struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev); ++#else ++static inline struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) { return NULL; } ++#endif ++ +#endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 8594cd9b642e..2992c1851b63 100644 @@ -575002,7 +582807,7 @@ index 27f42f713c89..7617930d3157 100644 #define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \ (dev_cap).num_ports * MIN_MSIX_P_PORT) diff --git a/include/linux/mm.h b/include/linux/mm.h -index c127b74f2c90..53c09345618a 100644 +index b974880cf283..fd0b3a434aaf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3182,6 +3182,22 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) @@ -575551,10 +583356,10 @@ index 000000000000..cfb1f017480c + +#endif /* __VS_DRM_H__ */ diff --git a/init/Kconfig b/init/Kconfig -index 4c566c4bbfa4..0e63ee288041 100644 +index 486f3a333f95..dafecdd40443 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -2158,6 +2158,9 @@ source "kernel/Kconfig.locks" +@@ -2203,6 +2203,9 @@ source "kernel/Kconfig.locks" config ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE bool @@ -576015,10 +583820,10 @@ index ef9f9a4e928d..824220b20ad7 100644 /* * This thread may hit another WARN() in the panic path. 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 7dc4ceebd5ec..f8f3117e0c42 100644 +index fa8c8e5853f1..aee77c39db91 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -6665,7 +6665,9 @@ static void __sched notrace __schedule(unsigned int sched_mode) +@@ -6678,7 +6678,9 @@ static void __sched notrace __schedule(unsigned int sched_mode) * if (signal_pending_state()) if (p->state & @state) * * Also, the membarrier system call requires a full memory barrier @@ -576029,7 +583834,7 @@ index 7dc4ceebd5ec..f8f3117e0c42 100644 */ rq_lock(rq, &rf); smp_mb__after_spinlock(); -@@ -6743,6 +6745,13 @@ static void __sched notrace __schedule(unsigned int sched_mode) +@@ -6756,6 +6758,13 @@ static void __sched notrace __schedule(unsigned int sched_mode) * architectures where spin_unlock is a full barrier, * - switch_to() for arm64 (weakly-ordered, spin_unlock * is a RELEASE barrier), @@ -576044,10 +583849,10 @@ index 7dc4ceebd5ec..f8f3117e0c42 100644 ++*switch_count; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index c530d501bb48..e7bf6ef150be 100644 +index 21bd2ca4172d..8d03ab533865 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -10614,6 +10614,9 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) +@@ -10710,6 +10710,9 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) if (kthread_is_per_cpu(p)) return 0; @@ -576785,7 +584590,7 @@ index a11cd7d6295f..03ecfa43bc3a 100644 return -EPIPE; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c -index 986c38a50166..d6683050c62b 100644 +index cb8f46028159..da563420ba29 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -298,8 +298,7 @@ enum { @@ -587741,6 +595546,421 @@ index 000000000000..d89d90500cc7 + } + +] +diff --git a/tools/testing/selftests/riscv/hwprobe/Makefile b/tools/testing/selftests/riscv/hwprobe/Makefile +index ebdbb3c22e54..f224b84591fb 100644 +--- a/tools/testing/selftests/riscv/hwprobe/Makefile ++++ b/tools/testing/selftests/riscv/hwprobe/Makefile +@@ -2,9 +2,14 @@ + # Copyright (C) 2021 ARM Limited + # Originally tools/testing/arm64/abi/Makefile + +-TEST_GEN_PROGS := hwprobe ++CFLAGS += -I$(top_srcdir)/tools/include ++ ++TEST_GEN_PROGS := hwprobe cbo + + include ../../lib.mk + + $(OUTPUT)/hwprobe: hwprobe.c sys_hwprobe.S +- $(CC) -o$@ $(CFLAGS) $(LDFLAGS) $^ ++ $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^ ++ ++$(OUTPUT)/cbo: cbo.c sys_hwprobe.S ++ $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^ +diff --git a/tools/testing/selftests/riscv/hwprobe/cbo.c b/tools/testing/selftests/riscv/hwprobe/cbo.c +new file mode 100644 +index 000000000000..50a2cc8aef38 +--- /dev/null ++++ b/tools/testing/selftests/riscv/hwprobe/cbo.c +@@ -0,0 +1,228 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2023 Ventana Micro Systems Inc. ++ * ++ * Run with 'taskset -c cbo' to only execute hwprobe on a ++ * subset of cpus, as well as only executing the tests on those cpus. ++ */ ++#define _GNU_SOURCE ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "hwprobe.h" ++#include "../../kselftest.h" ++ ++#define MK_CBO(fn) cpu_to_le32((fn) << 20 | 10 << 15 | 2 << 12 | 0 << 7 | 15) ++ ++static char mem[4096] __aligned(4096) = { [0 ... 
4095] = 0xa5 }; ++ ++static bool illegal_insn; ++ ++static void sigill_handler(int sig, siginfo_t *info, void *context) ++{ ++ unsigned long *regs = (unsigned long *)&((ucontext_t *)context)->uc_mcontext; ++ uint32_t insn = *(uint32_t *)regs[0]; ++ ++ assert(insn == MK_CBO(regs[11])); ++ ++ illegal_insn = true; ++ regs[0] += 4; ++} ++ ++static void cbo_insn(char *base, int fn) ++{ ++ uint32_t insn = MK_CBO(fn); ++ ++ asm volatile( ++ "mv a0, %0\n" ++ "li a1, %1\n" ++ ".4byte %2\n" ++ : : "r" (base), "i" (fn), "i" (insn) : "a0", "a1", "memory"); ++} ++ ++static void cbo_inval(char *base) { cbo_insn(base, 0); } ++static void cbo_clean(char *base) { cbo_insn(base, 1); } ++static void cbo_flush(char *base) { cbo_insn(base, 2); } ++static void cbo_zero(char *base) { cbo_insn(base, 4); } ++ ++static void test_no_zicbom(void *arg) ++{ ++ ksft_print_msg("Testing Zicbom instructions remain privileged\n"); ++ ++ illegal_insn = false; ++ cbo_clean(&mem[0]); ++ ksft_test_result(illegal_insn, "No cbo.clean\n"); ++ ++ illegal_insn = false; ++ cbo_flush(&mem[0]); ++ ksft_test_result(illegal_insn, "No cbo.flush\n"); ++ ++ illegal_insn = false; ++ cbo_inval(&mem[0]); ++ ksft_test_result(illegal_insn, "No cbo.inval\n"); ++} ++ ++static void test_no_zicboz(void *arg) ++{ ++ ksft_print_msg("No Zicboz, testing cbo.zero remains privileged\n"); ++ ++ illegal_insn = false; ++ cbo_zero(&mem[0]); ++ ksft_test_result(illegal_insn, "No cbo.zero\n"); ++} ++ ++static bool is_power_of_2(__u64 n) ++{ ++ return n != 0 && (n & (n - 1)) == 0; ++} ++ ++static void test_zicboz(void *arg) ++{ ++ struct riscv_hwprobe pair = { ++ .key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE, ++ }; ++ cpu_set_t *cpus = (cpu_set_t *)arg; ++ __u64 block_size; ++ int i, j; ++ long rc; ++ ++ rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)cpus, 0); ++ block_size = pair.value; ++ ksft_test_result(rc == 0 && pair.key == RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE && ++ is_power_of_2(block_size), "Zicboz block size\n"); ++ ksft_print_msg("Zicboz block size: %ld\n", block_size); ++ ++ illegal_insn = false; ++ cbo_zero(&mem[block_size]); ++ ksft_test_result(!illegal_insn, "cbo.zero\n"); ++ ++ if (illegal_insn || !is_power_of_2(block_size)) { ++ ksft_test_result_skip("cbo.zero check\n"); ++ return; ++ } ++ ++ assert(block_size <= 1024); ++ ++ for (i = 0; i < 4096 / block_size; ++i) { ++ if (i % 2) ++ cbo_zero(&mem[i * block_size]); ++ } ++ ++ for (i = 0; i < 4096 / block_size; ++i) { ++ char expected = i % 2 ? 
0x0 : 0xa5; ++ ++ for (j = 0; j < block_size; ++j) { ++ if (mem[i * block_size + j] != expected) { ++ ksft_test_result_fail("cbo.zero check\n"); ++ ksft_print_msg("cbo.zero check: mem[%d] != 0x%x\n", ++ i * block_size + j, expected); ++ return; ++ } ++ } ++ } ++ ++ ksft_test_result_pass("cbo.zero check\n"); ++} ++ ++static void check_no_zicboz_cpus(cpu_set_t *cpus) ++{ ++ struct riscv_hwprobe pair = { ++ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, ++ }; ++ cpu_set_t one_cpu; ++ int i = 0, c = 0; ++ long rc; ++ ++ while (i++ < CPU_COUNT(cpus)) { ++ while (!CPU_ISSET(c, cpus)) ++ ++c; ++ ++ CPU_ZERO(&one_cpu); ++ CPU_SET(c, &one_cpu); ++ ++ rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&one_cpu, 0); ++ assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0); ++ ++ if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ) ++ ksft_exit_fail_msg("Zicboz is only present on a subset of harts.\n" ++ "Use taskset to select a set of harts where Zicboz\n" ++ "presence (present or not) is consistent for each hart\n"); ++ ++c; ++ } ++} ++ ++enum { ++ TEST_ZICBOZ, ++ TEST_NO_ZICBOZ, ++ TEST_NO_ZICBOM, ++}; ++ ++static struct test_info { ++ bool enabled; ++ unsigned int nr_tests; ++ void (*test_fn)(void *arg); ++} tests[] = { ++ [TEST_ZICBOZ] = { .nr_tests = 3, test_zicboz }, ++ [TEST_NO_ZICBOZ] = { .nr_tests = 1, test_no_zicboz }, ++ [TEST_NO_ZICBOM] = { .nr_tests = 3, test_no_zicbom }, ++}; ++ ++int main(int argc, char **argv) ++{ ++ struct sigaction act = { ++ .sa_sigaction = &sigill_handler, ++ .sa_flags = SA_SIGINFO, ++ }; ++ struct riscv_hwprobe pair; ++ unsigned int plan = 0; ++ cpu_set_t cpus; ++ long rc; ++ int i; ++ ++ if (argc > 1 && !strcmp(argv[1], "--sigill")) { ++ rc = sigaction(SIGILL, &act, NULL); ++ assert(rc == 0); ++ tests[TEST_NO_ZICBOZ].enabled = true; ++ tests[TEST_NO_ZICBOM].enabled = true; ++ } ++ ++ rc = sched_getaffinity(0, sizeof(cpu_set_t), &cpus); ++ assert(rc == 0); ++ ++ ksft_print_header(); ++ ++ pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0; ++ rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&cpus, 0); ++ if (rc < 0) ++ ksft_exit_fail_msg("hwprobe() failed with %d\n", rc); ++ assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0); ++ ++ if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ) { ++ tests[TEST_ZICBOZ].enabled = true; ++ tests[TEST_NO_ZICBOZ].enabled = false; ++ } else { ++ check_no_zicboz_cpus(&cpus); ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(tests); ++i) ++ plan += tests[i].enabled ? tests[i].nr_tests : 0; ++ ++ if (plan == 0) ++ ksft_print_msg("No tests enabled.\n"); ++ else ++ ksft_set_plan(plan); ++ ++ for (i = 0; i < ARRAY_SIZE(tests); ++i) { ++ if (tests[i].enabled) ++ tests[i].test_fn(&cpus); ++ } ++ ++ ksft_finished(); ++} +diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.c b/tools/testing/selftests/riscv/hwprobe/hwprobe.c +index 09f290a67420..d53e0889b59e 100644 +--- a/tools/testing/selftests/riscv/hwprobe/hwprobe.c ++++ b/tools/testing/selftests/riscv/hwprobe/hwprobe.c +@@ -1,14 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0-only +-#include +-#include +- +-/* +- * Rather than relying on having a new enough libc to define this, just do it +- * ourselves. This way we don't need to be coupled to a new-enough libc to +- * contain the call. 
+- */ +-long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, unsigned int flags); ++#include "hwprobe.h" ++#include "../../kselftest.h" + + int main(int argc, char **argv) + { +@@ -16,6 +8,9 @@ int main(int argc, char **argv) + unsigned long cpus; + long out; + ++ ksft_print_header(); ++ ksft_set_plan(5); ++ + /* Fake the CPU_SET ops. */ + cpus = -1; + +@@ -25,13 +20,16 @@ int main(int argc, char **argv) + */ + for (long i = 0; i < 8; i++) + pairs[i].key = i; ++ + out = riscv_hwprobe(pairs, 8, 1, &cpus, 0); + if (out != 0) +- return -1; ++ ksft_exit_fail_msg("hwprobe() failed with %ld\n", out); ++ + for (long i = 0; i < 4; ++i) { + /* Fail if the kernel claims not to recognize a base key. */ + if ((i < 4) && (pairs[i].key != i)) +- return -2; ++ ksft_exit_fail_msg("Failed to recognize base key: key != i, " ++ "key=%ld, i=%ld\n", pairs[i].key, i); + + if (pairs[i].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR) + continue; +@@ -39,52 +37,30 @@ int main(int argc, char **argv) + if (pairs[i].value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA) + continue; + +- return -3; ++ ksft_exit_fail_msg("Unexpected pair: (%ld, %ld)\n", pairs[i].key, pairs[i].value); + } + +- /* +- * This should also work with a NULL CPU set, but should not work +- * with an improperly supplied CPU set. +- */ + out = riscv_hwprobe(pairs, 8, 0, 0, 0); +- if (out != 0) +- return -4; ++ ksft_test_result(out == 0, "NULL CPU set\n"); + + out = riscv_hwprobe(pairs, 8, 0, &cpus, 0); +- if (out == 0) +- return -5; ++ ksft_test_result(out != 0, "Bad CPU set\n"); + + out = riscv_hwprobe(pairs, 8, 1, 0, 0); +- if (out == 0) +- return -6; ++ ksft_test_result(out != 0, "NULL CPU set with non-zero size\n"); + +- /* +- * Check that keys work by providing one that we know exists, and +- * checking to make sure the resultig pair is what we asked for. +- */ + pairs[0].key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR; + out = riscv_hwprobe(pairs, 1, 1, &cpus, 0); +- if (out != 0) +- return -7; +- if (pairs[0].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR) +- return -8; ++ ksft_test_result(out == 0 && pairs[0].key == RISCV_HWPROBE_KEY_BASE_BEHAVIOR, ++ "Existing key is maintained\n"); + +- /* +- * Check that an unknown key gets overwritten with -1, +- * but doesn't block elements after it. +- */ + pairs[0].key = 0x5555; + pairs[1].key = 1; + pairs[1].value = 0xAAAA; + out = riscv_hwprobe(pairs, 2, 0, 0, 0); +- if (out != 0) +- return -9; +- +- if (pairs[0].key != -1) +- return -10; +- +- if ((pairs[1].key != 1) || (pairs[1].value == 0xAAAA)) +- return -11; ++ ksft_test_result(out == 0 && pairs[0].key == -1 && ++ pairs[1].key == 1 && pairs[1].value != 0xAAAA, ++ "Unknown key overwritten with -1 and doesn't block other elements\n"); + +- return 0; ++ ksft_finished(); + } +diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.h b/tools/testing/selftests/riscv/hwprobe/hwprobe.h +new file mode 100644 +index 000000000000..e3fccb390c4d +--- /dev/null ++++ b/tools/testing/selftests/riscv/hwprobe/hwprobe.h +@@ -0,0 +1,15 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++#ifndef SELFTEST_RISCV_HWPROBE_H ++#define SELFTEST_RISCV_HWPROBE_H ++#include ++#include ++ ++/* ++ * Rather than relying on having a new enough libc to define this, just do it ++ * ourselves. This way we don't need to be coupled to a new-enough libc to ++ * contain the call. 
++ */ ++long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, unsigned int flags); ++ ++#endif +diff --git a/tools/testing/selftests/riscv/vector/vstate_prctl.c b/tools/testing/selftests/riscv/vector/vstate_prctl.c +index 8ad94e08ff4d..27668fb3b6d0 100644 +--- a/tools/testing/selftests/riscv/vector/vstate_prctl.c ++++ b/tools/testing/selftests/riscv/vector/vstate_prctl.c +@@ -1,20 +1,12 @@ + // SPDX-License-Identifier: GPL-2.0-only + #include + #include +-#include + #include + #include + ++#include "../hwprobe/hwprobe.h" + #include "../../kselftest.h" + +-/* +- * Rather than relying on having a new enough libc to define this, just do it +- * ourselves. This way we don't need to be coupled to a new-enough libc to +- * contain the call. +- */ +-long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, unsigned int flags); +- + #define NEXT_PROGRAM "./vstate_exec_nolibc" + static int launch_test(int test_inherit) + { -- 2.34.1 diff --git a/kernel.spec b/kernel.spec index 8d3ed666..7beaa03a 100644 --- a/kernel.spec +++ b/kernel.spec @@ -42,7 +42,7 @@ rm -f test_openEuler_sign.ko test_openEuler_sign.ko.sig %global upstream_sublevel 0 %global devel_release 95 %global maintenance_release .0.0 -%global pkg_release .99 +%global pkg_release .100 %global openeuler_lts 1 %global openeuler_major 2403 @@ -1138,6 +1138,12 @@ fi %endif %changelog +* Fri Jun 06 2025 Mingzheng Xing - 6.6.0-95.0.0.100 +- RISC-V kernel upgrade to 6.6.0-95.0.0 +- Hwprobe related backport +- Add ACPI NUMA support for RISC-V +- Backport RISC-V external interrupt controller support for ACPI + * Wed Jun 04 2025 Li Nan - 6.6.0-95.0.0.99 - !16482 [openEuler-24.03-LTS][linux-6.6.y sync] Backport 6.6.69-6.6.70 LTS Conflicts Patches - !16211 [OLK-6.6] Fix CVE-2025-22028 -- Gitee
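
A minimal user-space sketch of the hwprobe interface this backport exposes, for anyone who wants to poke at it outside the selftests. This is not part of the patch: it assumes a riscv64 build environment whose installed kernel headers provide <asm/hwprobe.h> and __NR_riscv_hwprobe, and it enters the syscall through syscall(2) instead of a libc wrapper, much like the selftests' sys_hwprobe.S stub does.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
        struct riscv_hwprobe pairs[2] = {
                { .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
                { .key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE },
        };
        /* cpusetsize == 0 with cpus == NULL queries all online harts. */
        long rc = syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0);

        if (rc != 0) {
                perror("riscv_hwprobe");
                return 1;
        }

        if (pairs[0].value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA)
                printf("base behaviour: IMA\n");

        /* Keys the kernel does not recognize are reported back as -1. */
        if (pairs[1].key != -1)
                printf("Zicboz block size: %llu bytes\n",
                       (unsigned long long)pairs[1].value);

        return 0;
}

Built with a riscv64 cross compiler (the updated selftest Makefile links its test programs with -static, which also works here), the expected behaviour matches what the rewritten hwprobe.c asserts: passing a NULL CPU set with cpusetsize 0 succeeds, and an unknown key is handed back with key == -1 rather than failing the whole call.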