author:    Mike Pagano <mpagano@gentoo.org>  2023-04-26 09:19:39 -0400
committer: Mike Pagano <mpagano@gentoo.org>  2023-04-26 09:19:39 -0400
commit:    9d02faaed4123c2992080e3366d6b7caa044c9ce (patch)
tree:      3148782909b7ba73742902de361d870f2feb1fa3
parent:    Linux patch 6.1.25 (diff)
Linux patch 6.1.26

Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1025_linux-6.1.26.patch  | 3310
2 files changed, 3314 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 7c4d9865..2dab4c18 100644
--- a/0000_README
+++ b/0000_README
@@ -143,6 +143,10 @@ Patch: 1024_linux-6.1.25.patch
From: https://www.kernel.org
Desc: Linux 6.1.25
+Patch: 1025_linux-6.1.26.patch
+From: https://www.kernel.org
+Desc: Linux 6.1.26
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1025_linux-6.1.26.patch b/1025_linux-6.1.26.patch
new file mode 100644
index 00000000..f8f558ac
--- /dev/null
+++ b/1025_linux-6.1.26.patch
@@ -0,0 +1,3310 @@
+diff --git a/Makefile b/Makefile
+index 1a4c4af370db8..d2eff3747f2f7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 25
++SUBLEVEL = 26
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 2ca76b69add78..511ca864c1b2d 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -942,7 +942,7 @@
+ status = "disabled";
+ };
+
+- spdif: sound@ff88b0000 {
++ spdif: sound@ff8b0000 {
+ compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif";
+ reg = <0x0 0xff8b0000 0x0 0x10000>;
+ #sound-dai-cells = <0>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index 131a8a5a9f5a0..88b848c65b0d2 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -1571,10 +1571,9 @@
+
+ dmc: bus@38000 {
+ compatible = "simple-bus";
+- reg = <0x0 0x38000 0x0 0x400>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+- ranges = <0x0 0x0 0x0 0x38000 0x0 0x400>;
++ ranges = <0x0 0x0 0x0 0x38000 0x0 0x2000>;
+
+ canvas: video-lut@48 {
+ compatible = "amlogic,canvas";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+index 7d6317d95b131..1dd0617477fdf 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+@@ -193,7 +193,7 @@
+ rohm,reset-snvs-powered;
+
+ #clock-cells = <0>;
+- clocks = <&osc_32k 0>;
++ clocks = <&osc_32k>;
+ clock-output-names = "clk-32k-out";
+
+ regulators {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index 59445f916d7fa..b4aef79650c69 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -95,7 +95,7 @@
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
+- off-on-delay = <500000>;
++ off-on-delay-us = <500000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_eth>;
+ regulator-always-on;
+@@ -135,7 +135,7 @@
+ enable-active-high;
+ /* Verdin SD_1_PWR_EN (SODIMM 76) */
+ gpio = <&gpio3 5 GPIO_ACTIVE_HIGH>;
+- off-on-delay = <100000>;
++ off-on-delay-us = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
+ regulator-max-microvolt = <3300000>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
+index cefabe65b2520..c8b521d45fca1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
+@@ -12,7 +12,7 @@
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio_expander_21 4 GPIO_ACTIVE_HIGH>; /* ETH_PWR_EN */
+- off-on-delay = <500000>;
++ off-on-delay-us = <500000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "+V3.3_ETH";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+index 5dcd1de586b52..371144eb40188 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+@@ -86,7 +86,7 @@
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
+- off-on-delay = <500000>;
++ off-on-delay-us = <500000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_eth>;
+ regulator-always-on;
+@@ -127,7 +127,7 @@
+ enable-active-high;
+ /* Verdin SD_1_PWR_EN (SODIMM 76) */
+ gpio = <&gpio4 22 GPIO_ACTIVE_HIGH>;
+- off-on-delay = <100000>;
++ off-on-delay-us = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
+ regulator-max-microvolt = <3300000>;
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+index 7143c936de61e..bb0a838891f64 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
++++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+@@ -59,11 +59,11 @@
+ perst-gpios = <&tlmm 58 0x1>;
+ };
+
+-&pcie_phy0 {
++&pcie_qmp0 {
+ status = "okay";
+ };
+
+-&pcie_phy1 {
++&pcie_qmp1 {
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
+index db4b87944cdf2..a695686afadfc 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
+@@ -22,7 +22,7 @@
+ };
+
+ &blsp1_spi1 {
+- status = "ok";
++ status = "okay";
+
+ flash@0 {
+ #address-cells = <1>;
+@@ -34,33 +34,33 @@
+ };
+
+ &blsp1_uart5 {
+- status = "ok";
++ status = "okay";
+ };
+
+ &pcie0 {
+- status = "ok";
++ status = "okay";
+ perst-gpios = <&tlmm 58 0x1>;
+ };
+
+ &pcie1 {
+- status = "ok";
++ status = "okay";
+ perst-gpios = <&tlmm 61 0x1>;
+ };
+
+-&pcie_phy0 {
+- status = "ok";
++&pcie_qmp0 {
++ status = "okay";
+ };
+
+-&pcie_phy1 {
+- status = "ok";
++&pcie_qmp1 {
++ status = "okay";
+ };
+
+ &qpic_bam {
+- status = "ok";
++ status = "okay";
+ };
+
+ &qpic_nand {
+- status = "ok";
++ status = "okay";
+
+ nand@0 {
+ reg = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
+index 24836b6b9bbc9..be0df0856df9b 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
+@@ -15,8 +15,9 @@
+ #size-cells = <0>;
+
+ pmk8280_pon: pon@1300 {
+- compatible = "qcom,pm8998-pon";
+- reg = <0x1300>;
++ compatible = "qcom,pmk8350-pon";
++ reg = <0x1300>, <0x800>;
++ reg-names = "hlos", "pbs";
+
+ pmk8280_pon_pwrkey: pwrkey {
+ compatible = "qcom,pmk8350-pwrkey";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+index 5bcd4be329643..4d494b53a71ab 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+@@ -540,7 +540,7 @@
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>;
+- sd-uhs-sdr104;
++ sd-uhs-sdr50;
+ vmmc-supply = <&vcc3v3_sys>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 45e2136322ba2..e2b45c937c58a 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -449,9 +449,22 @@ struct kvm_vcpu_arch {
+ ({ \
+ __build_check_flag(v, flagset, f, m); \
+ \
+- v->arch.flagset & (m); \
++ READ_ONCE(v->arch.flagset) & (m); \
+ })
+
++/*
++ * Note that the set/clear accessors must be preempt-safe in order to
++ * avoid nesting them with load/put which also manipulate flags...
++ */
++#ifdef __KVM_NVHE_HYPERVISOR__
++/* the nVHE hypervisor is always non-preemptible */
++#define __vcpu_flags_preempt_disable()
++#define __vcpu_flags_preempt_enable()
++#else
++#define __vcpu_flags_preempt_disable() preempt_disable()
++#define __vcpu_flags_preempt_enable() preempt_enable()
++#endif
++
+ #define __vcpu_set_flag(v, flagset, f, m) \
+ do { \
+ typeof(v->arch.flagset) *fset; \
+@@ -459,9 +472,11 @@ struct kvm_vcpu_arch {
+ __build_check_flag(v, flagset, f, m); \
+ \
+ fset = &v->arch.flagset; \
++ __vcpu_flags_preempt_disable(); \
+ if (HWEIGHT(m) > 1) \
+ *fset &= ~(m); \
+ *fset |= (f); \
++ __vcpu_flags_preempt_enable(); \
+ } while (0)
+
+ #define __vcpu_clear_flag(v, flagset, f, m) \
+@@ -471,7 +486,9 @@ struct kvm_vcpu_arch {
+ __build_check_flag(v, flagset, f, m); \
+ \
+ fset = &v->arch.flagset; \
++ __vcpu_flags_preempt_disable(); \
+ *fset &= ~(m); \
++ __vcpu_flags_preempt_enable(); \
+ } while (0)
+
+ #define vcpu_get_flag(v, ...) __vcpu_get_flag((v), __VA_ARGS__)
+diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
+index c9f401fa01a93..950e35b993d2b 100644
+--- a/arch/arm64/kvm/hypercalls.c
++++ b/arch/arm64/kvm/hypercalls.c
+@@ -397,6 +397,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ u64 val;
+ int wa_level;
+
++ if (KVM_REG_SIZE(reg->id) != sizeof(val))
++ return -ENOENT;
+ if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
+index b07974218393d..f6177f1334776 100644
+--- a/arch/loongarch/include/asm/cpu-features.h
++++ b/arch/loongarch/include/asm/cpu-features.h
+@@ -42,6 +42,7 @@
+ #define cpu_has_fpu cpu_opt(LOONGARCH_CPU_FPU)
+ #define cpu_has_lsx cpu_opt(LOONGARCH_CPU_LSX)
+ #define cpu_has_lasx cpu_opt(LOONGARCH_CPU_LASX)
++#define cpu_has_crc32 cpu_opt(LOONGARCH_CPU_CRC32)
+ #define cpu_has_complex cpu_opt(LOONGARCH_CPU_COMPLEX)
+ #define cpu_has_crypto cpu_opt(LOONGARCH_CPU_CRYPTO)
+ #define cpu_has_lvz cpu_opt(LOONGARCH_CPU_LVZ)
+diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
+index 754f285067913..9275770552636 100644
+--- a/arch/loongarch/include/asm/cpu.h
++++ b/arch/loongarch/include/asm/cpu.h
+@@ -78,25 +78,26 @@ enum cpu_type_enum {
+ #define CPU_FEATURE_FPU 3 /* CPU has FPU */
+ #define CPU_FEATURE_LSX 4 /* CPU has LSX (128-bit SIMD) */
+ #define CPU_FEATURE_LASX 5 /* CPU has LASX (256-bit SIMD) */
+-#define CPU_FEATURE_COMPLEX 6 /* CPU has Complex instructions */
+-#define CPU_FEATURE_CRYPTO 7 /* CPU has Crypto instructions */
+-#define CPU_FEATURE_LVZ 8 /* CPU has Virtualization extension */
+-#define CPU_FEATURE_LBT_X86 9 /* CPU has X86 Binary Translation */
+-#define CPU_FEATURE_LBT_ARM 10 /* CPU has ARM Binary Translation */
+-#define CPU_FEATURE_LBT_MIPS 11 /* CPU has MIPS Binary Translation */
+-#define CPU_FEATURE_TLB 12 /* CPU has TLB */
+-#define CPU_FEATURE_CSR 13 /* CPU has CSR */
+-#define CPU_FEATURE_WATCH 14 /* CPU has watchpoint registers */
+-#define CPU_FEATURE_VINT 15 /* CPU has vectored interrupts */
+-#define CPU_FEATURE_CSRIPI 16 /* CPU has CSR-IPI */
+-#define CPU_FEATURE_EXTIOI 17 /* CPU has EXT-IOI */
+-#define CPU_FEATURE_PREFETCH 18 /* CPU has prefetch instructions */
+-#define CPU_FEATURE_PMP 19 /* CPU has perfermance counter */
+-#define CPU_FEATURE_SCALEFREQ 20 /* CPU supports cpufreq scaling */
+-#define CPU_FEATURE_FLATMODE 21 /* CPU has flat mode */
+-#define CPU_FEATURE_EIODECODE 22 /* CPU has EXTIOI interrupt pin decode mode */
+-#define CPU_FEATURE_GUESTID 23 /* CPU has GuestID feature */
+-#define CPU_FEATURE_HYPERVISOR 24 /* CPU has hypervisor (running in VM) */
++#define CPU_FEATURE_CRC32 6 /* CPU has CRC32 instructions */
++#define CPU_FEATURE_COMPLEX 7 /* CPU has Complex instructions */
++#define CPU_FEATURE_CRYPTO 8 /* CPU has Crypto instructions */
++#define CPU_FEATURE_LVZ 9 /* CPU has Virtualization extension */
++#define CPU_FEATURE_LBT_X86 10 /* CPU has X86 Binary Translation */
++#define CPU_FEATURE_LBT_ARM 11 /* CPU has ARM Binary Translation */
++#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */
++#define CPU_FEATURE_TLB 13 /* CPU has TLB */
++#define CPU_FEATURE_CSR 14 /* CPU has CSR */
++#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */
++#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */
++#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */
++#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */
++#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */
++#define CPU_FEATURE_PMP 20 /* CPU has perfermance counter */
++#define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */
++#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */
++#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */
++#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */
++#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */
+
+ #define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
+ #define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
+@@ -104,6 +105,7 @@ enum cpu_type_enum {
+ #define LOONGARCH_CPU_FPU BIT_ULL(CPU_FEATURE_FPU)
+ #define LOONGARCH_CPU_LSX BIT_ULL(CPU_FEATURE_LSX)
+ #define LOONGARCH_CPU_LASX BIT_ULL(CPU_FEATURE_LASX)
++#define LOONGARCH_CPU_CRC32 BIT_ULL(CPU_FEATURE_CRC32)
+ #define LOONGARCH_CPU_COMPLEX BIT_ULL(CPU_FEATURE_COMPLEX)
+ #define LOONGARCH_CPU_CRYPTO BIT_ULL(CPU_FEATURE_CRYPTO)
+ #define LOONGARCH_CPU_LVZ BIT_ULL(CPU_FEATURE_LVZ)
+diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
+index 7f8d57a61c8bd..62835d84a647d 100644
+--- a/arch/loongarch/include/asm/loongarch.h
++++ b/arch/loongarch/include/asm/loongarch.h
+@@ -117,7 +117,7 @@ static inline u32 read_cpucfg(u32 reg)
+ #define CPUCFG1_EP BIT(22)
+ #define CPUCFG1_RPLV BIT(23)
+ #define CPUCFG1_HUGEPG BIT(24)
+-#define CPUCFG1_IOCSRBRD BIT(25)
++#define CPUCFG1_CRC32 BIT(25)
+ #define CPUCFG1_MSGINT BIT(26)
+
+ #define LOONGARCH_CPUCFG2 0x2
+diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
+index 3a3fce2d78461..5adf0f736c6d7 100644
+--- a/arch/loongarch/kernel/cpu-probe.c
++++ b/arch/loongarch/kernel/cpu-probe.c
+@@ -60,7 +60,7 @@ static inline void set_elf_platform(int cpu, const char *plat)
+
+ /* MAP BASE */
+ unsigned long vm_map_base;
+-EXPORT_SYMBOL_GPL(vm_map_base);
++EXPORT_SYMBOL(vm_map_base);
+
+ static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
+ {
+@@ -94,13 +94,18 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
+ c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
+ LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+
+- elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32;
++ elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
+
+ config = read_cpucfg(LOONGARCH_CPUCFG1);
+ if (config & CPUCFG1_UAL) {
+ c->options |= LOONGARCH_CPU_UAL;
+ elf_hwcap |= HWCAP_LOONGARCH_UAL;
+ }
++ if (config & CPUCFG1_CRC32) {
++ c->options |= LOONGARCH_CPU_CRC32;
++ elf_hwcap |= HWCAP_LOONGARCH_CRC32;
++ }
++
+
+ config = read_cpucfg(LOONGARCH_CPUCFG2);
+ if (config & CPUCFG2_LAM) {
+diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
+index 5c67cc4fd56d5..0d82907b5404c 100644
+--- a/arch/loongarch/kernel/proc.c
++++ b/arch/loongarch/kernel/proc.c
+@@ -76,6 +76,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ if (cpu_has_fpu) seq_printf(m, " fpu");
+ if (cpu_has_lsx) seq_printf(m, " lsx");
+ if (cpu_has_lasx) seq_printf(m, " lasx");
++ if (cpu_has_crc32) seq_printf(m, " crc32");
+ if (cpu_has_complex) seq_printf(m, " complex");
+ if (cpu_has_crypto) seq_printf(m, " crypto");
+ if (cpu_has_lvz) seq_printf(m, " lvz");
+diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
+index 080061793c859..c7e9c96719fa3 100644
+--- a/arch/loongarch/mm/init.c
++++ b/arch/loongarch/mm/init.c
+@@ -41,7 +41,7 @@
+ * don't have to care about aliases on other CPUs.
+ */
+ unsigned long empty_zero_page, zero_page_mask;
+-EXPORT_SYMBOL_GPL(empty_zero_page);
++EXPORT_SYMBOL(empty_zero_page);
+ EXPORT_SYMBOL(zero_page_mask);
+
+ void setup_zero_pages(void)
+@@ -231,7 +231,7 @@ pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+ #endif
+ #ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+-EXPORT_SYMBOL_GPL(invalid_pmd_table);
++EXPORT_SYMBOL(invalid_pmd_table);
+ #endif
+ pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+ EXPORT_SYMBOL(invalid_pte_table);
+diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
+index 1f98947fe715d..91d6a5360bb9c 100644
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -15,6 +15,8 @@
+ #define EMITS_PT_NOTE
+ #endif
+
++#define RUNTIME_DISCARD_EXIT
++
+ #include <asm-generic/vmlinux.lds.h>
+
+ #undef mips
+diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
+index dd58e1d993972..659e21862077b 100644
+--- a/arch/riscv/purgatory/Makefile
++++ b/arch/riscv/purgatory/Makefile
+@@ -74,9 +74,7 @@ CFLAGS_string.o += $(PURGATORY_CFLAGS)
+ CFLAGS_REMOVE_ctype.o += $(PURGATORY_CFLAGS_REMOVE)
+ CFLAGS_ctype.o += $(PURGATORY_CFLAGS)
+
+-AFLAGS_REMOVE_entry.o += -Wa,-gdwarf-2
+-AFLAGS_REMOVE_memcpy.o += -Wa,-gdwarf-2
+-AFLAGS_REMOVE_memset.o += -Wa,-gdwarf-2
++asflags-remove-y += $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x))
+
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ $(call if_changed,ld)
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 53e0209229f87..092b16b4dd4f6 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -474,9 +474,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ }
+ return 0;
+ case PTRACE_GET_LAST_BREAK:
+- put_user(child->thread.last_break,
+- (unsigned long __user *) data);
+- return 0;
++ return put_user(child->thread.last_break, (unsigned long __user *)data);
+ case PTRACE_ENABLE_TE:
+ if (!MACHINE_HAS_TE)
+ return -EIO;
+@@ -824,9 +822,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ }
+ return 0;
+ case PTRACE_GET_LAST_BREAK:
+- put_user(child->thread.last_break,
+- (unsigned int __user *) data);
+- return 0;
++ return put_user(child->thread.last_break, (unsigned int __user *)data);
+ }
+ return compat_ptrace_request(child, request, addr, data);
+ }
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 17f09dc263811..82fec66d46d29 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -69,8 +69,7 @@ CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
+ CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
+ CFLAGS_string.o += $(PURGATORY_CFLAGS)
+
+-AFLAGS_REMOVE_setup-x86_$(BITS).o += -Wa,-gdwarf-2
+-AFLAGS_REMOVE_entry64.o += -Wa,-gdwarf-2
++asflags-remove-y += $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x))
+
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ $(call if_changed,ld)
+diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
+index 727704431f618..13918c8c839ea 100644
+--- a/drivers/fpga/fpga-bridge.c
++++ b/drivers/fpga/fpga-bridge.c
+@@ -360,7 +360,6 @@ fpga_bridge_register(struct device *parent, const char *name,
+ bridge->dev.parent = parent;
+ bridge->dev.of_node = parent->of_node;
+ bridge->dev.id = id;
+- of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
+
+ ret = dev_set_name(&bridge->dev, "br%d", id);
+ if (ret)
+@@ -372,6 +371,8 @@ fpga_bridge_register(struct device *parent, const char *name,
+ return ERR_PTR(ret);
+ }
+
++ of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
++
+ return bridge;
+
+ error_device:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index 89011bae7588e..ca5dc51600fac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -653,6 +653,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+ if (!src->enabled_types || !src->funcs->set)
+ return -EINVAL;
+
++ if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
++ return -EINVAL;
++
+ if (atomic_dec_and_test(&src->enabled_types[type]))
+ return amdgpu_irq_update(adev, src, type);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index ce64ca1c6e669..5c1193dd7d88c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -26,6 +26,7 @@
+
+ #include <linux/firmware.h>
+ #include <linux/module.h>
++#include <linux/dmi.h>
+ #include <linux/pci.h>
+ #include <linux/debugfs.h>
+ #include <drm/drm_drv.h>
+@@ -84,6 +85,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ {
+ unsigned long bo_size;
+ const char *fw_name;
++ const char *bios_ver;
+ const struct common_firmware_header *hdr;
+ unsigned char fw_check;
+ unsigned int fw_shared_size, log_offset;
+@@ -159,6 +161,21 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
++ /*
++ * Some Steam Deck's BIOS versions are incompatible with the
++ * indirect SRAM mode, leading to amdgpu being unable to get
++ * properly probed (and even potentially crashing the kernel).
++ * Hence, check for these versions here - notice this is
++ * restricted to Vangogh (Deck's APU).
++ */
++ bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
++
++ if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
++ !strncmp("F7A0114", bios_ver, 7))) {
++ adev->vcn.indirect_sram = false;
++ dev_info(adev->dev,
++ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
++ }
+ break;
+ case IP_VERSION(3, 0, 16):
+ fw_name = FIRMWARE_DIMGREY_CAVEFISH;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index b87f50e8fa615..1ec643a0d00d2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -167,10 +167,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+ if (rc)
+ return rc;
+
+- irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
++ if (amdgpu_in_reset(adev)) {
++ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
++ /* During gpu-reset we disable and then enable vblank irq, so
++ * don't use amdgpu_irq_get/put() to avoid refcount change.
++ */
++ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
++ rc = -EBUSY;
++ } else {
++ rc = (enable)
++ ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
++ : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
++ }
+
+- if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+- return -EBUSY;
++ if (rc)
++ return rc;
+
+ skip:
+ if (amdgpu_in_reset(adev))
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+index 7dd0845d1bd9f..8e416433184cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+@@ -222,7 +222,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+- .line_buffer_fixed_bpp = 49,
++ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
+index 3d1f50f481cfd..7098f125b54a9 100644
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -146,8 +146,8 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
+ unsigned int order;
+ u64 root_size;
+
+- root_size = rounddown_pow_of_two(size);
+- order = ilog2(root_size) - ilog2(chunk_size);
++ order = ilog2(size) - ilog2(chunk_size);
++ root_size = chunk_size << order;
+
+ root = drm_block_alloc(mm, NULL, order, offset);
+ if (!root)
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+index 48c375c65a418..7f3f2d50e6cde 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+@@ -165,7 +165,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ DP_AUX_CH_CTL_TIME_OUT_MAX |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+- DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
++ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 8cecf81a5ae03..3c05ce01f73b8 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -840,6 +840,8 @@ static void vop2_enable(struct vop2 *vop2)
+ return;
+ }
+
++ regcache_sync(vop2->map);
++
+ if (vop2->data->soc_id == 3566)
+ vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
+
+@@ -868,6 +870,8 @@ static void vop2_disable(struct vop2 *vop2)
+
+ pm_runtime_put_sync(vop2->dev);
+
++ regcache_mark_dirty(vop2->map);
++
+ clk_disable_unprepare(vop2->aclk);
+ clk_disable_unprepare(vop2->hclk);
+ }
+diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
+index 62f69589a72d3..a699fc0dc8579 100644
+--- a/drivers/gpu/drm/tests/drm_buddy_test.c
++++ b/drivers/gpu/drm/tests/drm_buddy_test.c
+@@ -89,7 +89,8 @@ static int check_block(struct kunit *test, struct drm_buddy *mm,
+ err = -EINVAL;
+ }
+
+- if (!is_power_of_2(block_size)) {
++ /* We can't use is_power_of_2() for a u64 on 32-bit systems. */
++ if (block_size & (block_size - 1)) {
+ kunit_err(test, "block size not power of two\n");
+ err = -EINVAL;
+ }
+diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
+index 870f4cb609237..3ad5678f26135 100644
+--- a/drivers/iio/adc/at91-sama5d2_adc.c
++++ b/drivers/iio/adc/at91-sama5d2_adc.c
+@@ -1409,7 +1409,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
+ trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
+ iio_device_id(indio), trigger_name);
+ if (!trig)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ trig->dev.parent = indio->dev.parent;
+ iio_trigger_set_drvdata(trig, indio);
+diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
+index beadfa938d2da..404865e354602 100644
+--- a/drivers/iio/dac/ad5755.c
++++ b/drivers/iio/dac/ad5755.c
+@@ -802,6 +802,7 @@ static struct ad5755_platform_data *ad5755_parse_fw(struct device *dev)
+ return pdata;
+
+ error_out:
++ fwnode_handle_put(pp);
+ devm_kfree(dev, pdata);
+ return NULL;
+ }
+diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
+index dd9051f1cc1a0..13a6c3d078616 100644
+--- a/drivers/iio/light/tsl2772.c
++++ b/drivers/iio/light/tsl2772.c
+@@ -601,6 +601,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
+ return -EINVAL;
+ }
+ }
++ chip->settings.prox_diode = prox_diode_mask;
+
+ return 0;
+ }
+diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
+index d836d3dcc6a24..a68da2988f9cd 100644
+--- a/drivers/input/tablet/pegasus_notetaker.c
++++ b/drivers/input/tablet/pegasus_notetaker.c
+@@ -296,6 +296,12 @@ static int pegasus_probe(struct usb_interface *intf,
+ pegasus->intf = intf;
+
+ pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
++ /* Sanity check that pipe's type matches endpoint's type */
++ if (usb_pipe_type_check(dev, pipe)) {
++ error = -EINVAL;
++ goto err_free_mem;
++ }
++
+ pegasus->data_len = usb_maxpacket(dev, pipe);
+
+ pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
+diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
+index 660df7d269fac..d410e2e78a3d3 100644
+--- a/drivers/memstick/core/memstick.c
++++ b/drivers/memstick/core/memstick.c
+@@ -410,6 +410,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
+ return card;
+ err_out:
+ host->card = old_card;
++ kfree_const(card->dev.kobj.name);
+ kfree(card);
+ return NULL;
+ }
+@@ -468,8 +469,10 @@ static void memstick_check(struct work_struct *work)
+ put_device(&card->dev);
+ host->card = NULL;
+ }
+- } else
++ } else {
++ kfree_const(card->dev.kobj.name);
+ kfree(card);
++ }
+ }
+
+ out_power_off:
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index 101581d83982a..8e22b375247ef 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -351,8 +351,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
+ */
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_MMC_HS:
+- case MMC_TIMING_UHS_SDR12:
+- case MMC_TIMING_UHS_SDR25:
+ val &= ~SDHCI_CTRL_HISPD;
+ }
+ }
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index cda57cb863089..75e694791d8d9 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -3272,7 +3272,19 @@ static struct spi_mem_driver spi_nor_driver = {
+ .remove = spi_nor_remove,
+ .shutdown = spi_nor_shutdown,
+ };
+-module_spi_mem_driver(spi_nor_driver);
++
++static int __init spi_nor_module_init(void)
++{
++ return spi_mem_driver_register(&spi_nor_driver);
++}
++module_init(spi_nor_module_init);
++
++static void __exit spi_nor_module_exit(void)
++{
++ spi_mem_driver_unregister(&spi_nor_driver);
++ spi_nor_debugfs_shutdown();
++}
++module_exit(spi_nor_module_exit);
+
+ MODULE_LICENSE("GPL v2");
+ MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
+index d18dafeb020ab..00bf0d0e955a0 100644
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -709,8 +709,10 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+
+ #ifdef CONFIG_DEBUG_FS
+ void spi_nor_debugfs_register(struct spi_nor *nor);
++void spi_nor_debugfs_shutdown(void);
+ #else
+ static inline void spi_nor_debugfs_register(struct spi_nor *nor) {}
++static inline void spi_nor_debugfs_shutdown(void) {}
+ #endif
+
+ #endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
+diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
+index df76cb5de3f93..5f56b23205d8b 100644
+--- a/drivers/mtd/spi-nor/debugfs.c
++++ b/drivers/mtd/spi-nor/debugfs.c
+@@ -226,13 +226,13 @@ static void spi_nor_debugfs_unregister(void *data)
+ nor->debugfs_root = NULL;
+ }
+
++static struct dentry *rootdir;
++
+ void spi_nor_debugfs_register(struct spi_nor *nor)
+ {
+- struct dentry *rootdir, *d;
++ struct dentry *d;
+ int ret;
+
+- /* Create rootdir once. Will never be deleted again. */
+- rootdir = debugfs_lookup(SPI_NOR_DEBUGFS_ROOT, NULL);
+ if (!rootdir)
+ rootdir = debugfs_create_dir(SPI_NOR_DEBUGFS_ROOT, NULL);
+
+@@ -247,3 +247,8 @@ void spi_nor_debugfs_register(struct spi_nor *nor)
+ debugfs_create_file("capabilities", 0444, d, nor,
+ &spi_nor_capabilities_fops);
+ }
++
++void spi_nor_debugfs_shutdown(void)
++{
++ debugfs_remove(rootdir);
++}
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 9f6824a6537bc..9f44c86a591dd 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1776,14 +1776,15 @@ void bond_lower_state_changed(struct slave *slave)
+
+ /* The bonding driver uses ether_setup() to convert a master bond device
+ * to ARPHRD_ETHER, that resets the target netdevice's flags so we always
+- * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set
++ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
++ * if they were set
+ */
+ static void bond_ether_setup(struct net_device *bond_dev)
+ {
+- unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
++ unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);
+
+ ether_setup(bond_dev);
+- bond_dev->flags |= IFF_MASTER | slave_flag;
++ bond_dev->flags |= IFF_MASTER | flags;
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ }
+
+diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
+index 70887e0aece33..d9434ed9450df 100644
+--- a/drivers/net/dsa/b53/b53_mmap.c
++++ b/drivers/net/dsa/b53/b53_mmap.c
+@@ -216,6 +216,18 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
+ return 0;
+ }
+
++static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
++ u16 *value)
++{
++ return -EIO;
++}
++
++static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
++ u16 value)
++{
++ return -EIO;
++}
++
+ static const struct b53_io_ops b53_mmap_ops = {
+ .read8 = b53_mmap_read8,
+ .read16 = b53_mmap_read16,
+@@ -227,6 +239,8 @@ static const struct b53_io_ops b53_mmap_ops = {
+ .write32 = b53_mmap_write32,
+ .write48 = b53_mmap_write48,
+ .write64 = b53_mmap_write64,
++ .phy_read16 = b53_mmap_phy_read16,
++ .phy_write16 = b53_mmap_phy_write16,
+ };
+
+ static int b53_mmap_probe_of(struct platform_device *pdev,
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index c6e36603bd2db..e3e5a427222f6 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7597,7 +7597,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+ u8 flags;
+ int rc;
+
+- if (bp->hwrm_spec_code < 0x10801) {
++ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
+ rc = -ENODEV;
+ goto no_ptp;
+ }
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 55cf2f62bb308..db8e06157da29 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5293,31 +5293,6 @@ static void e1000_watchdog_task(struct work_struct *work)
+ ew32(TARC(0), tarc0);
+ }
+
+- /* disable TSO for pcie and 10/100 speeds, to avoid
+- * some hardware issues
+- */
+- if (!(adapter->flags & FLAG_TSO_FORCE)) {
+- switch (adapter->link_speed) {
+- case SPEED_10:
+- case SPEED_100:
+- e_info("10/100 speed: disabling TSO\n");
+- netdev->features &= ~NETIF_F_TSO;
+- netdev->features &= ~NETIF_F_TSO6;
+- break;
+- case SPEED_1000:
+- netdev->features |= NETIF_F_TSO;
+- netdev->features |= NETIF_F_TSO6;
+- break;
+- default:
+- /* oops */
+- break;
+- }
+- if (hw->mac.type == e1000_pch_spt) {
+- netdev->features &= ~NETIF_F_TSO;
+- netdev->features &= ~NETIF_F_TSO6;
+- }
+- }
+-
+ /* enable transmits in the hardware, need to do this
+ * after setting TARC(0)
+ */
+@@ -7532,6 +7507,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM);
+
++ /* disable TSO for pcie and 10/100 speeds to avoid
++ * some hardware issues and for i219 to fix transfer
++ * speed being capped at 60%
++ */
++ if (!(adapter->flags & FLAG_TSO_FORCE)) {
++ switch (adapter->link_speed) {
++ case SPEED_10:
++ case SPEED_100:
++ e_info("10/100 speed: disabling TSO\n");
++ netdev->features &= ~NETIF_F_TSO;
++ netdev->features &= ~NETIF_F_TSO6;
++ break;
++ case SPEED_1000:
++ netdev->features |= NETIF_F_TSO;
++ netdev->features |= NETIF_F_TSO6;
++ break;
++ default:
++ /* oops */
++ break;
++ }
++ if (hw->mac.type == e1000_pch_spt) {
++ netdev->features &= ~NETIF_F_TSO;
++ netdev->features &= ~NETIF_F_TSO6;
++ }
++ }
++
+ /* Set user-changeable features (subset of all device features) */
+ netdev->hw_features = netdev->features;
+ netdev->hw_features |= NETIF_F_RXFCS;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index da0cf87d3a1ca..68f390ce4f6e2 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -11058,8 +11058,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ pf->hw.aq.asq_last_status));
+ }
+ /* reinit the misc interrupt */
+- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
++ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ ret = i40e_setup_misc_vector(pf);
++ if (ret)
++ goto end_unlock;
++ }
+
+ /* Add a filter to drop all Flow control frames from any VSI from being
+ * transmitted. By doing so we stop a malicious VF from sending out
+@@ -14098,15 +14101,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ vsi->id = ctxt.vsi_number;
+ }
+
+- vsi->active_filters = 0;
+- clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
++ vsi->active_filters = 0;
+ /* If macvlan filters already exist, force them to get loaded */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ f->state = I40E_FILTER_NEW;
+ f_count++;
+ }
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
++ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+
+ if (f_count) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+index 017d68f1e1232..972c571b41587 100644
+--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
++++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+@@ -31,6 +31,8 @@ mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
+
+ if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
++ if (!multi)
++ return NULL;
+ tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+index 48dbfea0a2a1d..7cdf0ce24f288 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+@@ -26,7 +26,7 @@
+ #define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
+
+ #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
+-#define MLXSW_PCI_SW_RESET_WAIT_MSECS 200
++#define MLXSW_PCI_SW_RESET_WAIT_MSECS 400
+ #define MLXSW_PCI_FW_READY 0xA1844
+ #define MLXSW_PCI_FW_READY_MASK 0xFFFF
+ #define MLXSW_PCI_FW_READY_MAGIC 0x5E
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index 6a1bff54bc6c3..e6aedd8ebd750 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -541,7 +541,6 @@ int efx_net_open(struct net_device *net_dev)
+ else
+ efx->state = STATE_NET_UP;
+
+- efx_selftest_async_start(efx);
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
+index c2224e41a694d..ee1cabe3e2429 100644
+--- a/drivers/net/ethernet/sfc/efx_common.c
++++ b/drivers/net/ethernet/sfc/efx_common.c
+@@ -544,6 +544,8 @@ void efx_start_all(struct efx_nic *efx)
+ /* Start the hardware monitor if there is one */
+ efx_start_monitor(efx);
+
++ efx_selftest_async_start(efx);
++
+ /* Link state detection is normally event-driven; we have
+ * to poll now because we could have missed a change
+ */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 20b1b34a092ad..3f1883814ce21 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -724,8 +724,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
+ int page_off,
+ unsigned int *len)
+ {
+- struct page *page = alloc_page(GFP_ATOMIC);
++ int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++ struct page *page;
+
++ if (page_off + *len + tailroom > PAGE_SIZE)
++ return NULL;
++
++ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return NULL;
+
+@@ -733,7 +738,6 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
+ page_off += *len;
+
+ while (--*num_buf) {
+- int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ unsigned int buflen;
+ void *buf;
+ int off;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 5c266062c08f0..c35c085dbc877 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -996,10 +996,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+
+ /* No crossing a page as the payload mustn't fragment. */
+ if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
+- netdev_err(queue->vif->dev,
+- "txreq.offset: %u, size: %u, end: %lu\n",
+- txreq.offset, txreq.size,
+- (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
++ netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
++ txreq.offset, txreq.size);
+ xenvif_fatal_tx_err(queue->vif);
+ break;
+ }
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index bb80192c16b6b..8f17cbec5a0e4 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1604,22 +1604,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
+ if (ret)
+ goto err_init_connect;
+
+- queue->rd_enabled = true;
+ set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
+- nvme_tcp_init_recv_ctx(queue);
+-
+- write_lock_bh(&queue->sock->sk->sk_callback_lock);
+- queue->sock->sk->sk_user_data = queue;
+- queue->state_change = queue->sock->sk->sk_state_change;
+- queue->data_ready = queue->sock->sk->sk_data_ready;
+- queue->write_space = queue->sock->sk->sk_write_space;
+- queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+- queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+- queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+-#ifdef CONFIG_NET_RX_BUSY_POLL
+- queue->sock->sk->sk_ll_usec = 1;
+-#endif
+- write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+
+ return 0;
+
+@@ -1639,7 +1624,7 @@ err_destroy_mutex:
+ return ret;
+ }
+
+-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
++static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
+ {
+ struct socket *sock = queue->sock;
+
+@@ -1654,7 +1639,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
+ {
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+- nvme_tcp_restore_sock_calls(queue);
++ nvme_tcp_restore_sock_ops(queue);
+ cancel_work_sync(&queue->io_work);
+ }
+
+@@ -1672,21 +1657,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+ mutex_unlock(&queue->queue_lock);
+ }
+
++static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
++{
++ write_lock_bh(&queue->sock->sk->sk_callback_lock);
++ queue->sock->sk->sk_user_data = queue;
++ queue->state_change = queue->sock->sk->sk_state_change;
++ queue->data_ready = queue->sock->sk->sk_data_ready;
++ queue->write_space = queue->sock->sk->sk_write_space;
++ queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
++ queue->sock->sk->sk_state_change = nvme_tcp_state_change;
++ queue->sock->sk->sk_write_space = nvme_tcp_write_space;
++#ifdef CONFIG_NET_RX_BUSY_POLL
++ queue->sock->sk->sk_ll_usec = 1;
++#endif
++ write_unlock_bh(&queue->sock->sk->sk_callback_lock);
++}
++
+ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
+ {
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
++ struct nvme_tcp_queue *queue = &ctrl->queues[idx];
+ int ret;
+
++ queue->rd_enabled = true;
++ nvme_tcp_init_recv_ctx(queue);
++ nvme_tcp_setup_sock_ops(queue);
++
+ if (idx)
+ ret = nvmf_connect_io_queue(nctrl, idx);
+ else
+ ret = nvmf_connect_admin_queue(nctrl);
+
+ if (!ret) {
+- set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
++ set_bit(NVME_TCP_Q_LIVE, &queue->flags);
+ } else {
+- if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
+- __nvme_tcp_stop_queue(&ctrl->queues[idx]);
++ if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
++ __nvme_tcp_stop_queue(queue);
+ dev_err(nctrl->device,
+ "failed to connect queue: %d ret=%d\n", idx, ret);
+ }
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index cb15acdf14a30..e2c9a68d12df9 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -464,7 +464,8 @@ static const struct dmi_system_id asus_quirks[] = {
+ .ident = "ASUS ROG FLOW X13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "GV301Q"),
++ /* Match GV301** */
++ DMI_MATCH(DMI_PRODUCT_NAME, "GV301"),
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
+diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
+index 322cfaeda17ba..2a426040f749e 100644
+--- a/drivers/platform/x86/gigabyte-wmi.c
++++ b/drivers/platform/x86/gigabyte-wmi.c
+@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
+ }}
+
+ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
++ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("A320M-S2H V2-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H WIFI-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
+@@ -150,6 +151,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
++ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B650 AORUS ELITE AX"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
+@@ -159,6 +161,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
++ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570S AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"),
+ { }
+ };
+diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
+index bb81b8b1f7e9b..483bb65651665 100644
+--- a/drivers/platform/x86/intel/vsec.c
++++ b/drivers/platform/x86/intel/vsec.c
+@@ -141,6 +141,7 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
+
+ ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
+ if (ret < 0) {
++ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+ return ret;
+ }
+diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
+index dac1fb584fa35..ecd5a50c61660 100644
+--- a/drivers/regulator/fan53555.c
++++ b/drivers/regulator/fan53555.c
+@@ -8,18 +8,19 @@
+ // Copyright (c) 2012 Marvell Technology Ltd.
+ // Yunfan Zhang <yfzhang@marvell.com>
+
++#include <linux/bits.h>
++#include <linux/err.h>
++#include <linux/i2c.h>
+ #include <linux/module.h>
++#include <linux/of_device.h>
+ #include <linux/param.h>
+-#include <linux/err.h>
+ #include <linux/platform_device.h>
++#include <linux/regmap.h>
+ #include <linux/regulator/driver.h>
++#include <linux/regulator/fan53555.h>
+ #include <linux/regulator/machine.h>
+ #include <linux/regulator/of_regulator.h>
+-#include <linux/of_device.h>
+-#include <linux/i2c.h>
+ #include <linux/slab.h>
+-#include <linux/regmap.h>
+-#include <linux/regulator/fan53555.h>
+
+ /* Voltage setting */
+ #define FAN53555_VSEL0 0x00
+@@ -60,7 +61,7 @@
+ #define TCS_VSEL1_MODE (1 << 6)
+
+ #define TCS_SLEW_SHIFT 3
+-#define TCS_SLEW_MASK (0x3 < 3)
++#define TCS_SLEW_MASK GENMASK(4, 3)
+
+ enum fan53555_vendor {
+ FAN53526_VENDOR_FAIRCHILD = 0,
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index d265a2d9d0824..13ee8e4c4f570 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3299,7 +3299,7 @@ fw_crash_buffer_show(struct device *cdev,
+
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ buff_offset = instance->fw_crash_buffer_offset;
+- if (!instance->crash_dump_buf &&
++ if (!instance->crash_dump_buf ||
+ !((instance->fw_crash_state == AVAILABLE) ||
+ (instance->fw_crash_state == COPYING))) {
+ dev_err(&instance->pdev->dev,
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 24c4c92543599..3cda5d26b66ca 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -314,11 +314,18 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
+ if (result)
+ return -EIO;
+
+- /* Sanity check that we got the page back that we asked for */
++ /*
++ * Sanity check that we got the page back that we asked for and that
++ * the page size is not 0.
++ */
+ if (buffer[1] != page)
+ return -EIO;
+
+- return get_unaligned_be16(&buffer[2]) + 4;
++ result = get_unaligned_be16(&buffer[2]);
++ if (!result)
++ return -EIO;
++
++ return result + 4;
+ }
+
+ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
+index bd87d3c92dd33..69347b6bf60cd 100644
+--- a/drivers/spi/spi-rockchip-sfc.c
++++ b/drivers/spi/spi-rockchip-sfc.c
+@@ -632,7 +632,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+
+- return ret;
++ goto err_irq;
+ }
+
+ ret = rockchip_sfc_init(sfc);
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index b8ae02aa632e3..4abbe4b352533 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -523,7 +523,7 @@ void replace_extent_mapping(struct extent_map_tree *tree,
+ setup_extent_mapping(tree, new, modified);
+ }
+
+-static struct extent_map *next_extent_map(struct extent_map *em)
++static struct extent_map *next_extent_map(const struct extent_map *em)
+ {
+ struct rb_node *next;
+
+@@ -533,6 +533,35 @@ static struct extent_map *next_extent_map(struct extent_map *em)
+ return container_of(next, struct extent_map, rb_node);
+ }
+
++/*
++ * Get the extent map that immediately follows another one.
++ *
++ * @tree: The extent map tree that the extent map belong to.
++ * Holding read or write access on the tree's lock is required.
++ * @em: An extent map from the given tree. The caller must ensure that
++ * between getting @em and between calling this function, the
++ * extent map @em is not removed from the tree - for example, by
++ * holding the tree's lock for the duration of those 2 operations.
++ *
++ * Returns the extent map that immediately follows @em, or NULL if @em is the
++ * last extent map in the tree.
++ */
++struct extent_map *btrfs_next_extent_map(const struct extent_map_tree *tree,
++ const struct extent_map *em)
++{
++ struct extent_map *next;
++
++ /* The lock must be acquired either in read mode or write mode. */
++ lockdep_assert_held(&tree->lock);
++ ASSERT(extent_map_in_tree(em));
++
++ next = next_extent_map(em);
++ if (next)
++ refcount_inc(&next->refs);
++
++ return next;
++}
++
+ static struct extent_map *prev_extent_map(struct extent_map *em)
+ {
+ struct rb_node *prev;
+diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
+index ad311864272a0..68d3f2c9ea1d8 100644
+--- a/fs/btrfs/extent_map.h
++++ b/fs/btrfs/extent_map.h
+@@ -87,6 +87,8 @@ static inline u64 extent_map_block_end(struct extent_map *em)
+ void extent_map_tree_init(struct extent_map_tree *tree);
+ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len);
++struct extent_map *btrfs_next_extent_map(const struct extent_map_tree *tree,
++ const struct extent_map *em);
+ int add_extent_mapping(struct extent_map_tree *tree,
+ struct extent_map *em, int modified);
+ void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 1bda59c683602..77202addead83 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3248,40 +3248,50 @@ static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end
+ */
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, start, len);
+- read_unlock(&em_tree->lock);
++ if (!em) {
++ read_unlock(&em_tree->lock);
++ return (delalloc_len > 0);
++ }
+
+ /* extent_map_end() returns a non-inclusive end offset. */
+- em_end = em ? extent_map_end(em) : 0;
++ em_end = extent_map_end(em);
+
+ /*
+ * If we have a hole/prealloc extent map, check the next one if this one
+ * ends before our range's end.
+ */
+- if (em && (em->block_start == EXTENT_MAP_HOLE ||
+- test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) && em_end < end) {
++ if ((em->block_start == EXTENT_MAP_HOLE ||
++ test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) && em_end < end) {
+ struct extent_map *next_em;
+
+- read_lock(&em_tree->lock);
+- next_em = lookup_extent_mapping(em_tree, em_end, len - em_end);
+- read_unlock(&em_tree->lock);
+-
++ next_em = btrfs_next_extent_map(em_tree, em);
+ free_extent_map(em);
+- em_end = next_em ? extent_map_end(next_em) : 0;
++
++ /*
++ * There's no next extent map or the next one starts beyond our
++ * range, return the range found in the io tree (if any).
++ */
++ if (!next_em || next_em->start > end) {
++ read_unlock(&em_tree->lock);
++ free_extent_map(next_em);
++ return (delalloc_len > 0);
++ }
++
++ em_end = extent_map_end(next_em);
+ em = next_em;
+ }
+
+- if (em && (em->block_start == EXTENT_MAP_HOLE ||
+- test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+- free_extent_map(em);
+- em = NULL;
+- }
++ read_unlock(&em_tree->lock);
+
+ /*
+- * No extent map or one for a hole or prealloc extent. Use the delalloc
+- * range we found in the io tree if we have one.
++ * We have a hole or prealloc extent that ends at or beyond our range's
++ * end, return the range found in the io tree (if any).
+ */
+- if (!em)
++ if (em->block_start == EXTENT_MAP_HOLE ||
++ test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
++ free_extent_map(em);
+ return (delalloc_len > 0);
++ }
+
+ /*
+ * We don't have any range as EXTENT_DELALLOC in the io tree, so the
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 9958d40207712..aa33c39be1829 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -974,6 +974,16 @@ restart:
+ continue;
+ }
+
++ /*
++ * If wb_tryget fails, the wb has been shutdown, skip it.
++ *
++ * Pin @wb so that it stays on @bdi->wb_list. This allows
++ * continuing iteration from @wb after dropping and
++ * regrabbing rcu read lock.
++ */
++ if (!wb_tryget(wb))
++ continue;
++
+ /* alloc failed, execute synchronously using on-stack fallback */
+ work = &fallback_work;
+ *work = *base_work;
+@@ -982,13 +992,6 @@ restart:
+ work->done = &fallback_work_done;
+
+ wb_queue_work(wb, work);
+-
+- /*
+- * Pin @wb so that it stays on @bdi->wb_list. This allows
+- * continuing iteration from @wb after dropping and
+- * regrabbing rcu read lock.
+- */
+- wb_get(wb);
+ last_wb = wb;
+
+ rcu_read_unlock();
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index bb97a384dc5dd..904673a4f6902 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -214,7 +214,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
+ if (inode && fuse_is_bad(inode))
+ goto invalid;
+ else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
+- (flags & (LOOKUP_EXCL | LOOKUP_REVAL))) {
++ (flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) {
+ struct fuse_entry_out outarg;
+ FUSE_ARGS(args);
+ struct fuse_forget_link *forget;
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 63d96a1733b2a..101f2ce6ba376 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
+ return 0;
+ }
+
++/**
++ * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
++ * @sci: segment constructor object
++ *
++ * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
++ * the current segment summary block.
++ */
++static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
++{
++ struct nilfs_segsum_pointer *ssp;
++
++ ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
++ if (ssp->offset < ssp->bh->b_size)
++ memset(ssp->bh->b_data + ssp->offset, 0,
++ ssp->bh->b_size - ssp->offset);
++}
++
+ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
+ {
+ sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
+@@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
+ * The current segment is filled up
+ * (internal code)
+ */
++ nilfs_segctor_zeropad_segsum(sci);
+ sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
+ return nilfs_segctor_reset_segment_buffer(sci);
+ }
+@@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
+ goto retry;
+ }
+ if (unlikely(required)) {
++ nilfs_segctor_zeropad_segsum(sci);
+ err = nilfs_segbuf_extend_segsum(segbuf);
+ if (unlikely(err))
+ goto failed;
+@@ -1531,6 +1550,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
+ nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
+ sci->sc_stage = prev_stage;
+ }
++ nilfs_segctor_zeropad_segsum(sci);
+ nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
+ return 0;
+
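nilfs_segctor_zeropad_segsum() is called at every point where a partially filled segment summary block can reach disk, so the unwritten tail never leaks stale memory contents. The idiom itself is just a bounded memset; a standalone sketch with a hypothetical buffer type:

#include <stdio.h>
#include <string.h>

struct sumbuf {
	unsigned char data[32];
	size_t used;	/* bytes actually written */
};

/* Zero everything past the last valid byte, as the fix does for segsum. */
static void zeropad_tail(struct sumbuf *b)
{
	if (b->used < sizeof(b->data))
		memset(b->data + b->used, 0, sizeof(b->data) - b->used);
}

int main(void)
{
	struct sumbuf b = { .used = 5 };

	memset(b.data, 0xAA, sizeof(b.data));	/* simulate stale contents */
	zeropad_tail(&b);
	printf("data[5] after padding: %#x\n", b.data[5]);	/* prints 0 */
	return 0;
}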
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index cc694846617a5..154c103eca751 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1966,8 +1966,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+ ret = -EFAULT;
+ if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
+ goto out;
+- /* Ignore unsupported features (userspace built against newer kernel) */
+- features = uffdio_api.features & UFFD_API_FEATURES;
++ features = uffdio_api.features;
++ ret = -EINVAL;
++ if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
++ goto err_out;
+ ret = -EPERM;
+ if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+ goto err_out;
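With this change the UFFDIO_API handshake fails with -EINVAL when userspace requests feature bits the kernel does not implement, instead of silently masking them off. A hedged Linux-only probe (the high bit used below is merely assumed to be unsupported, and unprivileged userfaultfd may be disabled by sysctl):

#include <errno.h>
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = 1ULL << 63,	/* assumed-unsupported feature bit */
	};

	if (fd < 0) {
		perror("userfaultfd");
		return 1;
	}
	/* Patched kernels fail with EINVAL; older ones masked the bit off. */
	if (ioctl(fd, UFFDIO_API, &api) < 0)
		printf("UFFDIO_API failed: %s\n", strerror(errno));
	else
		printf("UFFDIO_API accepted, features=0x%llx\n",
		       (unsigned long long)api.features);
	close(fd);
	return 0;
}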
+diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
+index e38ae3c346184..30b17647ce3c7 100644
+--- a/include/linux/kmsan.h
++++ b/include/linux/kmsan.h
+@@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
+ * @page_shift: page_shift passed to vmap_range_noflush().
+ *
+ * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
+- * vmalloc metadata address range.
++ * vmalloc metadata address range. Returns 0 on success, callers must check
++ * for non-zero return value.
+ */
+-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+- pgprot_t prot, struct page **pages,
+- unsigned int page_shift);
++int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
++ pgprot_t prot, struct page **pages,
++ unsigned int page_shift);
+
+ /**
+ * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
+@@ -159,11 +160,12 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+ * @page_shift: page_shift argument passed to vmap_range_noflush().
+ *
+ * KMSAN creates new metadata pages for the physical pages mapped into the
+- * virtual memory.
++ * virtual memory. Returns 0 on success, callers must check for non-zero return
++ * value.
+ */
+-void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+- phys_addr_t phys_addr, pgprot_t prot,
+- unsigned int page_shift);
++int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
++ phys_addr_t phys_addr, pgprot_t prot,
++ unsigned int page_shift);
+
+ /**
+ * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
+@@ -281,12 +283,13 @@ static inline void kmsan_kfree_large(const void *ptr)
+ {
+ }
+
+-static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
+- unsigned long end,
+- pgprot_t prot,
+- struct page **pages,
+- unsigned int page_shift)
++static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
++ unsigned long end,
++ pgprot_t prot,
++ struct page **pages,
++ unsigned int page_shift)
+ {
++ return 0;
+ }
+
+ static inline void kmsan_vunmap_range_noflush(unsigned long start,
+@@ -294,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ {
+ }
+
+-static inline void kmsan_ioremap_page_range(unsigned long start,
+- unsigned long end,
+- phys_addr_t phys_addr,
+- pgprot_t prot,
+- unsigned int page_shift)
++static inline int kmsan_ioremap_page_range(unsigned long start,
++ unsigned long end,
++ phys_addr_t phys_addr, pgprot_t prot,
++ unsigned int page_shift)
+ {
++ return 0;
+ }
+
+ static inline void kmsan_iounmap_page_range(unsigned long start,
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 7be5bb4c94b6d..20ca1613f2e3e 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -291,6 +291,7 @@ struct nf_bridge_info {
+ u8 pkt_otherhost:1;
+ u8 in_prerouting:1;
+ u8 bridged_dnat:1;
++ u8 sabotage_in_done:1;
+ __u16 frag_max_size;
+ struct net_device *physindev;
+
+@@ -4684,7 +4685,7 @@ static inline void nf_reset_ct(struct sk_buff *skb)
+
+ static inline void nf_reset_trace(struct sk_buff *skb)
+ {
+-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
+ skb->nf_trace = 0;
+ #endif
+ }
+@@ -4704,7 +4705,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+ dst->_nfct = src->_nfct;
+ nf_conntrack_get(skb_nfct(src));
+ #endif
+-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
+ if (copy)
+ dst->nf_trace = src->nf_trace;
+ #endif
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 1daededfa75ed..6bacbf57ac175 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1078,6 +1078,10 @@ struct nft_chain {
+ };
+
+ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain);
++int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
++ const struct nft_set_iter *iter,
++ struct nft_set_elem *elem);
++int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set);
+
+ enum nft_chain_types {
+ NFT_CHAIN_T_DEFAULT = 0,
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index e57f867191ef1..eb53e96b7a29c 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -505,7 +505,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+- __field(nid_t, nid[3])
++ __array(nid_t, nid, 3)
+ __field(int, depth)
+ __field(int, err)
+ ),
+diff --git a/init/Kconfig b/init/Kconfig
+index 0c214af99085d..2028ed4d50f5b 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -892,18 +892,14 @@ config CC_IMPLICIT_FALLTHROUGH
+ default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
+ default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
+
+-# Currently, disable gcc-11,12 array-bounds globally.
+-# We may want to target only particular configurations some day.
++# Currently, disable gcc-11+ array-bounds globally.
++# It's still broken in gcc-13, so no upper bound yet.
+ config GCC11_NO_ARRAY_BOUNDS
+ def_bool y
+
+-config GCC12_NO_ARRAY_BOUNDS
+- def_bool y
+-
+ config CC_NO_ARRAY_BOUNDS
+ bool
+- default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_ARRAY_BOUNDS
+- default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS
++ default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
+
+ #
+ # For architectures that know their GCC __int128 support is sound
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index ea21e008bf856..8db2ed564939b 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2682,6 +2682,21 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
+ }
+ } else if (opcode == BPF_EXIT) {
+ return -ENOTSUPP;
++ } else if (BPF_SRC(insn->code) == BPF_X) {
++ if (!(*reg_mask & (dreg | sreg)))
++ return 0;
++ /* dreg <cond> sreg
++ * Both dreg and sreg need precision before
++ * this insn. If only sreg was marked precise
++ * before it would be equally necessary to
++ * propagate it to dreg.
++ */
++ *reg_mask |= (sreg | dreg);
++ /* else dreg <cond> K
++ * Only dreg still needs precision before
++ * this insn, so for the K-based conditional
++ * there is nothing new to be marked.
++ */
+ }
+ } else if (class == BPF_LD) {
+ if (!(*reg_mask & dreg))
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index ec2d913280e6a..f70c4a7fb4ef3 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4465,12 +4465,16 @@ static inline int util_fits_cpu(unsigned long util,
+ * For uclamp_max, we can tolerate a drop in performance level as the
+ * goal is to cap the task. So it's okay if it's getting less.
+ *
+- * In case of capacity inversion, which is not handled yet, we should
+- * honour the inverted capacity for both uclamp_min and uclamp_max all
+- * the time.
++ * In case of capacity inversion we should honour the inverted capacity
++ * for both uclamp_min and uclamp_max all the time.
+ */
+- capacity_orig = capacity_orig_of(cpu);
+- capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
++ capacity_orig = cpu_in_capacity_inversion(cpu);
++ if (capacity_orig) {
++ capacity_orig_thermal = capacity_orig;
++ } else {
++ capacity_orig = capacity_orig_of(cpu);
++ capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
++ }
+
+ /*
+ * We want to force a task to fit a cpu as implied by uclamp_max.
+@@ -8866,16 +8870,82 @@ static unsigned long scale_rt_capacity(int cpu)
+
+ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
+ {
++ unsigned long capacity_orig = arch_scale_cpu_capacity(cpu);
+ unsigned long capacity = scale_rt_capacity(cpu);
+ struct sched_group *sdg = sd->groups;
++ struct rq *rq = cpu_rq(cpu);
+
+- cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
++ rq->cpu_capacity_orig = capacity_orig;
+
+ if (!capacity)
+ capacity = 1;
+
+- cpu_rq(cpu)->cpu_capacity = capacity;
+- trace_sched_cpu_capacity_tp(cpu_rq(cpu));
++ rq->cpu_capacity = capacity;
++
++ /*
++ * Detect if the performance domain is in capacity inversion state.
++ *
++ * Capacity inversion happens when another perf domain with equal or
++ * lower capacity_orig_of() ends up having higher capacity than this
++ * domain after subtracting thermal pressure.
++ *
++ * We only take into account thermal pressure in this detection as it's
++ * the only metric that actually results in *real* reduction of
++ * capacity due to performance points (OPPs) being dropped/become
++ * unreachable due to thermal throttling.
++ *
++ * We assume:
++ * * That all cpus in a perf domain have the same capacity_orig
++ * (same uArch).
++ * * Thermal pressure will impact all cpus in this perf domain
++ * equally.
++ */
++ if (sched_energy_enabled()) {
++ unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
++ struct perf_domain *pd;
++
++ rcu_read_lock();
++
++ pd = rcu_dereference(rq->rd->pd);
++ rq->cpu_capacity_inverted = 0;
++
++ for (; pd; pd = pd->next) {
++ struct cpumask *pd_span = perf_domain_span(pd);
++ unsigned long pd_cap_orig, pd_cap;
++
++ /* We can't be inverted against our own pd */
++ if (cpumask_test_cpu(cpu_of(rq), pd_span))
++ continue;
++
++ cpu = cpumask_any(pd_span);
++ pd_cap_orig = arch_scale_cpu_capacity(cpu);
++
++ if (capacity_orig < pd_cap_orig)
++ continue;
++
++ /*
++			 * handle the case of multiple perf domains having the
++ * same capacity_orig but one of them is under higher
++ * thermal pressure. We record it as capacity
++ * inversion.
++ */
++ if (capacity_orig == pd_cap_orig) {
++ pd_cap = pd_cap_orig - thermal_load_avg(cpu_rq(cpu));
++
++ if (pd_cap > inv_cap) {
++ rq->cpu_capacity_inverted = inv_cap;
++ break;
++ }
++ } else if (pd_cap_orig > inv_cap) {
++ rq->cpu_capacity_inverted = inv_cap;
++ break;
++ }
++ }
++
++ rcu_read_unlock();
++ }
++
++ trace_sched_cpu_capacity_tp(rq);
+
+ sdg->sgc->capacity = capacity;
+ sdg->sgc->min_capacity = capacity;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index d6d488e8eb554..5f18460f62f0f 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1041,6 +1041,7 @@ struct rq {
+
+ unsigned long cpu_capacity;
+ unsigned long cpu_capacity_orig;
++ unsigned long cpu_capacity_inverted;
+
+ struct balance_callback *balance_callback;
+
+@@ -2878,6 +2879,24 @@ static inline unsigned long capacity_orig_of(int cpu)
+ return cpu_rq(cpu)->cpu_capacity_orig;
+ }
+
++/*
++ * Returns inverted capacity if the CPU is in capacity inversion state.
++ * 0 otherwise.
++ *
++ * Capacity inversion detection only considers thermal impact where actual
++ * performance points (OPPs) gets dropped.
++ *
++ * Capacity inversion state happens when another performance domain that has
++ * equal or lower capacity_orig_of() becomes effectively larger than the perf
++ * domain this CPU belongs to due to thermal pressure throttling it hard.
++ *
++ * See comment in update_cpu_capacity().
++ */
++static inline unsigned long cpu_in_capacity_inversion(int cpu)
++{
++ return cpu_rq(cpu)->cpu_capacity_inverted;
++}
++
+ /**
+ * enum cpu_util_type - CPU utilization type
+ * @FREQUENCY_UTIL: Utilization used to select frequency
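Together, the fair.c and sched.h hunks teach util_fits_cpu() to honour the inverted capacity: a CPU is flagged when some performance domain with equal or lower original capacity still ends up with more usable capacity once thermal pressure is subtracted. A simplified userspace model of that detection, with illustrative numbers for a throttled big core and a cool little core:

#include <stdio.h>

struct pd {
	unsigned long cap_orig;	/* capacity at the highest OPP */
	unsigned long thermal;	/* thermal pressure currently applied */
};

/* Return the inverted capacity of domain @self, or 0 if not inverted. */
static unsigned long capacity_inversion(const struct pd *doms, int n, int self)
{
	unsigned long inv_cap = doms[self].cap_orig - doms[self].thermal;

	for (int i = 0; i < n; i++) {
		unsigned long cap = doms[i].cap_orig - doms[i].thermal;

		if (i == self || doms[i].cap_orig > doms[self].cap_orig)
			continue;	/* can't be inverted against bigger domains */
		if (cap > inv_cap)
			return inv_cap;	/* a nominally smaller domain beats us */
	}
	return 0;
}

int main(void)
{
	/* Big cores heavily throttled, little cores cool. */
	struct pd doms[] = { { 1024, 600 }, { 512, 0 } };

	printf("big inverted cap: %lu\n", capacity_inversion(doms, 2, 0));	/* 424 */
	printf("little inverted cap: %lu\n", capacity_inversion(doms, 2, 1));	/* 0 */
	return 0;
}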
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 88b31f096fb2d..c85e1abf7b7c7 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -664,6 +664,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+ struct cred *new;
+ int retval;
+ kuid_t kruid, keuid, ksuid;
++ bool ruid_new, euid_new, suid_new;
+
+ kruid = make_kuid(ns, ruid);
+ keuid = make_kuid(ns, euid);
+@@ -678,25 +679,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+ if ((suid != (uid_t) -1) && !uid_valid(ksuid))
+ return -EINVAL;
+
++ old = current_cred();
++
++ /* check for no-op */
++ if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
++ (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
++ uid_eq(keuid, old->fsuid))) &&
++ (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
++ return 0;
++
++ ruid_new = ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
++ !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
++ euid_new = euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
++ !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
++ suid_new = suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
++ !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
++ if ((ruid_new || euid_new || suid_new) &&
++ !ns_capable_setid(old->user_ns, CAP_SETUID))
++ return -EPERM;
++
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+- old = current_cred();
+-
+- retval = -EPERM;
+- if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
+- if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
+- !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
+- goto error;
+- if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
+- !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
+- goto error;
+- if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
+- !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
+- goto error;
+- }
+-
+ if (ruid != (uid_t) -1) {
+ new->uid = kruid;
+ if (!uid_eq(kruid, old->uid)) {
+@@ -761,6 +766,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+ struct cred *new;
+ int retval;
+ kgid_t krgid, kegid, ksgid;
++ bool rgid_new, egid_new, sgid_new;
+
+ krgid = make_kgid(ns, rgid);
+ kegid = make_kgid(ns, egid);
+@@ -773,23 +779,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+ if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
+ return -EINVAL;
+
++ old = current_cred();
++
++ /* check for no-op */
++ if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
++ (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
++ gid_eq(kegid, old->fsgid))) &&
++ (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
++ return 0;
++
++ rgid_new = rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
++ !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
++ egid_new = egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
++ !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
++ sgid_new = sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
++ !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
++ if ((rgid_new || egid_new || sgid_new) &&
++ !ns_capable_setid(old->user_ns, CAP_SETGID))
++ return -EPERM;
++
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+- old = current_cred();
+-
+- retval = -EPERM;
+- if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
+- if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
+- !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
+- goto error;
+- if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
+- !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
+- goto error;
+- if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
+- !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
+- goto error;
+- }
+
+ if (rgid != (gid_t) -1)
+ new->gid = krgid;
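The reordering above means a no-op setresuid()/setresgid() request (every id either -1 or already current) returns 0 before prepare_creds() and before CAP_SETUID/CAP_SETGID is ever consulted, so such calls can no longer fail or trip a restrictive security policy. A quick userspace check, assuming glibc's wrappers:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* -1 everywhere means "change nothing"; patched kernels return 0
	 * here without performing the capability check at all. */
	int ru = setresuid(-1, -1, -1);
	int rg = setresgid(-1, -1, -1);

	printf("setresuid: %d, setresgid: %d\n", ru, rg);
	return 0;
}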
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 39f34ea7a9be5..9fe25ce9937b8 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -1293,26 +1293,21 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+ node = mas->alloc;
+ node->request_count = 0;
+ while (requested) {
+- max_req = MAPLE_ALLOC_SLOTS;
+- if (node->node_count) {
+- unsigned int offset = node->node_count;
+-
+- slots = (void **)&node->slot[offset];
+- max_req -= offset;
+- } else {
+- slots = (void **)&node->slot;
+- }
+-
++ max_req = MAPLE_ALLOC_SLOTS - node->node_count;
++ slots = (void **)&node->slot[node->node_count];
+ max_req = min(requested, max_req);
+ count = mt_alloc_bulk(gfp, max_req, slots);
+ if (!count)
+ goto nomem_bulk;
+
++ if (node->node_count == 0) {
++ node->slot[0]->node_count = 0;
++ node->slot[0]->request_count = 0;
++ }
++
+ node->node_count += count;
+ allocated += count;
+ node = node->slot[0];
+- node->node_count = 0;
+- node->request_count = 0;
+ requested -= count;
+ }
+ mas->alloc->total = allocated;
+@@ -4968,7 +4963,8 @@ not_found:
+ * Return: True if found in a leaf, false otherwise.
+ *
+ */
+-static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
++static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
++ unsigned long *gap_min, unsigned long *gap_max)
+ {
+ enum maple_type type = mte_node_type(mas->node);
+ struct maple_node *node = mas_mn(mas);
+@@ -5033,8 +5029,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+
+ if (unlikely(ma_is_leaf(type))) {
+ mas->offset = offset;
+- mas->min = min;
+- mas->max = min + gap - 1;
++ *gap_min = min;
++ *gap_max = min + gap - 1;
+ return true;
+ }
+
+@@ -5058,10 +5054,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
+ {
+ enum maple_type type = mte_node_type(mas->node);
+ unsigned long pivot, min, gap = 0;
+- unsigned char offset;
+- unsigned long *gaps;
+- unsigned long *pivots = ma_pivots(mas_mn(mas), type);
+- void __rcu **slots = ma_slots(mas_mn(mas), type);
++ unsigned char offset, data_end;
++ unsigned long *gaps, *pivots;
++ void __rcu **slots;
++ struct maple_node *node;
+ bool found = false;
+
+ if (ma_is_dense(type)) {
+@@ -5069,13 +5065,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
+ return true;
+ }
+
+- gaps = ma_gaps(mte_to_node(mas->node), type);
++ node = mas_mn(mas);
++ pivots = ma_pivots(node, type);
++ slots = ma_slots(node, type);
++ gaps = ma_gaps(node, type);
+ offset = mas->offset;
+ min = mas_safe_min(mas, pivots, offset);
+- for (; offset < mt_slots[type]; offset++) {
+- pivot = mas_safe_pivot(mas, pivots, offset, type);
+- if (offset && !pivot)
+- break;
++ data_end = ma_data_end(node, type, pivots, mas->max);
++ for (; offset <= data_end; offset++) {
++ pivot = mas_logical_pivot(mas, pivots, offset, type);
+
+ /* Not within lower bounds */
+ if (mas->index > pivot)
+@@ -5310,6 +5308,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
+ unsigned long *pivots;
+ enum maple_type mt;
+
++ if (min >= max)
++ return -EINVAL;
++
+ if (mas_is_start(mas))
+ mas_start(mas);
+ else if (mas->offset >= 2)
+@@ -5364,6 +5365,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ {
+ struct maple_enode *last = mas->node;
+
++ if (min >= max)
++ return -EINVAL;
++
+ if (mas_is_start(mas)) {
+ mas_start(mas);
+ mas->offset = mas_data_end(mas);
+@@ -5383,7 +5387,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ mas->index = min;
+ mas->last = max;
+
+- while (!mas_rev_awalk(mas, size)) {
++ while (!mas_rev_awalk(mas, size, &min, &max)) {
+ if (last == mas->node) {
+ if (!mas_rewind_node(mas))
+ return -EBUSY;
+@@ -5398,17 +5402,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
+ return -EBUSY;
+
+- /*
+- * mas_rev_awalk() has set mas->min and mas->max to the gap values. If
+- * the maximum is outside the window we are searching, then use the last
+- * location in the search.
+- * mas->max and mas->min is the range of the gap.
+- * mas->index and mas->last are currently set to the search range.
+- */
+-
+ /* Trim the upper limit to the max. */
+- if (mas->max <= mas->last)
+- mas->last = mas->max;
++ if (max <= mas->last)
++ mas->last = max;
+
+ mas->index = mas->last - size + 1;
+ return 0;
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index c30419a5e1197..bf5525c2e561a 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -380,6 +380,15 @@ static LIST_HEAD(offline_cgwbs);
+ static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
+ static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);
+
++static void cgwb_free_rcu(struct rcu_head *rcu_head)
++{
++ struct bdi_writeback *wb = container_of(rcu_head,
++ struct bdi_writeback, rcu);
++
++ percpu_ref_exit(&wb->refcnt);
++ kfree(wb);
++}
++
+ static void cgwb_release_workfn(struct work_struct *work)
+ {
+ struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
+@@ -402,11 +411,10 @@ static void cgwb_release_workfn(struct work_struct *work)
+ list_del(&wb->offline_node);
+ spin_unlock_irq(&cgwb_lock);
+
+- percpu_ref_exit(&wb->refcnt);
+ wb_exit(wb);
+ bdi_put(bdi);
+ WARN_ON_ONCE(!list_empty(&wb->b_attached));
+- kfree_rcu(wb, rcu);
++ call_rcu(&wb->rcu, cgwb_free_rcu);
+ }
+
+ static void cgwb_release(struct percpu_ref *refcnt)
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index b8d654963df87..b20fef29e5bb5 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1805,10 +1805,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ if (is_swap_pmd(*pmd)) {
+ swp_entry_t entry = pmd_to_swp_entry(*pmd);
+ struct page *page = pfn_swap_entry_to_page(entry);
++ pmd_t newpmd;
+
+ VM_BUG_ON(!is_pmd_migration_entry(*pmd));
+ if (is_writable_migration_entry(entry)) {
+- pmd_t newpmd;
+ /*
+ * A protection check is difficult so
+ * just be safe and disable write
+@@ -1822,8 +1822,16 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ newpmd = pmd_swp_mksoft_dirty(newpmd);
+ if (pmd_swp_uffd_wp(*pmd))
+ newpmd = pmd_swp_mkuffd_wp(newpmd);
+- set_pmd_at(mm, addr, pmd, newpmd);
++ } else {
++ newpmd = *pmd;
+ }
++
++ if (uffd_wp)
++ newpmd = pmd_swp_mkuffd_wp(newpmd);
++ else if (uffd_wp_resolve)
++ newpmd = pmd_swp_clear_uffd_wp(newpmd);
++ if (!pmd_same(*pmd, newpmd))
++ set_pmd_at(mm, addr, pmd, newpmd);
+ goto unlock;
+ }
+ #endif
+@@ -2647,9 +2655,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+
+ is_hzp = is_huge_zero_page(&folio->page);
+- VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
+- if (is_hzp)
++ if (is_hzp) {
++ pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
+ return -EBUSY;
++ }
+
+ if (folio_test_writeback(folio))
+ return -EBUSY;
+@@ -3233,6 +3242,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+ pmdswp = swp_entry_to_pmd(entry);
+ if (pmd_soft_dirty(pmdval))
+ pmdswp = pmd_swp_mksoft_dirty(pmdswp);
++ if (pmd_uffd_wp(pmdval))
++ pmdswp = pmd_swp_mkuffd_wp(pmdswp);
+ set_pmd_at(mm, address, pvmw->pmd, pmdswp);
+ page_remove_rmap(page, vma, true);
+ put_page(page);
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 77a76bcf15f57..ef72d3df4b65b 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -561,6 +561,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
+ result = SCAN_PTE_NON_PRESENT;
+ goto out;
+ }
++ if (pte_uffd_wp(pteval)) {
++ result = SCAN_PTE_UFFD_WP;
++ goto out;
++ }
+ page = vm_normal_page(vma, address, pteval);
+ if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
+ result = SCAN_PAGE_NULL;
+diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
+index 3807502766a3e..ec0da72e65aa0 100644
+--- a/mm/kmsan/hooks.c
++++ b/mm/kmsan/hooks.c
+@@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
+ * into the virtual memory. If those physical pages already had shadow/origin,
+ * those are ignored.
+ */
+-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+- phys_addr_t phys_addr, pgprot_t prot,
+- unsigned int page_shift)
++int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
++ phys_addr_t phys_addr, pgprot_t prot,
++ unsigned int page_shift)
+ {
+ gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
+ struct page *shadow, *origin;
+ unsigned long off = 0;
+- int nr;
++ int nr, err = 0, clean = 0, mapped;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+- return;
++ return 0;
+
+ nr = (end - start) / PAGE_SIZE;
+ kmsan_enter_runtime();
+- for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
++ for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
+ shadow = alloc_pages(gfp_mask, 1);
+ origin = alloc_pages(gfp_mask, 1);
+- __vmap_pages_range_noflush(
++ if (!shadow || !origin) {
++ err = -ENOMEM;
++ goto ret;
++ }
++ mapped = __vmap_pages_range_noflush(
+ vmalloc_shadow(start + off),
+ vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
+ PAGE_SHIFT);
+- __vmap_pages_range_noflush(
++ if (mapped) {
++ err = mapped;
++ goto ret;
++ }
++ shadow = NULL;
++ mapped = __vmap_pages_range_noflush(
+ vmalloc_origin(start + off),
+ vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
+ PAGE_SHIFT);
++ if (mapped) {
++ __vunmap_range_noflush(
++ vmalloc_shadow(start + off),
++ vmalloc_shadow(start + off + PAGE_SIZE));
++ err = mapped;
++ goto ret;
++ }
++ origin = NULL;
++ }
++ /* Page mapping loop finished normally, nothing to clean up. */
++ clean = 0;
++
++ret:
++ if (clean > 0) {
++ /*
++ * Something went wrong. Clean up shadow/origin pages allocated
++ * on the last loop iteration, then delete mappings created
++ * during the previous iterations.
++ */
++ if (shadow)
++ __free_pages(shadow, 1);
++ if (origin)
++ __free_pages(origin, 1);
++ __vunmap_range_noflush(
++ vmalloc_shadow(start),
++ vmalloc_shadow(start + clean * PAGE_SIZE));
++ __vunmap_range_noflush(
++ vmalloc_origin(start),
++ vmalloc_origin(start + clean * PAGE_SIZE));
+ }
+ flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
+ flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
+ kmsan_leave_runtime();
++ return err;
+ }
+
+ void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
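The rewritten kmsan_ioremap_page_range() keeps a `clean` counter so that, on a mid-loop failure, the pages allocated by the failing iteration are freed and every mapping created by earlier iterations is torn down. A self-contained sketch of that unwind pattern with stand-in resources (malloc in place of page allocation):

#include <stdio.h>
#include <stdlib.h>

#define NPAGES 4

/* "Map" NPAGES pages; on failure, unwind the ones already mapped. */
static int map_all(void *pages[NPAGES], int fail_at)
{
	int i, clean = 0, err = 0;

	for (i = 0; i < NPAGES; i++, clean = i) {
		if (i == fail_at) {	/* simulated allocation failure */
			err = -12;	/* -ENOMEM */
			goto ret;
		}
		pages[i] = malloc(64);
	}
	clean = 0;	/* loop finished normally, nothing to clean up */
ret:
	if (clean > 0) {
		while (clean--)
			free(pages[clean]);	/* undo earlier iterations */
		printf("unwound after failure\n");
	}
	return err;
}

int main(void)
{
	void *pages[NPAGES] = { 0 };

	printf("err = %d\n", map_all(pages, 2));
	return 0;
}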
+diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
+index a787c04e9583c..b8bb95eea5e3d 100644
+--- a/mm/kmsan/shadow.c
++++ b/mm/kmsan/shadow.c
+@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
+ kmsan_leave_runtime();
+ }
+
+-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+- pgprot_t prot, struct page **pages,
+- unsigned int page_shift)
++int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
++ pgprot_t prot, struct page **pages,
++ unsigned int page_shift)
+ {
+ unsigned long shadow_start, origin_start, shadow_end, origin_end;
+ struct page **s_pages, **o_pages;
+- int nr, mapped;
++ int nr, mapped, err = 0;
+
+ if (!kmsan_enabled)
+- return;
++ return 0;
+
+ shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
+ shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
+ if (!shadow_start)
+- return;
++ return 0;
+
+ nr = (end - start) / PAGE_SIZE;
+ s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
+ o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
+- if (!s_pages || !o_pages)
++ if (!s_pages || !o_pages) {
++ err = -ENOMEM;
+ goto ret;
++ }
+ for (int i = 0; i < nr; i++) {
+ s_pages[i] = shadow_page_for(pages[i]);
+ o_pages[i] = origin_page_for(pages[i]);
+@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ kmsan_enter_runtime();
+ mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
+ s_pages, page_shift);
+- KMSAN_WARN_ON(mapped);
++ if (mapped) {
++ err = mapped;
++ goto ret;
++ }
+ mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
+ o_pages, page_shift);
+- KMSAN_WARN_ON(mapped);
++ if (mapped) {
++ err = mapped;
++ goto ret;
++ }
+ kmsan_leave_runtime();
+ flush_tlb_kernel_range(shadow_start, shadow_end);
+ flush_tlb_kernel_range(origin_start, origin_end);
+@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ ret:
+ kfree(s_pages);
+ kfree(o_pages);
++ return err;
+ }
+
+ /* Allocate metadata for pages allocated at boot time. */
+diff --git a/mm/mmap.c b/mm/mmap.c
+index fe1db604dc49e..14ca259189b77 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1565,7 +1565,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
+ */
+ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+ {
+- unsigned long length, gap;
++ unsigned long length, gap, low_limit;
++ struct vm_area_struct *tmp;
+
+ MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+
+@@ -1574,12 +1575,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+ if (length < info->length)
+ return -ENOMEM;
+
+- if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1,
+- length))
++ low_limit = info->low_limit;
++retry:
++ if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
+ return -ENOMEM;
+
+ gap = mas.index;
+ gap += (info->align_offset - gap) & info->align_mask;
++ tmp = mas_next(&mas, ULONG_MAX);
++ if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
++ if (vm_start_gap(tmp) < gap + length - 1) {
++ low_limit = tmp->vm_end;
++ mas_reset(&mas);
++ goto retry;
++ }
++ } else {
++ tmp = mas_prev(&mas, 0);
++ if (tmp && vm_end_gap(tmp) > gap) {
++ low_limit = vm_end_gap(tmp);
++ mas_reset(&mas);
++ goto retry;
++ }
++ }
++
+ return gap;
+ }
+
+@@ -1595,7 +1613,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+ */
+ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+ {
+- unsigned long length, gap;
++ unsigned long length, gap, high_limit, gap_end;
++ struct vm_area_struct *tmp;
+
+ MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+ /* Adjust search length to account for worst case alignment overhead */
+@@ -1603,12 +1622,31 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+ if (length < info->length)
+ return -ENOMEM;
+
+- if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1,
++ high_limit = info->high_limit;
++retry:
++ if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
+ length))
+ return -ENOMEM;
+
+ gap = mas.last + 1 - info->length;
+ gap -= (gap - info->align_offset) & info->align_mask;
++ gap_end = mas.last;
++ tmp = mas_next(&mas, ULONG_MAX);
++ if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
++ if (vm_start_gap(tmp) <= gap_end) {
++ high_limit = vm_start_gap(tmp);
++ mas_reset(&mas);
++ goto retry;
++ }
++ } else {
++ tmp = mas_prev(&mas, 0);
++ if (tmp && vm_end_gap(tmp) > gap) {
++ high_limit = tmp->vm_start;
++ mas_reset(&mas);
++ goto retry;
++ }
++ }
++
+ return gap;
+ }
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 5cae089639848..69668817fed37 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6599,7 +6599,21 @@ static void __build_all_zonelists(void *data)
+ int nid;
+ int __maybe_unused cpu;
+ pg_data_t *self = data;
++ unsigned long flags;
+
++ /*
++ * Explicitly disable this CPU's interrupts before taking seqlock
++ * to prevent any IRQ handler from calling into the page allocator
++ * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
++ */
++ local_irq_save(flags);
++ /*
++ * Explicitly disable this CPU's synchronous printk() before taking
++ * seqlock to prevent any printk() from trying to hold port->lock, for
++ * tty_insert_flip_string_and_push_buffer() on other CPU might be
++ * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
++ */
++ printk_deferred_enter();
+ write_seqlock(&zonelist_update_seq);
+
+ #ifdef CONFIG_NUMA
+@@ -6638,6 +6652,8 @@ static void __build_all_zonelists(void *data)
+ }
+
+ write_sequnlock(&zonelist_update_seq);
++ printk_deferred_exit();
++ local_irq_restore(flags);
+ }
+
+ static noinline void __init
+@@ -9418,6 +9434,9 @@ static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
+
+ if (PageReserved(page))
+ return false;
++
++ if (PageHuge(page))
++ return false;
+ }
+ return true;
+ }
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index d606e53c650e5..d5dc361dc104d 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -321,8 +321,8 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
+ ioremap_max_page_shift);
+ flush_cache_vmap(addr, end);
+ if (!err)
+- kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+- ioremap_max_page_shift);
++ err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
++ ioremap_max_page_shift);
+ return err;
+ }
+
+@@ -613,7 +613,11 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ pgprot_t prot, struct page **pages, unsigned int page_shift)
+ {
+- kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
++ int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
++ page_shift);
++
++ if (ret)
++ return ret;
+ return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+ }
+
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 9554abcfd5b4e..812bd7e1750b6 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -868,12 +868,17 @@ static unsigned int ip_sabotage_in(void *priv,
+ {
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+- if (nf_bridge && !nf_bridge->in_prerouting &&
+- !netif_is_l3_master(skb->dev) &&
+- !netif_is_l3_slave(skb->dev)) {
+- nf_bridge_info_free(skb);
+- state->okfn(state->net, state->sk, skb);
+- return NF_STOLEN;
++ if (nf_bridge) {
++ if (nf_bridge->sabotage_in_done)
++ return NF_ACCEPT;
++
++ if (!nf_bridge->in_prerouting &&
++ !netif_is_l3_master(skb->dev) &&
++ !netif_is_l3_slave(skb->dev)) {
++ nf_bridge->sabotage_in_done = 1;
++ state->okfn(state->net, state->sk, skb);
++ return NF_STOLEN;
++ }
+ }
+
+ return NF_ACCEPT;
+diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
+index 8f3d76c751dd0..4b3982c368b35 100644
+--- a/net/bridge/br_switchdev.c
++++ b/net/bridge/br_switchdev.c
+@@ -146,6 +146,17 @@ br_switchdev_fdb_notify(struct net_bridge *br,
+ {
+ struct switchdev_notifier_fdb_info item;
+
++ /* Entries with these flags were created using ndm_state == NUD_REACHABLE,
++ * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
++ * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
++ * Drivers don't know how to deal with these, so don't notify them to
++ * avoid confusing them.
++ */
++ if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
++ !test_bit(BR_FDB_STATIC, &fdb->flags) &&
++ !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
++ return;
++
+ br_switchdev_fdb_populate(br, &item, fdb, NULL);
+
+ switch (type) {
+diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
+index 7dfc00c9fb32b..9ddc3a9e89e40 100644
+--- a/net/dccp/dccp.h
++++ b/net/dccp/dccp.h
+@@ -278,6 +278,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct dccp_hdr *dh, const unsigned int len);
+
++void dccp_destruct_common(struct sock *sk);
+ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
+ void dccp_destroy_sock(struct sock *sk);
+
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 7a736c352dc4b..b9d7c3dd1cb39 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -1004,6 +1004,12 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
+ .sockaddr_len = sizeof(struct sockaddr_in6),
+ };
+
++static void dccp_v6_sk_destruct(struct sock *sk)
++{
++ dccp_destruct_common(sk);
++ inet6_sock_destruct(sk);
++}
++
+ /* NOTE: A lot of things set to zero explicitly by call to
+ * sk_alloc() so need not be done here.
+ */
+@@ -1016,17 +1022,12 @@ static int dccp_v6_init_sock(struct sock *sk)
+ if (unlikely(!dccp_v6_ctl_sock_initialized))
+ dccp_v6_ctl_sock_initialized = 1;
+ inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
++ sk->sk_destruct = dccp_v6_sk_destruct;
+ }
+
+ return err;
+ }
+
+-static void dccp_v6_destroy_sock(struct sock *sk)
+-{
+- dccp_destroy_sock(sk);
+- inet6_destroy_sock(sk);
+-}
+-
+ static struct timewait_sock_ops dccp6_timewait_sock_ops = {
+ .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
+ };
+@@ -1049,7 +1050,7 @@ static struct proto dccp_v6_prot = {
+ .accept = inet_csk_accept,
+ .get_port = inet_csk_get_port,
+ .shutdown = dccp_shutdown,
+- .destroy = dccp_v6_destroy_sock,
++ .destroy = dccp_destroy_sock,
+ .orphan_count = &dccp_orphan_count,
+ .max_header = MAX_DCCP_HEADER,
+ .obj_size = sizeof(struct dccp6_sock),
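The dccp hunks here, and the sctp/tcp/udp/l2tp/mptcp ones later in this patch, all share one shape: IPv6 teardown moves out of the protocol's .destroy hook into sk->sk_destruct, with a *_destruct_common() helper that the v4 and v6 destructors call before their address-family-specific cleanup. A toy model of the chaining, with hypothetical names:

#include <stdio.h>

struct sock {
	void (*sk_destruct)(struct sock *sk);
};

static void proto_destruct_common(struct sock *sk)
{
	(void)sk;
	printf("free protocol-private state\n");	/* e.g. the CCID in dccp */
}

static void v4_sk_destruct(struct sock *sk)
{
	proto_destruct_common(sk);
	printf("inet_sock_destruct\n");
}

static void v6_sk_destruct(struct sock *sk)
{
	proto_destruct_common(sk);
	printf("inet6_sock_destruct\n");	/* was inet6_destroy_sock in .destroy */
}

int main(void)
{
	struct sock s4 = { .sk_destruct = v4_sk_destruct };
	struct sock s6 = { .sk_destruct = v6_sk_destruct };

	s4.sk_destruct(&s4);
	s6.sk_destruct(&s6);
	return 0;
}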
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 85e35c5e88902..a06b5641287a2 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -171,12 +171,18 @@ const char *dccp_packet_name(const int type)
+
+ EXPORT_SYMBOL_GPL(dccp_packet_name);
+
+-static void dccp_sk_destruct(struct sock *sk)
++void dccp_destruct_common(struct sock *sk)
+ {
+ struct dccp_sock *dp = dccp_sk(sk);
+
+ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+ dp->dccps_hc_tx_ccid = NULL;
++}
++EXPORT_SYMBOL_GPL(dccp_destruct_common);
++
++static void dccp_sk_destruct(struct sock *sk)
++{
++ dccp_destruct_common(sk);
+ inet_sock_destruct(sk);
+ }
+
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index fb1bf6eb0ff8e..b5309ae87fd79 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -114,6 +114,7 @@ void inet6_sock_destruct(struct sock *sk)
+ inet6_cleanup_sock(sk);
+ inet_sock_destruct(sk);
+ }
++EXPORT_SYMBOL_GPL(inet6_sock_destruct);
+
+ static int inet6_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 86c26e48d065a..808983bc2ec9f 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -23,11 +23,6 @@
+ #include <linux/bpf-cgroup.h>
+ #include <net/ping.h>
+
+-static void ping_v6_destroy(struct sock *sk)
+-{
+- inet6_destroy_sock(sk);
+-}
+-
+ /* Compatibility glue so we can support IPv6 when it's compiled as a module */
+ static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len)
+@@ -205,7 +200,6 @@ struct proto pingv6_prot = {
+ .owner = THIS_MODULE,
+ .init = ping_init_sock,
+ .close = ping_close,
+- .destroy = ping_v6_destroy,
+ .pre_connect = ping_v6_pre_connect,
+ .connect = ip6_datagram_connect_v6_only,
+ .disconnect = __udp_disconnect,
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 9ee1506e23ab1..4fc511bdf176c 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -1175,8 +1175,6 @@ static void raw6_destroy(struct sock *sk)
+ lock_sock(sk);
+ ip6_flush_pending_frames(sk);
+ release_sock(sk);
+-
+- inet6_destroy_sock(sk);
+ }
+
+ static int rawv6_init_sk(struct sock *sk)
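The one-line fix above makes ipv6_rpl_srh_size() count the fixed part of the RPL source routing header, not just the compressed addresses, so allocations are no longer undersized by the header length. A rough arithmetic check; the 8-byte fixed length is an assumption standing in for sizeof(struct ipv6_rpl_sr_hdr), and 16 is sizeof(struct in6_addr):

#include <stdio.h>

#define IN6_ADDR_LEN	16
#define PFXTAIL_LEN(c)	(IN6_ADDR_LEN - (c))	/* bytes kept per address */
#define SRH_FIXED_LEN	8	/* assumed fixed header size */

static unsigned int srh_size(unsigned int n, unsigned int cmpri,
			     unsigned int cmpre)
{
	/* The old code omitted SRH_FIXED_LEN, under-sizing every buffer. */
	return SRH_FIXED_LEN + n * PFXTAIL_LEN(cmpri) + PFXTAIL_LEN(cmpre);
}

int main(void)
{
	/* 3 intermediate segments, 10 bytes elided each, 8 from the last */
	printf("srh size: %u bytes\n", srh_size(3, 10, 8));	/* 34 */
	return 0;
}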
+diff --git a/net/ipv6/rpl.c b/net/ipv6/rpl.c
+index 488aec9e1a74f..d1876f1922255 100644
+--- a/net/ipv6/rpl.c
++++ b/net/ipv6/rpl.c
+@@ -32,7 +32,8 @@ static void *ipv6_rpl_segdata_pos(const struct ipv6_rpl_sr_hdr *hdr, int i)
+ size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
+ unsigned char cmpre)
+ {
+- return (n * IPV6_PFXTAIL_LEN(cmpri)) + IPV6_PFXTAIL_LEN(cmpre);
++ return sizeof(struct ipv6_rpl_sr_hdr) + (n * IPV6_PFXTAIL_LEN(cmpri)) +
++ IPV6_PFXTAIL_LEN(cmpre);
+ }
+
+ void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index ea1ecf5fe947c..81afb40bfc0bb 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1951,12 +1951,6 @@ static int tcp_v6_init_sock(struct sock *sk)
+ return 0;
+ }
+
+-static void tcp_v6_destroy_sock(struct sock *sk)
+-{
+- tcp_v4_destroy_sock(sk);
+- inet6_destroy_sock(sk);
+-}
+-
+ #ifdef CONFIG_PROC_FS
+ /* Proc filesystem TCPv6 sock list dumping. */
+ static void get_openreq6(struct seq_file *seq,
+@@ -2149,7 +2143,7 @@ struct proto tcpv6_prot = {
+ .accept = inet_csk_accept,
+ .ioctl = tcp_ioctl,
+ .init = tcp_v6_init_sock,
+- .destroy = tcp_v6_destroy_sock,
++ .destroy = tcp_v4_destroy_sock,
+ .shutdown = tcp_shutdown,
+ .setsockopt = tcp_setsockopt,
+ .getsockopt = tcp_getsockopt,
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 17d721a6add72..0b8127988adb7 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1668,8 +1668,6 @@ void udpv6_destroy_sock(struct sock *sk)
+ udp_encap_disable();
+ }
+ }
+-
+- inet6_destroy_sock(sk);
+ }
+
+ /*
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 9db7f4f5a4414..5137ea1861ce2 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -257,8 +257,6 @@ static void l2tp_ip6_destroy_sock(struct sock *sk)
+
+ if (tunnel)
+ l2tp_tunnel_delete(tunnel);
+-
+- inet6_destroy_sock(sk);
+ }
+
+ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 19f35869a164a..b1bbb0b75a13c 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -3939,12 +3939,6 @@ static const struct proto_ops mptcp_v6_stream_ops = {
+
+ static struct proto mptcp_v6_prot;
+
+-static void mptcp_v6_destroy(struct sock *sk)
+-{
+- mptcp_destroy(sk);
+- inet6_destroy_sock(sk);
+-}
+-
+ static struct inet_protosw mptcp_v6_protosw = {
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_MPTCP,
+@@ -3960,7 +3954,6 @@ int __init mptcp_proto_v6_init(void)
+ mptcp_v6_prot = mptcp_prot;
+ strcpy(mptcp_v6_prot.name, "MPTCPv6");
+ mptcp_v6_prot.slab = NULL;
+- mptcp_v6_prot.destroy = mptcp_v6_destroy;
+ mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
+
+ err = proto_register(&mptcp_v6_prot, 1);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 1a9d759d0a026..12d815b9aa131 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3391,6 +3391,64 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
+ return 0;
+ }
+
++int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
++ const struct nft_set_iter *iter,
++ struct nft_set_elem *elem)
++{
++ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
++ struct nft_ctx *pctx = (struct nft_ctx *)ctx;
++ const struct nft_data *data;
++ int err;
++
++ if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
++ *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
++ return 0;
++
++ data = nft_set_ext_data(ext);
++ switch (data->verdict.code) {
++ case NFT_JUMP:
++ case NFT_GOTO:
++ pctx->level++;
++ err = nft_chain_validate(ctx, data->verdict.chain);
++ if (err < 0)
++ return err;
++ pctx->level--;
++ break;
++ default:
++ break;
++ }
++
++ return 0;
++}
++
++struct nft_set_elem_catchall {
++ struct list_head list;
++ struct rcu_head rcu;
++ void *elem;
++};
++
++int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
++{
++ u8 genmask = nft_genmask_next(ctx->net);
++ struct nft_set_elem_catchall *catchall;
++ struct nft_set_elem elem;
++ struct nft_set_ext *ext;
++ int ret = 0;
++
++ list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
++ ext = nft_set_elem_ext(set, catchall->elem);
++ if (!nft_set_elem_active(ext, genmask))
++ continue;
++
++ elem.priv = catchall->elem;
++ ret = nft_setelem_validate(ctx, set, NULL, &elem);
++ if (ret < 0)
++ return ret;
++ }
++
++ return ret;
++}
++
+ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+ const struct nft_chain *chain,
+ const struct nlattr *nla);
+@@ -4695,12 +4753,6 @@ err_set_name:
+ return err;
+ }
+
+-struct nft_set_elem_catchall {
+- struct list_head list;
+- struct rcu_head rcu;
+- void *elem;
+-};
+-
+ static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
+ struct nft_set *set)
+ {
+@@ -5988,7 +6040,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ if (err < 0)
+ return err;
+
+- if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
++ if (((flags & NFT_SET_ELEM_CATCHALL) && nla[NFTA_SET_ELEM_KEY]) ||
++ (!(flags & NFT_SET_ELEM_CATCHALL) && !nla[NFTA_SET_ELEM_KEY]))
+ return -EINVAL;
+
+ if (flags != 0) {
+@@ -6980,7 +7033,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ }
+
+ if (nla[NFTA_OBJ_USERDATA]) {
+- obj->udata = nla_memdup(nla[NFTA_OBJ_USERDATA], GFP_KERNEL);
++ obj->udata = nla_memdup(nla[NFTA_OBJ_USERDATA], GFP_KERNEL_ACCOUNT);
+ if (obj->udata == NULL)
+ goto err_userdata;
+
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index dfae12759c7cd..d9ad1aa818564 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -198,37 +198,6 @@ nla_put_failure:
+ return -1;
+ }
+
+-static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
+- struct nft_set *set,
+- const struct nft_set_iter *iter,
+- struct nft_set_elem *elem)
+-{
+- const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+- struct nft_ctx *pctx = (struct nft_ctx *)ctx;
+- const struct nft_data *data;
+- int err;
+-
+- if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+- *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
+- return 0;
+-
+- data = nft_set_ext_data(ext);
+- switch (data->verdict.code) {
+- case NFT_JUMP:
+- case NFT_GOTO:
+- pctx->level++;
+- err = nft_chain_validate(ctx, data->verdict.chain);
+- if (err < 0)
+- return err;
+- pctx->level--;
+- break;
+- default:
+- break;
+- }
+-
+- return 0;
+-}
+-
+ static int nft_lookup_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **d)
+@@ -244,9 +213,12 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
+ iter.skip = 0;
+ iter.count = 0;
+ iter.err = 0;
+- iter.fn = nft_lookup_validate_setelem;
++ iter.fn = nft_setelem_validate;
+
+ priv->set->ops->walk(ctx, priv->set, &iter);
++ if (!iter.err)
++ iter.err = nft_set_catchall_validate(ctx, priv->set);
++
+ if (iter.err < 0)
+ return iter.err;
+
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index cf5ebe43b3b4e..02098a02943eb 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -421,15 +421,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ } else
+ weight = 1;
+
+- if (tb[TCA_QFQ_LMAX]) {
++ if (tb[TCA_QFQ_LMAX])
+ lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
+- if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
+- pr_notice("qfq: invalid max length %u\n", lmax);
+- return -EINVAL;
+- }
+- } else
++ else
+ lmax = psched_mtu(qdisc_dev(sch));
+
++ if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
++ pr_notice("qfq: invalid max length %u\n", lmax);
++ return -EINVAL;
++ }
++
+ inv_w = ONE_FP / weight;
+ weight = ONE_FP / inv_w;
+
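The qfq change fixes a validate-all-paths bug: the lmax bound check ran only when userspace supplied TCA_QFQ_LMAX, so a device MTU outside the valid range slipped through as the default. A sketch of the corrected shape, with stand-in constants for QFQ_MIN_LMAX and 1UL << QFQ_MTU_SHIFT:

#include <stdio.h>

#define MIN_LMAX	512		/* stand-in for QFQ_MIN_LMAX */
#define MAX_LMAX	(1UL << 18)	/* stand-in for 1UL << QFQ_MTU_SHIFT */

static int check_lmax(int user_supplied, unsigned long user_lmax,
		      unsigned long dev_mtu)
{
	unsigned long lmax = user_supplied ? user_lmax : dev_mtu;

	/* Validate on both paths, not just the user-supplied one. */
	if (lmax < MIN_LMAX || lmax > MAX_LMAX) {
		fprintf(stderr, "invalid max length %lu\n", lmax);
		return -22;	/* -EINVAL */
	}
	return 0;
}

int main(void)
{
	printf("user 1514:   %d\n", check_lmax(1, 1514, 0));
	printf("mtu 9000000: %d\n", check_lmax(0, 0, 9000000));	/* now rejected */
	return 0;
}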
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 507b2ad5ef7c7..17185200079d5 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -5102,13 +5102,17 @@ static void sctp_destroy_sock(struct sock *sk)
+ }
+
+ /* Triggered when there are no references on the socket anymore */
+-static void sctp_destruct_sock(struct sock *sk)
++static void sctp_destruct_common(struct sock *sk)
+ {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ /* Free up the HMAC transform. */
+ crypto_free_shash(sp->hmac);
++}
+
++static void sctp_destruct_sock(struct sock *sk)
++{
++ sctp_destruct_common(sk);
+ inet_sock_destruct(sk);
+ }
+
+@@ -9431,7 +9435,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ sctp_sk(newsk)->reuse = sp->reuse;
+
+ newsk->sk_shutdown = sk->sk_shutdown;
+- newsk->sk_destruct = sctp_destruct_sock;
++ newsk->sk_destruct = sk->sk_destruct;
+ newsk->sk_family = sk->sk_family;
+ newsk->sk_protocol = IPPROTO_SCTP;
+ newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+@@ -9666,11 +9670,20 @@ struct proto sctp_prot = {
+
+ #if IS_ENABLED(CONFIG_IPV6)
+
+-#include <net/transp_v6.h>
+-static void sctp_v6_destroy_sock(struct sock *sk)
++static void sctp_v6_destruct_sock(struct sock *sk)
++{
++ sctp_destruct_common(sk);
++ inet6_sock_destruct(sk);
++}
++
++static int sctp_v6_init_sock(struct sock *sk)
+ {
+- sctp_destroy_sock(sk);
+- inet6_destroy_sock(sk);
++ int ret = sctp_init_sock(sk);
++
++ if (!ret)
++ sk->sk_destruct = sctp_v6_destruct_sock;
++
++ return ret;
+ }
+
+ struct proto sctpv6_prot = {
+@@ -9680,8 +9693,8 @@ struct proto sctpv6_prot = {
+ .disconnect = sctp_disconnect,
+ .accept = sctp_accept,
+ .ioctl = sctp_ioctl,
+- .init = sctp_init_sock,
+- .destroy = sctp_v6_destroy_sock,
++ .init = sctp_v6_init_sock,
++ .destroy = sctp_destroy_sock,
+ .shutdown = sctp_shutdown,
+ .setsockopt = sctp_setsockopt,
+ .getsockopt = sctp_getsockopt,
+diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
+index 97ff086ba22e9..b6d1c12136de1 100644
+--- a/rust/kernel/print.rs
++++ b/rust/kernel/print.rs
+@@ -18,7 +18,11 @@ use crate::bindings;
+
+ // Called from `vsprintf` with format specifier `%pA`.
+ #[no_mangle]
+-unsafe fn rust_fmt_argument(buf: *mut c_char, end: *mut c_char, ptr: *const c_void) -> *mut c_char {
++unsafe extern "C" fn rust_fmt_argument(
++ buf: *mut c_char,
++ end: *mut c_char,
++ ptr: *const c_void,
++) -> *mut c_char {
+ use fmt::Write;
+ // SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`.
+ let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) };
+diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
+index e45ff220ae50f..2c4b4bac28f42 100644
+--- a/rust/kernel/str.rs
++++ b/rust/kernel/str.rs
+@@ -29,7 +29,7 @@ impl RawFormatter {
+ /// If `pos` is less than `end`, then the region between `pos` (inclusive) and `end`
+ /// (exclusive) must be valid for writes for the lifetime of the returned [`RawFormatter`].
+ pub(crate) unsafe fn from_ptrs(pos: *mut u8, end: *mut u8) -> Self {
+- // INVARIANT: The safety requierments guarantee the type invariants.
++ // INVARIANT: The safety requirements guarantee the type invariants.
+ Self {
+ beg: pos as _,
+ pos: pos as _,
+diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
+index 71d4a7c879008..c3e501451b41d 100644
+--- a/scripts/asn1_compiler.c
++++ b/scripts/asn1_compiler.c
+@@ -625,7 +625,7 @@ int main(int argc, char **argv)
+ p = strrchr(argv[1], '/');
+ p = p ? p + 1 : argv[1];
+ grammar_name = strdup(p);
+- if (!p) {
++ if (!grammar_name) {
+ perror(NULL);
+ exit(1);
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6a6c72b5ea26d..f70d6a33421d2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9468,6 +9468,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
+index 3b81a465814a1..05a7d1588d204 100644
+--- a/sound/soc/fsl/fsl_asrc_dma.c
++++ b/sound/soc/fsl/fsl_asrc_dma.c
+@@ -209,14 +209,19 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
+ be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
+ tmp_chan = be_chan;
+ }
+- if (!tmp_chan)
+- tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
++ if (!tmp_chan) {
++ tmp_chan = dma_request_chan(dev_be, tx ? "tx" : "rx");
++ if (IS_ERR(tmp_chan)) {
++ dev_err(dev, "failed to request DMA channel for Back-End\n");
++ return -EINVAL;
++ }
++ }
+
+ /*
+ * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
+ * peripheral, unlike SDMA channel that is allocated dynamically. So no
+ * need to configure dma_request and dma_request2, but get dma_chan of
+- * Back-End device directly via dma_request_slave_channel.
++ * Back-End device directly via dma_request_chan.
+ */
+ if (!asrc->use_edma) {
+ /* Get DMA request of Back-End */
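dma_request_slave_channel() signals failure with NULL, while its replacement dma_request_chan() returns an ERR_PTR() that must be tested with IS_ERR(), which is exactly the conversion the hunk above performs. A minimal userspace rendition of the two conventions, with the ERR_PTR helpers re-implemented for illustration:

#include <stdio.h>

/* Userspace stand-ins for the kernel's ERR_PTR helpers. */
#define MAX_ERRNO	4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *request_chan(int ok)
{
	static int chan;

	return ok ? (void *)&chan : ERR_PTR(-19);	/* -ENODEV */
}

int main(void)
{
	void *chan = request_chan(0);

	if (IS_ERR(chan))	/* a NULL check would miss this failure */
		printf("request failed: %ld\n", PTR_ERR(chan));
	return 0;
}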
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index df7c0bf372451..6d88af5b287fe 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -1541,7 +1541,7 @@ static const struct fsl_sai_soc_data fsl_sai_imx8qm_data = {
+ .use_imx_pcm = true,
+ .use_edma = true,
+ .fifo_depth = 64,
+- .pins = 1,
++ .pins = 4,
+ .reg_offset = 0,
+ .mclk0_is_mclk1 = false,
+ .flags = 0,
+diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
+index 8722bbd7fd3d7..26ffcbb6e30f4 100644
+--- a/sound/soc/sof/pm.c
++++ b/sound/soc/sof/pm.c
+@@ -183,6 +183,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
+ pm_message_t pm_state;
+ u32 target_state = snd_sof_dsp_power_target(sdev);
++ u32 old_state = sdev->dsp_power_state.state;
+ int ret;
+
+ /* do nothing if dsp suspend callback is not set */
+@@ -192,7 +193,12 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
+ return 0;
+
+- if (tplg_ops && tplg_ops->tear_down_all_pipelines)
++ /* we need to tear down pipelines only if the DSP hardware is
++	 * active, which happens for PCI devices. If the device is
++ * suspended, it is brought back to full power and then
++ * suspended again
++ */
++ if (tplg_ops && tplg_ops->tear_down_all_pipelines && (old_state == SOF_DSP_PM_D0))
+ tplg_ops->tear_down_all_pipelines(sdev, false);
+
+ if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
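The new header hoists the per-architecture global register variable out of sas.c so any selftest can read the stack pointer portably. Usage is just an include plus a read of sp; a hedged x86_64-only demo of the underlying GCC/Clang extension:

/* Assumes an x86_64 build; other architectures pick their own register
 * name via the #elif chain in current_stack_pointer.h. */
#include <stdio.h>

register unsigned long sp asm("rsp");

int main(void)
{
	unsigned long local = (unsigned long)&local;

	/* The stack pointer and a local's address should be close. */
	printf("sp=%#lx &local=%#lx\n", sp, local);
	return 0;
}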
+diff --git a/tools/testing/selftests/sigaltstack/current_stack_pointer.h b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+new file mode 100644
+index 0000000000000..ea9bdf3a90b16
+--- /dev/null
++++ b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+@@ -0,0 +1,23 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#if __alpha__
++register unsigned long sp asm("$30");
++#elif __arm__ || __aarch64__ || __csky__ || __m68k__ || __mips__ || __riscv
++register unsigned long sp asm("sp");
++#elif __i386__
++register unsigned long sp asm("esp");
++#elif __loongarch64
++register unsigned long sp asm("$sp");
++#elif __ppc__
++register unsigned long sp asm("r1");
++#elif __s390x__
++register unsigned long sp asm("%15");
++#elif __sh__
++register unsigned long sp asm("r15");
++#elif __x86_64__
++register unsigned long sp asm("rsp");
++#elif __XTENSA__
++register unsigned long sp asm("a1");
++#else
++#error "implement current_stack_pointer equivalent"
++#endif
+diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
+index c53b070755b65..98d37cb744fb2 100644
+--- a/tools/testing/selftests/sigaltstack/sas.c
++++ b/tools/testing/selftests/sigaltstack/sas.c
+@@ -20,6 +20,7 @@
+ #include <sys/auxv.h>
+
+ #include "../kselftest.h"
++#include "current_stack_pointer.h"
+
+ #ifndef SS_AUTODISARM
+ #define SS_AUTODISARM (1U << 31)
+@@ -46,12 +47,6 @@ void my_usr1(int sig, siginfo_t *si, void *u)
+ stack_t stk;
+ struct stk_data *p;
+
+-#if __s390x__
+- register unsigned long sp asm("%15");
+-#else
+- register unsigned long sp asm("sp");
+-#endif
+-
+ if (sp < (unsigned long)sstack ||
+ sp >= (unsigned long)sstack + stack_size) {
+ ksft_exit_fail_msg("SP is not on sigaltstack\n");
+diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
+index ce860ab941629..58ebfe3924024 100644
+--- a/tools/vm/page_owner_sort.c
++++ b/tools/vm/page_owner_sort.c
+@@ -847,7 +847,7 @@ int main(int argc, char **argv)
+ if (cull & CULL_PID || filter & FILTER_PID)
+ fprintf(fout, ", PID %d", list[i].pid);
+ if (cull & CULL_TGID || filter & FILTER_TGID)
+- fprintf(fout, ", TGID %d", list[i].pid);
++ fprintf(fout, ", TGID %d", list[i].tgid);
+ if (cull & CULL_COMM || filter & FILTER_COMM)
+ fprintf(fout, ", task_comm_name: %s", list[i].comm);
+ if (cull & CULL_ALLOCATOR) {