author     Mike Pagano <mpagano@gentoo.org>  2021-01-27 06:28:59 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2021-01-27 06:28:59 -0500
commit     42b6a29af6c32b8480be206fc4489da99d58ab2c (patch)
tree       e6026a78fd88b51e54ee1e8467d13e268ba9053c
parent     Linux patch 5.10.10 (diff)
download   linux-patches-42b6a29af6c32b8480be206fc4489da99d58ab2c.tar.gz
           linux-patches-42b6a29af6c32b8480be206fc4489da99d58ab2c.tar.bz2
           linux-patches-42b6a29af6c32b8480be206fc4489da99d58ab2c.zip

Linux patch 5.10.11 (tag: 5.10-13)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1010_linux-5.10.11.patch | 7021
2 files changed, 7025 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 4ad6d695..fe8a7782 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch: 1009_linux-5.10.10.patch
From: http://www.kernel.org
Desc: Linux 5.10.10
+Patch: 1010_linux-5.10.11.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.11
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1010_linux-5.10.11.patch b/1010_linux-5.10.11.patch
new file mode 100644
index 00000000..a4b4a65b
--- /dev/null
+++ b/1010_linux-5.10.11.patch
@@ -0,0 +1,7021 @@
+diff --git a/Documentation/ABI/testing/sysfs-class-devlink b/Documentation/ABI/testing/sysfs-class-devlink
+index b662f747c83eb..8a21ce515f61f 100644
+--- a/Documentation/ABI/testing/sysfs-class-devlink
++++ b/Documentation/ABI/testing/sysfs-class-devlink
+@@ -5,8 +5,8 @@ Description:
+ Provide a place in sysfs for the device link objects in the
+ kernel at any given time. The name of a device link directory,
+ denoted as ... above, is of the form <supplier>--<consumer>
+- where <supplier> is the supplier device name and <consumer> is
+- the consumer device name.
++ where <supplier> is the supplier bus:device name and <consumer>
++ is the consumer bus:device name.
+
+ What: /sys/class/devlink/.../auto_remove_on
+ Date: May 2020
+diff --git a/Documentation/ABI/testing/sysfs-devices-consumer b/Documentation/ABI/testing/sysfs-devices-consumer
+index 1f06d74d1c3cc..0809fda092e66 100644
+--- a/Documentation/ABI/testing/sysfs-devices-consumer
++++ b/Documentation/ABI/testing/sysfs-devices-consumer
+@@ -4,5 +4,6 @@ Contact: Saravana Kannan <saravanak@google.com>
+ Description:
+ The /sys/devices/.../consumer:<consumer> are symlinks to device
+ links where this device is the supplier. <consumer> denotes the
+- name of the consumer in that device link. There can be zero or
+- more of these symlinks for a given device.
++ name of the consumer in that device link and is of the form
++ bus:device name. There can be zero or more of these symlinks
++ for a given device.
+diff --git a/Documentation/ABI/testing/sysfs-devices-supplier b/Documentation/ABI/testing/sysfs-devices-supplier
+index a919e0db5e902..207f5972e98d8 100644
+--- a/Documentation/ABI/testing/sysfs-devices-supplier
++++ b/Documentation/ABI/testing/sysfs-devices-supplier
+@@ -4,5 +4,6 @@ Contact: Saravana Kannan <saravanak@google.com>
+ Description:
+ The /sys/devices/.../supplier:<supplier> are symlinks to device
+ links where this device is the consumer. <supplier> denotes the
+- name of the supplier in that device link. There can be zero or
+- more of these symlinks for a given device.
++ name of the supplier in that device link and is of the form
++ bus:device name. There can be zero or more of these symlinks
++ for a given device.
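The three ABI updates above describe one and the same change: each endpoint of a
device link is now identified as <bus>:<device> instead of the bare device name,
which was ambiguous whenever two buses host identically named devices. As a
user-space sketch of the resulting names (the bus and device names below are
hypothetical examples, not taken from this patch):

    #include <stdio.h>

    int main(void)
    {
            char name[64];

            /* e.g. supplier 0-0040 on the i2c bus, consumer gpio-keys on
             * the platform bus; both names are made up for illustration */
            snprintf(name, sizeof(name), "%s:%s--%s:%s",
                     "i2c", "0-0040", "platform", "gpio-keys");
            printf("/sys/class/devlink/%s\n", name);
            return 0;
    }

The kernel-side counterpart is the dev_set_name() change in the
drivers/base/core.c hunk further down in this patch.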
+diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
+index 3ab4f7756a6e6..bf878c879afb6 100644
+--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
++++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
+@@ -177,14 +177,20 @@ bitmap_flush_interval:number
+ The bitmap flush interval in milliseconds. The metadata buffers
+ are synchronized when this interval expires.
+
++allow_discards
++ Allow block discard requests (a.k.a. TRIM) for the integrity device.
++ Discards are only allowed to devices using internal hash.
++
+ fix_padding
+ Use a smaller padding of the tag area that is more
+ space-efficient. If this option is not present, large padding is
+ used - that is for compatibility with older kernels.
+
+-allow_discards
+- Allow block discard requests (a.k.a. TRIM) for the integrity device.
+- Discards are only allowed to devices using internal hash.
++legacy_recalculate
++ Allow recalculating of volumes with HMAC keys. This is disabled by
++ default for security reasons - an attacker could modify the volume,
++ set recalc_sector to zero, and the kernel would not detect the
++ modification.
+
+ The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
+ allow_discards can be changed when reloading the target (load an inactive
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index f6a1513dfb76c..26bfe7ae711b8 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5965,6 +5965,10 @@
+ This option is obsoleted by the "nopv" option, which
+ has equivalent effect for XEN platform.
+
++ xen_no_vector_callback
++ [KNL,X86,XEN] Disable the vector callback for Xen
++ event channel interrupts.
++
+ xen_scrub_pages= [XEN]
+ Boolean option to control scrubbing pages before giving them back
+ to Xen, for use by other domains. Can be also changed at runtime
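The parameter is wired up through an early_param() hook; the handler added by
this same patch in arch/x86/xen/enlighten_hvm.c (see the hunk further down) is
essentially:

    static bool no_vector_callback __initdata;

    static __init int xen_parse_no_vector_callback(char *arg)
    {
            no_vector_callback = true;      /* checked in xen_hvm_guest_init() */
            return 0;
    }
    early_param("xen_no_vector_callback", xen_parse_no_vector_callback);

With the flag on the command line, xen_hvm_guest_init() leaves
xen_have_vector_callback unset and the guest falls back to the legacy
platform interrupt for event channel delivery.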
+diff --git a/Makefile b/Makefile
+index 7d86ad6ad36cc..7a5d906f6ee36 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index 60e901cd0de6a..5a957a9a09843 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -371,7 +371,7 @@ static int __init xen_guest_init(void)
+ }
+ gnttab_init();
+ if (!xen_initial_domain())
+- xenbus_probe(NULL);
++ xenbus_probe();
+
+ /*
+ * Making sure board specific code will not set up ops for
+diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
+index 015ddffaf6caa..b56a4b2bc2486 100644
+--- a/arch/arm64/include/asm/atomic.h
++++ b/arch/arm64/include/asm/atomic.h
+@@ -17,7 +17,7 @@
+ #include <asm/lse.h>
+
+ #define ATOMIC_OP(op) \
+-static inline void arch_##op(int i, atomic_t *v) \
++static __always_inline void arch_##op(int i, atomic_t *v) \
+ { \
+ __lse_ll_sc_body(op, i, v); \
+ }
+@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
+ #undef ATOMIC_OP
+
+ #define ATOMIC_FETCH_OP(name, op) \
+-static inline int arch_##op##name(int i, atomic_t *v) \
++static __always_inline int arch_##op##name(int i, atomic_t *v) \
+ { \
+ return __lse_ll_sc_body(op##name, i, v); \
+ }
+@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
+ #undef ATOMIC_FETCH_OPS
+
+ #define ATOMIC64_OP(op) \
+-static inline void arch_##op(long i, atomic64_t *v) \
++static __always_inline void arch_##op(long i, atomic64_t *v) \
+ { \
+ __lse_ll_sc_body(op, i, v); \
+ }
+@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
+ #undef ATOMIC64_OP
+
+ #define ATOMIC64_FETCH_OP(name, op) \
+-static inline long arch_##op##name(long i, atomic64_t *v) \
++static __always_inline long arch_##op##name(long i, atomic64_t *v) \
+ { \
+ return __lse_ll_sc_body(op##name, i, v); \
+ }
+@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_FETCH_OPS
+
+-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
++static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+ {
+ return __lse_ll_sc_body(atomic64_dec_if_positive, v);
+ }
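The change above is mechanical: every wrapper these macros generate goes from
inline to __always_inline, so the compiler can never emit an out-of-line,
instrumentable copy of what is meant to be a thin dispatch to the LSE or LL/SC
implementation. As a sketch, ATOMIC_OP(atomic_add) now expands to:

    static __always_inline void arch_atomic_add(int i, atomic_t *v)
    {
            /* picks the LSE or LL/SC body depending on CPU support */
            __lse_ll_sc_body(atomic_add, i, v);
    }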
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index a8184cad88907..50852992752b0 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -914,13 +914,6 @@ static void do_signal(struct pt_regs *regs)
+ asmlinkage void do_notify_resume(struct pt_regs *regs,
+ unsigned long thread_flags)
+ {
+- /*
+- * The assembly code enters us with IRQs off, but it hasn't
+- * informed the tracing code of that for efficiency reasons.
+- * Update the trace code with the current status.
+- */
+- trace_hardirqs_off();
+-
+ do {
+ /* Check valid user FS if needed */
+ addr_limit_user_check();
+diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
+index f8f758e4a3064..6fa8cfb8232aa 100644
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -165,15 +165,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
+ local_daif_mask();
+ flags = current_thread_info()->flags;
+- if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
+- /*
+- * We're off to userspace, where interrupts are
+- * always enabled after we restore the flags from
+- * the SPSR.
+- */
+- trace_hardirqs_on();
++ if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
+ return;
+- }
+ local_daif_restore(DAIF_PROCCTX);
+ }
+
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index 1d32b174ab6ae..c1a8aac01cf91 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -63,6 +63,12 @@
+ nop; \
+ nop;
+
++#define SCV_ENTRY_FLUSH_SLOT \
++ SCV_ENTRY_FLUSH_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop;
++
+ /*
+ * r10 must be free to use, r13 must be paca
+ */
+@@ -70,6 +76,13 @@
+ STF_ENTRY_BARRIER_SLOT; \
+ ENTRY_FLUSH_SLOT
+
++/*
++ * r10, ctr must be free to use, r13 must be paca
++ */
++#define SCV_INTERRUPT_TO_KERNEL \
++ STF_ENTRY_BARRIER_SLOT; \
++ SCV_ENTRY_FLUSH_SLOT
++
+ /*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index fbd406cd6916c..8d100059e266c 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -221,6 +221,14 @@ label##3: \
+ FTR_ENTRY_OFFSET 957b-958b; \
+ .popsection;
+
++#define SCV_ENTRY_FLUSH_FIXUP_SECTION \
++957: \
++ .pushsection __scv_entry_flush_fixup,"a"; \
++ .align 2; \
++958: \
++ FTR_ENTRY_OFFSET 957b-958b; \
++ .popsection;
++
+ #define RFI_FLUSH_FIXUP_SECTION \
+ 951: \
+ .pushsection __rfi_flush_fixup,"a"; \
+@@ -254,10 +262,12 @@ label##3: \
+
+ extern long stf_barrier_fallback;
+ extern long entry_flush_fallback;
++extern long scv_entry_flush_fallback;
+ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+ extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+ extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
+ extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
++extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+ extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+ extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 2f3846192ec7d..2831b0aa92b15 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -75,7 +75,7 @@ BEGIN_FTR_SECTION
+ bne .Ltabort_syscall
+ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+- INTERRUPT_TO_KERNEL
++ SCV_INTERRUPT_TO_KERNEL
+ mr r10,r1
+ ld r1,PACAKSAVE(r13)
+ std r10,0(r1)
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 4d01f09ecf808..3cde2fbd74fce 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -2993,6 +2993,25 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ blr
+
++/*
++ * The SCV entry flush happens with interrupts enabled, so it must disable
++ * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10
++ * (containing LR) does not need to be preserved here because scv entry
++ * puts 0 in the pt_regs, CTR can be clobbered for the same reason.
++ */
++TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
++ li r10,0
++ mtmsrd r10,1
++ lbz r10,PACAIRQHAPPENED(r13)
++ ori r10,r10,PACA_IRQ_HARD_DIS
++ stb r10,PACAIRQHAPPENED(r13)
++ std r11,PACA_EXRFI+EX_R11(r13)
++ L1D_DISPLACEMENT_FLUSH
++ ld r11,PACA_EXRFI+EX_R11(r13)
++ li r10,MSR_RI
++ mtmsrd r10,1
++ blr
++
+ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index f887f9d5b9e84..4a1f494ef03f3 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -145,6 +145,13 @@ SECTIONS
+ __stop___entry_flush_fixup = .;
+ }
+
++ . = ALIGN(8);
++ __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
++ __start___scv_entry_flush_fixup = .;
++ *(__scv_entry_flush_fixup)
++ __stop___scv_entry_flush_fixup = .;
++ }
++
+ . = ALIGN(8);
+ __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
+ __start___stf_exit_barrier_fixup = .;
+@@ -187,6 +194,12 @@ SECTIONS
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+ _sinittext = .;
+ INIT_TEXT
++
++ /*
++ *.init.text might be RO so we must ensure this section ends on
++ * a page boundary.
++ */
++ . = ALIGN(PAGE_SIZE);
+ _einittext = .;
+ #ifdef CONFIG_PPC64
+ *(.tramp.ftrace.init);
+@@ -200,21 +213,9 @@ SECTIONS
+ EXIT_TEXT
+ }
+
+- .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
+- INIT_DATA
+- }
+-
+- .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
+- INIT_SETUP(16)
+- }
+-
+- .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
+- INIT_CALLS
+- }
++ . = ALIGN(PAGE_SIZE);
+
+- .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
+- CON_INITCALL
+- }
++ INIT_DATA_SECTION(16)
+
+ . = ALIGN(8);
+ __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
+@@ -242,9 +243,6 @@ SECTIONS
+ __stop___fw_ftr_fixup = .;
+ }
+ #endif
+- .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+- INIT_RAM_FS
+- }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 321c12a9ef6b8..92705d6dfb6e0 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -290,9 +290,6 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
+ long *start, *end;
+ int i;
+
+- start = PTRRELOC(&__start___entry_flush_fixup);
+- end = PTRRELOC(&__stop___entry_flush_fixup);
+-
+ instrs[0] = 0x60000000; /* nop */
+ instrs[1] = 0x60000000; /* nop */
+ instrs[2] = 0x60000000; /* nop */
+@@ -312,6 +309,8 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
+ if (types & L1D_FLUSH_MTTRIG)
+ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
++ start = PTRRELOC(&__start___entry_flush_fixup);
++ end = PTRRELOC(&__stop___entry_flush_fixup);
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+@@ -328,6 +327,25 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ }
+
++ start = PTRRELOC(&__start___scv_entry_flush_fixup);
++ end = PTRRELOC(&__stop___scv_entry_flush_fixup);
++ for (; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
++
++ if (types == L1D_FLUSH_FALLBACK)
++ patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
++ BRANCH_SET_LINK);
++ else
++ patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
++
++ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
++ }
++
++
+ printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
+ (types == L1D_FLUSH_NONE) ? "no" :
+ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index 44377fd7860e4..234a21d26f674 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -134,7 +134,7 @@ config PA_BITS
+
+ config PAGE_OFFSET
+ hex
+- default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
++ default 0xC0000000 if 32BIT && MAXPHYSMEM_1GB
+ default 0x80000000 if 64BIT && !MMU
+ default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
+ default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
+@@ -247,10 +247,12 @@ config MODULE_SECTIONS
+
+ choice
+ prompt "Maximum Physical Memory"
+- default MAXPHYSMEM_2GB if 32BIT
++ default MAXPHYSMEM_1GB if 32BIT
+ default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW
+ default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
+
++ config MAXPHYSMEM_1GB
++ bool "1GiB"
+ config MAXPHYSMEM_2GB
+ bool "2GiB"
+ config MAXPHYSMEM_128GB
+diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+index 4a2729f5ca3f0..24d75a146e02d 100644
+--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
++++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+@@ -88,7 +88,9 @@
+ phy-mode = "gmii";
+ phy-handle = <&phy0>;
+ phy0: ethernet-phy@0 {
++ compatible = "ethernet-phy-id0007.0771";
+ reg = <0>;
++ reset-gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
+index d222d353d86d4..8c3d1e4517031 100644
+--- a/arch/riscv/configs/defconfig
++++ b/arch/riscv/configs/defconfig
+@@ -64,6 +64,8 @@ CONFIG_HW_RANDOM=y
+ CONFIG_HW_RANDOM_VIRTIO=y
+ CONFIG_SPI=y
+ CONFIG_SPI_SIFIVE=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SIFIVE=y
+ # CONFIG_PTP_1588_CLOCK is not set
+ CONFIG_POWER_RESET=y
+ CONFIG_DRM=y
+diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
+index de59dd457b415..d867813570442 100644
+--- a/arch/riscv/kernel/cacheinfo.c
++++ b/arch/riscv/kernel/cacheinfo.c
+@@ -26,7 +26,16 @@ cache_get_priv_group(struct cacheinfo *this_leaf)
+
+ static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
+ {
+- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(smp_processor_id());
++ /*
++ * Using raw_smp_processor_id() elides a preemptability check, but this
++ * is really indicative of a larger problem: the cacheinfo UABI assumes
+ * that cores have a homogeneous view of the cache hierarchy. That
++ * happens to be the case for the current set of RISC-V systems, but
++ * likely won't be true in general. Since there's no way to provide
++ * correct information for these systems via the current UABI we're
++ * just eliding the check for now.
++ */
++ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
+ struct cacheinfo *this_leaf;
+ int index;
+
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 835e45bb59c40..744f3209c48d0 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -155,6 +155,15 @@ skip_context_tracking:
+ tail do_trap_unknown
+
+ handle_syscall:
++#ifdef CONFIG_RISCV_M_MODE
++ /*
+ * When running in M-Mode (no MMU config), MPIE does not get set.
+ * As a result, we need to force-enable interrupts here because
+ * handle_exception did not set SR_IE, as it always sees SR_PIE
++ * being cleared.
++ */
++ csrs CSR_STATUS, SR_IE
++#endif
+ #if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
+ /* Recover a0 - a7 for system calls */
+ REG_L a0, PT_A0(sp)
+diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
+index 4d3a1048ad8b1..8a5cf99c07762 100644
+--- a/arch/riscv/kernel/time.c
++++ b/arch/riscv/kernel/time.c
+@@ -4,6 +4,7 @@
+ * Copyright (C) 2017 SiFive
+ */
+
++#include <linux/of_clk.h>
+ #include <linux/clocksource.h>
+ #include <linux/delay.h>
+ #include <asm/sbi.h>
+@@ -24,6 +25,8 @@ void __init time_init(void)
+ riscv_timebase = prop;
+
+ lpj_fine = riscv_timebase / HZ;
++
++ of_clk_init(NULL);
+ timer_probe();
+ }
+
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index e4133c20744ce..608082fb9a6c6 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -155,9 +155,10 @@ disable:
+ void __init setup_bootmem(void)
+ {
+ phys_addr_t mem_start = 0;
+- phys_addr_t start, end = 0;
++ phys_addr_t start, dram_end, end = 0;
+ phys_addr_t vmlinux_end = __pa_symbol(&_end);
+ phys_addr_t vmlinux_start = __pa_symbol(&_start);
++ phys_addr_t max_mapped_addr = __pa(~(ulong)0);
+ u64 i;
+
+ /* Find the memory region containing the kernel */
+@@ -179,7 +180,18 @@ void __init setup_bootmem(void)
+ /* Reserve from the start of the kernel to the end of the kernel */
+ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
+
+- max_pfn = PFN_DOWN(memblock_end_of_DRAM());
++ dram_end = memblock_end_of_DRAM();
++
++ /*
++ * memblock allocator is not aware of the fact that last 4K bytes of
++ * the addressable memory can not be mapped because of IS_ERR_VALUE
++ * macro. Make sure that last 4k bytes are not usable by memblock
++ * if end of dram is equal to maximum addressable memory.
++ */
++ if (max_mapped_addr == (dram_end - 1))
++ memblock_set_current_limit(max_mapped_addr - 4096);
++
++ max_pfn = PFN_DOWN(dram_end);
+ max_low_pfn = max_pfn;
+ set_max_mapnr(max_low_pfn);
+
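The IS_ERR_VALUE constraint the comment refers to comes from
include/linux/err.h, where error codes occupy the top 4095 values of the
address space, so a valid mapping in the last page would be indistinguishable
from an ERR_PTR():

    #define MAX_ERRNO       4095

    #define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)

Hence the clamp: when the end of DRAM coincides with the highest addressable
byte, memblock's allocation limit is pulled back by one page.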
+diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
+index 159da4ed578f2..b6f3d49991d37 100644
+--- a/arch/sh/Kconfig
++++ b/arch/sh/Kconfig
+@@ -30,7 +30,6 @@ config SUPERH
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
+- select HAVE_COPY_THREAD_TLS
+ select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DYNAMIC_FTRACE
+diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
+index d0de378beefe5..7d54f284ce10f 100644
+--- a/arch/sh/drivers/dma/Kconfig
++++ b/arch/sh/drivers/dma/Kconfig
+@@ -63,8 +63,7 @@ config PVR2_DMA
+
+ config G2_DMA
+ tristate "G2 Bus DMA support"
+- depends on SH_DREAMCAST
+- select SH_DMA_API
++ depends on SH_DREAMCAST && SH_DMA_API
+ help
+ This enables support for the DMA controller for the Dreamcast's
+ G2 bus. Drivers that want this will generally enable this on
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 870efeec8bdac..94c6e6330e043 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -73,10 +73,8 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
+ unsigned int nr)
+ {
+ if (likely(nr < IA32_NR_syscalls)) {
+- instrumentation_begin();
+ nr = array_index_nospec(nr, IA32_NR_syscalls);
+ regs->ax = ia32_sys_call_table[nr](regs);
+- instrumentation_end();
+ }
+ }
+
+@@ -91,8 +89,11 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
+ * or may not be necessary, but it matches the old asm behavior.
+ */
+ nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
++ instrumentation_begin();
+
+ do_syscall_32_irqs_on(regs, nr);
++
++ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+ }
+
+@@ -121,11 +122,12 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
+ res = get_user(*(u32 *)&regs->bp,
+ (u32 __user __force *)(unsigned long)(u32)regs->sp);
+ }
+- instrumentation_end();
+
+ if (res) {
+ /* User code screwed up. */
+ regs->ax = -EFAULT;
++
++ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+ return false;
+ }
+@@ -135,6 +137,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
+
+ /* Now this is just like a normal syscall. */
+ do_syscall_32_irqs_on(regs, nr);
++
++ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+ return true;
+ }
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index 6fb8cb7b9bcc6..6375967a8244d 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -16,6 +16,7 @@
+ #include <asm/hyperv-tlfs.h>
+ #include <asm/mshyperv.h>
+ #include <asm/idtentry.h>
++#include <linux/kexec.h>
+ #include <linux/version.h>
+ #include <linux/vmalloc.h>
+ #include <linux/mm.h>
+@@ -26,6 +27,8 @@
+ #include <linux/syscore_ops.h>
+ #include <clocksource/hyperv_timer.h>
+
++int hyperv_init_cpuhp;
++
+ void *hv_hypercall_pg;
+ EXPORT_SYMBOL_GPL(hv_hypercall_pg);
+
+@@ -424,6 +427,7 @@ void __init hyperv_init(void)
+
+ register_syscore_ops(&hv_syscore_ops);
+
++ hyperv_init_cpuhp = cpuhp;
+ return;
+
+ remove_cpuhp_state:
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index dcd9503b10983..38f4936045ab6 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -16,14 +16,25 @@
+ * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
+ * disables preemption so be careful if you intend to use it for long periods
+ * of time.
+- * If you intend to use the FPU in softirq you need to check first with
++ * If you intend to use the FPU in irq/softirq you need to check first with
+ * irq_fpu_usable() if it is possible.
+ */
+-extern void kernel_fpu_begin(void);
++
++/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
++#define KFPU_387 _BITUL(0) /* 387 state will be initialized */
++#define KFPU_MXCSR _BITUL(1) /* MXCSR will be initialized */
++
++extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
+ extern void kernel_fpu_end(void);
+ extern bool irq_fpu_usable(void);
+ extern void fpregs_mark_activate(void);
+
++/* Code that is unaware of kernel_fpu_begin_mask() can use this */
++static inline void kernel_fpu_begin(void)
++{
++ kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
++}
++
+ /*
+ * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
+ * A context switch will (and softirq might) save CPU's FPU registers to
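The split lets callers that touch only part of the FPU skip initialization
they do not need. A hypothetical MMX-only caller, for instance, wants the 387
state sanitized (a pending exception in FCW would raise #MF even on integer
MMX operations) but has no use for the ldmxcsr:

    static void mmx_only_user(void)
    {
            kernel_fpu_begin_mask(KFPU_387);  /* fninit only, no MXCSR setup */
            /* ... MMX instructions ... */
            kernel_fpu_end();
    }

The arch/x86/lib/mmx_32.c hunks later in this patch are exactly this
conversion; unconverted code keeps the old behavior through the
kernel_fpu_begin() wrapper above.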
+diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
+index ffc289992d1b0..30f76b9668579 100644
+--- a/arch/x86/include/asm/mshyperv.h
++++ b/arch/x86/include/asm/mshyperv.h
+@@ -74,6 +74,8 @@ static inline void hv_disable_stimer0_percpu_irq(int irq) {}
+
+
+ #if IS_ENABLED(CONFIG_HYPERV)
++extern int hyperv_init_cpuhp;
++
+ extern void *hv_hypercall_pg;
+ extern void __percpu **hyperv_pcpu_input_arg;
+
+diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
+index f4234575f3fdb..1f6caceccbb02 100644
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
+ #define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id)
+ #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+
++extern unsigned int __max_die_per_package;
++
+ #ifdef CONFIG_SMP
+ #define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
+ #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
+@@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
+ extern unsigned int __max_logical_packages;
+ #define topology_max_packages() (__max_logical_packages)
+
+-extern unsigned int __max_die_per_package;
+-
+ static inline int topology_max_die_per_package(void)
+ {
+ return __max_die_per_package;
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 2f1fbd8150af7..a2551b10780c6 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -569,12 +569,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ u32 ecx;
+
+ ecx = cpuid_ecx(0x8000001e);
+- nodes_per_socket = ((ecx >> 8) & 7) + 1;
++ __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
+ } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
+ u64 value;
+
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+- nodes_per_socket = ((value >> 3) & 7) + 1;
++ __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
+ }
+
+ if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 05ef1f4550cbd..6cc50ab07bded 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -135,14 +135,32 @@ static void hv_machine_shutdown(void)
+ {
+ if (kexec_in_progress && hv_kexec_handler)
+ hv_kexec_handler();
++
++ /*
++ * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
++ * corrupts the old VP Assist Pages and can crash the kexec kernel.
++ */
++ if (kexec_in_progress && hyperv_init_cpuhp > 0)
++ cpuhp_remove_state(hyperv_init_cpuhp);
++
++ /* The function calls stop_other_cpus(). */
+ native_machine_shutdown();
++
++ /* Disable the hypercall page when there is only 1 active CPU. */
++ if (kexec_in_progress)
++ hyperv_cleanup();
+ }
+
+ static void hv_machine_crash_shutdown(struct pt_regs *regs)
+ {
+ if (hv_crash_handler)
+ hv_crash_handler(regs);
++
++ /* The function calls crash_smp_send_stop(). */
+ native_machine_crash_shutdown(regs);
++
++ /* Disable the hypercall page when there is only 1 active CPU. */
++ hyperv_cleanup();
+ }
+ #endif /* CONFIG_KEXEC_CORE */
+ #endif /* CONFIG_HYPERV */
+diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
+index d3a0791bc052a..91288da295995 100644
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -25,10 +25,10 @@
+ #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
+ #define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
+
+-#ifdef CONFIG_SMP
+ unsigned int __max_die_per_package __read_mostly = 1;
+ EXPORT_SYMBOL(__max_die_per_package);
+
++#ifdef CONFIG_SMP
+ /*
+ * Check if given CPUID extended toplogy "leaf" is implemented
+ */
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index eb86a2b831b15..571220ac8beaa 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -121,7 +121,7 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
+ }
+ EXPORT_SYMBOL(copy_fpregs_to_fpstate);
+
+-void kernel_fpu_begin(void)
++void kernel_fpu_begin_mask(unsigned int kfpu_mask)
+ {
+ preempt_disable();
+
+@@ -141,13 +141,14 @@ void kernel_fpu_begin(void)
+ }
+ __cpu_invalidate_fpregs_state();
+
+- if (boot_cpu_has(X86_FEATURE_XMM))
++ /* Put sane initial values into the control registers. */
++ if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
+ ldmxcsr(MXCSR_DEFAULT);
+
+- if (boot_cpu_has(X86_FEATURE_FPU))
++ if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
+ asm volatile ("fninit");
+ }
+-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
++EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
+
+ void kernel_fpu_end(void)
+ {
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 84f581c91db45..098015b739993 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -665,17 +665,6 @@ static void __init trim_platform_memory_ranges(void)
+
+ static void __init trim_bios_range(void)
+ {
+- /*
+- * A special case is the first 4Kb of memory;
+- * This is a BIOS owned area, not kernel ram, but generally
+- * not listed as such in the E820 table.
+- *
+- * This typically reserves additional memory (64KiB by default)
+- * since some BIOSes are known to corrupt low memory. See the
+- * Kconfig help text for X86_RESERVE_LOW.
+- */
+- e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
+-
+ /*
+ * special case: Some BIOSes report the PC BIOS
+ * area (640Kb -> 1Mb) as RAM even though it is not.
+@@ -733,6 +722,15 @@ early_param("reservelow", parse_reservelow);
+
+ static void __init trim_low_memory_range(void)
+ {
++ /*
++ * A special case is the first 4Kb of memory;
++ * This is a BIOS owned area, not kernel ram, but generally
++ * not listed as such in the E820 table.
++ *
++ * This typically reserves additional memory (64KiB by default)
++ * since some BIOSes are known to corrupt low memory. See the
++ * Kconfig help text for X86_RESERVE_LOW.
++ */
+ memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
+ }
+
+diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
+index 0bd1a0fc587e0..84c1821819afb 100644
+--- a/arch/x86/kernel/sev-es.c
++++ b/arch/x86/kernel/sev-es.c
+@@ -225,7 +225,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
+ return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+ }
+
+-static inline void sev_es_wr_ghcb_msr(u64 val)
++static __always_inline void sev_es_wr_ghcb_msr(u64 val)
+ {
+ u32 low, high;
+
+@@ -286,6 +286,12 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
+ u16 d2;
+ u8 d1;
+
++ /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
++ if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
++ memcpy(dst, buf, size);
++ return ES_OK;
++ }
++
+ switch (size) {
+ case 1:
+ memcpy(&d1, buf, 1);
+@@ -335,6 +341,12 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ u16 d2;
+ u8 d1;
+
++ /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
++ if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
++ memcpy(buf, src, size);
++ return ES_OK;
++ }
++
+ switch (size) {
+ case 1:
+ if (get_user(d1, s))
+diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
+index 4321fa02e18df..419365c48b2ad 100644
+--- a/arch/x86/lib/mmx_32.c
++++ b/arch/x86/lib/mmx_32.c
+@@ -26,6 +26,16 @@
+ #include <asm/fpu/api.h>
+ #include <asm/asm.h>
+
++/*
++ * Use KFPU_387. MMX instructions are not affected by MXCSR,
++ * but both AMD and Intel documentation states that even integer MMX
++ * operations will result in #MF if an exception is pending in FCW.
++ *
++ * EMMS is not needed afterwards because, after calling kernel_fpu_end(),
++ * any subsequent user of the 387 stack will reinitialize it using
++ * KFPU_387.
++ */
++
+ void *_mmx_memcpy(void *to, const void *from, size_t len)
+ {
+ void *p;
+@@ -37,7 +47,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
+ p = to;
+ i = len >> 6; /* len/64 */
+
+- kernel_fpu_begin();
++ kernel_fpu_begin_mask(KFPU_387);
+
+ __asm__ __volatile__ (
+ "1: prefetch (%0)\n" /* This set is 28 bytes */
+@@ -127,7 +137,7 @@ static void fast_clear_page(void *page)
+ {
+ int i;
+
+- kernel_fpu_begin();
++ kernel_fpu_begin_mask(KFPU_387);
+
+ __asm__ __volatile__ (
+ " pxor %%mm0, %%mm0\n" : :
+@@ -160,7 +170,7 @@ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
+
+- kernel_fpu_begin();
++ kernel_fpu_begin_mask(KFPU_387);
+
+ /*
+ * maybe the prefetch stuff can go before the expensive fnsave...
+@@ -247,7 +257,7 @@ static void fast_clear_page(void *page)
+ {
+ int i;
+
+- kernel_fpu_begin();
++ kernel_fpu_begin_mask(KFPU_387);
+
+ __asm__ __volatile__ (
+ " pxor %%mm0, %%mm0\n" : :
+@@ -282,7 +292,7 @@ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
+
+- kernel_fpu_begin();
++ kernel_fpu_begin_mask(KFPU_387);
+
+ __asm__ __volatile__ (
+ "1: prefetch (%0)\n"
+diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
+index 9e87ab010c82b..ec50b7423a4c8 100644
+--- a/arch/x86/xen/enlighten_hvm.c
++++ b/arch/x86/xen/enlighten_hvm.c
+@@ -188,6 +188,8 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
+ return 0;
+ }
+
++static bool no_vector_callback __initdata;
++
+ static void __init xen_hvm_guest_init(void)
+ {
+ if (xen_pv_domain())
+@@ -207,7 +209,7 @@ static void __init xen_hvm_guest_init(void)
+
+ xen_panic_handler_init();
+
+- if (xen_feature(XENFEAT_hvm_callback_vector))
++ if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector))
+ xen_have_vector_callback = 1;
+
+ xen_hvm_smp_init();
+@@ -233,6 +235,13 @@ static __init int xen_parse_nopv(char *arg)
+ }
+ early_param("xen_nopv", xen_parse_nopv);
+
++static __init int xen_parse_no_vector_callback(char *arg)
++{
++ no_vector_callback = true;
++ return 0;
++}
++early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
++
+ bool __init xen_hvm_need_lapic(void)
+ {
+ if (xen_pv_domain())
+diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
+index f5e7db4f82abb..6ff3c887e0b99 100644
+--- a/arch/x86/xen/smp_hvm.c
++++ b/arch/x86/xen/smp_hvm.c
+@@ -33,9 +33,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
+ int cpu;
+
+ native_smp_prepare_cpus(max_cpus);
+- WARN_ON(xen_smp_intr_init(0));
+
+- xen_init_lock_cpu(0);
++ if (xen_have_vector_callback) {
++ WARN_ON(xen_smp_intr_init(0));
++ xen_init_lock_cpu(0);
++ }
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == 0)
+@@ -50,9 +52,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
+ static void xen_hvm_cpu_die(unsigned int cpu)
+ {
+ if (common_cpu_die(cpu) == 0) {
+- xen_smp_intr_free(cpu);
+- xen_uninit_lock_cpu(cpu);
+- xen_teardown_timer(cpu);
++ if (xen_have_vector_callback) {
++ xen_smp_intr_free(cpu);
++ xen_uninit_lock_cpu(cpu);
++ xen_teardown_timer(cpu);
++ }
+ }
+ }
+ #else
+@@ -64,14 +68,19 @@ static void xen_hvm_cpu_die(unsigned int cpu)
+
+ void __init xen_hvm_smp_init(void)
+ {
+- if (!xen_have_vector_callback)
++ smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
++ smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
++ smp_ops.smp_cpus_done = xen_smp_cpus_done;
++ smp_ops.cpu_die = xen_hvm_cpu_die;
++
++ if (!xen_have_vector_callback) {
++#ifdef CONFIG_PARAVIRT_SPINLOCKS
++ nopvspin = true;
++#endif
+ return;
++ }
+
+- smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+ smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
+- smp_ops.cpu_die = xen_hvm_cpu_die;
+ smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
+ smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
+- smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
+- smp_ops.smp_cpus_done = xen_smp_cpus_done;
+ }
+diff --git a/crypto/xor.c b/crypto/xor.c
+index eacbf4f939900..8f899f898ec9f 100644
+--- a/crypto/xor.c
++++ b/crypto/xor.c
+@@ -107,6 +107,8 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
+ preempt_enable();
+
+ // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
++ if (!min)
++ min = 1;
+ speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
+ tmpl->speed = speed;
+
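The clamp guards the division that immediately follows it: on a machine fast
enough, or a clock coarse enough, that the best benchmark pass measures as
0 ns, the speed computation would divide by zero. A stand-alone sketch of the
failure mode, with made-up REPS and BENCH_SIZE values:

    #include <stdio.h>

    int main(void)
    {
            const unsigned long long reps = 800, bench_size = 4096;
            unsigned int min_ns = 0;        /* a too-fast run rounds down to 0 ns */

            if (!min_ns)
                    min_ns = 1;             /* the patch's clamp */
            printf("%llu MB/s\n", 1000 * reps * bench_size / min_ns);
            return 0;
    }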
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index f23ef508fe88c..dca5cc423cd41 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -586,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
+ if (!device)
+ return -EINVAL;
+
++ *device = NULL;
++
+ status = acpi_get_data_full(handle, acpi_scan_drop_device,
+ (void **)device, callback);
+ if (ACPI_FAILURE(status) || !*device) {
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index a6187f6380d8d..96f73aaf71da3 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -115,6 +115,16 @@ int device_links_read_lock_held(void)
+ #endif
+ #endif /* !CONFIG_SRCU */
+
++static bool device_is_ancestor(struct device *dev, struct device *target)
++{
++ while (target->parent) {
++ target = target->parent;
++ if (dev == target)
++ return true;
++ }
++ return false;
++}
++
+ /**
+ * device_is_dependent - Check if one device depends on another one
+ * @dev: Device to check dependencies for.
+@@ -128,7 +138,12 @@ int device_is_dependent(struct device *dev, void *target)
+ struct device_link *link;
+ int ret;
+
+- if (dev == target)
++ /*
++ * The "ancestors" check is needed to catch the case when the target
++ * device has not been completely initialized yet and it is still
++ * missing from the list of children of its parent device.
++ */
++ if (dev == target || device_is_ancestor(dev, target))
+ return 1;
+
+ ret = device_for_each_child(dev, target, device_is_dependent);
+@@ -363,7 +378,9 @@ static int devlink_add_symlinks(struct device *dev,
+ struct device *con = link->consumer;
+ char *buf;
+
+- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
++ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
++ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
++ len += strlen(":");
+ len += strlen("supplier:") + 1;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+@@ -377,12 +394,12 @@ static int devlink_add_symlinks(struct device *dev,
+ if (ret)
+ goto err_con;
+
+- snprintf(buf, len, "consumer:%s", dev_name(con));
++ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
+ if (ret)
+ goto err_con_dev;
+
+- snprintf(buf, len, "supplier:%s", dev_name(sup));
++ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
+ if (ret)
+ goto err_sup_dev;
+@@ -390,7 +407,7 @@ static int devlink_add_symlinks(struct device *dev,
+ goto out;
+
+ err_sup_dev:
+- snprintf(buf, len, "consumer:%s", dev_name(con));
++ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ sysfs_remove_link(&sup->kobj, buf);
+ err_con_dev:
+ sysfs_remove_link(&link->link_dev.kobj, "consumer");
+@@ -413,7 +430,9 @@ static void devlink_remove_symlinks(struct device *dev,
+ sysfs_remove_link(&link->link_dev.kobj, "consumer");
+ sysfs_remove_link(&link->link_dev.kobj, "supplier");
+
+- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
++ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
++ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
++ len += strlen(":");
+ len += strlen("supplier:") + 1;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf) {
+@@ -421,9 +440,9 @@ static void devlink_remove_symlinks(struct device *dev,
+ return;
+ }
+
+- snprintf(buf, len, "supplier:%s", dev_name(sup));
++ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ sysfs_remove_link(&con->kobj, buf);
+- snprintf(buf, len, "consumer:%s", dev_name(con));
++ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ sysfs_remove_link(&sup->kobj, buf);
+ kfree(buf);
+ }
+@@ -633,8 +652,9 @@ struct device_link *device_link_add(struct device *consumer,
+
+ link->link_dev.class = &devlink_class;
+ device_set_pm_not_required(&link->link_dev);
+- dev_set_name(&link->link_dev, "%s--%s",
+- dev_name(supplier), dev_name(consumer));
++ dev_set_name(&link->link_dev, "%s:%s--%s:%s",
++ dev_bus_name(supplier), dev_name(supplier),
++ dev_bus_name(consumer), dev_name(consumer));
+ if (device_register(&link->link_dev)) {
+ put_device(consumer);
+ put_device(supplier);
+@@ -1652,9 +1672,7 @@ const char *dev_driver_string(const struct device *dev)
+ * never change once they are set, so they don't need special care.
+ */
+ drv = READ_ONCE(dev->driver);
+- return drv ? drv->name :
+- (dev->bus ? dev->bus->name :
+- (dev->class ? dev->class->name : ""));
++ return drv ? drv->name : dev_bus_name(dev);
+ }
+ EXPORT_SYMBOL(dev_driver_string);
+
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 148e81969e046..3c94ebc8d4bb0 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -612,6 +612,8 @@ dev_groups_failed:
+ else if (drv->remove)
+ drv->remove(dev);
+ probe_failed:
++ kfree(dev->dma_range_map);
++ dev->dma_range_map = NULL;
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
+index 37244a7e68c22..9cf249c344d9e 100644
+--- a/drivers/clk/tegra/clk-tegra30.c
++++ b/drivers/clk/tegra/clk-tegra30.c
+@@ -1256,6 +1256,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
+ { TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
++ { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
++ { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
+ /* must be the last entry */
+ { TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
+ };
+diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
+index a60aee1a1a291..65df9ef5b5bc0 100644
+--- a/drivers/counter/ti-eqep.c
++++ b/drivers/counter/ti-eqep.c
+@@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
+ return len;
+ }
+
+-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
+- struct counter_count *count,
+- void *ext_priv, char *buf)
+-{
+- struct ti_eqep_cnt *priv = counter->priv;
+- u32 qposinit;
+-
+- regmap_read(priv->regmap32, QPOSINIT, &qposinit);
+-
+- return sprintf(buf, "%u\n", qposinit);
+-}
+-
+-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
+- struct counter_count *count,
+- void *ext_priv, const char *buf,
+- size_t len)
+-{
+- struct ti_eqep_cnt *priv = counter->priv;
+- int err;
+- u32 res;
+-
+- err = kstrtouint(buf, 0, &res);
+- if (err < 0)
+- return err;
+-
+- regmap_write(priv->regmap32, QPOSINIT, res);
+-
+- return len;
+-}
+-
+ static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, char *buf)
+@@ -301,11 +271,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
+ .read = ti_eqep_position_ceiling_read,
+ .write = ti_eqep_position_ceiling_write,
+ },
+- {
+- .name = "floor",
+- .read = ti_eqep_position_floor_read,
+- .write = ti_eqep_position_floor_write,
+- },
+ {
+ .name = "enable",
+ .read = ti_eqep_position_enable_read,
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index 9d6645b1f0abe..ff5e85eefbf69 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -366,6 +366,7 @@ if CRYPTO_DEV_OMAP
+ config CRYPTO_DEV_OMAP_SHAM
+ tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
+ depends on ARCH_OMAP2PLUS
++ select CRYPTO_ENGINE
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ select CRYPTO_SHA256
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 5d4de5cd67595..f20ac3d694246 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -508,7 +508,8 @@ config GPIO_SAMA5D2_PIOBU
+
+ config GPIO_SIFIVE
+ bool "SiFive GPIO support"
+- depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
++ depends on OF_GPIO
++ select IRQ_DOMAIN_HIERARCHY
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select REGMAP_MMIO
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index e9faeaf65d14f..689c06cbbb457 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -1960,6 +1960,21 @@ struct gpio_chardev_data {
+ #endif
+ };
+
++static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
++{
++ struct gpio_device *gdev = cdev->gdev;
++ struct gpiochip_info chipinfo;
++
++ memset(&chipinfo, 0, sizeof(chipinfo));
++
++ strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
++ strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
++ chipinfo.lines = gdev->ngpio;
++ if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
++ return -EFAULT;
++ return 0;
++}
++
+ #ifdef CONFIG_GPIO_CDEV_V1
+ /*
+ * returns 0 if the versions match, else the previously selected ABI version
+@@ -1974,6 +1989,41 @@ static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
+
+ return abiv;
+ }
++
++static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
++ bool watch)
++{
++ struct gpio_desc *desc;
++ struct gpioline_info lineinfo;
++ struct gpio_v2_line_info lineinfo_v2;
++
++ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
++ return -EFAULT;
++
++ /* this doubles as a range check on line_offset */
++ desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
++ if (IS_ERR(desc))
++ return PTR_ERR(desc);
++
++ if (watch) {
++ if (lineinfo_ensure_abi_version(cdev, 1))
++ return -EPERM;
++
++ if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
++ return -EBUSY;
++ }
++
++ gpio_desc_to_lineinfo(desc, &lineinfo_v2);
++ gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
++
++ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
++ if (watch)
++ clear_bit(lineinfo.line_offset, cdev->watched_lines);
++ return -EFAULT;
++ }
++
++ return 0;
++}
+ #endif
+
+ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
+@@ -2011,6 +2061,22 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
+ return 0;
+ }
+
++static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
++{
++ __u32 offset;
++
++ if (copy_from_user(&offset, ip, sizeof(offset)))
++ return -EFAULT;
++
++ if (offset >= cdev->gdev->ngpio)
++ return -EINVAL;
++
++ if (!test_and_clear_bit(offset, cdev->watched_lines))
++ return -EBUSY;
++
++ return 0;
++}
++
+ /*
+ * gpio_ioctl() - ioctl handler for the GPIO chardev
+ */
+@@ -2018,80 +2084,24 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+ struct gpio_chardev_data *cdev = file->private_data;
+ struct gpio_device *gdev = cdev->gdev;
+- struct gpio_chip *gc = gdev->chip;
+ void __user *ip = (void __user *)arg;
+- __u32 offset;
+
+ /* We fail any subsequent ioctl():s when the chip is gone */
+- if (!gc)
++ if (!gdev->chip)
+ return -ENODEV;
+
+ /* Fill in the struct and pass to userspace */
+ if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
+- struct gpiochip_info chipinfo;
+-
+- memset(&chipinfo, 0, sizeof(chipinfo));
+-
+- strscpy(chipinfo.name, dev_name(&gdev->dev),
+- sizeof(chipinfo.name));
+- strscpy(chipinfo.label, gdev->label,
+- sizeof(chipinfo.label));
+- chipinfo.lines = gdev->ngpio;
+- if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+- return -EFAULT;
+- return 0;
++ return chipinfo_get(cdev, ip);
+ #ifdef CONFIG_GPIO_CDEV_V1
+- } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
+- struct gpio_desc *desc;
+- struct gpioline_info lineinfo;
+- struct gpio_v2_line_info lineinfo_v2;
+-
+- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+- return -EFAULT;
+-
+- /* this doubles as a range check on line_offset */
+- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
+- if (IS_ERR(desc))
+- return PTR_ERR(desc);
+-
+- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
+-
+- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
+- return -EFAULT;
+- return 0;
+ } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
+ return linehandle_create(gdev, ip);
+ } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
+ return lineevent_create(gdev, ip);
+- } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+- struct gpio_desc *desc;
+- struct gpioline_info lineinfo;
+- struct gpio_v2_line_info lineinfo_v2;
+-
+- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+- return -EFAULT;
+-
+- /* this doubles as a range check on line_offset */
+- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
+- if (IS_ERR(desc))
+- return PTR_ERR(desc);
+-
+- if (lineinfo_ensure_abi_version(cdev, 1))
+- return -EPERM;
+-
+- if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
+- return -EBUSY;
+-
+- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
+-
+- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+- clear_bit(lineinfo.line_offset, cdev->watched_lines);
+- return -EFAULT;
+- }
+-
+- return 0;
++ } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
++ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
++ return lineinfo_get_v1(cdev, ip,
++ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
+ #endif /* CONFIG_GPIO_CDEV_V1 */
+ } else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
+ cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
+@@ -2100,16 +2110,7 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ } else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
+ return linereq_create(gdev, ip);
+ } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
+- if (copy_from_user(&offset, ip, sizeof(offset)))
+- return -EFAULT;
+-
+- if (offset >= cdev->gdev->ngpio)
+- return -EINVAL;
+-
+- if (!test_and_clear_bit(offset, cdev->watched_lines))
+- return -EBUSY;
+-
+- return 0;
++ return lineinfo_unwatch(cdev, ip);
+ }
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 2ddbcfe0a72ff..76d10f1c579ba 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -80,7 +80,6 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+-MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
+
+ #define AMDGPU_RESUME_MS 2000
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+index 4137dc710aafd..7ad0434be293b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
++++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+@@ -47,7 +47,7 @@ enum psp_gfx_crtl_cmd_id
+ GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
+ GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
+ GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
+- GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
++ GFX_CTRL_CMD_ID_CONSUME_CMD = 0x00090000, /* send interrupt to psp for updating write pointer of vf */
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
+
+ GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index d7f67620f57ba..31d793ee0836e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -1034,11 +1034,14 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
+ (struct crat_subtype_iolink *)sub_type_hdr);
+ if (ret < 0)
+ return ret;
+- crat_table->length += (sub_type_hdr->length * entries);
+- crat_table->total_entries += entries;
+
+- sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+- sub_type_hdr->length * entries);
++ if (entries) {
++ crat_table->length += (sub_type_hdr->length * entries);
++ crat_table->total_entries += entries;
++
++ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
++ sub_type_hdr->length * entries);
++ }
+ #else
+ pr_info("IO link not available for non x86 platforms\n");
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+index d0699e98db929..e00a30e7d2529 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+@@ -113,7 +113,7 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
+ mutex_lock(&adev->dm.dc_lock);
+
+ /* Enable CRTC CRC generation if necessary. */
+- if (dm_is_crc_source_crtc(source)) {
++ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
+ if (!dc_stream_configure_crc(stream_state->ctx->dc,
+ stream_state, enable, enable)) {
+ ret = -EINVAL;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 462d3d981ea5e..0a01be38ee1b8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -608,8 +608,8 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .disable_pplib_clock_request = false,
+ .disable_pplib_wm_range = false,
+ .pplib_wm_report_mode = WM_REPORT_DEFAULT,
+- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+- .force_single_disp_pipe_split = true,
++ .pipe_split_policy = MPC_SPLIT_AVOID,
++ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .voltage_align_fclk = true,
+ .disable_stereo_support = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index d50a9c3706372..a92f6e4b2eb8f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -2520,8 +2520,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ * if this primary pipe has a bottom pipe in prev. state
+ * and if the bottom pipe is still available (which it should be),
+ * pick that pipe as secondary
+- * Same logic applies for ODM pipes. Since mpo is not allowed with odm
+- * check in else case.
++ * Same logic applies for ODM pipes
+ */
+ if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
+ preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
+@@ -2529,7 +2528,9 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ }
+- } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
++ }
++ if (secondary_pipe == NULL &&
++ dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+ preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index f9170b4b22e7e..8a871e5c3e26b 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -3007,7 +3007,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
+
+ ret = handle_conflicting_encoders(state, true);
+ if (ret)
+- return ret;
++ goto fail;
+
+ ret = drm_atomic_commit(state);
+
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index 6e74e6745ecae..3491460498491 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -388,19 +388,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
+ return -ENOENT;
+
+ *fence = drm_syncobj_fence_get(syncobj);
+- drm_syncobj_put(syncobj);
+
+ if (*fence) {
+ ret = dma_fence_chain_find_seqno(fence, point);
+ if (!ret)
+- return 0;
++ goto out;
+ dma_fence_put(*fence);
+ } else {
+ ret = -EINVAL;
+ }
+
+ if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+- return ret;
++ goto out;
+
+ memset(&wait, 0, sizeof(wait));
+ wait.task = current;
+@@ -432,6 +431,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
+ if (wait.node.next)
+ drm_syncobj_remove_wait(syncobj, &wait);
+
++out:
++ drm_syncobj_put(syncobj);
++
+ return ret;
+ }
+ EXPORT_SYMBOL(drm_syncobj_find_fence);
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index cdcb7b1034ae4..3f2bbd9370a86 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -3387,7 +3387,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ intel_ddi_init_dp_buf_reg(encoder);
+
+ if (!is_mst)
+- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
++ intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ /*
+@@ -3469,8 +3469,8 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
+
+ intel_ddi_init_dp_buf_reg(encoder);
+ if (!is_mst)
+- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+- intel_dp_configure_protocol_converter(intel_dp);
++ intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
++ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
+ true);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+@@ -3647,7 +3647,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
+- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
++ intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (is_mst) {
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 1901c88d418fa..1937b3d6342ae 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -3496,22 +3496,22 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ enable ? "enable" : "disable");
+ }
+
+-/* If the sink supports it, try to set the power state appropriately */
+-void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
++/* If the device supports it, try to set the power state appropriately */
++void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
+ {
+- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
++ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
++ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+- if (mode != DRM_MODE_DPMS_ON) {
++ if (mode != DP_SET_POWER_D0) {
+ if (downstream_hpd_needs_d0(intel_dp))
+ return;
+
+- ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+- DP_SET_POWER_D3);
++ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
+ } else {
+ struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
+
+@@ -3520,8 +3520,7 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+ * time to wake up.
+ */
+ for (i = 0; i < 3; i++) {
+- ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+- DP_SET_POWER_D0);
++ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
+ if (ret == 1)
+ break;
+ msleep(1);
+@@ -3532,8 +3531,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+ }
+
+ if (ret != 1)
+- drm_dbg_kms(&i915->drm, "failed to %s sink power state\n",
+- mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
++ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
++ encoder->base.base.id, encoder->base.name,
++ mode == DP_SET_POWER_D0 ? "D0" : "D3");
+ }
+
+ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+@@ -3707,7 +3707,7 @@ static void intel_disable_dp(struct intel_atomic_state *state,
+ * ensure that we have vdd while we switch off the panel. */
+ intel_edp_panel_vdd_on(intel_dp);
+ intel_edp_backlight_off(old_conn_state);
+- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
++ intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
+ intel_edp_panel_off(intel_dp);
+ }
+
+@@ -3856,7 +3856,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ }
+
+-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
++void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
++ const struct intel_crtc_state *crtc_state)
+ {
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 tmp;
+@@ -3875,8 +3876,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+ drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
+ enableddisabled(intel_dp->has_hdmi_sink));
+
+- tmp = intel_dp->dfp.ycbcr_444_to_420 ?
+- DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
++ tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
++ intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
+@@ -3929,8 +3930,8 @@ static void intel_enable_dp(struct intel_atomic_state *state,
+ lane_mask);
+ }
+
+- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+- intel_dp_configure_protocol_converter(intel_dp);
++ intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
++ intel_dp_configure_protocol_converter(intel_dp, pipe_config);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
+index 08a1c0aa8b94b..2dd934182471e 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.h
++++ b/drivers/gpu/drm/i915/display/intel_dp.h
+@@ -50,8 +50,9 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+ int link_rate, u8 lane_count);
+ int intel_dp_retrain_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
+-void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
++void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
++void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
++ const struct intel_crtc_state *crtc_state);
+ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index 64d885539e94a..5d745d9b99b2a 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -488,7 +488,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
+ intel_dp->active_mst_links);
+
+ if (first_mst_stream)
+- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
++ intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
+
+diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
+index 5492076d1ae09..17a8c2e73a820 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
++++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
+@@ -2187,6 +2187,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ if (content_protection_type_changed) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
++ drm_connector_get(&connector->base);
+ schedule_work(&hdcp->prop_work);
+ mutex_unlock(&hdcp->mutex);
+ }
+@@ -2198,6 +2199,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ desired_and_not_enabled =
+ hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ mutex_unlock(&hdcp->mutex);
++ /*
++		 * If HDCP is already ENABLED and the CP property is DESIRED, schedule
++		 * prop_work to report the correct CP property to user space.
++ */
++ if (!desired_and_not_enabled && !content_protection_type_changed) {
++ drm_connector_get(&connector->base);
++ schedule_work(&hdcp->prop_work);
++ }
+ }
+
+ if (desired_and_not_enabled || content_protection_type_changed)
+diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+index a24cc1ff08a0c..0625cbb3b4312 100644
+--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
++++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
+ return true;
+ }
+
+-static inline bool __request_completed(const struct i915_request *rq)
+-{
+- return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+-}
+-
+ __maybe_unused static bool
+ check_signal_order(struct intel_context *ce, struct i915_request *rq)
+ {
+@@ -257,7 +252,7 @@ static void signal_irq_work(struct irq_work *work)
+ list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
+ bool release;
+
+- if (!__request_completed(rq))
++ if (!__i915_request_is_complete(rq))
+ break;
+
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+@@ -379,7 +374,7 @@ static void insert_breadcrumb(struct i915_request *rq)
+ * straight onto a signaled list, and queue the irq worker for
+ * its signal completion.
+ */
+- if (__request_completed(rq)) {
++ if (__i915_request_is_complete(rq)) {
+ if (__signal_request(rq) &&
+ llist_add(&rq->signal_node, &b->signaled_requests))
+ irq_work_queue(&b->irq_work);
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index 724b2cb897d33..ee9b33c3aff83 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -3936,6 +3936,9 @@ err:
+ static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
+ {
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
++
++	/* Called on error unwind; clear all flags to prevent further use */
++ memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
+ }
+
+ typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
+diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
+index 7ea94d201fe6f..8015964043eb7 100644
+--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
++++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
+@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
+ struct intel_timeline_cacheline *cl =
+ container_of(rcu, typeof(*cl), rcu);
+
++	/* Must wait until all *rq->hwsp accesses are complete before removing */
++ i915_gem_object_unpin_map(cl->hwsp->vma->obj);
++ __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
++
+ i915_active_fini(&cl->active);
+ kfree(cl);
+ }
+@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
+ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
+ {
+ GEM_BUG_ON(!i915_active_is_idle(&cl->active));
+-
+- i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+- i915_vma_put(cl->hwsp->vma);
+- __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+-
+ call_rcu(&cl->rcu, __rcu_cacheline_free);
+ }
+
+@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
+ return ERR_CAST(vaddr);
+ }
+
+- i915_vma_get(hwsp->vma);
+ cl->hwsp = hwsp;
+ cl->vaddr = page_pack_bits(vaddr, cacheline);
+
+diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
+index 620b6fab2c5cf..92adfee30c7c0 100644
+--- a/drivers/gpu/drm/i915/i915_request.h
++++ b/drivers/gpu/drm/i915/i915_request.h
+@@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
+
+ static inline bool __i915_request_has_started(const struct i915_request *rq)
+ {
+- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
++ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
+ }
+
+ /**
+@@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
+ */
+ static inline bool i915_request_started(const struct i915_request *rq)
+ {
++ bool result;
++
+ if (i915_request_signaled(rq))
+ return true;
+
+- /* Remember: started but may have since been preempted! */
+- return __i915_request_has_started(rq);
++ result = true;
++ rcu_read_lock(); /* the HWSP may be freed at runtime */
++ if (likely(!i915_request_signaled(rq)))
++ /* Remember: started but may have since been preempted! */
++ result = __i915_request_has_started(rq);
++ rcu_read_unlock();
++
++ return result;
+ }
+
+ /**
+@@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
+ */
+ static inline bool i915_request_is_running(const struct i915_request *rq)
+ {
++ bool result;
++
+ if (!i915_request_is_active(rq))
+ return false;
+
+- return __i915_request_has_started(rq);
++ rcu_read_lock();
++ result = __i915_request_has_started(rq) && i915_request_is_active(rq);
++ rcu_read_unlock();
++
++ return result;
+ }
+
+ /**
+@@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
+ return !list_empty(&rq->sched.link);
+ }
+
++static inline bool __i915_request_is_complete(const struct i915_request *rq)
++{
++ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
++}
++
+ static inline bool i915_request_completed(const struct i915_request *rq)
+ {
++ bool result;
++
+ if (i915_request_signaled(rq))
+ return true;
+
+- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
++ result = true;
++ rcu_read_lock(); /* the HWSP may be freed at runtime */
++ if (likely(!i915_request_signaled(rq)))
++ result = __i915_request_is_complete(rq);
++ rcu_read_unlock();
++
++ return result;
+ }
+
+ static inline void i915_request_mark_complete(struct i915_request *rq)
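The i915_request.h hunks above move the raw __hwsp_seqno() reads under rcu_read_lock() because the hardware status page backing them can be freed at runtime. The completion test itself hinges on a wraparound-safe sequence comparison; the following is a minimal standalone C sketch of that comparison (mirroring what a helper like i915_seqno_passed() does; names and values here are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* Unsigned subtraction reinterpreted as signed keeps the
	 * ordering correct across u32 overflow. */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(5, 3));           /* 1 */
	printf("%d\n", seqno_passed(3, 5));           /* 0 */
	printf("%d\n", seqno_passed(2, 0xfffffff0u)); /* 1: seq1 wrapped past seq2 */
	return 0;
}
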
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 36d6b6093d16d..5b8cabb099eb1 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -221,7 +221,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
+
+ int
+ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+- const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
++ const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
+ struct nv50_dmac *dmac)
+ {
+ struct nouveau_cli *cli = (void *)device->object.client;
+@@ -270,7 +270,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+ if (ret)
+ return ret;
+
+- if (!syncbuf)
++ if (syncbuf < 0)
+ return 0;
+
+ ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
+index 92bddc0836171..38dec11e7dda5 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
+@@ -95,7 +95,7 @@ struct nv50_outp_atom {
+
+ int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+ const s32 *oclass, u8 head, void *data, u32 size,
+- u64 syncbuf, struct nv50_dmac *dmac);
++ s64 syncbuf, struct nv50_dmac *dmac);
+ void nv50_dmac_destroy(struct nv50_dmac *);
+
+ /*
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+index 685b708713242..b390029c69ec1 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+@@ -76,7 +76,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
+ int ret;
+
+ ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+- &oclass, 0, &args, sizeof(args), 0,
++ &oclass, 0, &args, sizeof(args), -1,
+ &wndw->wimm);
+ if (ret) {
+ NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
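The nouveau changes above widen syncbuf from u64 to s64 so that a negative value (the -1 now passed by wimmc37b) can mean "no sync buffer": offset 0 is itself a valid buffer offset, so the old !syncbuf test conflated the two. A hedged standalone sketch of the sentinel pattern (function and messages are illustrative):

#include <stdint.h>
#include <stdio.h>

/* 0 is a legal offset, so "absent" needs an out-of-band encoding --
 * here, any negative value. */
static void setup(int64_t syncbuf)
{
	if (syncbuf < 0) {
		printf("no sync buffer requested\n");
		return;
	}
	printf("sync buffer at offset %#llx\n", (unsigned long long)syncbuf);
}

int main(void)
{
	setup(0);  /* valid offset, previously misread as "absent" */
	setup(-1); /* explicit "no sync buffer" */
	return 0;
}
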
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+index 7deb81b6dbac6..4b571cc6bc70f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
+ nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
+ image.base, image.type, image.size);
+
+- if (!shadow_fetch(bios, mthd, image.size)) {
++ if (!shadow_fetch(bios, mthd, image.base + image.size)) {
+ nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+index edb6148cbca04..d0e80ad526845 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+@@ -33,7 +33,7 @@ static void
+ gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
+ {
+ struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
++ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
+ }
+
+ static int
+@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
+ AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
+ return -EBUSY;
+ }
+- } while (ctrl & 0x03010000);
++ } while (ctrl & 0x07010000);
+
+ /* set some magic, and wait up to 1ms for it to appear */
+- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
++ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
+ timeout = 1000;
+ do {
+ ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
+@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
+ gm200_i2c_aux_fini(aux);
+ return -EBUSY;
+ }
+- } while ((ctrl & 0x03000000) != urep);
++ } while ((ctrl & 0x07000000) != urep);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+index 2340040942c93..1115376bc85f5 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+@@ -22,6 +22,7 @@
+ * Authors: Ben Skeggs
+ */
+ #include "priv.h"
++#include <subdev/timer.h>
+
+ static void
+ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
+@@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
+ u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
+ u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
+ nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+- nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
+ }
+
+ static void
+@@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
+ u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
+ u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
+ nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+- nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
+ }
+
+ static void
+@@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
+ u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
+ u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
+ nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+- nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
+ }
+
+ void
+@@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
+ intr1 &= ~stat;
+ }
+ }
++
++ nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
++ nvkm_msec(device, 2000,
++ if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
++ break;
++ );
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+index f3915f85838ed..22e487b493ad1 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+@@ -22,6 +22,7 @@
+ * Authors: Ben Skeggs
+ */
+ #include "priv.h"
++#include <subdev/timer.h>
+
+ static void
+ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
+@@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
+ u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
+ u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
+ nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+- nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
+ }
+
+ static void
+@@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
+ u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
+ u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
+ nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+- nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
+ }
+
+ static void
+@@ -53,7 +52,6 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
+ u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
+ u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
+ nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+- nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
+ }
+
+ void
+@@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
+ intr1 &= ~stat;
+ }
+ }
++
++ nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
++ nvkm_msec(device, 2000,
++ if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
++ break;
++ );
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+index de91e9a261725..6d5212ae2fd57 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+@@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu)
+ {
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_mm *mm = &device->fb->ram->vram;
+- const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+- const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+- const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
++ const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
++ const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
++ const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
+ u8 heap = NVKM_MEM_VRAM;
+ int heapM, heapN, heapU;
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index afc178b0d89f4..eaba98e15de46 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -1268,6 +1268,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->name = vc4_hdmi->variant->card_name;
++ card->driver_name = "vc4-hdmi";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 612629678c845..9b56226ce0d1c 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -899,6 +899,7 @@ config HID_SONY
+ depends on NEW_LEDS
+ depends on LEDS_CLASS
+ select POWER_SUPPLY
++ select CRC32
+ help
+ Support for
+
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index f170feaac40ba..94180c63571ed 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -387,6 +387,7 @@
+ #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
+ #define USB_DEVICE_ID_HP_X2 0x074d
+ #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
++#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
+
+ #define USB_VENDOR_ID_ELECOM 0x056e
+ #define USB_DEVICE_ID_ELECOM_BM084 0x0061
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 4dca113924593..32024905fd70f 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -322,6 +322,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
+ HID_BATTERY_QUIRK_IGNORE },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
++ HID_BATTERY_QUIRK_IGNORE },
+ {}
+ };
+
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 1ffcfc9a1e033..45e7e0bdd382b 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -1869,6 +1869,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc531),
+ .driver_data = recvr_type_gaming_hidpp},
++ { /* Logitech G602 receiver (0xc537) */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
++ 0xc537),
++ .driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech lightspeed receiver (0xc539) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1),
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 0ca7231195473..74ebfb12c360e 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4051,6 +4051,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ { /* MX Master mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
++ { /* MX Ergo trackball over Bluetooth */
++ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* MX Master 3 mouse over Bluetooth */
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index d670bcd57bdef..0743ef51d3b24 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2054,6 +2054,10 @@ static const struct hid_device_id mt_devices[] = {
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_SYNAPTICS, 0xce09) },
++
+ /* TopSeed panels */
+ { .driver_data = MT_CLS_TOPSEED,
+ MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 4fad3e6745e53..a5a402e776c77 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2542,7 +2542,6 @@ static void hv_kexec_handler(void)
+ /* Make sure conn_state is set as hv_synic_cleanup checks for it */
+ mb();
+ cpuhp_remove_state(hyperv_cpuhp_online);
+- hyperv_cleanup();
+ };
+
+ static void hv_crash_handler(struct pt_regs *regs)
+@@ -2558,7 +2557,6 @@ static void hv_crash_handler(struct pt_regs *regs)
+ cpu = smp_processor_id();
+ hv_stimer_cleanup(cpu);
+ hv_synic_disable_regs(cpu);
+- hyperv_cleanup();
+ };
+
+ static int hv_synic_suspend(void)
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 52acd77438ede..251e75c9ba9d0 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -268,6 +268,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7aa6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
++ {
++ /* Alder Lake-P */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
+ {
+ /* Alder Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
+diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
+index 3e7df1c0477f7..81d7b21d31ec2 100644
+--- a/drivers/hwtracing/stm/heartbeat.c
++++ b/drivers/hwtracing/stm/heartbeat.c
+@@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
+
+ static int stm_heartbeat_init(void)
+ {
+- int i, ret = -ENOMEM;
++ int i, ret;
+
+ if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
+ return -EINVAL;
+@@ -72,8 +72,10 @@ static int stm_heartbeat_init(void)
+ for (i = 0; i < nr_devs; i++) {
+ stm_heartbeat[i].data.name =
+ kasprintf(GFP_KERNEL, "heartbeat.%d", i);
+- if (!stm_heartbeat[i].data.name)
++ if (!stm_heartbeat[i].data.name) {
++ ret = -ENOMEM;
+ goto fail_unregister;
++ }
+
+ stm_heartbeat[i].data.nr_chans = 1;
+ stm_heartbeat[i].data.link = stm_heartbeat_link;
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index a49e0ed4a599d..7e693dcbdd196 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -1012,6 +1012,7 @@ config I2C_SIRF
+ config I2C_SPRD
+ tristate "Spreadtrum I2C interface"
+ depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
++ depends on COMMON_CLK
+ help
+ If you say yes to this option, support will be included for the
+ Spreadtrum I2C interface.
+diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
+index d9607905dc2f1..845eda70b8cab 100644
+--- a/drivers/i2c/busses/i2c-octeon-core.c
++++ b/drivers/i2c/busses/i2c-octeon-core.c
+@@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
+ if (result)
+ return result;
+ if (recv_len && i == 0) {
+- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
++ if (data[i] > I2C_SMBUS_BLOCK_MAX)
+ return -EPROTO;
+ length += data[i];
+ }
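The octeon change above fixes an off-by-one in validating an SMBus block read: the first received byte is the byte count and must not exceed I2C_SMBUS_BLOCK_MAX (32), but the old "> I2C_SMBUS_BLOCK_MAX + 1" test let a count of 33 through. A standalone sketch of the corrected check (the helper name is illustrative):

#include <stdio.h>

#define I2C_SMBUS_BLOCK_MAX 32 /* SMBus limit for block transfers */
#define EPROTO 71

/* Validate the count byte that leads an SMBus block read. */
static int validate_block_count(unsigned int count)
{
	return count > I2C_SMBUS_BLOCK_MAX ? -EPROTO : 0;
}

int main(void)
{
	printf("%d\n", validate_block_count(32)); /* 0: accepted */
	printf("%d\n", validate_block_count(33)); /* -71: rejected; the old check allowed it */
	return 0;
}
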
+diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
+index ec7a7e917eddb..c0c7d01473f2b 100644
+--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
++++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
+@@ -80,7 +80,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
+ flags &= ~I2C_M_RECV_LEN;
+ }
+
+- return (flags != 0) ? -EINVAL : 0;
++ return 0;
+ }
+
+ /**
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 6f08c0c3238d5..0727383f49402 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -533,7 +533,7 @@ static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev,
+ void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg);
+ u32 val;
+
+- if (!i2c_dev->atomic_mode)
++ if (!i2c_dev->atomic_mode && !in_irq())
+ return readl_relaxed_poll_timeout(addr, val, !(val & mask),
+ delay_us, timeout_us);
+
+diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
+index b11c8c47ba2aa..e946903b09936 100644
+--- a/drivers/iio/adc/ti_am335x_adc.c
++++ b/drivers/iio/adc/ti_am335x_adc.c
+@@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
+ ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
+ flags, indio_dev->name, indio_dev);
+ if (ret)
+- goto error_kfifo_free;
++ return ret;
+
+ indio_dev->setup_ops = setup_ops;
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+
+ return 0;
+-
+-error_kfifo_free:
+- iio_kfifo_free(indio_dev->buffer);
+- return ret;
+ }
+
+ static const char * const chan_name_ain[] = {
+diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
+index 0507283bd4c1d..2dbd2646e44e9 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
++++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
+@@ -23,35 +23,31 @@
+ * @sdata: Sensor data.
+ *
+ * returns:
+- * 0 - no new samples available
+- * 1 - new samples available
+- * negative - error or unknown
++ * false - no new samples available or read error
++ * true - new samples available
+ */
+-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
+- struct st_sensor_data *sdata)
++static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
++ struct st_sensor_data *sdata)
+ {
+ int ret, status;
+
+ /* How would I know if I can't check it? */
+ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
+- return -EINVAL;
++ return true;
+
+ /* No scan mask, no interrupt */
+ if (!indio_dev->active_scan_mask)
+- return 0;
++ return false;
+
+ ret = regmap_read(sdata->regmap,
+ sdata->sensor_settings->drdy_irq.stat_drdy.addr,
+ &status);
+ if (ret < 0) {
+ dev_err(sdata->dev, "error checking samples available\n");
+- return ret;
++ return false;
+ }
+
+- if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
+- return 1;
+-
+- return 0;
++ return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
+ }
+
+ /**
+@@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+
+ /* Tell the interrupt handler that we're dealing with edges */
+ if (irq_trig == IRQF_TRIGGER_FALLING ||
+- irq_trig == IRQF_TRIGGER_RISING)
++ irq_trig == IRQF_TRIGGER_RISING) {
++ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
++ dev_err(&indio_dev->dev,
++ "edge IRQ not supported w/o stat register.\n");
++ err = -EOPNOTSUPP;
++ goto iio_trigger_free;
++ }
+ sdata->edge_irq = true;
+- else
++ } else {
+ /*
+ * If we're not using edges (i.e. level interrupts) we
+ * just mask off the IRQ, handle one interrupt, then
+@@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ * interrupt handler top half again and start over.
+ */
+ irq_trig |= IRQF_ONESHOT;
++ }
+
+ /*
+ * If the interrupt pin is Open Drain, by definition this
+diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
+index 28921b62e6420..e9297c25d4ef6 100644
+--- a/drivers/iio/dac/ad5504.c
++++ b/drivers/iio/dac/ad5504.c
+@@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
+ return ret;
+
+ if (pwr_down)
+- st->pwr_down_mask |= (1 << chan->channel);
+- else
+ st->pwr_down_mask &= ~(1 << chan->channel);
++ else
++ st->pwr_down_mask |= (1 << chan->channel);
+
+ ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
+ AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
+diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
+index 503fe54a0bb93..608ccb1d8bc82 100644
+--- a/drivers/iio/temperature/mlx90632.c
++++ b/drivers/iio/temperature/mlx90632.c
+@@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
+ if (ret < 0)
+ return ret;
+
++ /*
++	 * Give the mlx90632 some time to reset properly before sending a new I2C command;
++ * if this is not done, the following I2C command(s) will not be accepted.
++ */
++ usleep_range(150, 200);
++
+ ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
+ (MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
+ (MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
+diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
+index 7ec4af2ed87ab..35d1ec1095f9c 100644
+--- a/drivers/infiniband/core/cma_configfs.c
++++ b/drivers/infiniband/core/cma_configfs.c
+@@ -131,8 +131,10 @@ static ssize_t default_roce_mode_store(struct config_item *item,
+ return ret;
+
+ gid_type = ib_cache_gid_parse_type_str(buf);
+- if (gid_type < 0)
++ if (gid_type < 0) {
++ cma_configfs_params_put(cma_dev);
+ return -EINVAL;
++ }
+
+ ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type);
+
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index ffe2563ad3456..2cc785c1970b4 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -95,8 +95,6 @@ struct ucma_context {
+ u64 uid;
+
+ struct list_head list;
+- /* sync between removal event and id destroy, protected by file mut */
+- int destroying;
+ struct work_struct close_work;
+ };
+
+@@ -122,7 +120,7 @@ static DEFINE_XARRAY_ALLOC(ctx_table);
+ static DEFINE_XARRAY_ALLOC(multicast_table);
+
+ static const struct file_operations ucma_fops;
+-static int __destroy_id(struct ucma_context *ctx);
++static int ucma_destroy_private_ctx(struct ucma_context *ctx);
+
+ static inline struct ucma_context *_ucma_find_context(int id,
+ struct ucma_file *file)
+@@ -179,19 +177,14 @@ static void ucma_close_id(struct work_struct *work)
+
+ /* once all inflight tasks are finished, we close all underlying
+ 	 * resources. The context is still alive till its explicit destroying
+- * by its creator.
++ * by its creator. This puts back the xarray's reference.
+ */
+ ucma_put_ctx(ctx);
+ wait_for_completion(&ctx->comp);
+ /* No new events will be generated after destroying the id. */
+ rdma_destroy_id(ctx->cm_id);
+
+- /*
+- * At this point ctx->ref is zero so the only place the ctx can be is in
+- * a uevent or in __destroy_id(). Since the former doesn't touch
+- * ctx->cm_id and the latter sync cancels this, there is no races with
+- * this store.
+- */
++ /* Reading the cm_id without holding a positive ref is not allowed */
+ ctx->cm_id = NULL;
+ }
+
+@@ -204,7 +197,6 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
+ return NULL;
+
+ INIT_WORK(&ctx->close_work, ucma_close_id);
+- refcount_set(&ctx->ref, 1);
+ init_completion(&ctx->comp);
+ /* So list_del() will work if we don't do ucma_finish_ctx() */
+ INIT_LIST_HEAD(&ctx->list);
+@@ -218,6 +210,13 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
+ return ctx;
+ }
+
++static void ucma_set_ctx_cm_id(struct ucma_context *ctx,
++ struct rdma_cm_id *cm_id)
++{
++ refcount_set(&ctx->ref, 1);
++ ctx->cm_id = cm_id;
++}
++
+ static void ucma_finish_ctx(struct ucma_context *ctx)
+ {
+ lockdep_assert_held(&ctx->file->mut);
+@@ -303,7 +302,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
+ ctx = ucma_alloc_ctx(listen_ctx->file);
+ if (!ctx)
+ goto err_backlog;
+- ctx->cm_id = cm_id;
++ ucma_set_ctx_cm_id(ctx, cm_id);
+
+ uevent = ucma_create_uevent(listen_ctx, event);
+ if (!uevent)
+@@ -321,8 +320,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
+ return 0;
+
+ err_alloc:
+- xa_erase(&ctx_table, ctx->id);
+- kfree(ctx);
++ ucma_destroy_private_ctx(ctx);
+ err_backlog:
+ atomic_inc(&listen_ctx->backlog);
+ /* Returning error causes the new ID to be destroyed */
+@@ -356,8 +354,12 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
+ wake_up_interruptible(&ctx->file->poll_wait);
+ }
+
+- if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying)
+- queue_work(system_unbound_wq, &ctx->close_work);
++ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
++ xa_lock(&ctx_table);
++ if (xa_load(&ctx_table, ctx->id) == ctx)
++ queue_work(system_unbound_wq, &ctx->close_work);
++ xa_unlock(&ctx_table);
++ }
+ return 0;
+ }
+
+@@ -461,13 +463,12 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
+ ret = PTR_ERR(cm_id);
+ goto err1;
+ }
+- ctx->cm_id = cm_id;
++ ucma_set_ctx_cm_id(ctx, cm_id);
+
+ resp.id = ctx->id;
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
+ &resp, sizeof(resp))) {
+- xa_erase(&ctx_table, ctx->id);
+- __destroy_id(ctx);
++ ucma_destroy_private_ctx(ctx);
+ return -EFAULT;
+ }
+
+@@ -477,8 +478,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
+ return 0;
+
+ err1:
+- xa_erase(&ctx_table, ctx->id);
+- kfree(ctx);
++ ucma_destroy_private_ctx(ctx);
+ return ret;
+ }
+
+@@ -516,68 +516,73 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
+ rdma_unlock_handler(mc->ctx->cm_id);
+ }
+
+-/*
+- * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
+- * this point, no new events will be reported from the hardware. However, we
+- * still need to cleanup the UCMA context for this ID. Specifically, there
+- * might be events that have not yet been consumed by the user space software.
+- * mutex. After that we release them as needed.
+- */
+-static int ucma_free_ctx(struct ucma_context *ctx)
++static int ucma_cleanup_ctx_events(struct ucma_context *ctx)
+ {
+ int events_reported;
+ struct ucma_event *uevent, *tmp;
+ LIST_HEAD(list);
+
+- ucma_cleanup_multicast(ctx);
+-
+- /* Cleanup events not yet reported to the user. */
++	/* Cleanup events not yet reported to the user. */
+ mutex_lock(&ctx->file->mut);
+ list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
+- if (uevent->ctx == ctx || uevent->conn_req_ctx == ctx)
++ if (uevent->ctx != ctx)
++ continue;
++
++ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
++ xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id,
++ uevent->conn_req_ctx, XA_ZERO_ENTRY,
++ GFP_KERNEL) == uevent->conn_req_ctx) {
+ list_move_tail(&uevent->list, &list);
++ continue;
++ }
++ list_del(&uevent->list);
++ kfree(uevent);
+ }
+ list_del(&ctx->list);
+ events_reported = ctx->events_reported;
+ mutex_unlock(&ctx->file->mut);
+
+ /*
+- * If this was a listening ID then any connections spawned from it
+- * that have not been delivered to userspace are cleaned up too.
+- * Must be done outside any locks.
++ * If this was a listening ID then any connections spawned from it that
++ * have not been delivered to userspace are cleaned up too. Must be done
++ * outside any locks.
+ */
+ list_for_each_entry_safe(uevent, tmp, &list, list) {
+- list_del(&uevent->list);
+- if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
+- uevent->conn_req_ctx != ctx)
+- __destroy_id(uevent->conn_req_ctx);
++ ucma_destroy_private_ctx(uevent->conn_req_ctx);
+ kfree(uevent);
+ }
+-
+- mutex_destroy(&ctx->mutex);
+- kfree(ctx);
+ return events_reported;
+ }
+
+-static int __destroy_id(struct ucma_context *ctx)
++/*
++ * When this is called the xarray must have a XA_ZERO_ENTRY in the ctx->id (i.e.
++ * the ctx is not public to the user). This is either because:
++ * - ucma_finish_ctx() hasn't been called
++ * - xa_cmpxchg() succeeded in removing the entry (only one thread can succeed)
++ */
++static int ucma_destroy_private_ctx(struct ucma_context *ctx)
+ {
++ int events_reported;
++
+ /*
+- * If the refcount is already 0 then ucma_close_id() has already
+- * destroyed the cm_id, otherwise holding the refcount keeps cm_id
+- * valid. Prevent queue_work() from being called.
++ * Destroy the underlying cm_id. New work queuing is prevented now by
++	 * the removal from the xarray. Once the work is cancelled, ref will either
++	 * be 0 because the work ran to completion and consumed the ref from the
++	 * xarray, or it will be positive because we still have the ref from the
++	 * xarray. This can also be 0 in cases where cm_id was never set.
+ */
+- if (refcount_inc_not_zero(&ctx->ref)) {
+- rdma_lock_handler(ctx->cm_id);
+- ctx->destroying = 1;
+- rdma_unlock_handler(ctx->cm_id);
+- ucma_put_ctx(ctx);
+- }
+-
+ cancel_work_sync(&ctx->close_work);
+- /* At this point it's guaranteed that there is no inflight closing task */
+- if (ctx->cm_id)
++ if (refcount_read(&ctx->ref))
+ ucma_close_id(&ctx->close_work);
+- return ucma_free_ctx(ctx);
++
++ events_reported = ucma_cleanup_ctx_events(ctx);
++ ucma_cleanup_multicast(ctx);
++
++ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
++ GFP_KERNEL) != NULL);
++ mutex_destroy(&ctx->mutex);
++ kfree(ctx);
++ return events_reported;
+ }
+
+ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
+@@ -596,14 +601,17 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
+
+ xa_lock(&ctx_table);
+ ctx = _ucma_find_context(cmd.id, file);
+- if (!IS_ERR(ctx))
+- __xa_erase(&ctx_table, ctx->id);
++ if (!IS_ERR(ctx)) {
++ if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
++ GFP_KERNEL) != ctx)
++ ctx = ERR_PTR(-ENOENT);
++ }
+ xa_unlock(&ctx_table);
+
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+- resp.events_reported = __destroy_id(ctx);
++ resp.events_reported = ucma_destroy_private_ctx(ctx);
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
+ &resp, sizeof(resp)))
+ ret = -EFAULT;
+@@ -1777,15 +1785,16 @@ static int ucma_close(struct inode *inode, struct file *filp)
+ * prevented by this being a FD release function. The list_add_tail() in
+ * ucma_connect_event_handler() can run concurrently, however it only
+ * adds to the list *after* a listening ID. By only reading the first of
+- * the list, and relying on __destroy_id() to block
++ * the list, and relying on ucma_destroy_private_ctx() to block
+ * ucma_connect_event_handler(), no additional locking is needed.
+ */
+ while (!list_empty(&file->ctx_list)) {
+ struct ucma_context *ctx = list_first_entry(
+ &file->ctx_list, struct ucma_context, list);
+
+- xa_erase(&ctx_table, ctx->id);
+- __destroy_id(ctx);
++ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
++ GFP_KERNEL) != ctx);
++ ucma_destroy_private_ctx(ctx);
+ }
+ kfree(file);
+ return 0;
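The ucma rework above replaces the old destroying flag with a "tombstone" protocol: swapping the live ctx pointer for XA_ZERO_ENTRY via xa_cmpxchg() succeeds for exactly one caller, which then owns teardown. A standalone C11 sketch of that idea using a generic atomic slot (the names are illustrative, not the kernel xarray API):

#include <stdatomic.h>
#include <stdio.h>

static char tombstone_obj;
#define TOMBSTONE ((void *)&tombstone_obj)

static _Atomic(void *) slot;

/* Swap the expected live pointer for the tombstone; only one
 * concurrent caller can win and proceed with destruction. */
static int try_claim(void *expected)
{
	return atomic_compare_exchange_strong(&slot, &expected, TOMBSTONE);
}

int main(void)
{
	int ctx = 42;

	atomic_store(&slot, &ctx);
	printf("%d\n", try_claim(&ctx)); /* 1: this caller owns teardown */
	printf("%d\n", try_claim(&ctx)); /* 0: already tombstoned */
	return 0;
}
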
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index e9fecbdf391bc..5157ae29a4460 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -126,7 +126,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ */
+ if (mask)
+ pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
+- return rounddown_pow_of_two(pgsz_bitmap);
++ return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
+ }
+ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
+
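The ib_umem_find_best_pgsz() change above guards against pgsz_bitmap having been masked down to zero, since rounddown_pow_of_two() is undefined for 0 (it is built on ilog2()). A userspace sketch of the guarded call, with a naive stand-in for the kernel helper:

#include <stdio.h>

/* Naive stand-in for the kernel's rounddown_pow_of_two(): clear the
 * lowest set bit until one remains. Only defined for n > 0, which is
 * exactly why the caller must test first. */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
	while (n & (n - 1))
		n &= n - 1;
	return n;
}

static unsigned long best_pgsz(unsigned long pgsz_bitmap)
{
	return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}

int main(void)
{
	printf("%#lx\n", best_pgsz(0x5000)); /* 0x4000: largest supported size */
	printf("%#lx\n", best_pgsz(0));      /* 0: no usable page size */
	return 0;
}
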
+diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
+index ba43a15aefec0..d7768d3c6d8aa 100644
+--- a/drivers/interconnect/imx/imx8mq.c
++++ b/drivers/interconnect/imx/imx8mq.c
+@@ -7,6 +7,7 @@
+
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
++#include <linux/interconnect-provider.h>
+ #include <dt-bindings/interconnect/imx8mq.h>
+
+ #include "imx.h"
+@@ -94,6 +95,7 @@ static struct platform_driver imx8mq_icc_driver = {
+ .remove = imx8mq_icc_remove,
+ .driver = {
+ .name = "imx8mq-interconnect",
++ .sync_state = icc_sync_state,
+ },
+ };
+
+diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
+index 95d4fd8f7a968..0bbb0b2d0dd5f 100644
+--- a/drivers/irqchip/irq-mips-cpu.c
++++ b/drivers/irqchip/irq-mips-cpu.c
+@@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
+ if (ret)
+ return ret;
+
++ ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
++ &mips_mt_cpu_irq_controller,
++ NULL);
++
++ if (ret)
++ return ret;
++
+ ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
+ if (ret)
+ return ret;
+diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
+index c1bcac71008c6..28ddcaa5358b1 100644
+--- a/drivers/lightnvm/core.c
++++ b/drivers/lightnvm/core.c
+@@ -844,11 +844,10 @@ static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
+ rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
+
+ ret = nvm_submit_io_sync_raw(dev, &rqd);
++ __free_page(page);
+ if (ret)
+ return ret;
+
+- __free_page(page);
+-
+ return rqd.error;
+ }
+
+diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
+index 0e04d3718af3c..2cefb075b2b84 100644
+--- a/drivers/md/Kconfig
++++ b/drivers/md/Kconfig
+@@ -585,6 +585,7 @@ config DM_INTEGRITY
+ select BLK_DEV_INTEGRITY
+ select DM_BUFIO
+ select CRYPTO
++ select CRYPTO_SKCIPHER
+ select ASYNC_XOR
+ help
+ This device-mapper target emulates a block device that has
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 89de9cde02028..875823d6ee7e0 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1481,9 +1481,9 @@ static int crypt_alloc_req_skcipher(struct crypt_config *cc,
+ static int crypt_alloc_req_aead(struct crypt_config *cc,
+ struct convert_context *ctx)
+ {
+- if (!ctx->r.req) {
+- ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+- if (!ctx->r.req)
++ if (!ctx->r.req_aead) {
++ ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
++ if (!ctx->r.req_aead)
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 81df019ab284a..b64fede032dc5 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -257,8 +257,9 @@ struct dm_integrity_c {
+ bool journal_uptodate;
+ bool just_formatted;
+ bool recalculate_flag;
+- bool fix_padding;
+ bool discard;
++ bool fix_padding;
++ bool legacy_recalculate;
+
+ struct alg_spec internal_hash_alg;
+ struct alg_spec journal_crypt_alg;
+@@ -386,6 +387,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic)
+ return READ_ONCE(ic->failed);
+ }
+
++static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
++{
++ if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
++ !ic->legacy_recalculate)
++ return true;
++ return false;
++}
++
+ static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
+ unsigned j, unsigned char seq)
+ {
+@@ -3140,6 +3149,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ arg_count += !!ic->journal_crypt_alg.alg_string;
+ arg_count += !!ic->journal_mac_alg.alg_string;
+ arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
++ arg_count += ic->legacy_recalculate;
+ DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
+ ic->tag_size, ic->mode, arg_count);
+ if (ic->meta_dev)
+@@ -3163,6 +3173,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ }
+ if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
+ DMEMIT(" fix_padding");
++ if (ic->legacy_recalculate)
++ DMEMIT(" legacy_recalculate");
+
+ #define EMIT_ALG(a, n) \
+ do { \
+@@ -3792,7 +3804,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ unsigned extra_args;
+ struct dm_arg_set as;
+ static const struct dm_arg _args[] = {
+- {0, 15, "Invalid number of feature args"},
++ {0, 16, "Invalid number of feature args"},
+ };
+ unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
+ bool should_write_sb;
+@@ -3940,6 +3952,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ ic->discard = true;
+ } else if (!strcmp(opt_string, "fix_padding")) {
+ ic->fix_padding = true;
++ } else if (!strcmp(opt_string, "legacy_recalculate")) {
++ ic->legacy_recalculate = true;
+ } else {
+ r = -EINVAL;
+ ti->error = "Invalid argument";
+@@ -4235,6 +4249,20 @@ try_smaller_buffer:
+ r = -ENOMEM;
+ goto bad;
+ }
++ } else {
++ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
++ ti->error = "Recalculate can only be specified with internal_hash";
++ r = -EINVAL;
++ goto bad;
++ }
++ }
++
++ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
++ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
++ dm_integrity_disable_recalculate(ic)) {
++ ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
++ r = -EOPNOTSUPP;
++ goto bad;
+ }
+
+ ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 7eeb7c4169c94..09ded08cbb609 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -370,14 +370,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ {
+ int r;
+ dev_t dev;
++ unsigned int major, minor;
++ char dummy;
+ struct dm_dev_internal *dd;
+ struct dm_table *t = ti->table;
+
+ BUG_ON(!t);
+
+- dev = dm_get_dev_t(path);
+- if (!dev)
+- return -ENODEV;
++ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
++ /* Extract the major/minor numbers */
++ dev = MKDEV(major, minor);
++ if (MAJOR(dev) != major || MINOR(dev) != minor)
++ return -EOVERFLOW;
++ } else {
++ dev = dm_get_dev_t(path);
++ if (!dev)
++ return -ENODEV;
++ }
+
+ dd = find_device(&t->devices, dev);
+ if (!dd) {
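With the dm-table change above, dm_get_device() accepts a literal "major:minor" string directly. The "%u:%u%c" pattern with a required match count of exactly 2 rejects trailing characters, and the MKDEV/MAJOR/MINOR round-trip catches values too large for the dev_t encoding. A standalone sketch of the parse (helper name is illustrative):

#include <stdio.h>

/* Parse "major:minor", rejecting trailing garbage: sscanf() returning
 * 2 means both numbers matched and nothing was left over for the
 * trailing %c to consume. */
static int parse_devt(const char *path, unsigned int *major, unsigned int *minor)
{
	char dummy;

	if (sscanf(path, "%u:%u%c", major, minor, &dummy) != 2)
		return -1;
	return 0;
}

int main(void)
{
	unsigned int ma, mi;

	printf("%d\n", parse_devt("8:16", &ma, &mi));     /*  0 */
	printf("%d\n", parse_devt("8:16x", &ma, &mi));    /* -1: trailing char */
	printf("%d\n", parse_devt("/dev/sdb", &ma, &mi)); /* -1: not major:minor */
	return 0;
}
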
+diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
+index de7cb0369c308..002426e3cf76c 100644
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -384,8 +384,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+ "merging was advertised but not possible");
+ blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
+
+- if (mmc_card_mmc(card))
++ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
+ block_size = card->ext_csd.data_sector_size;
++ WARN_ON(block_size != 512 && block_size != 4096);
++ }
+
+ blk_queue_logical_block_size(mq->queue, block_size);
+ /*
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index bbf3496f44955..f9780c65ebe98 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -314,11 +314,7 @@ err_clk:
+
+ static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
+ {
+- int ret;
+-
+- ret = sdhci_pltfm_unregister(pdev);
+- if (ret)
+- dev_err(&pdev->dev, "failed to shutdown\n");
++ sdhci_pltfm_suspend(&pdev->dev);
+ }
+
+ MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
+diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
+index 4b673792b5a42..d90020ed36227 100644
+--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
++++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
+@@ -16,6 +16,8 @@
+
+ #include "sdhci-pltfm.h"
+
++#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16)
++
+ /* DWCMSHC specific Mode Select value */
+ #define DWCMSHC_CTRL_HS400 0x7
+
+@@ -49,6 +51,29 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
+ sdhci_adma_write_desc(host, desc, addr, len, cmd);
+ }
+
++static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
++ struct mmc_request *mrq)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++
++ /*
++	 * Whether V4 is enabled or not, the ARGUMENT2 register is a 32-bit
++	 * block count register, which doesn't support the stuff bits of the
++	 * CMD23 argument on the dwcmshc host controller.
++ */
++ if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
++ host->flags &= ~SDHCI_AUTO_CMD23;
++ else
++ host->flags |= SDHCI_AUTO_CMD23;
++}
++
++static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
++{
++ dwcmshc_check_auto_cmd23(mmc, mrq);
++
++ sdhci_request(mmc, mrq);
++}
++
+ static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+ {
+@@ -133,6 +158,8 @@ static int dwcmshc_probe(struct platform_device *pdev)
+
+ sdhci_get_of_property(pdev);
+
++ host->mmc_host_ops.request = dwcmshc_request;
++
+ err = sdhci_add_host(host);
+ if (err)
+ goto err_clk;
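The dwcmshc workaround above keys off GENMASK(31, 16): CMD23 carries its block count in bits 15:0, and any "stuff bits" set in the upper half cannot be programmed through the controller's 32-bit ARGUMENT2 block count register, so auto-CMD23 has to be dropped for that request. A hedged standalone rendering of the test (names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define ARG2_STUFF_MASK 0xffff0000u /* GENMASK(31, 16) */

/* Auto-CMD23 is usable only when the CMD23 argument fits entirely in
 * the block count field (bits 15:0). */
static int can_use_auto_cmd23(uint32_t sbc_arg)
{
	return (sbc_arg & ARG2_STUFF_MASK) == 0;
}

int main(void)
{
	printf("%d\n", can_use_auto_cmd23(0x00000200)); /* 1: plain block count */
	printf("%d\n", can_use_auto_cmd23(0x80000200)); /* 0: upper-half flag set */
	return 0;
}
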
+diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
+index 24c978de2a3f1..0e5234a5ca224 100644
+--- a/drivers/mmc/host/sdhci-xenon.c
++++ b/drivers/mmc/host/sdhci-xenon.c
+@@ -167,7 +167,12 @@ static void xenon_reset_exit(struct sdhci_host *host,
+ /* Disable tuning request and auto-retuning again */
+ xenon_retune_setup(host);
+
+- xenon_set_acg(host, true);
++ /*
++	 * The ACG should be turned off at early init time, in order
++	 * to avoid possible issues with the 1.8V regulator stabilization.
++	 * The feature is enabled at a later stage.
++ */
++ xenon_set_acg(host, false);
+
+ xenon_set_sdclk_off_idle(host, sdhc_id, false);
+
+diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+index 81028ba35f35d..31a6210eb5d44 100644
+--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+@@ -1613,7 +1613,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ /* Extract interleaved payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+- nand_extract_bits(buf, step * eccsize, tmp_buf,
++ nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
+ src_bit_off, eccsize * 8);
+ src_bit_off += eccsize * 8;
+
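The gpmi-nand fix above is a units bug: nand_extract_bits() addresses its destination in bits, so chunk "step" of eccsize bytes starts at step * eccsize * 8, not step * eccsize. A self-contained sketch of a bit-granular copy helper (a simplified stand-in, not the kernel implementation) showing the offsets in use:

#include <stdint.h>
#include <stdio.h>

/* Simplified bit-granular copy: both offsets and the length are in bits. */
static void extract_bits(uint8_t *dst, unsigned int dst_off,
			 const uint8_t *src, unsigned int src_off,
			 unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++) {
		unsigned int s = src_off + i, d = dst_off + i;

		if ((src[s / 8] >> (s % 8)) & 1)
			dst[d / 8] |= 1u << (d % 8);
	}
}

int main(void)
{
	const uint8_t src[] = { 0x11, 0x22, 0x33, 0x44 };
	uint8_t dst[4] = { 0 };
	unsigned int step = 1, eccsize = 2; /* copy chunk 1 of 2-byte chunks */

	extract_bits(dst, step * eccsize * 8, src, step * eccsize * 8, eccsize * 8);
	printf("%02x %02x %02x %02x\n", dst[0], dst[1], dst[2], dst[3]);
	/* prints: 00 00 33 44 -- the chunk landed at byte 2, as intended */
	return 0;
}
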
+diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
+index a8048cb8d2205..9a9f1c24d8321 100644
+--- a/drivers/mtd/nand/raw/nandsim.c
++++ b/drivers/mtd/nand/raw/nandsim.c
+@@ -2211,6 +2211,9 @@ static int ns_attach_chip(struct nand_chip *chip)
+ {
+ unsigned int eccsteps, eccbytes;
+
++ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
++ chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
++
+ if (!bch)
+ return 0;
+
+@@ -2234,8 +2237,6 @@ static int ns_attach_chip(struct nand_chip *chip)
+ return -EINVAL;
+ }
+
+- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+- chip->ecc.algo = NAND_ECC_ALGO_BCH;
+ chip->ecc.size = 512;
+ chip->ecc.strength = bch;
+ chip->ecc.bytes = eccbytes;
+@@ -2274,8 +2275,6 @@ static int __init ns_init_module(void)
+ nsmtd = nand_to_mtd(chip);
+ nand_set_controller_data(chip, (void *)ns);
+
+- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
+ /* and 'badblocks' parameters to work */
+ chip->options |= NAND_SKIP_BBTSCAN;
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 81e39d7507d8f..09879aea9f7cc 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -592,11 +592,11 @@ static void can_restart(struct net_device *dev)
+
+ cf->can_id |= CAN_ERR_RESTARTED;
+
+- netif_rx_ni(skb);
+-
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
++ netif_rx_ni(skb);
++
+ restart:
+ netdev_dbg(dev, "restarted\n");
+ priv->can_stats.restarts++;
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index d29d20525588c..d565922838186 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -512,11 +512,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
+ else
+ memcpy(cfd->data, rm->d, cfd->len);
+
+- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+-
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += cfd->len;
+
++ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
++
+ return 0;
+ }
+
+@@ -578,11 +578,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
+ if (!skb)
+ return -ENOMEM;
+
+- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+-
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += cf->can_dlc;
+
++ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
++
+ return 0;
+ }
+
+diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
+index d6ba9426be4de..b1baa4ac1d537 100644
+--- a/drivers/net/can/vxcan.c
++++ b/drivers/net/can/vxcan.c
+@@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct net_device *peer;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct net_device_stats *peerstats, *srcstats = &dev->stats;
++ u8 len;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+@@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
+ skb->dev = peer;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
++ len = cfd->len;
+ if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
+ srcstats->tx_packets++;
+- srcstats->tx_bytes += cfd->len;
++ srcstats->tx_bytes += len;
+ peerstats = &peer->stats;
+ peerstats->rx_packets++;
+- peerstats->rx_bytes += cfd->len;
++ peerstats->rx_bytes += len;
+ }
+
+ out_unlock:
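The three CAN hunks above (dev.c, pcan_usb_fd.c and vxcan.c) fix the same use-after-free shape: once netif_rx_ni() has queued the skb, both the skb and the CAN frame inside it may already be freed, so anything the statistics need must be read or updated first. A minimal sketch of the safe ordering, using only standard netdev types:

    /* Sketch: snapshot frame data before handing the skb off */
    static void demo_deliver(struct net_device *dev, struct sk_buff *skb)
    {
            struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
            u8 len = cfd->len;      /* cfd is dead once the skb is queued */

            dev->stats.rx_packets++;
            dev->stats.rx_bytes += len;

            netif_rx_ni(skb);       /* may free the skb immediately */
    }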
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 288b5a5c3e0db..95c7fa171e35a 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1404,7 +1404,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
+ return -EINVAL;
+
+- if (vlan->vid_end > dev->num_vlans)
++ if (vlan->vid_end >= dev->num_vlans)
+ return -ERANGE;
+
+ b53_enable_vlan(dev, true, ds->vlan_filtering);
+diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+index 1048509a849bc..0938caccc62ac 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
++++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+@@ -351,6 +351,10 @@ int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ if (err)
+ return err;
+
++ err = mv88e6185_g1_stu_data_read(chip, entry);
++ if (err)
++ return err;
++
+ /* VTU DBNum[3:0] are located in VTU Operation 3:0
+ * VTU DBNum[5:4] are located in VTU Operation 9:8
+ */
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index b1ae9eb8f2479..0404aafd5ce56 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -2503,8 +2503,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+ priv = netdev_priv(dev);
+
+ priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
+- if (IS_ERR(priv->clk))
+- return PTR_ERR(priv->clk);
++ if (IS_ERR(priv->clk)) {
++ ret = PTR_ERR(priv->clk);
++ goto err_free_netdev;
++ }
+
+ /* Allocate number of TX rings */
+ priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+index fa9152ff5e2a0..f4ecc755eaff1 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+@@ -454,6 +454,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
++ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
++ return -EPERM;
++
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
+@@ -470,6 +473,9 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
+ int rc = 0, i;
+ u64 cfg;
+
++ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
++ return -EPERM;
++
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rsp->hdr.rc = rc;
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index a53bd36b11c60..d4768dcb6c699 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -60,14 +60,27 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+ {
++ u32 cmd = ANA_TABLES_MACACCESS_VALID |
++ ANA_TABLES_MACACCESS_DEST_IDX(port) |
++ ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
++ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
++ unsigned int mc_ports;
++
++ /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
++ if (type == ENTRYTYPE_MACv4)
++ mc_ports = (mac[1] << 8) | mac[2];
++ else if (type == ENTRYTYPE_MACv6)
++ mc_ports = (mac[0] << 8) | mac[1];
++ else
++ mc_ports = 0;
++
++ if (mc_ports & BIT(ocelot->num_phys_ports))
++ cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
++
+ ocelot_mact_select(ocelot, mac, vid);
+
+ /* Issue a write command */
+- ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
+- ANA_TABLES_MACACCESS_DEST_IDX(port) |
+- ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+- ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
+- ANA_TABLES_MACACCESS);
++ ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
+
+ return ocelot_mact_wait_for_completion(ocelot);
+ }
+diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
+index b34da11acf65b..d60cd4326f4cd 100644
+--- a/drivers/net/ethernet/mscc/ocelot_net.c
++++ b/drivers/net/ethernet/mscc/ocelot_net.c
+@@ -952,10 +952,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
+
+- if (!ocelot_netdevice_dev_check(dev))
+- return 0;
+-
+ if (event == NETDEV_PRECHANGEUPPER &&
++ ocelot_netdevice_dev_check(dev) &&
+ netif_is_lag_master(info->upper_dev)) {
+ struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
+ struct netlink_ext_ack *extack;
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index c633046329352..d5d236d687e9e 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -2606,10 +2606,10 @@ static int sh_eth_close(struct net_device *ndev)
+ /* Free all the skbuffs in the Rx queue and the DMA buffer. */
+ sh_eth_ring_free(ndev);
+
+- pm_runtime_put_sync(&mdp->pdev->dev);
+-
+ mdp->is_opened = 0;
+
++ pm_runtime_put(&mdp->pdev->dev);
++
+ return 0;
+ }
+
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index a89d74c5cd1a7..77f615568194d 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -542,50 +542,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+ return true;
+ }
+
+-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
++static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
+ {
+- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
+- dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
++ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
++ dma_addr_t dma_addr = iod->first_dma;
+ int i;
+
+- if (iod->dma_len) {
+- dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
+- rq_dma_dir(req));
+- return;
++ for (i = 0; i < iod->npages; i++) {
++ __le64 *prp_list = nvme_pci_iod_list(req)[i];
++ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
++
++ dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
++ dma_addr = next_dma_addr;
+ }
+
+- WARN_ON_ONCE(!iod->nents);
++}
+
+- if (is_pci_p2pdma_page(sg_page(iod->sg)))
+- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+- rq_dma_dir(req));
+- else
+- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
++static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
++{
++ const int last_sg = SGES_PER_PAGE - 1;
++ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
++ dma_addr_t dma_addr = iod->first_dma;
++ int i;
+
++ for (i = 0; i < iod->npages; i++) {
++ struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
++ dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
+
+- if (iod->npages == 0)
+- dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+- dma_addr);
++ dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
++ dma_addr = next_dma_addr;
++ }
+
+- for (i = 0; i < iod->npages; i++) {
+- void *addr = nvme_pci_iod_list(req)[i];
++}
+
+- if (iod->use_sgl) {
+- struct nvme_sgl_desc *sg_list = addr;
++static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
++{
++ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+- next_dma_addr =
+- le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+- } else {
+- __le64 *prp_list = addr;
++ if (is_pci_p2pdma_page(sg_page(iod->sg)))
++ pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
++ rq_dma_dir(req));
++ else
++ dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
++}
+
+- next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+- }
++static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
++{
++ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+- dma_pool_free(dev->prp_page_pool, addr, dma_addr);
+- dma_addr = next_dma_addr;
++ if (iod->dma_len) {
++ dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
++ rq_dma_dir(req));
++ return;
+ }
+
++ WARN_ON_ONCE(!iod->nents);
++
++ nvme_unmap_sg(dev, req);
++ if (iod->npages == 0)
++ dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
++ iod->first_dma);
++ else if (iod->use_sgl)
++ nvme_free_sgls(dev, req);
++ else
++ nvme_free_prps(dev, req);
+ mempool_free(iod->sg, dev->iod_mempool);
+ }
+
+@@ -661,7 +682,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
+ __le64 *old_prp_list = prp_list;
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list)
+- return BLK_STS_RESOURCE;
++ goto free_prps;
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+@@ -681,14 +702,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+-
+ done:
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+-
+ return BLK_STS_OK;
+-
+- bad_sgl:
++free_prps:
++ nvme_free_prps(dev, req);
++ return BLK_STS_RESOURCE;
++bad_sgl:
+ WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+ "Invalid SGL for payload:%d nents:%d\n",
+ blk_rq_payload_bytes(req), iod->nents);
+@@ -760,7 +781,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list)
+- return BLK_STS_RESOURCE;
++ goto free_sgls;
+
+ i = 0;
+ nvme_pci_iod_list(req)[iod->npages++] = sg_list;
+@@ -773,6 +794,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
+ } while (--entries > 0);
+
+ return BLK_STS_OK;
++free_sgls:
++ nvme_free_sgls(dev, req);
++ return BLK_STS_RESOURCE;
+ }
+
+ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
+@@ -841,7 +865,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
+ iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
+ if (!iod->nents)
+- goto out;
++ goto out_free_sg;
+
+ if (is_pci_p2pdma_page(sg_page(iod->sg)))
+ nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
+@@ -850,16 +874,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
+ rq_dma_dir(req), DMA_ATTR_NO_WARN);
+ if (!nr_mapped)
+- goto out;
++ goto out_free_sg;
+
+ iod->use_sgl = nvme_pci_use_sgls(dev, req);
+ if (iod->use_sgl)
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
+ else
+ ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
+-out:
+ if (ret != BLK_STS_OK)
+- nvme_unmap_data(dev, req);
++ goto out_unmap_sg;
++ return BLK_STS_OK;
++
++out_unmap_sg:
++ nvme_unmap_sg(dev, req);
++out_free_sg:
++ mempool_free(iod->sg, dev->iod_mempool);
+ return ret;
+ }
+
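The nvme-pci rework above splits the old monolithic teardown into nvme_unmap_sg(), nvme_free_prps() and nvme_free_sgls() so each allocation path can unwind exactly what it built, instead of running the full nvme_unmap_data() on a half-constructed request. The resulting error handling is the standard kernel goto ladder; a generic sketch with hypothetical helpers:

    /* Sketch: each label undoes exactly one earlier step */
    static int demo_setup(struct device *dev, struct request *req)
    {
            int ret;

            ret = demo_map_sg(dev, req);            /* hypothetical step 1 */
            if (ret)
                    goto out_free_sg;

            ret = demo_build_descs(dev, req);       /* hypothetical step 2 */
            if (ret)
                    goto out_unmap_sg;

            return 0;

    out_unmap_sg:
            demo_unmap_sg(dev, req);        /* undo step 1 */
    out_free_sg:
            demo_free_sg(dev, req);         /* undo the initial allocation */
            return ret;
    }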
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+index 34803a6c76643..5c1a109842a76 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+@@ -347,7 +347,7 @@ FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
+
+ #define D22 40
+ SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
+-SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8));
++SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8));
+ PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8);
+ GROUP_DECL(PWM8G0, D22);
+
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+index 7e950f5d62d0f..7815426e7aeaa 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+@@ -926,6 +926,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
+ err = hw->soc->bias_set(hw, desc, pullup);
+ if (err)
+ return err;
++ } else if (hw->soc->bias_set_combo) {
++ err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
++ if (err)
++ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
+index 621909b01debd..033d142f0c272 100644
+--- a/drivers/pinctrl/pinctrl-ingenic.c
++++ b/drivers/pinctrl/pinctrl-ingenic.c
+@@ -2052,7 +2052,7 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
+ static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
+ u8 offset, int value)
+ {
+- if (jzgc->jzpc->info->version >= ID_JZ4760)
++ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
+@@ -2082,7 +2082,7 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
+ break;
+ }
+
+- if (jzgc->jzpc->info->version >= ID_JZ4760) {
++ if (jzgc->jzpc->info->version >= ID_JZ4770) {
+ reg1 = JZ4760_GPIO_PAT1;
+ reg2 = JZ4760_GPIO_PAT0;
+ } else {
+@@ -2122,7 +2122,7 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
+
+- if (jzgc->jzpc->info->version >= ID_JZ4760)
++ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
+@@ -2138,7 +2138,7 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
+
+ ingenic_gpio_irq_mask(irqd);
+
+- if (jzgc->jzpc->info->version >= ID_JZ4760)
++ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
+@@ -2163,7 +2163,7 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
+ irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
+ }
+
+- if (jzgc->jzpc->info->version >= ID_JZ4760)
++ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
+@@ -2220,7 +2220,7 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
+
+ chained_irq_enter(irq_chip, desc);
+
+- if (jzgc->jzpc->info->version >= ID_JZ4760)
++ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
+ else
+ flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
+@@ -2302,7 +2302,7 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+ struct ingenic_pinctrl *jzpc = jzgc->jzpc;
+ unsigned int pin = gc->base + offset;
+
+- if (jzpc->info->version >= ID_JZ4760) {
++ if (jzpc->info->version >= ID_JZ4770) {
+ if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
+ ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+ return GPIO_LINE_DIRECTION_IN;
+@@ -2360,7 +2360,7 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
+ ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_shadow_config_pin_load(jzpc, pin);
+- } else if (jzpc->info->version >= ID_JZ4760) {
++ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
+@@ -2368,7 +2368,7 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
+- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0);
++ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
+ }
+
+ return 0;
+@@ -2418,7 +2418,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
+ ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_shadow_config_pin_load(jzpc, pin);
+- } else if (jzpc->info->version >= ID_JZ4760) {
++ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+@@ -2448,7 +2448,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+ bool pull;
+
+- if (jzpc->info->version >= ID_JZ4760)
++ if (jzpc->info->version >= ID_JZ4770)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
+ else
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
+@@ -2498,7 +2498,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
+ REG_SET(X1830_GPIO_PEH), bias << idxh);
+ }
+
+- } else if (jzpc->info->version >= ID_JZ4760) {
++ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
+@@ -2508,7 +2508,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
+ static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, bool high)
+ {
+- if (jzpc->info->version >= ID_JZ4760)
++ if (jzpc->info->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+ else
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index 77a25bdf0da70..37526aa1fb2c4 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -51,6 +51,7 @@
+ * @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
+ * detection.
+ * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
++ * @disabled_for_mux: These IRQs were disabled because we muxed away.
+ * @soc: Reference to soc_data of platform specific data.
+ * @regs: Base addresses for the TLMM tiles.
+ * @phys_base: Physical base address
+@@ -72,6 +73,7 @@ struct msm_pinctrl {
+ DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
++ DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO);
+
+ const struct msm_pinctrl_soc_data *soc;
+ void __iomem *regs[MAX_NR_TILES];
+@@ -96,6 +98,14 @@ MSM_ACCESSOR(intr_cfg)
+ MSM_ACCESSOR(intr_status)
+ MSM_ACCESSOR(intr_target)
+
++static void msm_ack_intr_status(struct msm_pinctrl *pctrl,
++ const struct msm_pingroup *g)
++{
++ u32 val = g->intr_ack_high ? BIT(g->intr_status_bit) : 0;
++
++ msm_writel_intr_status(val, pctrl, g);
++}
++
+ static int msm_get_groups_count(struct pinctrl_dev *pctldev)
+ {
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+@@ -171,6 +181,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned group)
+ {
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
++ struct gpio_chip *gc = &pctrl->chip;
++ unsigned int irq = irq_find_mapping(gc->irq.domain, group);
++ struct irq_data *d = irq_get_irq_data(irq);
++ unsigned int gpio_func = pctrl->soc->gpio_func;
+ const struct msm_pingroup *g;
+ unsigned long flags;
+ u32 val, mask;
+@@ -187,6 +201,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ if (WARN_ON(i == g->nfuncs))
+ return -EINVAL;
+
++ /*
++ * If a GPIO interrupt is set up on this pin then we need special
++ * handling. Specifically, the interrupt detection logic will still
++ * see the pin twiddle even when we're muxed away.
++ *
++ * When we see a pin with an interrupt set up on it, we'll disable
++ * (mask) interrupts on it when we mux away, until we mux back. Note
++ * that disable_irq() refcounts, so interrupts stay disabled as long
++ * as at least one disable_irq() call is outstanding.
++ */
++ if (d && i != gpio_func &&
++ !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
++ disable_irq(irq);
++
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ val = msm_readl_ctl(pctrl, g);
+@@ -196,6 +224,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
++ if (d && i == gpio_func &&
++ test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux)) {
++ /*
++ * Clear interrupts detected while not GPIO since we only
++ * masked things.
++ */
++ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
++ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
++ else
++ msm_ack_intr_status(pctrl, g);
++
++ enable_irq(irq);
++ }
++
+ return 0;
+ }
+
+@@ -210,8 +252,7 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
+ if (!g->nfuncs)
+ return 0;
+
+- /* For now assume function 0 is GPIO because it always is */
+- return msm_pinmux_set_mux(pctldev, g->funcs[0], offset);
++ return msm_pinmux_set_mux(pctldev, g->funcs[pctrl->soc->gpio_func], offset);
+ }
+
+ static const struct pinmux_ops msm_pinmux_ops = {
+@@ -774,7 +815,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+-static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
++static void msm_gpio_irq_unmask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+@@ -792,17 +833,6 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+- if (status_clear) {
+- /*
+- * clear the interrupt status bit before unmask to avoid
+- * any erroneous interrupts that would have got latched
+- * when the interrupt is not in use.
+- */
+- val = msm_readl_intr_status(pctrl, g);
+- val &= ~BIT(g->intr_status_bit);
+- msm_writel_intr_status(val, pctrl, g);
+- }
+-
+ val = msm_readl_intr_cfg(pctrl, g);
+ val |= BIT(g->intr_raw_status_bit);
+ val |= BIT(g->intr_enable_bit);
+@@ -822,7 +852,7 @@ static void msm_gpio_irq_enable(struct irq_data *d)
+ irq_chip_enable_parent(d);
+
+ if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
+- msm_gpio_irq_clear_unmask(d, true);
++ msm_gpio_irq_unmask(d);
+ }
+
+ static void msm_gpio_irq_disable(struct irq_data *d)
+@@ -837,11 +867,6 @@ static void msm_gpio_irq_disable(struct irq_data *d)
+ msm_gpio_irq_mask(d);
+ }
+
+-static void msm_gpio_irq_unmask(struct irq_data *d)
+-{
+- msm_gpio_irq_clear_unmask(d, false);
+-}
+-
+ /**
+ * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
+ * @d: The irq dta.
+@@ -894,7 +919,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_pingroup *g;
+ unsigned long flags;
+- u32 val;
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
+ if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
+@@ -906,12 +930,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+- val = msm_readl_intr_status(pctrl, g);
+- if (g->intr_ack_high)
+- val |= BIT(g->intr_status_bit);
+- else
+- val &= ~BIT(g->intr_status_bit);
+- msm_writel_intr_status(val, pctrl, g);
++ msm_ack_intr_status(pctrl, g);
+
+ if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
+ msm_gpio_update_dual_edge_pos(pctrl, g, d);
+@@ -936,6 +955,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_pingroup *g;
+ unsigned long flags;
++ bool was_enabled;
+ u32 val;
+
+ if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
+@@ -997,6 +1017,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ * could cause the INTR_STATUS to be set for EDGE interrupts.
+ */
+ val = msm_readl_intr_cfg(pctrl, g);
++ was_enabled = val & BIT(g->intr_raw_status_bit);
+ val |= BIT(g->intr_raw_status_bit);
+ if (g->intr_detection_width == 2) {
+ val &= ~(3 << g->intr_detection_bit);
+@@ -1046,6 +1067,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ }
+ msm_writel_intr_cfg(val, pctrl, g);
+
++ /*
++ * The first time we set RAW_STATUS_EN it could trigger an interrupt.
++ * Clear the interrupt. This is safe because we have
++ * IRQCHIP_SET_TYPE_MASKED.
++ */
++ if (!was_enabled)
++ msm_ack_intr_status(pctrl, g);
++
+ if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
+ msm_gpio_update_dual_edge_pos(pctrl, g, d);
+
+@@ -1099,16 +1128,11 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
+ }
+
+ /*
+- * Clear the interrupt that may be pending before we enable
+- * the line.
+- * This is especially a problem with the GPIOs routed to the
+- * PDC. These GPIOs are direct-connect interrupts to the GIC.
+- * Disabling the interrupt line at the PDC does not prevent
+- * the interrupt from being latched at the GIC. The state at
+- * GIC needs to be cleared before enabling.
++ * The disable / clear-enable workaround we do in msm_pinmux_set_mux()
++ * only works if the disable is not lazy, since we only clear bogus
++ * interrupts in hardware. Explicitly mark the interrupt as UNLAZY.
+ */
+- if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+- irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
++ irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
+
+ return 0;
+ out:
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
+index 333f99243c43a..e31a5167c91ec 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.h
++++ b/drivers/pinctrl/qcom/pinctrl-msm.h
+@@ -118,6 +118,7 @@ struct msm_gpio_wakeirq_map {
+ * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
+ * to be aware that their parent can't handle dual
+ * edge interrupts.
++ * @gpio_func: Which function number is GPIO (usually 0).
+ */
+ struct msm_pinctrl_soc_data {
+ const struct pinctrl_pin_desc *pins;
+@@ -134,6 +135,7 @@ struct msm_pinctrl_soc_data {
+ const struct msm_gpio_wakeirq_map *wakeirq_map;
+ unsigned int nwakeirq_map;
+ bool wakeirq_dual_edge_errata;
++ unsigned int gpio_func;
+ };
+
+ extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
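The disabled_for_mux bitmap above keeps the disable_irq()/enable_irq() pair strictly balanced: disable_irq() is refcounted, so muxing away twice without the bitmap guard would leave the line disabled after a single mux back. A condensed sketch of just that pairing, with a hypothetical ack helper:

    /* Sketch: bitmap-guarded, balanced disable/enable across mux changes */
    static void demo_mux_change(unsigned int irq, unsigned long hwirq,
                                unsigned long *disabled_for_mux, bool to_gpio)
    {
            if (!to_gpio && !test_and_set_bit(hwirq, disabled_for_mux))
                    disable_irq(irq);       /* first mux-away only */

            if (to_gpio && test_and_clear_bit(hwirq, disabled_for_mux)) {
                    demo_ack_stale_irq(hwirq);      /* hypothetical: drop latched state */
                    enable_irq(irq);        /* matches the earlier disable */
            }
    }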
+diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
+index ecd477964d117..18bf8aeb5f870 100644
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -247,7 +247,8 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
+ ret = bios_return->return_code;
+
+ if (ret) {
+- if (ret != HPWMI_RET_UNKNOWN_CMDTYPE)
++ if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
++ ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+ pr_warn("query 0x%x returned error 0x%x\n", query, ret);
+ goto out_free;
+ }
+diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
+index 6acc8457866e1..d3b5afbe4833e 100644
+--- a/drivers/platform/x86/i2c-multi-instantiate.c
++++ b/drivers/platform/x86/i2c-multi-instantiate.c
+@@ -166,13 +166,29 @@ static const struct i2c_inst_data bsg2150_data[] = {
+ {}
+ };
+
+-static const struct i2c_inst_data int3515_data[] = {
+- { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+- { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+- { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+- { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+- {}
+-};
++/*
++ * Devices with _HID INT3515 (TI PD controllers) have some unresolved
++ * interrupt issues. The most common problem seen is an interrupt flood.
++ *
++ * There are at least two known causes. Firstly, on some boards, the
++ * I2CSerialBus resource index does not match the Interrupt resource, i.e. they
++ * are not one-to-one mapped like in the array below. Secondly, on some boards
++ * the IRQ line from the PD controller is not actually connected at all. But the
++ * interrupt flood is also seen on some boards where neither is the case, so
++ * there must be other problems as well.
++ *
++ * Because of the interrupt issues, the device is disabled for now. If
++ * you wish to debug them, uncomment the below and add an entry for the
++ * INT3515 device to the i2c_multi_inst_acpi_ids table.
++ *
++ * static const struct i2c_inst_data int3515_data[] = {
++ * { "tps6598x", IRQ_RESOURCE_APIC, 0 },
++ * { "tps6598x", IRQ_RESOURCE_APIC, 1 },
++ * { "tps6598x", IRQ_RESOURCE_APIC, 2 },
++ * { "tps6598x", IRQ_RESOURCE_APIC, 3 },
++ * { }
++ * };
++ */
+
+ /*
+ * Note new device-ids must also be added to i2c_multi_instantiate_ids in
+@@ -181,7 +197,6 @@ static const struct i2c_inst_data int3515_data[] = {
+ static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
+ { "BSG1160", (unsigned long)bsg1160_data },
+ { "BSG2150", (unsigned long)bsg2150_data },
+- { "INT3515", (unsigned long)int3515_data },
+ { }
+ };
+ MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 7598cd46cf606..5b81bafa5c165 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -92,6 +92,7 @@ struct ideapad_private {
+ struct dentry *debug;
+ unsigned long cfg;
+ bool has_hw_rfkill_switch;
++ bool has_touchpad_switch;
+ const char *fnesc_guid;
+ };
+
+@@ -535,7 +536,9 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
+ } else if (attr == &dev_attr_fn_lock.attr) {
+ supported = acpi_has_method(priv->adev->handle, "HALS") &&
+ acpi_has_method(priv->adev->handle, "SALS");
+- } else
++ } else if (attr == &dev_attr_touchpad.attr)
++ supported = priv->has_touchpad_switch;
++ else
+ supported = true;
+
+ return supported ? attr->mode : 0;
+@@ -867,6 +870,9 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
+ {
+ unsigned long value;
+
++ if (!priv->has_touchpad_switch)
++ return;
++
+ /* Without reading from EC touchpad LED doesn't switch state */
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
+ /* Some IdeaPads don't really turn off touchpad - they only
+@@ -989,6 +995,9 @@ static int ideapad_acpi_add(struct platform_device *pdev)
+ priv->platform_device = pdev;
+ priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
+
++ /* Most ideapads with an ELAN0634 touchpad don't use the EC touchpad switch */
++ priv->has_touchpad_switch = !acpi_dev_present("ELAN0634", NULL, -1);
++
+ ret = ideapad_sysfs_init(priv);
+ if (ret)
+ return ret;
+@@ -1006,6 +1015,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
+ if (!priv->has_hw_rfkill_switch)
+ write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
+
++ /* Do the same for the touchpad */
++ if (!priv->has_touchpad_switch)
++ write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1);
++
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+ if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
+ ideapad_register_rfkill(priv, i);
+diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
+index 3b49a1f4061bc..65fb3a3031470 100644
+--- a/drivers/platform/x86/intel-vbtn.c
++++ b/drivers/platform/x86/intel-vbtn.c
+@@ -204,12 +204,6 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
+ },
+ },
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+- },
+- },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 9ebeb031329d9..cc45cdac13844 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -8232,11 +8232,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+ goto out;
+ }
+
++ /* always store 64 bits regardless of addressing */
+ sense_ptr = (void *)cmd->frame + ioc->sense_off;
+- if (instance->consistent_mask_64bit)
+- put_unaligned_le64(sense_handle, sense_ptr);
+- else
+- put_unaligned_le32(sense_handle, sense_ptr);
++ put_unaligned_le64(sense_handle, sense_ptr);
+ }
+
+ /*
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index f5fc7f518f8af..47ad64b066236 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -2245,7 +2245,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
+ chap_name);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
++ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
+ chap_secret);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+@@ -2253,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
+ mchap_name);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
++ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
+ mchap_secret);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 24c0f7ec03511..4a08c450b756f 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -6740,7 +6740,7 @@ static int __init scsi_debug_init(void)
+ k = sdeb_zbc_model_str(sdeb_zbc_model_s);
+ if (k < 0) {
+ ret = k;
+- goto free_vm;
++ goto free_q_arr;
+ }
+ sdeb_zbc_model = k;
+ switch (sdeb_zbc_model) {
+@@ -6753,7 +6753,8 @@ static int __init scsi_debug_init(void)
+ break;
+ default:
+ pr_err("Invalid ZBC model\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto free_q_arr;
+ }
+ }
+ if (sdeb_zbc_model != BLK_ZONED_NONE) {
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 656bcf4940d6d..fedb89d4ac3f0 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -986,8 +986,10 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
+ }
+ }
+
+- if (sdp->no_write_same)
++ if (sdp->no_write_same) {
++ rq->rq_flags |= RQF_QUIET;
+ return BLK_STS_TARGET;
++ }
+
+ if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
+ return sd_setup_write_same16_cmnd(cmd, false);
+diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
+index dcdb4eb1f90ba..c339517b7a094 100644
+--- a/drivers/scsi/ufs/Kconfig
++++ b/drivers/scsi/ufs/Kconfig
+@@ -72,6 +72,7 @@ config SCSI_UFS_DWC_TC_PCI
+ config SCSI_UFSHCD_PLATFORM
+ tristate "Platform bus based UFS Controller support"
+ depends on SCSI_UFSHCD
++ depends on HAS_IOMEM
+ help
+ This selects the UFS host controller support. Select this if
+ you have an UFS controller on Platform bus.
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 7b9a9a771b11b..8132893284670 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -283,7 +283,8 @@ static inline void ufshcd_wb_config(struct ufs_hba *hba)
+ if (ret)
+ dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
+ __func__, ret);
+- ufshcd_wb_toggle_flush(hba, true);
++ if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
++ ufshcd_wb_toggle_flush(hba, true);
+ }
+
+ static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+@@ -4912,7 +4913,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ break;
+ } /* end of switch */
+
+- if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
++ if ((host_byte(result) != DID_OK) &&
++ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
+ ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
+ return result;
+ }
+@@ -5353,9 +5355,6 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+
+ static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+ {
+- if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
+- return;
+-
+ if (enable)
+ ufshcd_wb_buf_flush_enable(hba);
+ else
+@@ -6210,9 +6209,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ }
+
+- if (enabled_intr_status && retval == IRQ_NONE) {
+- dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+- __func__, intr_status);
++ if (enabled_intr_status && retval == IRQ_NONE &&
++ !ufshcd_eh_in_progress(hba)) {
++ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
++ __func__,
++ intr_status,
++ hba->ufs_stats.last_intr_status,
++ enabled_intr_status);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
+ }
+
+@@ -6256,7 +6259,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+ */
+- req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
++ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
++ if (IS_ERR(req))
++ return PTR_ERR(req);
++
+ req->end_io_data = &wait;
+ free_slot = req->tag;
+ WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
+@@ -6569,19 +6575,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
+ {
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+- unsigned int tag;
+ u32 pos;
+ int err;
+- u8 resp = 0xF;
+- struct ufshcd_lrb *lrbp;
++ u8 resp = 0xF, lun;
+ unsigned long flags;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+- tag = cmd->request->tag;
+
+- lrbp = &hba->lrb[tag];
+- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
++ lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
++ err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err)
+ err = resp;
+@@ -6590,7 +6593,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
+
+ /* clear the commands that were pending for corresponding LUN */
+ for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
+- if (hba->lrb[pos].lun == lrbp->lun) {
++ if (hba->lrb[pos].lun == lun) {
+ err = ufshcd_clear_cmd(hba, pos);
+ if (err)
+ break;
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 590e6d0722281..7d5814a95e1ed 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -562,8 +562,6 @@ tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
+
+ static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
+ {
+- if (tcmu_cmd->se_cmd)
+- tcmu_cmd->se_cmd->priv = NULL;
+ kfree(tcmu_cmd->dbi);
+ kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
+ }
+@@ -1188,11 +1186,12 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ mutex_lock(&udev->cmdr_lock);
+- se_cmd->priv = tcmu_cmd;
+ if (!(se_cmd->transport_state & CMD_T_ABORTED))
+ ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
+ if (ret < 0)
+ tcmu_free_cmd(tcmu_cmd);
++ else
++ se_cmd->priv = tcmu_cmd;
+ mutex_unlock(&udev->cmdr_lock);
+ return scsi_ret;
+ }
+@@ -1255,6 +1254,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
+
+ list_del_init(&cmd->queue_entry);
+ tcmu_free_cmd(cmd);
++ se_cmd->priv = NULL;
+ target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
+ unqueued = true;
+ }
+@@ -1346,6 +1346,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
+ }
+
+ done:
++ se_cmd->priv = NULL;
+ if (read_len_valid) {
+ pr_debug("read_len = %d\n", read_len);
+ target_complete_cmd_with_length(cmd->se_cmd,
+@@ -1492,6 +1493,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
+ se_cmd = cmd->se_cmd;
+ tcmu_free_cmd(cmd);
+
++ se_cmd->priv = NULL;
+ target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
+ }
+
+@@ -1606,6 +1608,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
+ * removed then LIO core will do the right thing and
+ * fail the retry.
+ */
++ tcmu_cmd->se_cmd->priv = NULL;
+ target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
+ tcmu_free_cmd(tcmu_cmd);
+ continue;
+@@ -1619,6 +1622,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
+ * Ignore scsi_ret for now. target_complete_cmd
+ * drops it.
+ */
++ tcmu_cmd->se_cmd->priv = NULL;
+ target_complete_cmd(tcmu_cmd->se_cmd,
+ SAM_STAT_CHECK_CONDITION);
+ tcmu_free_cmd(tcmu_cmd);
+@@ -2226,6 +2230,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
+ if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ WARN_ON(!cmd->se_cmd);
+ list_del_init(&cmd->queue_entry);
++ cmd->se_cmd->priv = NULL;
+ if (err_level == 1) {
+ /*
+ * Userspace was not able to start the
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 7e5e363152607..c2869489ba681 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -2079,9 +2079,6 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
+ return 0;
+ }
+
+-extern ssize_t redirected_tty_write(struct file *, const char __user *,
+- size_t, loff_t *);
+-
+ /**
+ * job_control - check job control
+ * @tty: tty
+@@ -2103,7 +2100,7 @@ static int job_control(struct tty_struct *tty, struct file *file)
+ /* NOTE: not yet done after every sleep pending a thorough
+ check of the logic of this change. -- jlc */
+ /* don't stop on /dev/console */
+- if (file->f_op->write == redirected_tty_write)
++ if (file->f_op->write_iter == redirected_tty_write)
+ return 0;
+
+ return __tty_check_change(tty, SIGTTIN);
+@@ -2307,7 +2304,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
+ ssize_t retval = 0;
+
+ /* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
+- if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
++ if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) {
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
+index 118b299122898..e0c00a1b07639 100644
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port)
+ (val & STAT_TX_RDY(port)), 1, 10000);
+ }
+
++static void wait_for_xmite(struct uart_port *port)
++{
++ u32 val;
++
++ readl_poll_timeout_atomic(port->membase + UART_STAT, val,
++ (val & STAT_TX_EMP), 1, 10000);
++}
++
+ static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
+ {
+ wait_for_xmitr(port);
+@@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
+
+ uart_console_write(port, s, count, mvebu_uart_console_putchar);
+
+- wait_for_xmitr(port);
++ wait_for_xmite(port);
+
+ if (ier)
+ writel(ier, port->membase + UART_CTRL(port));
+diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
+index 13eadcb8aec4e..214bf3086c68a 100644
+--- a/drivers/tty/serial/sifive.c
++++ b/drivers/tty/serial/sifive.c
+@@ -999,6 +999,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
+ /* Set up clock divider */
+ ssp->clkin_rate = clk_get_rate(ssp->clk);
+ ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
++ ssp->port.uartclk = ssp->baud_rate * 16;
+ __ssp_update_div(ssp);
+
+ platform_set_drvdata(pdev, ssp);
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 56ade99ef99f4..2f8223b2ffa45 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -143,12 +143,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
+ DEFINE_MUTEX(tty_mutex);
+
+ static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
+-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
+-ssize_t redirected_tty_write(struct file *, const char __user *,
+- size_t, loff_t *);
++static ssize_t tty_write(struct kiocb *, struct iov_iter *);
+ static __poll_t tty_poll(struct file *, poll_table *);
+ static int tty_open(struct inode *, struct file *);
+-long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ #ifdef CONFIG_COMPAT
+ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+@@ -438,8 +435,7 @@ static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
+ return 0;
+ }
+
+-static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
+- size_t count, loff_t *ppos)
++static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
+ {
+ return -EIO;
+ }
+@@ -478,7 +474,8 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
+ static const struct file_operations tty_fops = {
+ .llseek = no_llseek,
+ .read = tty_read,
+- .write = tty_write,
++ .write_iter = tty_write,
++ .splice_write = iter_file_splice_write,
+ .poll = tty_poll,
+ .unlocked_ioctl = tty_ioctl,
+ .compat_ioctl = tty_compat_ioctl,
+@@ -491,7 +488,8 @@ static const struct file_operations tty_fops = {
+ static const struct file_operations console_fops = {
+ .llseek = no_llseek,
+ .read = tty_read,
+- .write = redirected_tty_write,
++ .write_iter = redirected_tty_write,
++ .splice_write = iter_file_splice_write,
+ .poll = tty_poll,
+ .unlocked_ioctl = tty_ioctl,
+ .compat_ioctl = tty_compat_ioctl,
+@@ -503,7 +501,7 @@ static const struct file_operations console_fops = {
+ static const struct file_operations hung_up_tty_fops = {
+ .llseek = no_llseek,
+ .read = hung_up_tty_read,
+- .write = hung_up_tty_write,
++ .write_iter = hung_up_tty_write,
+ .poll = hung_up_tty_poll,
+ .unlocked_ioctl = hung_up_tty_ioctl,
+ .compat_ioctl = hung_up_tty_compat_ioctl,
+@@ -607,9 +605,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
+ /* This breaks for file handles being sent over AF_UNIX sockets ? */
+ list_for_each_entry(priv, &tty->tty_files, list) {
+ filp = priv->file;
+- if (filp->f_op->write == redirected_tty_write)
++ if (filp->f_op->write_iter == redirected_tty_write)
+ cons_filp = filp;
+- if (filp->f_op->write != tty_write)
++ if (filp->f_op->write_iter != tty_write)
+ continue;
+ closecount++;
+ __tty_fasync(-1, filp, 0); /* can't block */
+@@ -902,9 +900,9 @@ static inline ssize_t do_tty_write(
+ ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
+ struct tty_struct *tty,
+ struct file *file,
+- const char __user *buf,
+- size_t count)
++ struct iov_iter *from)
+ {
++ size_t count = iov_iter_count(from);
+ ssize_t ret, written = 0;
+ unsigned int chunk;
+
+@@ -956,14 +954,20 @@ static inline ssize_t do_tty_write(
+ size_t size = count;
+ if (size > chunk)
+ size = chunk;
++
+ ret = -EFAULT;
+- if (copy_from_user(tty->write_buf, buf, size))
++ if (copy_from_iter(tty->write_buf, size, from) != size)
+ break;
++
+ ret = write(tty, file, tty->write_buf, size);
+ if (ret <= 0)
+ break;
++
++ /* FIXME! Have Al check this! */
++ if (ret != size)
++ iov_iter_revert(from, size-ret);
++
+ written += ret;
+- buf += ret;
+ count -= ret;
+ if (!count)
+ break;
+@@ -1023,9 +1027,9 @@ void tty_write_message(struct tty_struct *tty, char *msg)
+ * write method will not be invoked in parallel for each device.
+ */
+
+-static ssize_t tty_write(struct file *file, const char __user *buf,
+- size_t count, loff_t *ppos)
++static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
+ {
++ struct file *file = iocb->ki_filp;
+ struct tty_struct *tty = file_tty(file);
+ struct tty_ldisc *ld;
+ ssize_t ret;
+@@ -1039,17 +1043,16 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
+ tty_err(tty, "missing write_room method\n");
+ ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+- return hung_up_tty_write(file, buf, count, ppos);
++ return hung_up_tty_write(iocb, from);
+ if (!ld->ops->write)
+ ret = -EIO;
+ else
+- ret = do_tty_write(ld->ops->write, tty, file, buf, count);
++ ret = do_tty_write(ld->ops->write, tty, file, from);
+ tty_ldisc_deref(ld);
+ return ret;
+ }
+
+-ssize_t redirected_tty_write(struct file *file, const char __user *buf,
+- size_t count, loff_t *ppos)
++ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
+ {
+ struct file *p = NULL;
+
+@@ -1060,11 +1063,11 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
+
+ if (p) {
+ ssize_t res;
+- res = vfs_write(p, buf, count, &p->f_pos);
++ res = vfs_iocb_iter_write(p, iocb, iter);
+ fput(p);
+ return res;
+ }
+- return tty_write(file, buf, count, ppos);
++ return tty_write(iocb, iter);
+ }
+
+ /**
+@@ -2293,7 +2296,7 @@ static int tioccons(struct file *file)
+ {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+- if (file->f_op->write == redirected_tty_write) {
++ if (file->f_op->write_iter == redirected_tty_write) {
+ struct file *f;
+ spin_lock(&redirect_lock);
+ f = redirect;
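The tty changes above are part of the tree-wide move from .write (raw user pointer plus length) to .write_iter (struct iov_iter): copy_from_iter() replaces copy_from_user(), and on a short write iov_iter_revert() hands the unconsumed bytes back so the iterator matches what was actually accepted. A minimal sketch of a .write_iter handler, with a hypothetical demo_send():

    /* Sketch of a .write_iter handler; demo_send() is hypothetical */
    static ssize_t demo_write_iter(struct kiocb *iocb, struct iov_iter *from)
    {
            char buf[256];
            size_t size = min(iov_iter_count(from), sizeof(buf));
            ssize_t sent;

            if (copy_from_iter(buf, size, from) != size)
                    return -EFAULT;

            sent = demo_send(iocb->ki_filp, buf, size);
            if (sent > 0 && sent != size)
                    iov_iter_revert(from, size - sent); /* give back the rest */

            return sent;
    }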
+diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
+index 54a2d70a9c730..7e728aab64755 100644
+--- a/drivers/usb/cdns3/cdns3-imx.c
++++ b/drivers/usb/cdns3/cdns3-imx.c
+@@ -184,7 +184,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
+ }
+
+ data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
+- data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
++ data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
++ sizeof(imx_cdns3_core_clks), GFP_KERNEL);
++ if (!data->clks)
++ return -ENOMEM;
++
+ ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
+ if (ret)
+ return ret;
+@@ -214,20 +218,11 @@ err:
+ return ret;
+ }
+
+-static int cdns_imx_remove_core(struct device *dev, void *data)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+-
+- platform_device_unregister(pdev);
+-
+- return 0;
+-}
+-
+ static int cdns_imx_remove(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+
+- device_for_each_child(dev, NULL, cdns_imx_remove_core);
++ of_platform_depopulate(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+index 0bd6b20435b8a..02d8bfae58fb1 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
++++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
+ u32 state, reg, loops;
+
+ /* Stop DMA activity */
+- writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
++ if (ep->epn.desc_mode)
++ writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
++ else
++ writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+
+ /* Wait for it to complete */
+ for (loops = 0; loops < 1000; loops++) {
+diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
+index 3e88c7670b2ed..fb01ff47b64cf 100644
+--- a/drivers/usb/gadget/udc/bdc/Kconfig
++++ b/drivers/usb/gadget/udc/bdc/Kconfig
+@@ -17,7 +17,7 @@ if USB_BDC_UDC
+ comment "Platform Support"
+ config USB_BDC_PCI
+ tristate "BDC support for PCIe based platforms"
+- depends on USB_PCI
++ depends on USB_PCI && BROKEN
+ default USB_BDC_UDC
+ help
+ Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index debf54205d22e..da691a69fec10 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -1532,10 +1532,13 @@ static ssize_t soft_connect_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+ {
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
++ ssize_t ret;
+
++ mutex_lock(&udc_lock);
+ if (!udc->driver) {
+ dev_err(dev, "soft-connect without a gadget driver\n");
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto out;
+ }
+
+ if (sysfs_streq(buf, "connect")) {
+@@ -1546,10 +1549,14 @@ static ssize_t soft_connect_store(struct device *dev,
+ usb_gadget_udc_stop(udc);
+ } else {
+ dev_err(dev, "unsupported command '%s'\n", buf);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+
+- return n;
++ ret = n;
++out:
++ mutex_unlock(&udc_lock);
++ return ret;
+ }
+ static DEVICE_ATTR_WO(soft_connect);
+
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 016937579ed97..17704ee2d7f54 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -2266,17 +2266,20 @@ static int dummy_hub_control(
+ }
+ fallthrough;
+ case USB_PORT_FEAT_RESET:
++ if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
++ break;
+ /* if it's already enabled, disable */
+ if (hcd->speed == HCD_USB3) {
+- dum_hcd->port_status = 0;
+ dum_hcd->port_status =
+ (USB_SS_PORT_STAT_POWER |
+ USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_RESET);
+- } else
++ } else {
+ dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_LOW_SPEED
+ | USB_PORT_STAT_HIGH_SPEED);
++ dum_hcd->port_status |= USB_PORT_STAT_RESET;
++ }
+ /*
+ * We want to reset device status. All but the
+ * Self powered feature
+@@ -2288,7 +2291,8 @@ static int dummy_hub_control(
+ * interval? Is it still 50msec as for HS?
+ */
+ dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
+- fallthrough;
++ set_link_state(dum_hcd);
++ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_ENABLE:
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 3575b72018810..b5db2b2d0901a 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
+ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
+ u32 temp;
+ u32 hcc_params;
++ int rc;
+
+ hcd->uses_new_polling = 1;
+
+@@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
+ down_write(&ehci_cf_port_reset_rwsem);
+ ehci->rh_state = EHCI_RH_RUNNING;
+ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
++
++ /* Wait until HC become operational */
+ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
+ msleep(5);
++ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
++
+ up_write(&ehci_cf_port_reset_rwsem);
++
++ if (rc) {
++ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
++ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
++ return rc;
++ }
++
+ ehci->last_periodic_enable = ktime_get_real();
+
+ temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 087402aec5cbe..9f9ab5ccea889 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+
+ unlink_empty_async_suspended(ehci);
+
++ /* Some Synopsys controllers mistakenly leave IAA turned on */
++ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
++
+ /* Any IAA cycle that started before the suspend is now invalid */
+ end_iaa_cycle(ehci);
+ ehci_handle_start_intr_unlinks(ehci);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 167dae117f738..db8612ec82d3e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2930,6 +2930,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ trb->field[0] = cpu_to_le32(field1);
+ trb->field[1] = cpu_to_le32(field2);
+ trb->field[2] = cpu_to_le32(field3);
++ /* make sure TRB is fully written before giving it to the controller */
++ wmb();
+ trb->field[3] = cpu_to_le32(field4);
+
+ trace_xhci_queue_trb(ring, trb);
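The wmb() added above enforces a producer/consumer protocol: field[3] carries the TRB's cycle bit, which is what tells the xHC the TRB is valid, so the first three words must be globally visible before that bit flips. The same shape shows up wherever a single ownership word publishes a descriptor to hardware; a generic sketch with a hypothetical layout:

    /* Sketch: publish a DMA descriptor, ownership word last */
    struct demo_desc {      /* hypothetical layout */
            __le32 addr_lo;
            __le32 addr_hi;
            __le32 len;
            __le32 flags;   /* holds the OWN/cycle bit */
    };

    static void demo_queue(struct demo_desc *d, u64 addr, u32 len, u32 flags)
    {
            d->addr_lo = cpu_to_le32(lower_32_bits(addr));
            d->addr_hi = cpu_to_le32(upper_32_bits(addr));
            d->len = cpu_to_le32(len);
            wmb();          /* body visible before the ownership bit flips */
            d->flags = cpu_to_le32(flags);
    }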
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index 934be16863523..50bb91b6a4b8d 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
+ enable);
+ if (err < 0)
+ break;
++
++ /*
++ * wait 500us for LFPS detector to be disabled before
++ * sending ACK
++ */
++ if (!enable)
++ usleep_range(500, 1000);
+ }
+
+ if (err < 0) {
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 6038c4c35db5a..bbebe248b7264 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -2010,16 +2010,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
+ .irq_ack = ack_dynirq,
+ };
+
+-int xen_set_callback_via(uint64_t via)
+-{
+- struct xen_hvm_param a;
+- a.domid = DOMID_SELF;
+- a.index = HVM_PARAM_CALLBACK_IRQ;
+- a.value = via;
+- return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+-}
+-EXPORT_SYMBOL_GPL(xen_set_callback_via);
+-
+ #ifdef CONFIG_XEN_PVHVM
+ /* Vector callbacks are better than PCI interrupts to receive event
+ * channel notifications because we can receive vector callbacks on any
+diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
+index dd911e1ff782c..9db557b76511b 100644
+--- a/drivers/xen/platform-pci.c
++++ b/drivers/xen/platform-pci.c
+@@ -149,7 +149,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
+ ret = gnttab_init();
+ if (ret)
+ goto grant_out;
+- xenbus_probe(NULL);
+ return 0;
+ grant_out:
+ gnttab_free_auto_xlat_frames();
+diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
+index 2a93b7c9c1599..dc15373354144 100644
+--- a/drivers/xen/xenbus/xenbus.h
++++ b/drivers/xen/xenbus/xenbus.h
+@@ -115,6 +115,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
+ const char *type,
+ const char *nodename);
+ int xenbus_probe_devices(struct xen_bus_type *bus);
++void xenbus_probe(void);
+
+ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
+
+diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
+index eb5151fc8efab..e5fda0256feb3 100644
+--- a/drivers/xen/xenbus/xenbus_comms.c
++++ b/drivers/xen/xenbus/xenbus_comms.c
+@@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex);
+ static int xenbus_irq;
+ static struct task_struct *xenbus_task;
+
+-static DECLARE_WORK(probe_work, xenbus_probe);
+-
+-
+ static irqreturn_t wake_waiting(int irq, void *unused)
+ {
+- if (unlikely(xenstored_ready == 0)) {
+- xenstored_ready = 1;
+- schedule_work(&probe_work);
+- }
+-
+ wake_up(&xb_waitq);
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 44634d970a5ca..c8f0282bb6497 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -683,29 +683,76 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
+ }
+ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
+
+-void xenbus_probe(struct work_struct *unused)
++void xenbus_probe(void)
+ {
+ xenstored_ready = 1;
+
++ /*
++ * In the HVM case, xenbus_init() deferred its call to
++ * xs_init() in case callbacks were not operational yet.
++ * So do it now.
++ */
++ if (xen_store_domain_type == XS_HVM)
++ xs_init();
++
+ /* Notify others that xenstore is up */
+ blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
+ }
+-EXPORT_SYMBOL_GPL(xenbus_probe);
+
+-static int __init xenbus_probe_initcall(void)
++/*
++ * Returns true when XenStore init must be deferred in order to
++ * allow the PCI platform device to be initialised, before we
++ * can actually have event channel interrupts working.
++ */
++static bool xs_hvm_defer_init_for_callback(void)
+ {
+- if (!xen_domain())
+- return -ENODEV;
++#ifdef CONFIG_XEN_PVHVM
++ return xen_store_domain_type == XS_HVM &&
++ !xen_have_vector_callback;
++#else
++ return false;
++#endif
++}
+
+- if (xen_initial_domain() || xen_hvm_domain())
+- return 0;
++static int __init xenbus_probe_initcall(void)
++{
++ /*
++ * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
++ * need to wait for the platform PCI device to come up.
++ */
++ if (xen_store_domain_type == XS_PV ||
++ (xen_store_domain_type == XS_HVM &&
++ !xs_hvm_defer_init_for_callback()))
++ xenbus_probe();
+
+- xenbus_probe(NULL);
+ return 0;
+ }
+-
+ device_initcall(xenbus_probe_initcall);
+
++int xen_set_callback_via(uint64_t via)
++{
++ struct xen_hvm_param a;
++ int ret;
++
++ a.domid = DOMID_SELF;
++ a.index = HVM_PARAM_CALLBACK_IRQ;
++ a.value = via;
++
++ ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
++ if (ret)
++ return ret;
++
++ /*
++ * If xenbus_probe_initcall() deferred the xenbus_probe()
++ * due to the callback not functioning yet, we can do it now.
++ */
++ if (!xenstored_ready && xs_hvm_defer_init_for_callback())
++ xenbus_probe();
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xen_set_callback_via);
++
+ /* Set up event channel for xenstored which is run as a local process
+ * (this is normally used only in dom0)
+ */
+@@ -818,11 +865,17 @@ static int __init xenbus_init(void)
+ break;
+ }
+
+- /* Initialize the interface to xenstore. */
+- err = xs_init();
+- if (err) {
+- pr_warn("Error initializing xenstore comms: %i\n", err);
+- goto out_error;
++ /*
++ * HVM domains may not have a functional callback yet. In that
++ * case let xs_init() be called from xenbus_probe(), which will
++ * get invoked at an appropriate time.
++ */
++ if (xen_store_domain_type != XS_HVM) {
++ err = xs_init();
++ if (err) {
++ pr_warn("Error initializing xenstore comms: %i\n", err);
++ goto out_error;
++ }
+ }
+
+ if ((xen_store_domain_type != XS_LOCAL) &&
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 771a036867dc0..553b4f6ec8639 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -3124,7 +3124,7 @@ void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
+ list_del_init(&lower->list);
+ if (lower == node)
+ node = NULL;
+- btrfs_backref_free_node(cache, lower);
++ btrfs_backref_drop_node(cache, lower);
+ }
+
+ btrfs_backref_cleanup_node(cache, node);
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 3ba6f3839d392..cef2f080fdcd5 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2687,7 +2687,8 @@ again:
+ * Go through delayed refs for all the stuff we've just kicked off
+ * and then loop back (just once)
+ */
+- ret = btrfs_run_delayed_refs(trans, 0);
++ if (!ret)
++ ret = btrfs_run_delayed_refs(trans, 0);
+ if (!ret && loops == 0) {
+ loops++;
+ spin_lock(&cur_trans->dirty_bgs_lock);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index af97ddcc6b3e8..56f3b9acd2154 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1482,7 +1482,7 @@ void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
+ root = list_first_entry(&fs_info->allocated_roots,
+ struct btrfs_root, leak_list);
+ btrfs_err(fs_info, "leaked root %s refcount %d",
+- btrfs_root_name(root->root_key.objectid, buf),
++ btrfs_root_name(&root->root_key, buf),
+ refcount_read(&root->refs));
+ while (refcount_read(&root->refs) > 1)
+ btrfs_put_root(root);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 4209dbd6286e4..8fba1c219b190 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5571,7 +5571,15 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
+ goto out_free;
+ }
+
+- trans = btrfs_start_transaction(tree_root, 0);
++ /*
++ * Use join to avoid potential EINTR from transaction
++ * start. See wait_reserve_ticket and the whole
++ * reservation callchain.
++ */
++ if (for_reloc)
++ trans = btrfs_join_transaction(tree_root);
++ else
++ trans = btrfs_start_transaction(tree_root, 0);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ goto out_free;
+diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
+index 7695c4783d33b..c62771f3af8c6 100644
+--- a/fs/btrfs/print-tree.c
++++ b/fs/btrfs/print-tree.c
+@@ -26,22 +26,22 @@ static const struct root_name_map root_map[] = {
+ { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" },
+ };
+
+-const char *btrfs_root_name(u64 objectid, char *buf)
++const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
+ {
+ int i;
+
+- if (objectid == BTRFS_TREE_RELOC_OBJECTID) {
++ if (key->objectid == BTRFS_TREE_RELOC_OBJECTID) {
+ snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN,
+- "TREE_RELOC offset=%llu", objectid);
++ "TREE_RELOC offset=%llu", key->offset);
+ return buf;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(root_map); i++) {
+- if (root_map[i].id == objectid)
++ if (root_map[i].id == key->objectid)
+ return root_map[i].name;
+ }
+
+- snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", objectid);
++ snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", key->objectid);
+ return buf;
+ }
+
+diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
+index 78b99385a503f..8c3e9319ec4ef 100644
+--- a/fs/btrfs/print-tree.h
++++ b/fs/btrfs/print-tree.h
+@@ -11,6 +11,6 @@
+
+ void btrfs_print_leaf(struct extent_buffer *l);
+ void btrfs_print_tree(struct extent_buffer *c, bool follow);
+-const char *btrfs_root_name(u64 objectid, char *buf);
++const char *btrfs_root_name(const struct btrfs_key *key, char *buf);
+
+ #endif
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 9e08ddb629685..9e5809118c34d 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -5512,6 +5512,21 @@ static int clone_range(struct send_ctx *sctx,
+ break;
+ offset += clone_len;
+ clone_root->offset += clone_len;
++
++ /*
++ * If we are cloning from the file we are currently processing,
++ * and using the send root as the clone root, we must stop once
++ * the current clone offset reaches the current eof of the file
++ * at the receiver, otherwise we would issue an invalid clone
++ * operation (source range going beyond eof) and cause the
++ * receiver to fail. So if we reach the current eof, bail out
++ * and fall back to a regular write.
++ */
++ if (clone_root->root == sctx->send_root &&
++ clone_root->ino == sctx->cur_ino &&
++ clone_root->offset >= sctx->cur_inode_next_write_offset)
++ break;
++
+ data_offset += clone_len;
+ next:
+ path->slots[0]++;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 78637665166e0..6311308b32beb 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4288,6 +4288,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
+ btrfs_warn(fs_info,
+ "balance: cannot set exclusive op status, resume manually");
+
++ btrfs_release_path(path);
++
+ mutex_lock(&fs_info->balance_mutex);
+ BUG_ON(fs_info->balance_ctl);
+ spin_lock(&fs_info->balance_lock);
+diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
+index 8bda092e60c5a..e027c718ca01a 100644
+--- a/fs/cachefiles/rdwr.c
++++ b/fs/cachefiles/rdwr.c
+@@ -413,7 +413,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
+
+ inode = d_backing_inode(object->backer);
+ ASSERT(S_ISREG(inode->i_mode));
+- ASSERT(inode->i_mapping->a_ops->readpages);
+
+ /* calculate the shift required to use bmap */
+ shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
+@@ -713,7 +712,6 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
+
+ inode = d_backing_inode(object->backer);
+ ASSERT(S_ISREG(inode->i_mode));
+- ASSERT(inode->i_mapping->a_ops->readpages);
+
+ /* calculate the shift required to use bmap */
+ shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 36b2ece434037..b1c2f416b9bd9 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -338,7 +338,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ if (ssocket == NULL)
+ return -EAGAIN;
+
+- if (signal_pending(current)) {
++ if (fatal_signal_pending(current)) {
+ cifs_dbg(FYI, "signal pending before send request\n");
+ return -ERESTARTSYS;
+ }
+@@ -429,7 +429,7 @@ unmask:
+
+ if (signal_pending(current) && (total_len != send_length)) {
+ cifs_dbg(FYI, "signal is pending after attempt to send\n");
+- rc = -EINTR;
++ rc = -ERESTARTSYS;
+ }
+
+ /* uncork it */
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index e6005c78bfa93..90dddb507e4af 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1474,21 +1474,25 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+ }
+
+ /*
+- * Some filesystems may redirty the inode during the writeback
+- * due to delalloc, clear dirty metadata flags right before
+- * write_inode()
++ * If the inode has dirty timestamps and we need to write them, call
++ * mark_inode_dirty_sync() to notify the filesystem about it and to
++ * change I_DIRTY_TIME into I_DIRTY_SYNC.
+ */
+- spin_lock(&inode->i_lock);
+-
+- dirty = inode->i_state & I_DIRTY;
+ if ((inode->i_state & I_DIRTY_TIME) &&
+- ((dirty & I_DIRTY_INODE) ||
+- wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
++ (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
+ time_after(jiffies, inode->dirtied_time_when +
+ dirtytime_expire_interval * HZ))) {
+- dirty |= I_DIRTY_TIME;
+ trace_writeback_lazytime(inode);
++ mark_inode_dirty_sync(inode);
+ }
++
++ /*
++ * Some filesystems may redirty the inode during the writeback
++ * due to delalloc, clear dirty metadata flags right before
++ * write_inode()
++ */
++ spin_lock(&inode->i_lock);
++ dirty = inode->i_state & I_DIRTY;
+ inode->i_state &= ~dirty;
+
+ /*
+@@ -1509,8 +1513,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+
+ spin_unlock(&inode->i_lock);
+
+- if (dirty & I_DIRTY_TIME)
+- mark_inode_dirty_sync(inode);
+ /* Don't write the inode if only I_DIRTY_PAGES was set */
+ if (dirty & ~I_DIRTY_PAGES) {
+ int err = write_inode(inode, wbc);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 265aea2cd7bc8..8cb0db187d90f 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -353,6 +353,7 @@ struct io_ring_ctx {
+ unsigned cq_entries;
+ unsigned cq_mask;
+ atomic_t cq_timeouts;
++ unsigned cq_last_tm_flush;
+ unsigned long cq_check_overflow;
+ struct wait_queue_head cq_wait;
+ struct fasync_struct *cq_fasync;
+@@ -1521,19 +1522,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
+
+ static void io_flush_timeouts(struct io_ring_ctx *ctx)
+ {
+- while (!list_empty(&ctx->timeout_list)) {
++ u32 seq;
++
++ if (list_empty(&ctx->timeout_list))
++ return;
++
++ seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
++
++ do {
++ u32 events_needed, events_got;
+ struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
+ struct io_kiocb, timeout.list);
+
+ if (io_is_timeout_noseq(req))
+ break;
+- if (req->timeout.target_seq != ctx->cached_cq_tail
+- - atomic_read(&ctx->cq_timeouts))
++
++ /*
++ * Since seq can easily wrap around over time, subtract
++ * the last seq at which timeouts were flushed before comparing.
++ * Assuming not more than 2^31-1 events have happened since,
++ * these subtractions won't have wrapped, so we can check if
++ * target is in [last_seq, current_seq] by comparing the two.
++ */
++ events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
++ events_got = seq - ctx->cq_last_tm_flush;
++ if (events_got < events_needed)
+ break;
+
+ list_del_init(&req->timeout.list);
+ io_kill_timeout(req);
+- }
++ } while (!list_empty(&ctx->timeout_list));
++
++ ctx->cq_last_tm_flush = seq;
+ }
+
+ static void io_commit_cqring(struct io_ring_ctx *ctx)
+@@ -2147,6 +2167,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
+ struct io_uring_task *tctx = rb->task->io_uring;
+
+ percpu_counter_sub(&tctx->inflight, rb->task_refs);
++ if (atomic_read(&tctx->in_idle))
++ wake_up(&tctx->wait);
+ put_task_struct_many(rb->task, rb->task_refs);
+ rb->task = NULL;
+ }
+@@ -2166,6 +2188,8 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
+ struct io_uring_task *tctx = rb->task->io_uring;
+
+ percpu_counter_sub(&tctx->inflight, rb->task_refs);
++ if (atomic_read(&tctx->in_idle))
++ wake_up(&tctx->wait);
+ put_task_struct_many(rb->task, rb->task_refs);
+ }
+ rb->task = req->task;
+@@ -3437,7 +3461,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
+
+ /* read it all, or we did blocking attempt. no retry. */
+ if (!iov_iter_count(iter) || !force_nonblock ||
+- (req->file->f_flags & O_NONBLOCK))
++ (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
+ goto done;
+
+ io_size -= ret;
+@@ -4226,7 +4250,6 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ * io_wq_work.flags, so initialize io_wq_work firstly.
+ */
+ io_req_init_async(req);
+- req->work.flags |= IO_WQ_WORK_NO_CANCEL;
+
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
+@@ -4259,6 +4282,8 @@ static int io_close(struct io_kiocb *req, bool force_nonblock,
+
+ /* if the file has a flush method, be safe and punt to async */
+ if (close->put_file->f_op->flush && force_nonblock) {
++ /* not safe to cancel at this point */
++ req->work.flags |= IO_WQ_WORK_NO_CANCEL;
+ /* was never set, but play safe */
+ req->flags &= ~REQ_F_NOWAIT;
+ /* avoid grabbing files - we don't need the files */
+@@ -5582,6 +5607,12 @@ static int io_timeout(struct io_kiocb *req)
+ tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+ req->timeout.target_seq = tail + off;
+
++ /* Update the last seq here in case io_flush_timeouts() hasn't.
++ * This is safe because ->completion_lock is held, and submissions
++ * and completions are never mixed in the same ->completion_lock section.
++ */
++ ctx->cq_last_tm_flush = tail;
++
+ /*
+ * Insertion sort, ensuring the first entry in the list is always
+ * the one we need first.
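The comparison introduced in io_flush_timeouts() above relies on 32-bit serial-number arithmetic: distances are measured from the last flush point, so the test stays correct even when the raw counters wrap, provided fewer than 2^31 events separate the values. A standalone sketch of the same window test (names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* true iff target lies within (last, now] in wrap-around arithmetic */
    static bool seq_in_window(uint32_t target, uint32_t last, uint32_t now)
    {
            return (uint32_t)(target - last) <= (uint32_t)(now - last);
    }

For example, with last = 0xfffffff0, now = 0x10 (wrapped) and target = 0x4: target - last = 0x14 and now - last = 0x20, so the target is correctly reported as reached despite the wrap.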
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index f277d023ebcd1..c757193121475 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -14,6 +14,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/sched/mm.h>
+ #include <linux/fsnotify.h>
++#include <linux/uio.h>
+
+ #include "kernfs-internal.h"
+
+@@ -180,11 +181,10 @@ static const struct seq_operations kernfs_seq_ops = {
+ * it difficult to use seq_file. Implement simplistic custom buffering for
+ * bin files.
+ */
+-static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
+- char __user *user_buf, size_t count,
+- loff_t *ppos)
++static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ {
+- ssize_t len = min_t(size_t, count, PAGE_SIZE);
++ struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
++ ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
+ const struct kernfs_ops *ops;
+ char *buf;
+
+@@ -210,7 +210,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
+ of->event = atomic_read(&of->kn->attr.open->event);
+ ops = kernfs_ops(of->kn);
+ if (ops->read)
+- len = ops->read(of, buf, len, *ppos);
++ len = ops->read(of, buf, len, iocb->ki_pos);
+ else
+ len = -EINVAL;
+
+@@ -220,12 +220,12 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
+ if (len < 0)
+ goto out_free;
+
+- if (copy_to_user(user_buf, buf, len)) {
++ if (copy_to_iter(buf, len, iter) != len) {
+ len = -EFAULT;
+ goto out_free;
+ }
+
+- *ppos += len;
++ iocb->ki_pos += len;
+
+ out_free:
+ if (buf == of->prealloc_buf)
+@@ -235,31 +235,14 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
+ return len;
+ }
+
+-/**
+- * kernfs_fop_read - kernfs vfs read callback
+- * @file: file pointer
+- * @user_buf: data to write
+- * @count: number of bytes
+- * @ppos: starting offset
+- */
+-static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
+- size_t count, loff_t *ppos)
++static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ {
+- struct kernfs_open_file *of = kernfs_of(file);
+-
+- if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
+- return seq_read(file, user_buf, count, ppos);
+- else
+- return kernfs_file_direct_read(of, user_buf, count, ppos);
++ if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW)
++ return seq_read_iter(iocb, iter);
++ return kernfs_file_read_iter(iocb, iter);
+ }
+
+-/**
+- * kernfs_fop_write - kernfs vfs write callback
+- * @file: file pointer
+- * @user_buf: data to write
+- * @count: number of bytes
+- * @ppos: starting offset
+- *
++/*
+ * Copy data in from userland and pass it to the matching kernfs write
+ * operation.
+ *
+@@ -269,20 +252,18 @@ static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
+ * modify only the value you're changing, then write the entire buffer
+ * back.
+ */
+-static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
+- size_t count, loff_t *ppos)
++static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ {
+- struct kernfs_open_file *of = kernfs_of(file);
++ struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
++ ssize_t len = iov_iter_count(iter);
+ const struct kernfs_ops *ops;
+- ssize_t len;
+ char *buf;
+
+ if (of->atomic_write_len) {
+- len = count;
+ if (len > of->atomic_write_len)
+ return -E2BIG;
+ } else {
+- len = min_t(size_t, count, PAGE_SIZE);
++ len = min_t(size_t, len, PAGE_SIZE);
+ }
+
+ buf = of->prealloc_buf;
+@@ -293,7 +274,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
+ if (!buf)
+ return -ENOMEM;
+
+- if (copy_from_user(buf, user_buf, len)) {
++ if (copy_from_iter(buf, len, iter) != len) {
+ len = -EFAULT;
+ goto out_free;
+ }
+@@ -312,7 +293,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
+
+ ops = kernfs_ops(of->kn);
+ if (ops->write)
+- len = ops->write(of, buf, len, *ppos);
++ len = ops->write(of, buf, len, iocb->ki_pos);
+ else
+ len = -EINVAL;
+
+@@ -320,7 +301,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
+ mutex_unlock(&of->mutex);
+
+ if (len > 0)
+- *ppos += len;
++ iocb->ki_pos += len;
+
+ out_free:
+ if (buf == of->prealloc_buf)
+@@ -673,7 +654,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
+
+ /*
+ * Write path needs to access atomic_write_len outside active reference.
+- * Cache it in open_file. See kernfs_fop_write() for details.
++ * Cache it in open_file. See kernfs_fop_write_iter() for details.
+ */
+ of->atomic_write_len = ops->atomic_write_len;
+
+@@ -960,14 +941,16 @@ void kernfs_notify(struct kernfs_node *kn)
+ EXPORT_SYMBOL_GPL(kernfs_notify);
+
+ const struct file_operations kernfs_file_fops = {
+- .read = kernfs_fop_read,
+- .write = kernfs_fop_write,
++ .read_iter = kernfs_fop_read_iter,
++ .write_iter = kernfs_fop_write_iter,
+ .llseek = generic_file_llseek,
+ .mmap = kernfs_fop_mmap,
+ .open = kernfs_fop_open,
+ .release = kernfs_fop_release,
+ .poll = kernfs_fop_poll,
+ .fsync = noop_fsync,
++ .splice_read = generic_file_splice_read,
++ .splice_write = iter_file_splice_write,
+ };
+
+ /**
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 833a2c64dfe80..5f5169b9c2e90 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -4632,6 +4632,7 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
+ resp->rqstp->rq_vec, read->rd_vlen, maxcount, eof);
+ if (nfserr)
+ return nfserr;
++ xdr_truncate_encode(xdr, starting_len + 16 + xdr_align_size(*maxcount));
+
+ tmp = htonl(NFS4_CONTENT_DATA);
+ write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
+@@ -4639,6 +4640,10 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
+ write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp64, 8);
+ tmp = htonl(*maxcount);
+ write_bytes_to_xdr_buf(xdr->buf, starting_len + 12, &tmp, 4);
++
++ tmp = xdr_zero;
++ write_bytes_to_xdr_buf(xdr->buf, starting_len + 16 + *maxcount, &tmp,
++ xdr_pad_size(*maxcount));
+ return nfs_ok;
+ }
+
+@@ -4731,14 +4736,15 @@ out:
+ if (nfserr && segments == 0)
+ xdr_truncate_encode(xdr, starting_len);
+ else {
+- tmp = htonl(eof);
+- write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
+- tmp = htonl(segments);
+- write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
+ if (nfserr) {
+ xdr_truncate_encode(xdr, last_segment);
+ nfserr = nfs_ok;
++ eof = 0;
+ }
++ tmp = htonl(eof);
++ write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
++ tmp = htonl(segments);
++ write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
+ }
+
+ return nfserr;
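The write_bytes_to_xdr_buf() call added above zero-fills the pad that XDR requires after opaque data: lengths are always rounded up to a 4-byte boundary on the wire. A one-line sketch of the pad computation that xdr_pad_size() performs (the helper name below is illustrative):

    static inline u32 xdr_pad_bytes_sketch(u32 len)
    {
            return (4 - (len & 3)) & 3;     /* 0..3 zero bytes of padding */
    }

    /* e.g. len = 5 -> 3 pad bytes; len = 8 -> 0 pad bytes */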
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 0ac197658a2d6..412b3b618994c 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -1206,6 +1206,7 @@ const struct file_operations pipefifo_fops = {
+ .unlocked_ioctl = pipe_ioctl,
+ .release = pipe_release,
+ .fasync = pipe_fasync,
++ .splice_write = iter_file_splice_write,
+ };
+
+ /*
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 317899222d7fd..d2018f70d1fae 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -1770,6 +1770,12 @@ static int process_sysctl_arg(char *param, char *val,
+ return 0;
+ }
+
++ if (!val)
++ return -EINVAL;
++ len = strlen(val);
++ if (len == 0)
++ return -EINVAL;
++
+ /*
+ * To set sysctl options, we use a temporary mount of proc, look up the
+ * respective sys/ file and write to it. To avoid mounting it when no
+@@ -1811,7 +1817,6 @@ static int process_sysctl_arg(char *param, char *val,
+ file, param, val);
+ goto out;
+ }
+- len = strlen(val);
+ wret = kernel_write(file, val, len, &pos);
+ if (wret < 0) {
+ err = wret;
+diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
+index dd90c9792909d..0e7316a86240b 100644
+--- a/include/asm-generic/bitops/atomic.h
++++ b/include/asm-generic/bitops/atomic.h
+@@ -11,19 +11,19 @@
+ * See Documentation/atomic_bitops.txt for details.
+ */
+
+-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
++static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
+ {
+ p += BIT_WORD(nr);
+ atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+ }
+
+-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
++static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+ {
+ p += BIT_WORD(nr);
+ atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+ }
+
+-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
++static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
+ {
+ p += BIT_WORD(nr);
+ atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 5ed101be7b2e7..2b39de35525a9 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -615,6 +615,18 @@ static inline const char *dev_name(const struct device *dev)
+ return kobject_name(&dev->kobj);
+ }
+
++/**
++ * dev_bus_name - Return a device's bus/class name, if at all possible
++ * @dev: struct device to get the bus/class name of
++ *
++ * Will return the name of the bus/class the device is attached to. If it is
++ * not attached to a bus/class, an empty string will be returned.
++ */
++static inline const char *dev_bus_name(const struct device *dev)
++{
++ return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
++}
++
+ __printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
+
+ #ifdef CONFIG_NUMA
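Because dev_bus_name() falls back to an empty string rather than NULL, callers can format a "<bus>:<device>" pair unconditionally. A usage sketch (the message is illustrative; dev is any struct device pointer):

    pr_info("device link endpoint: %s:%s\n", dev_bus_name(dev), dev_name(dev));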
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index eb33d948788cc..bc8caac390fce 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -422,6 +422,7 @@ extern void tty_kclose(struct tty_struct *tty);
+ extern int tty_dev_name_to_number(const char *name, dev_t *number);
+ extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
+ extern void tty_ldisc_unlock(struct tty_struct *tty);
++extern ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
+ #else
+ static inline void tty_kref_put(struct tty_struct *tty)
+ { }
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 7338b3865a2a3..111d7771b2081 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -76,6 +76,8 @@ struct inet_connection_sock_af_ops {
+ * @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options)
+ * @icsk_ack: Delayed ACK control data
+ * @icsk_mtup: MTU probing control data
++ * @icsk_probes_tstamp: Probe timestamp (cleared by non-zero window ack)
++ * @icsk_user_timeout: TCP_USER_TIMEOUT value
+ */
+ struct inet_connection_sock {
+ /* inet_sock has to be the first member! */
+@@ -129,6 +131,7 @@ struct inet_connection_sock {
+
+ u32 probe_timestamp;
+ } icsk_mtup;
++ u32 icsk_probes_tstamp;
+ u32 icsk_user_timeout;
+
+ u64 icsk_ca_priv[104 / sizeof(u64)];
+diff --git a/include/net/sock.h b/include/net/sock.h
+index a5c6ae78df77d..253202dcc5e61 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1903,10 +1903,13 @@ static inline void sk_set_txhash(struct sock *sk)
+ sk->sk_txhash = net_tx_rndhash();
+ }
+
+-static inline void sk_rethink_txhash(struct sock *sk)
++static inline bool sk_rethink_txhash(struct sock *sk)
+ {
+- if (sk->sk_txhash)
++ if (sk->sk_txhash) {
+ sk_set_txhash(sk);
++ return true;
++ }
++ return false;
+ }
+
+ static inline struct dst_entry *
+@@ -1929,12 +1932,10 @@ sk_dst_get(struct sock *sk)
+ return dst;
+ }
+
+-static inline void dst_negative_advice(struct sock *sk)
++static inline void __dst_negative_advice(struct sock *sk)
+ {
+ struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+
+- sk_rethink_txhash(sk);
+-
+ if (dst && dst->ops->negative_advice) {
+ ndst = dst->ops->negative_advice(dst);
+
+@@ -1946,6 +1947,12 @@ static inline void dst_negative_advice(struct sock *sk)
+ }
+ }
+
++static inline void dst_negative_advice(struct sock *sk)
++{
++ sk_rethink_txhash(sk);
++ __dst_negative_advice(sk);
++}
++
+ static inline void
+ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ {
+diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
+index 00c7235ae93e7..2c43b0ef1e4d5 100644
+--- a/include/xen/xenbus.h
++++ b/include/xen/xenbus.h
+@@ -192,7 +192,7 @@ void xs_suspend_cancel(void);
+
+ struct work_struct;
+
+-void xenbus_probe(struct work_struct *);
++void xenbus_probe(void);
+
+ #define XENBUS_IS_ERR_READ(str) ({ \
+ if (!IS_ERR(str) && strlen(str) == 0) { \
+diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
+index 6edff97ad594b..dbc1dbdd2cbf0 100644
+--- a/kernel/bpf/bpf_inode_storage.c
++++ b/kernel/bpf/bpf_inode_storage.c
+@@ -176,7 +176,7 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
+ * bpf_local_storage_update expects the owner to have a
+ * valid storage pointer.
+ */
+- if (!inode_storage_ptr(inode))
++ if (!inode || !inode_storage_ptr(inode))
+ return (unsigned long)NULL;
+
+ sdata = inode_storage_lookup(inode, map, true);
+@@ -200,6 +200,9 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
+ BPF_CALL_2(bpf_inode_storage_delete,
+ struct bpf_map *, map, struct inode *, inode)
+ {
++ if (!inode)
++ return -EINVAL;
++
+ /* This helper must only be called from where the inode is guaranteed
+ * to have a refcount and cannot be freed.
+ */
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 8f50c9c19f1b0..9433ab9995cd7 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -2717,7 +2717,6 @@ out_unlock:
+ out_put_prog:
+ if (tgt_prog_fd && tgt_prog)
+ bpf_prog_put(tgt_prog);
+- bpf_prog_put(prog);
+ return err;
+ }
+
+@@ -2830,7 +2829,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
+ tp_name = prog->aux->attach_func_name;
+ break;
+ }
+- return bpf_tracing_prog_attach(prog, 0, 0);
++ err = bpf_tracing_prog_attach(prog, 0, 0);
++ if (err >= 0)
++ return err;
++ goto out_put_prog;
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
+ if (strncpy_from_user(buf,
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index c1418b47f625a..02bc5b8f1eb27 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -79,7 +79,7 @@ module_param(lock_stat, int, 0644);
+ DEFINE_PER_CPU(unsigned int, lockdep_recursion);
+ EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
+
+-static inline bool lockdep_enabled(void)
++static __always_inline bool lockdep_enabled(void)
+ {
+ if (!debug_locks)
+ return false;
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index bc1e3b5a97bdd..801f8bc52b34f 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3376,7 +3376,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ while (prb_read_valid_info(prb, seq, &info, &line_count)) {
+ if (r.info->seq >= dumper->next_seq)
+ break;
+- l += get_record_print_text_size(&info, line_count, true, time);
++ l += get_record_print_text_size(&info, line_count, syslog, time);
+ seq = r.info->seq + 1;
+ }
+
+@@ -3386,7 +3386,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ &info, &line_count)) {
+ if (r.info->seq >= dumper->next_seq)
+ break;
+- l -= get_record_print_text_size(&info, line_count, true, time);
++ l -= get_record_print_text_size(&info, line_count, syslog, time);
+ seq = r.info->seq + 1;
+ }
+
+diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
+index 74e25a1704f2b..617dd63589650 100644
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -1720,7 +1720,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
+
+ /* Caller interested in the line count? */
+ if (line_count)
+- *line_count = count_lines(data, data_size);
++ *line_count = count_lines(data, len);
+
+ /* Caller interested in the data content? */
+ if (!buf || !buf_size)
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index 1635111c5bd2a..a21e6a5792c5a 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -1658,7 +1658,7 @@ static int copy_compat_iovec_from_user(struct iovec *iov,
+ (const struct compat_iovec __user *)uvec;
+ int ret = -EFAULT, i;
+
+- if (!user_access_begin(uvec, nr_segs * sizeof(*uvec)))
++ if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
+ return -EFAULT;
+
+ for (i = 0; i < nr_segs; i++) {
+diff --git a/mm/kasan/init.c b/mm/kasan/init.c
+index fe6be0be1f763..b8c6ec172bb22 100644
+--- a/mm/kasan/init.c
++++ b/mm/kasan/init.c
+@@ -377,9 +377,10 @@ static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
+
+ if (kasan_pte_table(*pmd)) {
+ if (IS_ALIGNED(addr, PMD_SIZE) &&
+- IS_ALIGNED(next, PMD_SIZE))
++ IS_ALIGNED(next, PMD_SIZE)) {
+ pmd_clear(pmd);
+- continue;
++ continue;
++ }
+ }
+ pte = pte_offset_kernel(pmd, addr);
+ kasan_remove_pte_table(pte, addr, next);
+@@ -402,9 +403,10 @@ static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
+
+ if (kasan_pmd_table(*pud)) {
+ if (IS_ALIGNED(addr, PUD_SIZE) &&
+- IS_ALIGNED(next, PUD_SIZE))
++ IS_ALIGNED(next, PUD_SIZE)) {
+ pud_clear(pud);
+- continue;
++ continue;
++ }
+ }
+ pmd = pmd_offset(pud, addr);
+ pmd_base = pmd_offset(pud, 0);
+@@ -428,9 +430,10 @@ static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
+
+ if (kasan_pud_table(*p4d)) {
+ if (IS_ALIGNED(addr, P4D_SIZE) &&
+- IS_ALIGNED(next, P4D_SIZE))
++ IS_ALIGNED(next, P4D_SIZE)) {
+ p4d_clear(p4d);
+- continue;
++ continue;
++ }
+ }
+ pud = pud_offset(p4d, addr);
+ kasan_remove_pud_table(pud, addr, next);
+@@ -462,9 +465,10 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
+
+ if (kasan_p4d_table(*pgd)) {
+ if (IS_ALIGNED(addr, PGDIR_SIZE) &&
+- IS_ALIGNED(next, PGDIR_SIZE))
++ IS_ALIGNED(next, PGDIR_SIZE)) {
+ pgd_clear(pgd);
+- continue;
++ continue;
++ }
+ }
+
+ p4d = p4d_offset(pgd, addr);
+@@ -488,7 +492,6 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
+
+ ret = kasan_populate_early_shadow(shadow_start, shadow_end);
+ if (ret)
+- kasan_remove_zero_shadow(shadow_start,
+- size >> KASAN_SHADOW_SCALE_SHIFT);
++ kasan_remove_zero_shadow(start, size);
+ return ret;
+ }
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index a717728cc7b4a..8fc23d53f5500 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3083,9 +3083,7 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ page_counter_uncharge(&memcg->kmem, nr_pages);
+
+- page_counter_uncharge(&memcg->memory, nr_pages);
+- if (do_memsw_account())
+- page_counter_uncharge(&memcg->memsw, nr_pages);
++ refill_stock(memcg, nr_pages);
+ }
+
+ /**
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 8ea0c65f10756..9d7ca1bd7f4b3 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -406,6 +406,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ struct zone *oldzone, *newzone;
+ int dirty;
+ int expected_count = expected_page_refs(mapping, page) + extra_count;
++ int nr = thp_nr_pages(page);
+
+ if (!mapping) {
+ /* Anonymous page without mapping */
+@@ -441,7 +442,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ */
+ newpage->index = page->index;
+ newpage->mapping = page->mapping;
+- page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
++ page_ref_add(newpage, nr); /* add cache reference */
+ if (PageSwapBacked(page)) {
+ __SetPageSwapBacked(newpage);
+ if (PageSwapCache(page)) {
+@@ -463,7 +464,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ if (PageTransHuge(page)) {
+ int i;
+
+- for (i = 1; i < HPAGE_PMD_NR; i++) {
++ for (i = 1; i < nr; i++) {
+ xas_next(&xas);
+ xas_store(&xas, newpage);
+ }
+@@ -474,7 +475,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ * to one less reference.
+ * We know this isn't the last reference.
+ */
+- page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
++ page_ref_unfreeze(page, expected_count - nr);
+
+ xas_unlock(&xas);
+ /* Leave irq disabled to prevent preemption while updating stats */
+@@ -497,17 +498,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
+ new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
+
+- __dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
+- __inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
++ __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
++ __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
+ if (PageSwapBacked(page) && !PageSwapCache(page)) {
+- __dec_lruvec_state(old_lruvec, NR_SHMEM);
+- __inc_lruvec_state(new_lruvec, NR_SHMEM);
++ __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
++ __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
+ }
+ if (dirty && mapping_can_writeback(mapping)) {
+- __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
+- __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
+- __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
+- __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
++ __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
++ __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
++ __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
++ __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
+ }
+ }
+ local_irq_enable();
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index c1c30a9f76f34..8b796c499cbb2 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -272,7 +272,8 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+ kattr->test.repeat)
+ return -EINVAL;
+
+- if (ctx_size_in < prog->aux->max_ctx_offset)
++ if (ctx_size_in < prog->aux->max_ctx_offset ||
++ ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
+ return -EINVAL;
+
+ if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 38412e70f7618..81e5d482c238e 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -9602,6 +9602,11 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
+ }
+ }
+
++ if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
++ netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
++ features &= ~NETIF_F_HW_TLS_RX;
++ }
++
+ return features;
+ }
+
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 8c5ddffd707de..5d397838bceb6 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -4134,7 +4134,7 @@ out:
+ static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+ {
+- struct devlink_port *devlink_port = info->user_ptr[0];
++ struct devlink_port *devlink_port = info->user_ptr[1];
+ struct devlink_param_item *param_item;
+ struct sk_buff *msg;
+ int err;
+@@ -4163,7 +4163,7 @@ static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
+ static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+ {
+- struct devlink_port *devlink_port = info->user_ptr[0];
++ struct devlink_port *devlink_port = info->user_ptr[1];
+
+ return __devlink_nl_cmd_param_set_doit(devlink_port->devlink,
+ devlink_port->index,
+diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
+index 80dbf2f4016e2..8e582e29a41e3 100644
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t)
+ u64 rate, brate;
+
+ est_fetch_counters(est, &b);
+- brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
+- brate -= (est->avbps >> est->ewma_log);
++ brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
++ brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
+
+- rate = (b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
+- rate -= (est->avpps >> est->ewma_log);
++ rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
++ rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
+
+ write_seqcount_begin(&est->seq);
+ est->avbps += brate;
+@@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ if (parm->interval < -2 || parm->interval > 3)
+ return -EINVAL;
+
++ if (parm->ewma_log == 0 || parm->ewma_log >= 31)
++ return -EINVAL;
++
+ est = kzalloc(sizeof(*est), GFP_KERNEL);
+ if (!est)
+ return -ENOBUFS;
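The reordered shifts above avoid an undefined negative shift count: the old code scaled by << (10 - ewma_log - intvl_log), which goes negative once ewma_log exceeds 10 - intvl_log, while the new code scales by << (10 - intvl_log) and applies ewma_log as a separate right shift inside the EWMA step; the added check then rejects ewma_log values of 0 or >= 31 outright. A sketch of the corrected fixed-point update (function name illustrative):

    static u64 ewma_step_sketch(u64 avg, u64 delta, int intvl_log, int ewma_log)
    {
            u64 sample = delta << (10 - intvl_log);  /* scale to rate units */

            /* EWMA: move avg toward sample by roughly 1/2^ewma_log */
            return avg + (sample >> ewma_log) - (avg >> ewma_log);
    }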
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index f0d6dba37b43d..7ab56796bd3a9 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -432,7 +432,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
+
+ len += NET_SKB_PAD;
+
+- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
++ /* If requested length is either too small or too big,
++ * we use kmalloc() for skb->head allocation.
++ */
++ if (len <= SKB_WITH_OVERHEAD(1024) ||
++ len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
+ (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
+ skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
+ if (!skb)
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index f60869acbef02..48d2b615edc26 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -851,6 +851,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
+ newicsk->icsk_retransmits = 0;
+ newicsk->icsk_backoff = 0;
+ newicsk->icsk_probes_out = 0;
++ newicsk->icsk_probes_tstamp = 0;
+
+ /* Deinitialize accept_queue to trap illegal accesses. */
+ memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
+diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
+index cc23f1ce239c2..8cd3224d913e0 100644
+--- a/net/ipv4/netfilter/ipt_rpfilter.c
++++ b/net/ipv4/netfilter/ipt_rpfilter.c
+@@ -76,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ flow.daddr = iph->saddr;
+ flow.saddr = rpfilter_get_saddr(iph->daddr);
+ flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+- flow.flowi4_tos = RT_TOS(iph->tos);
++ flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
+ flow.flowi4_scope = RT_SCOPE_UNIVERSE;
+ flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index b2bc3d7fe9e80..41d03683b13d6 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2685,6 +2685,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+
+ icsk->icsk_backoff = 0;
+ icsk->icsk_probes_out = 0;
++ icsk->icsk_probes_tstamp = 0;
+ icsk->icsk_rto = TCP_TIMEOUT_INIT;
+ icsk->icsk_rto_min = TCP_RTO_MIN;
+ icsk->icsk_delack_max = TCP_DELACK_MAX;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index ef4bdb038a4bb..6bf066f924c15 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3370,6 +3370,7 @@ static void tcp_ack_probe(struct sock *sk)
+ return;
+ if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
+ icsk->icsk_backoff = 0;
++ icsk->icsk_probes_tstamp = 0;
+ inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
+ /* Socket must be waked up by subsequent tcp_data_snd_check().
+ * This function is not for random use!
+@@ -4379,10 +4380,9 @@ static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
+ * The receiver remembers and reflects via DSACKs. Leverage the
+ * DSACK state and change the txhash to re-route speculatively.
+ */
+- if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq) {
+- sk_rethink_txhash(sk);
++ if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq &&
++ sk_rethink_txhash(sk))
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
+- }
+ }
+
+ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 595dcc3afac5c..ab8ed0fc47697 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1590,6 +1590,8 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+ tcp_move_syn(newtp, req);
+ ireq->ireq_opt = NULL;
+ } else {
++ newinet->inet_opt = NULL;
++
+ if (!req_unhash && found_dup_sk) {
+ /* This code path should only be executed in the
+ * syncookie case only
+@@ -1597,8 +1599,6 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+ bh_unlock_sock(newsk);
+ sock_put(newsk);
+ newsk = NULL;
+- } else {
+- newinet->inet_opt = NULL;
+ }
+ }
+ return newsk;
+@@ -1755,6 +1755,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
+ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ {
+ u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
++ u32 tail_gso_size, tail_gso_segs;
+ struct skb_shared_info *shinfo;
+ const struct tcphdr *th;
+ struct tcphdr *thtail;
+@@ -1762,6 +1763,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ unsigned int hdrlen;
+ bool fragstolen;
+ u32 gso_segs;
++ u32 gso_size;
+ int delta;
+
+ /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
+@@ -1787,13 +1789,6 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ */
+ th = (const struct tcphdr *)skb->data;
+ hdrlen = th->doff * 4;
+- shinfo = skb_shinfo(skb);
+-
+- if (!shinfo->gso_size)
+- shinfo->gso_size = skb->len - hdrlen;
+-
+- if (!shinfo->gso_segs)
+- shinfo->gso_segs = 1;
+
+ tail = sk->sk_backlog.tail;
+ if (!tail)
+@@ -1816,6 +1811,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ goto no_coalesce;
+
+ __skb_pull(skb, hdrlen);
++
++ shinfo = skb_shinfo(skb);
++ gso_size = shinfo->gso_size ?: skb->len;
++ gso_segs = shinfo->gso_segs ?: 1;
++
++ shinfo = skb_shinfo(tail);
++ tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
++ tail_gso_segs = shinfo->gso_segs ?: 1;
++
+ if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
+ TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
+
+@@ -1842,11 +1846,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ }
+
+ /* Not as strict as GRO. We only need to carry mss max value */
+- skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
+- skb_shinfo(tail)->gso_size);
+-
+- gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
+- skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
++ shinfo->gso_size = max(gso_size, tail_gso_size);
++ shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
+
+ sk->sk_backlog.len += delta;
+ __NET_INC_STATS(sock_net(sk),
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 99011768c2640..e58e2589d7f98 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -4080,6 +4080,7 @@ void tcp_send_probe0(struct sock *sk)
+ /* Cancel probe timer, if it is not required. */
+ icsk->icsk_probes_out = 0;
+ icsk->icsk_backoff = 0;
++ icsk->icsk_probes_tstamp = 0;
+ return;
+ }
+
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 6c62b9ea1320d..faa92948441ba 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -219,14 +219,8 @@ static int tcp_write_timeout(struct sock *sk)
+ int retry_until;
+
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+- if (icsk->icsk_retransmits) {
+- dst_negative_advice(sk);
+- } else {
+- sk_rethink_txhash(sk);
+- tp->timeout_rehash++;
+- __NET_INC_STATS(sock_net(sk),
+- LINUX_MIB_TCPTIMEOUTREHASH);
+- }
++ if (icsk->icsk_retransmits)
++ __dst_negative_advice(sk);
+ retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
+ expired = icsk->icsk_retransmits >= retry_until;
+ } else {
+@@ -234,12 +228,7 @@ static int tcp_write_timeout(struct sock *sk)
+ /* Black hole detection */
+ tcp_mtu_probing(icsk, sk);
+
+- dst_negative_advice(sk);
+- } else {
+- sk_rethink_txhash(sk);
+- tp->timeout_rehash++;
+- __NET_INC_STATS(sock_net(sk),
+- LINUX_MIB_TCPTIMEOUTREHASH);
++ __dst_negative_advice(sk);
+ }
+
+ retry_until = net->ipv4.sysctl_tcp_retries2;
+@@ -270,6 +259,11 @@ static int tcp_write_timeout(struct sock *sk)
+ return 1;
+ }
+
++ if (sk_rethink_txhash(sk)) {
++ tp->timeout_rehash++;
++ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
++ }
++
+ return 0;
+ }
+
+@@ -349,6 +343,7 @@ static void tcp_probe_timer(struct sock *sk)
+
+ if (tp->packets_out || !skb) {
+ icsk->icsk_probes_out = 0;
++ icsk->icsk_probes_tstamp = 0;
+ return;
+ }
+
+@@ -360,13 +355,12 @@ static void tcp_probe_timer(struct sock *sk)
+ * corresponding system limit. We also implement similar policy when
+ * we use RTO to probe window in tcp_retransmit_timer().
+ */
+- if (icsk->icsk_user_timeout) {
+- u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
+- tcp_probe0_base(sk));
+-
+- if (elapsed >= icsk->icsk_user_timeout)
+- goto abort;
+- }
++ if (!icsk->icsk_probes_tstamp)
++ icsk->icsk_probes_tstamp = tcp_jiffies32;
++ else if (icsk->icsk_user_timeout &&
++ (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
++ msecs_to_jiffies(icsk->icsk_user_timeout))
++ goto abort;
+
+ max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
+ if (sock_flag(sk, SOCK_DEAD)) {
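The replacement logic above records the time of the first unanswered zero-window probe in icsk_probes_tstamp and compares the elapsed time directly against TCP_USER_TIMEOUT, instead of reconstructing it from the exponential-backoff model. Because jiffies counters wrap, the elapsed-time test subtracts in unsigned arithmetic and interprets the difference as signed. A sketch (names illustrative):

    static bool probe_timeout_expired_sketch(u32 now_jiffies, u32 first_probe,
                                             u32 user_timeout_ms)
    {
            if (!first_probe)               /* no probe outstanding yet */
                    return false;
            return (s32)(now_jiffies - first_probe) >=
                   (s32)msecs_to_jiffies(user_timeout_ms);
    }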
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 9eeebd4a00542..e37a2fa65c294 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2553,7 +2553,8 @@ int udp_v4_early_demux(struct sk_buff *skb)
+ */
+ if (!inet_sk(sk)->inet_daddr && in_dev)
+ return ip_mc_validate_source(skb, iph->daddr,
+- iph->saddr, iph->tos,
++ iph->saddr,
++ iph->tos & IPTOS_RT_MASK,
+ skb->dev, in_dev, &itag);
+ }
+ return 0;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 8b6eb384bac7c..4c881f5d9080c 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2466,8 +2466,9 @@ static void addrconf_add_mroute(struct net_device *dev)
+ .fc_ifindex = dev->ifindex,
+ .fc_dst_len = 8,
+ .fc_flags = RTF_UP,
+- .fc_type = RTN_UNICAST,
++ .fc_type = RTN_MULTICAST,
+ .fc_nlinfo.nl_net = dev_net(dev),
++ .fc_protocol = RTPROT_KERNEL,
+ };
+
+ ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 1319986693fc8..84f932532db7d 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -1272,6 +1272,10 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
+
+ nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+ msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
++ if (!nla_ok(nla_opt_msk, msk_depth)) {
++ NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
++ return -EINVAL;
++ }
+ }
+
+ nla_for_each_attr(nla_opt_key, nla_enc_key,
+@@ -1307,9 +1311,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
+ NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
+ return -EINVAL;
+ }
+-
+- if (msk_depth)
+- nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
+ break;
+ case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
+ if (key->enc_opts.dst_opt_type) {
+@@ -1340,9 +1341,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
+ NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
+ return -EINVAL;
+ }
+-
+- if (msk_depth)
+- nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
+ break;
+ case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
+ if (key->enc_opts.dst_opt_type) {
+@@ -1373,14 +1371,20 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
+ NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
+ return -EINVAL;
+ }
+-
+- if (msk_depth)
+- nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
+ return -EINVAL;
+ }
++
++ if (!msk_depth)
++ continue;
++
++ if (!nla_ok(nla_opt_msk, msk_depth)) {
++ NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
++ return -EINVAL;
++ }
++ nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
+ }
+
+ return 0;
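The fix above moves the mask-cursor advance to the end of the loop and validates the cursor with nla_ok() before every dereference, including the first: nla_next() merely advances the pointer and decrements the remaining byte count, so without the check a malformed TCA_FLOWER_KEY_ENC_OPTS_MASK could be read past its end. A condensed sketch of the hardened walk (consume_mask_option() is a hypothetical stand-in for the per-type handling):

    static int consume_mask_option(const struct nlattr *opt); /* hypothetical */

    static int walk_masks_sketch(const struct nlattr *mask_attr, int nkeys)
    {
            const struct nlattr *pos = nla_data(mask_attr);
            int rem = nla_len(mask_attr);

            while (nkeys--) {
                    if (!nla_ok(pos, rem))          /* validate before use */
                            return -EINVAL;
                    consume_mask_option(pos);
                    pos = nla_next(pos, &rem);
            }
            return 0;
    }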
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
+index 78bec347b8b66..c4007b9cd16d6 100644
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -366,9 +366,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ if (tb[TCA_TCINDEX_MASK])
+ cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
+
+- if (tb[TCA_TCINDEX_SHIFT])
++ if (tb[TCA_TCINDEX_SHIFT]) {
+ cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
+-
++ if (cp->shift > 16) {
++ err = -EINVAL;
++ goto errout;
++ }
++ }
+ if (!cp->hash) {
+ /* Hash not specified, use perfect hash if the upper limit
+ * of the hashing index is below the threshold.
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 2a76a2f5ed88c..5e8e49c4ab5ca 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -412,7 +412,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+ {
+ struct qdisc_rate_table *rtab;
+
+- if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
++ if (tab == NULL || r->rate == 0 ||
++ r->cell_log == 0 || r->cell_log >= 32 ||
+ nla_len(tab) != TC_RTAB_SIZE) {
+ NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
+ return NULL;
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index c2752e2b9ce34..4404c491eb388 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1062,6 +1062,90 @@ err_noclose:
+ return 0; /* record not complete */
+ }
+
++static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
++ int flags)
++{
++ return kernel_sendpage(sock, virt_to_page(vec->iov_base),
++ offset_in_page(vec->iov_base),
++ vec->iov_len, flags);
++}
++
++/*
++ * kernel_sendpage() is used exclusively to reduce the number of
++ * copy operations in this path. Therefore the caller must ensure
++ * that the pages backing @xdr are unchanging.
++ *
++ * In addition, the logic assumes that .bv_len is never larger
++ * than PAGE_SIZE.
++ */
++static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
++ struct xdr_buf *xdr, rpc_fraghdr marker,
++ unsigned int *sentp)
++{
++ const struct kvec *head = xdr->head;
++ const struct kvec *tail = xdr->tail;
++ struct kvec rm = {
++ .iov_base = &marker,
++ .iov_len = sizeof(marker),
++ };
++ int flags, ret;
++
++ *sentp = 0;
++ xdr_alloc_bvec(xdr, GFP_KERNEL);
++
++ msg->msg_flags = MSG_MORE;
++ ret = kernel_sendmsg(sock, msg, &rm, 1, rm.iov_len);
++ if (ret < 0)
++ return ret;
++ *sentp += ret;
++ if (ret != rm.iov_len)
++ return -EAGAIN;
++
++ flags = head->iov_len < xdr->len ? MSG_MORE | MSG_SENDPAGE_NOTLAST : 0;
++ ret = svc_tcp_send_kvec(sock, head, flags);
++ if (ret < 0)
++ return ret;
++ *sentp += ret;
++ if (ret != head->iov_len)
++ goto out;
++
++ if (xdr->page_len) {
++ unsigned int offset, len, remaining;
++ struct bio_vec *bvec;
++
++ bvec = xdr->bvec;
++ offset = xdr->page_base;
++ remaining = xdr->page_len;
++ flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
++ while (remaining > 0) {
++ if (remaining <= PAGE_SIZE && tail->iov_len == 0)
++ flags = 0;
++ len = min(remaining, bvec->bv_len);
++ ret = kernel_sendpage(sock, bvec->bv_page,
++ bvec->bv_offset + offset,
++ len, flags);
++ if (ret < 0)
++ return ret;
++ *sentp += ret;
++ if (ret != len)
++ goto out;
++ remaining -= len;
++ offset = 0;
++ bvec++;
++ }
++ }
++
++ if (tail->iov_len) {
++ ret = svc_tcp_send_kvec(sock, tail, 0);
++ if (ret < 0)
++ return ret;
++ *sentp += ret;
++ }
++
++out:
++ return 0;
++}
++
+ /**
+ * svc_tcp_sendto - Send out a reply on a TCP socket
+ * @rqstp: completed svc_rqst
+@@ -1089,7 +1173,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
+ mutex_lock(&xprt->xpt_mutex);
+ if (svc_xprt_is_dead(xprt))
+ goto out_notconn;
+- err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent);
++ err = svc_tcp_sendmsg(svsk->sk_sock, &msg, xdr, marker, &sent);
+ xdr_free_bvec(xdr);
+ trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
+ if (err < 0 || sent != (xdr->len + sizeof(marker)))
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index d5f42c62fd79e..52fd1f96b241e 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -107,9 +107,9 @@ EXPORT_SYMBOL(xsk_get_pool_from_qid);
+
+ void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
+ {
+- if (queue_id < dev->real_num_rx_queues)
++ if (queue_id < dev->num_rx_queues)
+ dev->_rx[queue_id].pool = NULL;
+- if (queue_id < dev->real_num_tx_queues)
++ if (queue_id < dev->num_tx_queues)
+ dev->_tx[queue_id].pool = NULL;
+ }
+
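
real_num_rx_queues/real_num_tx_queues can shrink at runtime (for example through an ethtool channel reconfiguration) after a pool was bound, so clearing only up to the current active count could leave a stale pool pointer on a deactivated queue; iterating the allocated range (num_rx_queues/num_tx_queues) covers every slot that could ever have been set. A self-contained sketch of the hazard, with toy structures in place of net_device:

#include <stdio.h>
#include <stdint.h>

#define NUM_QUEUES 8            /* allocated queues (num_rx_queues) */

static void *pool[NUM_QUEUES];
static int real_num = 8;        /* currently active (real_num_rx_queues) */

static void clear_pool_buggy(int qid)
{
        if (qid < real_num)     /* misses queues beyond the active range */
                pool[qid] = NULL;
}

static void clear_pool_fixed(int qid)
{
        if (qid < NUM_QUEUES)   /* covers everything that was allocated */
                pool[qid] = NULL;
}

int main(void)
{
        int target = 6;

        pool[target] = (void *)(uintptr_t)0xdeadbeef; /* bound while active */
        real_num = 4;                                 /* count reduced later */

        clear_pool_buggy(target);
        printf("buggy: pool[%d] = %p (stale)\n", target, pool[target]);

        clear_pool_fixed(target);
        printf("fixed: pool[%d] = %p\n", target, pool[target]);
        return 0;
}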
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
+index 11554d0412f06..1b8409ec2c97f 100644
+--- a/sound/core/seq/oss/seq_oss_synth.c
++++ b/sound/core/seq/oss/seq_oss_synth.c
+@@ -611,7 +611,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in
+
+ if (info->is_midi) {
+ struct midi_info minf;
+- snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
++ if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf))
++ return -ENXIO;
+ inf->synth_type = SYNTH_TYPE_MIDI;
+ inf->synth_subtype = 0;
+ inf->nr_voices = 16;
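
Without the check, a failing snd_seq_oss_midi_make_info() left minf uninitialized on the stack, yet the code went on to build the synth info from it; the fix propagates -ENXIO instead. The general out-parameter pattern, sketched:

#include <stdio.h>
#include <string.h>

struct midi_info {
        char name[32];
        int device;
};

/* Fills *minf only on success; returns -1 when the device is absent. */
static int make_info(int dev, struct midi_info *minf)
{
        if (dev != 0)
                return -1;      /* lookup failed: *minf left untouched */
        strcpy(minf->name, "midi0");
        minf->device = 0;
        return 0;
}

int main(void)
{
        struct midi_info minf;  /* deliberately uninitialized, as in the bug */

        /* Fixed pattern: propagate the failure instead of consuming minf. */
        if (make_info(5, &minf)) {
                fprintf(stderr, "device 5: no such MIDI device (-ENXIO)\n");
                return 1;
        }
        printf("device: %s\n", minf.name);
        return 0;
}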
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 687216e745267..eec1775dfffe9 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2934,7 +2934,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
+ snd_hdac_leave_pm(&codec->core);
+ }
+
+-static int hda_codec_suspend(struct device *dev)
++static int hda_codec_runtime_suspend(struct device *dev)
+ {
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+ unsigned int state;
+@@ -2953,7 +2953,7 @@ static int hda_codec_suspend(struct device *dev)
+ return 0;
+ }
+
+-static int hda_codec_resume(struct device *dev)
++static int hda_codec_runtime_resume(struct device *dev)
+ {
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+
+@@ -2968,16 +2968,6 @@ static int hda_codec_resume(struct device *dev)
+ return 0;
+ }
+
+-static int hda_codec_runtime_suspend(struct device *dev)
+-{
+- return hda_codec_suspend(dev);
+-}
+-
+-static int hda_codec_runtime_resume(struct device *dev)
+-{
+- return hda_codec_resume(dev);
+-}
+-
+ #endif /* CONFIG_PM */
+
+ #ifdef CONFIG_PM_SLEEP
+@@ -2998,31 +2988,31 @@ static void hda_codec_pm_complete(struct device *dev)
+ static int hda_codec_pm_suspend(struct device *dev)
+ {
+ dev->power.power_state = PMSG_SUSPEND;
+- return hda_codec_suspend(dev);
++ return pm_runtime_force_suspend(dev);
+ }
+
+ static int hda_codec_pm_resume(struct device *dev)
+ {
+ dev->power.power_state = PMSG_RESUME;
+- return hda_codec_resume(dev);
++ return pm_runtime_force_resume(dev);
+ }
+
+ static int hda_codec_pm_freeze(struct device *dev)
+ {
+ dev->power.power_state = PMSG_FREEZE;
+- return hda_codec_suspend(dev);
++ return pm_runtime_force_suspend(dev);
+ }
+
+ static int hda_codec_pm_thaw(struct device *dev)
+ {
+ dev->power.power_state = PMSG_THAW;
+- return hda_codec_resume(dev);
++ return pm_runtime_force_resume(dev);
+ }
+
+ static int hda_codec_pm_restore(struct device *dev)
+ {
+ dev->power.power_state = PMSG_RESTORE;
+- return hda_codec_resume(dev);
++ return pm_runtime_force_resume(dev);
+ }
+ #endif /* CONFIG_PM_SLEEP */
+
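
Routing every sleep callback through pm_runtime_force_suspend()/pm_runtime_force_resume() removes the duplicated suspend/resume bodies and lets the PM core skip hardware access for a codec that is already runtime-suspended, while keeping the runtime-PM status coherent across system sleep. A userspace model of the "force" idea (a toy state machine, not the real helpers):

#include <stdio.h>

enum rpm_status { RPM_ACTIVE, RPM_SUSPENDED };

struct device {
        const char *name;
        enum rpm_status status;
};

static void do_suspend(struct device *dev)
{
        printf("%s: hardware suspend\n", dev->name);
        dev->status = RPM_SUSPENDED;
}

/* Models pm_runtime_force_suspend(): only touch the hardware if the
 * device is not already runtime-suspended. */
static void force_suspend(struct device *dev)
{
        if (dev->status == RPM_SUSPENDED) {
                printf("%s: already runtime-suspended, nothing to do\n",
                       dev->name);
                return;
        }
        do_suspend(dev);
}

int main(void)
{
        struct device idle = { "codec-idle", RPM_SUSPENDED };
        struct device busy = { "codec-busy", RPM_ACTIVE };

        /* System suspend path hitting both devices. */
        force_suspend(&idle);   /* skipped: no redundant hardware access */
        force_suspend(&busy);   /* actually suspended once */
        return 0;
}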
+diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
+index 70164d1428d40..361cf2041911a 100644
+--- a/sound/pci/hda/hda_tegra.c
++++ b/sound/pci/hda/hda_tegra.c
+@@ -388,7 +388,7 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
+ * in powers of 2, next available ratio is 16 which can be
+ * used as a limiting factor here.
+ */
+- if (of_device_is_compatible(np, "nvidia,tegra194-hda"))
++ if (of_device_is_compatible(np, "nvidia,tegra30-hda"))
+ chip->bus.core.sdo_limit = 16;
+
+ /* codec detection */
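
A device-tree compatible property is a list ordered from most to least specific, and the newer Tegra HDA nodes are assumed here to carry "nvidia,tegra30-hda" as the family fallback, so matching on the fallback applies the SDO ratio limit to every generation rather than Tegra194 alone. A sketch of the list-matching semantics (strings illustrative):

#include <stdio.h>
#include <string.h>

/* Models of_device_is_compatible(): true if any entry in the node's
 * compatible list equals the requested string. */
static int is_compatible(const char **list, int n, const char *compat)
{
        for (int i = 0; i < n; i++)
                if (strcmp(list[i], compat) == 0)
                        return 1;
        return 0;
}

int main(void)
{
        /* A Tegra194 node lists the specific string first, the family
         * fallback last. */
        const char *tegra194[] = { "nvidia,tegra194-hda",
                                   "nvidia,tegra30-hda" };
        const char *tegra30[]  = { "nvidia,tegra30-hda" };

        /* Matching the fallback covers both generations with one check. */
        printf("tegra194 matches tegra30-hda: %d\n",
               is_compatible(tegra194, 2, "nvidia,tegra30-hda"));
        printf("tegra30  matches tegra30-hda: %d\n",
               is_compatible(tegra30, 1, "nvidia,tegra30-hda"));
        return 0;
}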
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index dd82ff2bd5d65..ed5b6b894dc19 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6371,6 +6371,7 @@ enum {
+ ALC256_FIXUP_HP_HEADSET_MIC,
+ ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
+ ALC282_FIXUP_ACER_DISABLE_LINEOUT,
++ ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7808,6 +7809,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE
+ },
++ [ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc269_fixup_limit_int_mic_boost,
++ .chained = true,
++ .chain_id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7826,6 +7833,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
++ SND_PCI_QUIRK(0x1025, 0x1094, "Acer Aspire E5-575T", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK),
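
The quirk table keys on the PCI subsystem vendor/device ID, and the new fixup chains into the existing ALC255_FIXUP_ACER_MIC_NO_PRESENCE entry, so the E5-575T gets both the mic-boost limit and the headset-mic handling. A toy model of the table-plus-chain dispatch:

#include <stdio.h>

struct fixup {
        const char *name;
        int chain;              /* index of the chained fixup, -1 to stop */
};

/* Toy fixup table: entry 0 chains into entry 1, mirroring
 * ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST -> ALC255_FIXUP_ACER_MIC_NO_PRESENCE. */
static const struct fixup fixups[] = {
        { "limit_int_mic_boost",  1 },
        { "acer_mic_no_presence", -1 },
};

struct quirk {
        unsigned short vendor, device;
        int fixup;
};

static const struct quirk quirks[] = {
        { 0x1025, 0x1094, 0 },  /* Acer Aspire E5-575T */
};

static void apply_fixups(int idx)
{
        while (idx >= 0) {
                printf("applying fixup: %s\n", fixups[idx].name);
                idx = fixups[idx].chain;
        }
}

int main(void)
{
        unsigned short vendor = 0x1025, device = 0x1094;

        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                if (quirks[i].vendor == vendor && quirks[i].device == device)
                        apply_fixups(quirks[i].fixup);
        return 0;
}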
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 0ab40a8a68fb5..834367dd54e1b 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -113,6 +113,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
+ spec->codec_type = VT1708S;
+ spec->gen.indep_hp = 1;
+ spec->gen.keep_eapd_on = 1;
++ spec->gen.dac_min_mute = 1;
+ spec->gen.pcm_playback_hook = via_playback_pcm_hook;
+ spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
+ codec->power_save_node = 1;
+diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c
+index 65b59dbfb43c8..a9b1b4180c471 100644
+--- a/sound/soc/codecs/rt711.c
++++ b/sound/soc/codecs/rt711.c
+@@ -462,6 +462,8 @@ static int rt711_set_amp_gain_put(struct snd_kcontrol *kcontrol,
+ unsigned int read_ll, read_rl;
+ int i;
+
++ mutex_lock(&rt711->calibrate_mutex);
++
+ /* Can't use update bit function, so read the original value first */
+ addr_h = mc->reg;
+ addr_l = mc->rreg;
+@@ -547,6 +549,8 @@ static int rt711_set_amp_gain_put(struct snd_kcontrol *kcontrol,
+ if (dapm->bias_level <= SND_SOC_BIAS_STANDBY)
+ regmap_write(rt711->regmap,
+ RT711_SET_AUDIO_POWER_STATE, AC_PWRST_D3);
++
++ mutex_unlock(&rt711->calibrate_mutex);
+ return 0;
+ }
+
+@@ -859,9 +863,11 @@ static int rt711_set_bias_level(struct snd_soc_component *component,
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
++ mutex_lock(&rt711->calibrate_mutex);
+ regmap_write(rt711->regmap,
+ RT711_SET_AUDIO_POWER_STATE,
+ AC_PWRST_D3);
++ mutex_unlock(&rt711->calibrate_mutex);
+ break;
+
+ default:
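
rt711_set_amp_gain_put() performs an unlocked read-modify-write of the codec registers and can race with rt711_set_bias_level() writing the power state; taking calibrate_mutex in both paths serializes them. The underlying hazard, sketched with pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int reg;                 /* stands in for a codec register */

/* Read-modify-write, as in the amp gain put handler. */
static void *update_gain(void *arg)
{
        for (int i = 0; i < 100000; i++) {
                pthread_mutex_lock(&lock);
                int v = reg;    /* read */
                reg = v + 1;    /* write back: safe only under the lock */
                pthread_mutex_unlock(&lock);
        }
        return NULL;
}

/* Concurrent register write, as in set_bias_level(). */
static void *set_power(void *arg)
{
        for (int i = 0; i < 100000; i++) {
                pthread_mutex_lock(&lock);
                reg += 1;
                pthread_mutex_unlock(&lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, update_gain, NULL);
        pthread_create(&b, NULL, set_power, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* With the lock the count is exact; without it, it drifts. */
        printf("reg = %d (expected 200000)\n", reg);
        return 0;
}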
+diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
+index c55d1239e705b..c763bfeb1f38f 100644
+--- a/sound/soc/intel/boards/haswell.c
++++ b/sound/soc/intel/boards/haswell.c
+@@ -189,6 +189,7 @@ static struct platform_driver haswell_audio = {
+ .probe = haswell_audio_probe,
+ .driver = {
+ .name = "haswell-audio",
++ .pm = &snd_soc_pm_ops,
+ },
+ };
+
+diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
+index 6875fa570c2c5..8b0ddc4b8227b 100644
+--- a/sound/soc/sof/intel/hda-codec.c
++++ b/sound/soc/sof/intel/hda-codec.c
+@@ -156,7 +156,8 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
+ if (!hdev->bus->audio_component) {
+ dev_dbg(sdev->dev,
+ "iDisp hw present but no driver\n");
+- goto error;
++ ret = -ENOENT;
++ goto out;
+ }
+ hda_priv->need_display_power = true;
+ }
+@@ -173,24 +174,23 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
+ * other return codes without modification
+ */
+ if (ret == 0)
+- goto error;
++ ret = -ENOENT;
+ }
+
+- return ret;
+-
+-error:
+- snd_hdac_ext_bus_device_exit(hdev);
+- return -ENOENT;
+-
++out:
++ if (ret < 0) {
++ snd_hdac_device_unregister(hdev);
++ put_device(&hdev->dev);
++ }
+ #else
+ hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev, HDA_DEV_ASOC);
++#endif
+
+ return ret;
+-#endif
+ }
+
+ /* Codec initialization */
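
The restructured error path is the real fix here: snd_hdac_ext_bus_device_exit() frees the object outright, but once the device has been registered it must be torn down with snd_hdac_device_unregister() and released through put_device(), otherwise the memory is freed twice. The single-exit, refcounted cleanup pattern, as a toy:

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
};

static struct obj *obj_create(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
                o->refcount = 1;
        return o;
}

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0) {
                printf("final put, freeing object\n");
                free(o);
        }
}

/* Mirrors the reworked probe: on failure the object is released
 * through its refcount rather than freed directly, which is what
 * prevents the double free. */
static struct obj *probe(int fail)
{
        struct obj *o = obj_create();

        if (!o)
                return NULL;
        if (fail) {             /* e.g. no driver bound: -ENOENT */
                obj_put(o);     /* drop the registration reference */
                return NULL;
        }
        printf("probe succeeded\n");
        return o;               /* caller owns the reference */
}

int main(void)
{
        struct obj *o = probe(0);

        if (o)
                obj_put(o);
        probe(1);
        return 0;
}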
+diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
+index 18ff1c2f5376e..2dbc1273e56bd 100644
+--- a/sound/soc/sof/intel/hda-dsp.c
++++ b/sound/soc/sof/intel/hda-dsp.c
+@@ -683,8 +683,10 @@ static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
+
+ #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* check jack status */
+- if (runtime_resume)
+- hda_codec_jack_check(sdev);
++ if (runtime_resume) {
++ if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
++ hda_codec_jack_check(sdev);
++ }
+
+ /* turn off the links that were off before suspend */
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
+index 90c3155f05b1e..84ae1039b0a87 100644
+--- a/tools/gpio/gpio-event-mon.c
++++ b/tools/gpio/gpio-event-mon.c
+@@ -107,8 +107,8 @@ int monitor_device(const char *device_name,
+ ret = -EIO;
+ break;
+ }
+- fprintf(stdout, "GPIO EVENT at %llu on line %d (%d|%d) ",
+- event.timestamp_ns, event.offset, event.line_seqno,
++ fprintf(stdout, "GPIO EVENT at %" PRIu64 " on line %d (%d|%d) ",
++ (uint64_t)event.timestamp_ns, event.offset, event.line_seqno,
+ event.seqno);
+ switch (event.id) {
+ case GPIO_V2_LINE_EVENT_RISING_EDGE:
+diff --git a/tools/gpio/gpio-watch.c b/tools/gpio/gpio-watch.c
+index f229ec62301b7..41e76d2441922 100644
+--- a/tools/gpio/gpio-watch.c
++++ b/tools/gpio/gpio-watch.c
+@@ -10,6 +10,7 @@
+ #include <ctype.h>
+ #include <errno.h>
+ #include <fcntl.h>
++#include <inttypes.h>
+ #include <linux/gpio.h>
+ #include <poll.h>
+ #include <stdbool.h>
+@@ -86,8 +87,8 @@ int main(int argc, char **argv)
+ return EXIT_FAILURE;
+ }
+
+- printf("line %u: %s at %llu\n",
+- chg.info.offset, event, chg.timestamp_ns);
++ printf("line %u: %s at %" PRIu64 "\n",
++ chg.info.offset, event, (uint64_t)chg.timestamp_ns);
+ }
+ }
+
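
Both tools print a __u64 timestamp, and the C library types uint64_t as unsigned long on some 64-bit ABIs and unsigned long long elsewhere, so a hard-coded %llu triggers -Wformat on the former. Casting to uint64_t and using PRIu64 from <inttypes.h> is portable across all of them. A standalone sketch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Stand-in for event.timestamp_ns from the GPIO UAPI structs. */
        uint64_t timestamp_ns = 1611742139000000000ULL;

        /* Portable: PRIu64 expands to the right conversion for uint64_t
         * ("lu" or "llu" depending on the ABI). */
        printf("GPIO EVENT at %" PRIu64 "\n", timestamp_ns);
        return 0;
}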
+diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
+index cfcdbd7be066e..17465d454a0e3 100644
+--- a/tools/lib/perf/evlist.c
++++ b/tools/lib/perf/evlist.c
+@@ -367,21 +367,13 @@ static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, boo
+ return map;
+ }
+
+-static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
+- struct perf_evsel *evsel, int idx, int cpu,
+- int thread)
++static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
+ {
+ struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+ sid->idx = idx;
+- if (evlist->cpus && cpu >= 0)
+- sid->cpu = evlist->cpus->map[cpu];
+- else
+- sid->cpu = -1;
+- if (!evsel->system_wide && evlist->threads && thread >= 0)
+- sid->tid = perf_thread_map__pid(evlist->threads, thread);
+- else
+- sid->tid = -1;
++ sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
++ sid->tid = perf_thread_map__pid(evsel->threads, thread);
+ }
+
+ static struct perf_mmap*
+@@ -500,8 +492,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+ if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
+ fd) < 0)
+ return -1;
+- perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
+- thread);
++ perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
+ }
+ }
+
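
perf_evsel__set_sid_idx() now derives the sample-ID cpu and tid from the evsel's own maps, and the open-coded -1 fallbacks go away because the accessors already return -1 for an absent map or out-of-range index. A toy model of that accessor contract (assumed from the libperf behaviour of the time, not the real API):

#include <stdio.h>

/* Models perf_cpu_map__cpu(): return the map entry for idx, or -1
 * when the map is absent or idx is out of range, replacing the
 * open-coded fallbacks the patch removes. */
static int map_get(const int *map, int nr, int idx)
{
        if (!map || idx < 0 || idx >= nr)
                return -1;
        return map[idx];
}

int main(void)
{
        int evsel_cpus[] = { 2, 3 };    /* per-event cpu map */

        printf("idx 1 -> cpu %d\n", map_get(evsel_cpus, 2, 1));
        printf("idx 5 -> cpu %d\n", map_get(evsel_cpus, 2, 5));
        printf("no map -> cpu %d\n", map_get(NULL, 0, 0));
        return 0;
}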
+diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c
+index c8d45091e7c26..c70e9e03af3e9 100644
+--- a/tools/lib/perf/tests/test-cpumap.c
++++ b/tools/lib/perf/tests/test-cpumap.c
+@@ -27,5 +27,5 @@ int main(int argc, char **argv)
+ perf_cpu_map__put(cpus);
+
+ __T_END;
+- return 0;
++ return tests_failed == 0 ? 0 : -1;
+ }
+diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
+index 6d8ebe0c25042..bd19cabddaf62 100644
+--- a/tools/lib/perf/tests/test-evlist.c
++++ b/tools/lib/perf/tests/test-evlist.c
+@@ -215,6 +215,7 @@ static int test_mmap_thread(void)
+ sysfs__mountpoint());
+
+ if (filename__read_int(path, &id)) {
++ tests_failed++;
+ fprintf(stderr, "error: failed to get tracepoint id: %s\n", path);
+ return -1;
+ }
+@@ -409,5 +410,5 @@ int main(int argc, char **argv)
+ test_mmap_cpus();
+
+ __T_END;
+- return 0;
++ return tests_failed == 0 ? 0 : -1;
+ }
+diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
+index 135722ac965bf..0ad82d7a2a51b 100644
+--- a/tools/lib/perf/tests/test-evsel.c
++++ b/tools/lib/perf/tests/test-evsel.c
+@@ -131,5 +131,5 @@ int main(int argc, char **argv)
+ test_stat_thread_enable();
+
+ __T_END;
+- return 0;
++ return tests_failed == 0 ? 0 : -1;
+ }
+diff --git a/tools/lib/perf/tests/test-threadmap.c b/tools/lib/perf/tests/test-threadmap.c
+index 7dc4d6fbeddee..384471441b484 100644
+--- a/tools/lib/perf/tests/test-threadmap.c
++++ b/tools/lib/perf/tests/test-threadmap.c
+@@ -27,5 +27,5 @@ int main(int argc, char **argv)
+ perf_thread_map__put(threads);
+
+ __T_END;
+- return 0;
++ return tests_failed == 0 ? 0 : -1;
+ }
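
Returning tests_failed == 0 ? 0 : -1 makes the test binaries report failure through their exit status; previously main returned 0 unconditionally, so a CI harness saw every run as a pass. A minimal model of the failure-counter pattern (the CHECK macro is a stand-in for libperf's test macros):

#include <stdio.h>

static int tests_failed;

/* Models the test check macro: count a failure instead of aborting. */
#define CHECK(cond) do {                                \
        if (!(cond)) {                                  \
                fprintf(stderr, "FAIL: %s\n", #cond);   \
                tests_failed++;                         \
        }                                               \
} while (0)

int main(void)
{
        CHECK(1 + 1 == 2);      /* passes */
        CHECK(2 + 2 == 5);      /* fails, counted */

        /* Exit status now reflects the result ($? is 255 for -1). */
        return tests_failed == 0 ? 0 : -1;
}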
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index 84205c3a55ebe..2b5707738609e 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -1055,7 +1055,6 @@ ipv6_addr_metric_test()
+
+ check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
+ log_test $? 0 "Set metric with peer route on local side"
+- log_test $? 0 "User specified metric on local address"
+ check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
+ log_test $? 0 "Set metric with peer route on peer side"
+
+diff --git a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
+index 9e5c7f3f498a7..0af4f02669a11 100644
+--- a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
++++ b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
+@@ -290,5 +290,5 @@ static int test(void)
+
+ int main(void)
+ {
+- test_harness(test, "pkey_exec_prot");
++ return test_harness(test, "pkey_exec_prot");
+ }
+diff --git a/tools/testing/selftests/powerpc/mm/pkey_siginfo.c b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
+index 4f815d7c12145..2db76e56d4cb9 100644
+--- a/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
++++ b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
+@@ -329,5 +329,5 @@ static int test(void)
+
+ int main(void)
+ {
+- test_harness(test, "pkey_siginfo");
++ return test_harness(test, "pkey_siginfo");
+ }
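
The powerpc selftest fix is the same idea from the other direction: main fell off the end, which since C99 yields exit status 0, so the result computed by test_harness() was discarded and a failing test still looked green. A tiny demonstration (test_harness here is a stand-in for the selftest helper):

#include <stdio.h>

/* Stand-in for test_harness(): returns nonzero on failure. */
static int test_harness(int (*fn)(void), const char *name)
{
        int rc = fn();

        printf("%s: %s\n", name, rc ? "FAILED" : "ok");
        return rc;
}

static int test(void)
{
        return 1;       /* pretend the pkey test failed */
}

int main(void)
{
        /* Buggy form: test_harness(test, "pkey");  -> exit status 0.
         * Fixed form: propagate the harness result to $?. */
        return test_harness(test, "pkey");
}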