author     Alice Ferrazzi <alicef@gentoo.org>  2020-08-21 22:25:13 +0900
committer  Alice Ferrazzi <alicef@gentoo.org>  2020-08-21 22:25:20 +0900
commit     8599420bcb7e835fb32a4d5ba5e524adbb2c0e59 (patch)
tree       82da86602bb342d78db409a653634ce0521e7fef
parent     Linux patch 5.4.59 (diff)
download   linux-patches-5.4-61.tar.gz
           linux-patches-5.4-61.tar.bz2
           linux-patches-5.4-61.zip
Linux patch 5.4.60 (tag: 5.4-61)
Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>
 0000_README             |    4 ++++
 1059_linux-5.4.60.patch | 4875 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4879 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 78d02905..5a3df130 100644
--- a/0000_README
+++ b/0000_README
@@ -279,6 +279,10 @@ Patch: 1058_linux-5.4.59.patch
From: http://www.kernel.org
Desc: Linux 5.4.59
+Patch: 1059_linux-5.4.60.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.60
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1059_linux-5.4.60.patch b/1059_linux-5.4.60.patch
new file mode 100644
index 00000000..280c19f6
--- /dev/null
+++ b/1059_linux-5.4.60.patch
@@ -0,0 +1,4875 @@
+diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
+index c82794002595f..89647d7143879 100644
+--- a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
++++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
+@@ -21,7 +21,7 @@ controller state. The mux controller state is described in
+
+ Example:
+ mux: mux-controller {
+- compatible = "mux-gpio";
++ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+diff --git a/Makefile b/Makefile
+index cc72b8472f24a..7c001e21e28e7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 59
++SUBLEVEL = 60
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+index fbcf03f86c967..05dc58c13fa41 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+@@ -19,6 +19,12 @@
+ model = "Globalscale Marvell ESPRESSOBin Board";
+ compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710";
+
++ aliases {
++ ethernet0 = &eth0;
++ serial0 = &uart0;
++ serial1 = &uart1;
++ };
++
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index a0b4f1bca4917..19128d994ee97 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -155,7 +155,7 @@ armv8pmu_events_sysfs_show(struct device *dev,
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+- return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
++ return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+ }
+
+ #define ARMV8_EVENT_ATTR(name, config) \
+@@ -303,10 +303,13 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj,
+ test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
+ return attr->mode;
+
+- pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
+- if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
+- test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
+- return attr->mode;
++ if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
++ u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
++
++ if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
++ test_bit(id, cpu_pmu->pmceid_ext_bitmap))
++ return attr->mode;
++ }
+
+ return 0;
+ }
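The fix above stops the unconditional pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE, which both corrupted the shared attribute for later readers (such as the sysfs show callback above it) and let low event IDs wrap around. A self-contained sketch of the same pattern, with illustrative constants rather than the kernel's values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXT_EVENT_BASE    0x4000  /* illustrative, not the kernel constant */
#define MAX_COMMON_EVENTS 0x40

struct event_attr { uint64_t id; };

static bool bitmap_test(const uint64_t *map, uint64_t bit)
{
	return map[bit / 64] & (1ULL << (bit % 64));
}

/* Buggy shape: unconditionally rebases the shared attribute, permanently
 * corrupting attr->id for every later reader. */
static bool visible_buggy(struct event_attr *attr, const uint64_t *ext_map)
{
	attr->id -= EXT_EVENT_BASE;	/* wraps for ids below the base */
	return attr->id < MAX_COMMON_EVENTS && bitmap_test(ext_map, attr->id);
}

/* Fixed shape: rebase into a local, and only for ids in the extended range. */
static bool visible_fixed(const struct event_attr *attr,
			  const uint64_t *ext_map)
{
	if (attr->id >= EXT_EVENT_BASE) {
		uint64_t id = attr->id - EXT_EVENT_BASE;

		return id < MAX_COMMON_EVENTS && bitmap_test(ext_map, id);
	}
	return false;
}

int main(void)
{
	uint64_t ext_map[1] = { 1ULL << 3 };	/* extended event 3 exists */
	struct event_attr attr = { .id = 3 };	/* a low, non-extended id  */

	visible_buggy(&attr, ext_map);
	printf("id after buggy call: %#llx\n",
	       (unsigned long long)attr.id);	/* wrapped, no longer 3 */

	attr.id = 3;
	printf("fixed: %d\n", visible_fixed(&attr, ext_map));	/* 0 */
	return 0;
}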
+diff --git a/arch/mips/boot/dts/ingenic/qi_lb60.dts b/arch/mips/boot/dts/ingenic/qi_lb60.dts
+index 7a371d9c5a33f..eda37fb516f0e 100644
+--- a/arch/mips/boot/dts/ingenic/qi_lb60.dts
++++ b/arch/mips/boot/dts/ingenic/qi_lb60.dts
+@@ -69,7 +69,7 @@
+ "Speaker", "OUTL",
+ "Speaker", "OUTR",
+ "INL", "LOUT",
+- "INL", "ROUT";
++ "INR", "ROUT";
+
+ simple-audio-card,aux-devs = <&amp>;
+
+diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c
+index cd3e1f82e1a5d..08ad6371fbe08 100644
+--- a/arch/mips/kernel/topology.c
++++ b/arch/mips/kernel/topology.c
+@@ -20,7 +20,7 @@ static int __init topology_init(void)
+ for_each_present_cpu(i) {
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+- c->hotpluggable = 1;
++ c->hotpluggable = !!i;
+ ret = register_cpu(c, i);
+ if (ret)
+ printk(KERN_WARNING "topology_init: register_cpu %d "
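The one-character change c->hotpluggable = !!i keeps the boot CPU (index 0) non-hotpluggable while every secondary CPU stays hotpluggable; !! normalizes any integer to 0 or 1. A trivial demonstration of the idiom:

#include <stdio.h>

int main(void)
{
	/* !!x collapses any non-zero value to 1 and leaves 0 as 0. */
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d hotpluggable=%d\n", cpu, !!cpu);
	return 0;
}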
+diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c
+index 43f140a28bc72..54d38809e22cb 100644
+--- a/arch/openrisc/kernel/stacktrace.c
++++ b/arch/openrisc/kernel/stacktrace.c
+@@ -13,6 +13,7 @@
+ #include <linux/export.h>
+ #include <linux/sched.h>
+ #include <linux/sched/debug.h>
++#include <linux/sched/task_stack.h>
+ #include <linux/stacktrace.h>
+
+ #include <asm/processor.h>
+@@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+ {
+ unsigned long *sp = NULL;
+
++ if (!try_get_task_stack(tsk))
++ return;
++
+ if (tsk == current)
+ sp = (unsigned long *) &sp;
+- else
+- sp = (unsigned long *) KSTK_ESP(tsk);
++ else {
++ unsigned long ksp;
++
++ /* Locate stack from kernel context */
++ ksp = task_thread_info(tsk)->ksp;
++ ksp += STACK_FRAME_OVERHEAD; /* redzone */
++ ksp += sizeof(struct pt_regs);
++
++ sp = (unsigned long *) ksp;
++ }
+
+ unwind_stack(trace, sp, save_stack_address_nosched);
++
++ put_task_stack(tsk);
+ }
+ EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
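The hunk pins the target task's stack across the walk so it cannot be freed mid-unwind. A minimal sketch of the acquire/walk/release shape, using toy stand-ins for try_get_task_stack()/put_task_stack() (the real helpers refcount the task's stack):

#include <stdbool.h>
#include <stdio.h>

struct task { int stack_refs; bool stack_alive; };

static bool try_get_stack(struct task *t)
{
	if (!t->stack_alive)
		return false;	/* stack already gone */
	t->stack_refs++;
	return true;
}

static void put_stack(struct task *t)
{
	t->stack_refs--;
}

static void walk_stack(struct task *t)
{
	if (!try_get_stack(t))
		return;		/* task already exited; nothing to walk */

	/* ... unwind frames here; the stack is pinned ... */
	printf("walking with %d ref(s)\n", t->stack_refs);

	put_stack(t);
}

int main(void)
{
	struct task t = { .stack_refs = 0, .stack_alive = true };

	walk_stack(&t);
	return 0;
}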
+diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
+index dce863a7635cd..8e5b7d0b851c6 100644
+--- a/arch/powerpc/include/asm/percpu.h
++++ b/arch/powerpc/include/asm/percpu.h
+@@ -10,8 +10,6 @@
+
+ #ifdef CONFIG_SMP
+
+-#include <asm/paca.h>
+-
+ #define __my_cpu_offset local_paca->data_offset
+
+ #endif /* CONFIG_SMP */
+@@ -19,4 +17,6 @@
+
+ #include <asm-generic/percpu.h>
+
++#include <asm/paca.h>
++
+ #endif /* _ASM_POWERPC_PERCPU_H_ */
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 881a026a603a6..187047592d53c 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -241,6 +241,9 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
+ return false;
+ }
+
++// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE
++#define SIGFRAME_MAX_SIZE (4096 + 128)
++
+ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
+ struct vm_area_struct *vma, unsigned int flags,
+ bool *must_retry)
+@@ -248,7 +251,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
+ /*
+ * N.B. The POWER/Open ABI allows programs to access up to
+ * 288 bytes below the stack pointer.
+- * The kernel signal delivery code writes up to about 1.5kB
++ * The kernel signal delivery code writes a bit over 4KB
+ * below the stack pointer (r1) before decrementing it.
+ * The exec code can write slightly over 640kB to the stack
+ * before setting the user r1. Thus we allow the stack to
+@@ -273,7 +276,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
+ * between the last mapped region and the stack will
+ * expand the stack rather than segfaulting.
+ */
+- if (address + 2048 >= uregs->gpr[1])
++ if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1])
+ return false;
+
+ if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
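The old hard-coded 2048-byte window is smaller than a 64-bit signal frame (struct rt_sigframe plus __SIGNAL_FRAMESIZE), so legitimate signal delivery just below r1 could be misread as a bad stack expansion; the new limit is 4096 + 128 = 4224 bytes. A compact, illustrative restatement of the check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SIGFRAME_MAX_SIZE (4096 + 128)	/* rt_sigframe + __SIGNAL_FRAMESIZE */

/* true = the faulting address lies within the window below the user stack
 * pointer (r1) that legitimate signal delivery may touch, so the access is
 * not treated as a bad stack expansion. */
static bool within_signal_window(uint64_t addr, uint64_t r1)
{
	return addr + SIGFRAME_MAX_SIZE >= r1;
}

int main(void)
{
	uint64_t r1 = 0x7fff00004000ULL;

	/* 3 KiB below r1: rejected by the old 2048-byte limit, allowed now. */
	printf("%d\n", within_signal_window(r1 - 3 * 1024, r1));
	return 0;
}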
+diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
+index a07278027c6f4..a2e8c3b2cf351 100644
+--- a/arch/powerpc/mm/ptdump/hashpagetable.c
++++ b/arch/powerpc/mm/ptdump/hashpagetable.c
+@@ -259,7 +259,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *
+ for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
+ lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
+
+- if (lpar_rc != H_SUCCESS)
++ if (lpar_rc)
+ continue;
+ for (j = 0; j < 4; j++) {
+ if (HPTE_V_COMPARE(ptes[j].v, want_v) &&
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index f1888352b4e0b..e7d23a933a0d3 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -27,7 +27,7 @@ static bool rtas_hp_event;
+ unsigned long pseries_memory_block_size(void)
+ {
+ struct device_node *np;
+- unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
++ u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
+ struct resource r;
+
+ np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c
+index 16b4d8b0bb850..2c44b94f82fb2 100644
+--- a/arch/sh/boards/mach-landisk/setup.c
++++ b/arch/sh/boards/mach-landisk/setup.c
+@@ -82,6 +82,9 @@ device_initcall(landisk_devices_setup);
+
+ static void __init landisk_setup(char **cmdline_p)
+ {
++ /* I/O port identity mapping */
++ __set_io_port_base(0);
++
+ /* LED ON */
+ __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED);
+
+diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
+index 3c222d6fdee3b..187c72a58e69c 100644
+--- a/arch/x86/events/rapl.c
++++ b/arch/x86/events/rapl.c
+@@ -642,7 +642,7 @@ static const struct attribute_group *rapl_attr_update[] = {
+ &rapl_events_pkg_group,
+ &rapl_events_ram_group,
+ &rapl_events_gpu_group,
+- &rapl_events_gpu_group,
++ &rapl_events_psys_group,
+ NULL,
+ };
+
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index df4d5385e6ddd..c8203694d9ce4 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -554,6 +554,10 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
+ irqd->chip_data = apicd;
+ irqd->hwirq = virq + i;
+ irqd_set_single_target(irqd);
++
++ /* Don't invoke affinity setter on deactivated interrupts */
++ irqd_set_affinity_on_activate(irqd);
++
+ /*
+ * Legacy vectors are already assigned when the IOAPIC
+ * takes them over. They stay on the same vector. This is
+diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
+index c65adaf813848..41200706e6da1 100644
+--- a/arch/x86/kernel/tsc_msr.c
++++ b/arch/x86/kernel/tsc_msr.c
+@@ -133,10 +133,15 @@ static const struct freq_desc freq_desc_ann = {
+ .mask = 0x0f,
+ };
+
+-/* 24 MHz crystal? : 24 * 13 / 4 = 78 MHz */
++/*
++ * 24 MHz crystal? : 24 * 13 / 4 = 78 MHz
++ * Frequency step for Lightning Mountain SoC is fixed to 78 MHz,
++ * so all the frequency entries are 78000.
++ */
+ static const struct freq_desc freq_desc_lgm = {
+ .use_msr_plat = true,
+- .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
++ .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000,
++ 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
+ .mask = 0x0f,
+ };
+
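Because .mask = 0x0f lets the hardware report any index from 0 to 15, the previous 8-entry .freqs table could be indexed out of bounds; padding it to 16 entries covers the whole mask range. An illustrative lookup showing why the table length must match the mask:

#include <stdio.h>

#define FREQ_MASK 0x0f	/* 4-bit field: index can be 0..15 */

static const unsigned int freqs[16] = {
	78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000,
	78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000,
};

int main(void)
{
	unsigned int msr = 0x0b;	/* pretend value read from the MSR */

	/* With only 8 entries, freqs[msr & FREQ_MASK] reads past the array
	 * for indexes 8..15; 16 entries make every maskable index safe. */
	printf("%u kHz\n", freqs[msr & FREQ_MASK]);
	return 0;
}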
+diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
+index f092cc3f4e66d..956d4d47c6cd1 100644
+--- a/arch/xtensa/include/asm/thread_info.h
++++ b/arch/xtensa/include/asm/thread_info.h
+@@ -55,6 +55,10 @@ struct thread_info {
+ mm_segment_t addr_limit; /* thread address space */
+
+ unsigned long cpenable;
++#if XCHAL_HAVE_EXCLUSIVE
++ /* result of the most recent exclusive store */
++ unsigned long atomctl8;
++#endif
+
+ /* Allocate storage for extra user states and coprocessor states. */
+ #if XTENSA_HAVE_COPROCESSORS
+diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
+index 33a257b33723a..dc5c83cad9be8 100644
+--- a/arch/xtensa/kernel/asm-offsets.c
++++ b/arch/xtensa/kernel/asm-offsets.c
+@@ -93,6 +93,9 @@ int main(void)
+ DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
+ DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
+ DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
++#if XCHAL_HAVE_EXCLUSIVE
++ DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
++#endif
+ #if XTENSA_HAVE_COPROCESSORS
+ DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
+ DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
+diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
+index 9e3676879168a..59671603c9c62 100644
+--- a/arch/xtensa/kernel/entry.S
++++ b/arch/xtensa/kernel/entry.S
+@@ -374,6 +374,11 @@ common_exception:
+ s32i a2, a1, PT_LCOUNT
+ #endif
+
++#if XCHAL_HAVE_EXCLUSIVE
++ /* Clear exclusive access monitor set by interrupted code */
++ clrex
++#endif
++
+	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
+
+ rsr a2, exccause
+@@ -2024,6 +2029,12 @@ ENTRY(_switch_to)
+ s32i a3, a4, THREAD_CPENABLE
+ #endif
+
++#if XCHAL_HAVE_EXCLUSIVE
++ l32i a3, a5, THREAD_ATOMCTL8
++ getex a3
++ s32i a3, a4, THREAD_ATOMCTL8
++#endif
++
+ /* Flush register file. */
+
+ spill_registers_kernel
+diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
+index 9bae79f703013..86c9ba9631551 100644
+--- a/arch/xtensa/kernel/perf_event.c
++++ b/arch/xtensa/kernel/perf_event.c
+@@ -401,7 +401,7 @@ static struct pmu xtensa_pmu = {
+ .read = xtensa_pmu_read,
+ };
+
+-static int xtensa_pmu_setup(int cpu)
++static int xtensa_pmu_setup(unsigned int cpu)
+ {
+ unsigned i;
+
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index a3b9df99af6de..35e026ba2c7ed 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+
+ if (!ctx->used)
+ ctx->merge = 0;
++ ctx->init = ctx->more;
+ }
+ EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
+
+@@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
+ *
+ * @sk socket of connection to user space
+ * @flags If MSG_DONTWAIT is set, then only report if function would sleep
++ * @min Set to minimum request size if partial requests are allowed.
+ * @return 0 when writable memory is available, < 0 upon error
+ */
+-int af_alg_wait_for_data(struct sock *sk, unsigned flags)
++int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
+ {
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct alg_sock *ask = alg_sk(sk);
+@@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags)
+ if (signal_pending(current))
+ break;
+ timeout = MAX_SCHEDULE_TIMEOUT;
+- if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
++ if (sk_wait_event(sk, &timeout,
++ ctx->init && (!ctx->more ||
++ (min && ctx->used >= min)),
+ &wait)) {
+ err = 0;
+ break;
+@@ -843,10 +847,11 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ }
+
+ lock_sock(sk);
+- if (!ctx->more && ctx->used) {
++ if (ctx->init && (init || !ctx->more)) {
+ err = -EINVAL;
+ goto unlock;
+ }
++ ctx->init = true;
+
+ if (init) {
+ ctx->enc = enc;
+diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
+index 0ae000a61c7f5..43c6aa784858b 100644
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t usedpages = 0; /* [in] RX bufs to be used from user */
+ size_t processed = 0; /* [in] TX bufs to be consumed */
+
+- if (!ctx->used) {
+- err = af_alg_wait_for_data(sk, flags);
++ if (!ctx->init || ctx->more) {
++ err = af_alg_wait_for_data(sk, flags, 0);
+ if (err)
+ return err;
+ }
+@@ -558,12 +558,6 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
+
+ INIT_LIST_HEAD(&ctx->tsgl_list);
+ ctx->len = len;
+- ctx->used = 0;
+- atomic_set(&ctx->rcvused, 0);
+- ctx->more = 0;
+- ctx->merge = 0;
+- ctx->enc = 0;
+- ctx->aead_assoclen = 0;
+ crypto_init_wait(&ctx->wait);
+
+ ask->private = ctx;
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index ec5567c87a6df..81c4022285a7c 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ int err = 0;
+ size_t len = 0;
+
+- if (!ctx->used) {
+- err = af_alg_wait_for_data(sk, flags);
++ if (!ctx->init || (ctx->more && ctx->used < bs)) {
++ err = af_alg_wait_for_data(sk, flags, bs);
+ if (err)
+ return err;
+ }
+@@ -333,6 +333,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
++ memset(ctx, 0, len);
+
+ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
+ GFP_KERNEL);
+@@ -340,16 +341,10 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
+ sock_kfree_s(sk, ctx, len);
+ return -ENOMEM;
+ }
+-
+ memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
+
+ INIT_LIST_HEAD(&ctx->tsgl_list);
+ ctx->len = len;
+- ctx->used = 0;
+- atomic_set(&ctx->rcvused, 0);
+- ctx->more = 0;
+- ctx->merge = 0;
+- ctx->enc = 0;
+ crypto_init_wait(&ctx->wait);
+
+ ask->private = ctx;
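For orientation, the code paths these crypto hunks fix are driven from a user-space socket API. Below is a minimal, self-contained AF_ALG round-trip (Linux-only, error handling omitted; a sketch, not part of the patch, with placeholder all-zero key and IV). The wait that the reworked af_alg_wait_for_data() governs happens on the read(): a reader now blocks until a sendmsg() has initialized the request context (ctx->init) rather than merely until some data is queued.

#include <linux/if_alg.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	unsigned char key[16] = { 0 }, iv[16] = { 0 };
	unsigned char pt[16] = "one cipher block", ct[16];
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + sizeof(iv))] = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		.msg_iov = &iov, .msg_iovlen = 1,
	};
	struct cmsghdr *cmsg;
	struct af_alg_iv *alg_iv;
	int tfm, op;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	op = accept(tfm, NULL, 0);	/* one request socket per operation */

	/* The first message carries the op type and IV as ancillary data. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + sizeof(iv));
	alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	sendmsg(op, &msg, 0);
	read(op, ct, sizeof(ct));	/* blocks per the fixed wait logic */
	printf("first ct byte: %02x\n", ct[0]);

	close(op);
	close(tfm);
	return 0;
}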
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index b25bcab2a26bd..1d5dd37f3abe4 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -872,7 +872,9 @@ static int __device_attach(struct device *dev, bool allow_async)
+ int ret = 0;
+
+ device_lock(dev);
+- if (dev->driver) {
++ if (dev->p->dead) {
++ goto out_unlock;
++ } else if (dev->driver) {
+ if (device_is_bound(dev)) {
+ ret = 1;
+ goto out_unlock;
+diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c
+index e2007ac4d235d..0eb83a0b70bcc 100644
+--- a/drivers/clk/actions/owl-s500.c
++++ b/drivers/clk/actions/owl-s500.c
+@@ -183,7 +183,7 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
+ static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
+
+ /* divider clocks */
+-static OWL_DIVIDER(h_clk, "h_clk", "ahbprevdiv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
++static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+ static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
+
+ /* factor clocks */
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 6e5d635f030f4..45420b514149f 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -314,6 +314,7 @@ struct bcm2835_cprman {
+ struct device *dev;
+ void __iomem *regs;
+ spinlock_t regs_lock; /* spinlock for all clocks */
++ unsigned int soc;
+
+ /*
+ * Real names of cprman clock parents looked up through
+@@ -525,6 +526,20 @@ static int bcm2835_pll_is_on(struct clk_hw *hw)
+ A2W_PLL_CTRL_PRST_DISABLE;
+ }
+
++static u32 bcm2835_pll_get_prediv_mask(struct bcm2835_cprman *cprman,
++ const struct bcm2835_pll_data *data)
++{
++ /*
++ * On BCM2711 there isn't a pre-divisor available in the PLL feedback
++ * loop. Bits 13:14 of ANA1 (PLLA,PLLB,PLLC,PLLD) have been re-purposed
++	 * as VCO RANGE bits.
++ */
++ if (cprman->soc & SOC_BCM2711)
++ return 0;
++
++ return data->ana->fb_prediv_mask;
++}
++
+ static void bcm2835_pll_choose_ndiv_and_fdiv(unsigned long rate,
+ unsigned long parent_rate,
+ u32 *ndiv, u32 *fdiv)
+@@ -582,7 +597,7 @@ static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
+ ndiv = (a2wctrl & A2W_PLL_CTRL_NDIV_MASK) >> A2W_PLL_CTRL_NDIV_SHIFT;
+ pdiv = (a2wctrl & A2W_PLL_CTRL_PDIV_MASK) >> A2W_PLL_CTRL_PDIV_SHIFT;
+ using_prediv = cprman_read(cprman, data->ana_reg_base + 4) &
+- data->ana->fb_prediv_mask;
++ bcm2835_pll_get_prediv_mask(cprman, data);
+
+ if (using_prediv) {
+ ndiv *= 2;
+@@ -665,6 +680,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
++ u32 prediv_mask = bcm2835_pll_get_prediv_mask(cprman, data);
+ bool was_using_prediv, use_fb_prediv, do_ana_setup_first;
+ u32 ndiv, fdiv, a2w_ctl;
+ u32 ana[4];
+@@ -682,7 +698,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
+ for (i = 3; i >= 0; i--)
+ ana[i] = cprman_read(cprman, data->ana_reg_base + i * 4);
+
+- was_using_prediv = ana[1] & data->ana->fb_prediv_mask;
++ was_using_prediv = ana[1] & prediv_mask;
+
+ ana[0] &= ~data->ana->mask0;
+ ana[0] |= data->ana->set0;
+@@ -692,10 +708,10 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
+ ana[3] |= data->ana->set3;
+
+ if (was_using_prediv && !use_fb_prediv) {
+- ana[1] &= ~data->ana->fb_prediv_mask;
++ ana[1] &= ~prediv_mask;
+ do_ana_setup_first = true;
+ } else if (!was_using_prediv && use_fb_prediv) {
+- ana[1] |= data->ana->fb_prediv_mask;
++ ana[1] |= prediv_mask;
+ do_ana_setup_first = false;
+ } else {
+ do_ana_setup_first = true;
+@@ -2234,6 +2250,7 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, cprman);
+
+ cprman->onecell.num = asize;
++ cprman->soc = pdata->soc;
+ hws = cprman->onecell.hws;
+
+ for (i = 0; i < asize; i++) {
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index 055318f979915..a69f53e435ed5 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -55,7 +55,6 @@
+ #define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
+ #define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
+ #define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
+-#define PLL_CAL_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_CAL_VAL])
+
+ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+@@ -114,7 +113,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+ [PLL_OFF_STATUS] = 0x30,
+ [PLL_OFF_OPMODE] = 0x38,
+ [PLL_OFF_ALPHA_VAL] = 0x40,
+- [PLL_OFF_CAL_VAL] = 0x44,
+ },
+ };
+ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
+diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
+index bf5730832ef3d..c6fb57cd576f5 100644
+--- a/drivers/clk/qcom/gcc-sdm660.c
++++ b/drivers/clk/qcom/gcc-sdm660.c
+@@ -1715,6 +1715,9 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
+
+ static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ .halt_reg = 0x8a004,
++ .halt_check = BRANCH_HALT,
++ .hwcg_reg = 0x8a004,
++ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
+index fad42897a7a7f..ee908fbfeab17 100644
+--- a/drivers/clk/qcom/gcc-sm8150.c
++++ b/drivers/clk/qcom/gcc-sm8150.c
+@@ -1616,6 +1616,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ };
+
+ static struct clk_branch gcc_gpu_gpll0_clk_src = {
++ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+@@ -1631,13 +1632,14 @@ static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ };
+
+ static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
++ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+- &gcc_gpu_gpll0_clk_src.clkr.hw },
++ &gpll0_out_even.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+@@ -1728,6 +1730,7 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = {
+ };
+
+ static struct clk_branch gcc_npu_gpll0_clk_src = {
++ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+@@ -1743,13 +1746,14 @@ static struct clk_branch gcc_npu_gpll0_clk_src = {
+ };
+
+ static struct clk_branch gcc_npu_gpll0_div_clk_src = {
++ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+- &gcc_npu_gpll0_clk_src.clkr.hw },
++ &gpll0_out_even.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
+index c84d5bab7ac28..b95483bb6a5ec 100644
+--- a/drivers/clk/sirf/clk-atlas6.c
++++ b/drivers/clk/sirf/clk-atlas6.c
+@@ -135,7 +135,7 @@ static void __init atlas6_clk_init(struct device_node *np)
+
+ for (i = pll1; i < maxclk; i++) {
+ atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
+- BUG_ON(!atlas6_clks[i]);
++ BUG_ON(IS_ERR(atlas6_clks[i]));
+ }
+ clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
+ clk_register_clkdev(atlas6_clks[io], NULL, "io");
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index 4ce9c2b4544a2..fdd994ee55e22 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -818,12 +818,6 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+ }
+
+-static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
+- const u8 *key, unsigned int keylen)
+-{
+- return skcipher_setkey(skcipher, key, keylen, 0);
+-}
+-
+ static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+ {
+@@ -2058,21 +2052,6 @@ static struct caam_skcipher_alg driver_algs[] = {
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
+ },
+- {
+- .skcipher = {
+- .base = {
+- .cra_name = "ecb(arc4)",
+- .cra_driver_name = "ecb-arc4-caam",
+- .cra_blocksize = ARC4_BLOCK_SIZE,
+- },
+- .setkey = arc4_skcipher_setkey,
+- .encrypt = skcipher_encrypt,
+- .decrypt = skcipher_decrypt,
+- .min_keysize = ARC4_MIN_KEY_SIZE,
+- .max_keysize = ARC4_MAX_KEY_SIZE,
+- },
+- .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
+- },
+ };
+
+ static struct caam_aead_alg driver_aeads[] = {
+@@ -3533,7 +3512,6 @@ int caam_algapi_init(struct device *ctrldev)
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ int i = 0, err = 0;
+ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
+- u32 arc4_inst;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+ bool registered = false, gcm_support;
+
+@@ -3553,8 +3531,6 @@ int caam_algapi_init(struct device *ctrldev)
+ CHA_ID_LS_DES_SHIFT;
+ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+- arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
+- CHA_ID_LS_ARC4_SHIFT;
+ ccha_inst = 0;
+ ptha_inst = 0;
+
+@@ -3575,7 +3551,6 @@ int caam_algapi_init(struct device *ctrldev)
+ md_inst = mdha & CHA_VER_NUM_MASK;
+ ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
+ ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+- arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
+
+ gcm_support = aesa & CHA_VER_MISC_AES_GCM;
+ }
+@@ -3598,10 +3573,6 @@ int caam_algapi_init(struct device *ctrldev)
+ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
+- /* Skip ARC4 algorithms if not supported by device */
+- if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
+- continue;
+-
+ /*
+ * Check support for AES modes not available
+ * on LP devices.
+diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
+index 60e2a54c19f11..c3c22a8de4c00 100644
+--- a/drivers/crypto/caam/compat.h
++++ b/drivers/crypto/caam/compat.h
+@@ -43,7 +43,6 @@
+ #include <crypto/akcipher.h>
+ #include <crypto/scatterwalk.h>
+ #include <crypto/skcipher.h>
+-#include <crypto/arc4.h>
+ #include <crypto/internal/skcipher.h>
+ #include <crypto/internal/hash.h>
+ #include <crypto/internal/rsa.h>
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+index 47f529ce280ae..2718396083ee4 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+@@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru
+ return disp_clk_threshold;
+ }
+
+-static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
++static void ramp_up_dispclk_with_dpp(
++ struct clk_mgr_internal *clk_mgr,
++ struct dc *dc,
++ struct dc_clocks *new_clocks,
++ bool safe_to_lower)
+ {
+ int i;
+ int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+
++	/* This function changes dispclk, dppclk and dprefclk according to the
++	 * bandwidth requirement. Its call stack is rv1_update_clocks -->
++	 * update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth
++	 * --> prepare_bandwidth / optimize_bandwidth. Before the dcn hw is
++	 * changed, prepare_bandwidth is called first to raise clocks and
++	 * watermarks enough for the change; after the dcn hw change is done,
++	 * optimize_bandwidth is executed to lower clocks and save power under
++	 * the new dcn hw settings.
++	 *
++	 * Below is the sequence of commit_planes_for_stream:
++	 *
++	 * step 1: prepare_bandwidth - raise clocks to have enough bandwidth
++	 * step 2: lock_doublebuffer_enable
++	 * step 3: pipe_control_lock(true) - make dchubp register changes not
++	 *         take effect right away
++	 * step 4: apply_ctx_for_surface - program dchubp
++	 * step 5: pipe_control_lock(false) - dchubp register changes take effect
++	 * step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream
++	 *         for a full update, optimize clocks to save power
++	 *
++	 * At the end of step 1, the dcn clocks (dprefclk, dispclk, dppclk) may
++	 * already be changed for the new dchubp configuration, but the real
++	 * dcn hub dchubps keep running with the old configuration until the
++	 * end of step 5. The clock settings made at step 1 therefore must not
++	 * be lower than those in effect before step 1. This is enforced by two
++	 * conditions: 1. if (should_set_clock(safe_to_lower
++	 * , new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) ||
++	 * new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz)
++	 * 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz
++	 *
++	 * The second condition is based on the new dchubp configuration; the
++	 * dppclk for the new dchubp may differ from the dppclk before step 1.
++	 * For example, before step 1 the dchubps are:
++	 * pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979)
++	 * pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080)
++	 * and pipe 0 needs dppclk = dispclk.
++	 *
++	 * With the new dchubp pipe-split configuration:
++	 * pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080)
++	 * pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080)
++	 * dppclk only needs dppclk = dispclk / 2.
++	 *
++	 * dispclk and dppclk are not protected by the otg master lock; they
++	 * take effect right after step 1. During this transition dispclk stays
++	 * the same, but dppclk is halved relative to what the old dchubp
++	 * configuration needs between step 1 and step 6, which may cause
++	 * intermittent p-state warnings.
++	 *
++	 * For new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we
++	 * must make sure dppclk is not lowered between steps 1 and 6.
++	 * For new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz, the
++	 * display clock is raised, but we do not know the ratio of
++	 * new_clocks->dispclk_khz to clk_mgr_base->clks.dispclk_khz, so
++	 * new_clocks->dispclk_khz / 2 does not guarantee a value equal to or
++	 * higher than the old dppclk. We can ignore the power-saving
++	 * difference between dppclk = dispclk and dppclk = dispclk / 2 between
++	 * step 1 and step 6, so as long as safe_to_lower is false, setting
++	 * dppclk = dispclk simplifies the condition check.
++	 * TODO: review this change for other ASICs.
++	 **/
++ if (!safe_to_lower)
++ request_dpp_div = false;
++
+ /* set disp clk to dpp clk threshold */
+
+ clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
+@@ -206,7 +271,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
+ /* program dispclk on = as a w/a for sleep resume clock ramping issues */
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
+ || new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
+- ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
++ ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ send_request_to_lower = true;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+index 2e71ca3e19f58..09a3d8ae44491 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+@@ -2725,7 +2725,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+
+ static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
+ {
+- return ci_is_smc_ram_running(hwmgr);
++ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
++ CGS_IND_REG__SMC, FEATURE_STATUS,
++ VOLTAGE_CONTROLLER_ON))
++ ? true : false;
+ }
+
+ static int ci_smu_init(struct pp_hwmgr *hwmgr)
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 006d6087700fb..2de1eebe591f9 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -3369,11 +3369,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ {
+ int ret;
+
+- port = drm_dp_mst_topology_get_port_validated(mgr, port);
+- if (!port)
++ if (slots < 0)
+ return false;
+
+- if (slots < 0)
++ port = drm_dp_mst_topology_get_port_validated(mgr, port);
++ if (!port)
+ return false;
+
+ if (port->vcpi.vcpi > 0) {
+@@ -3389,6 +3389,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ if (ret) {
+ DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
+ DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
++ drm_dp_mst_topology_put_port(port);
+ goto out;
+ }
+ DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
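Both hunks are reference-count hygiene: check cheap preconditions before taking the topology reference, and drop the reference on every exit path after a successful get. The shape in a toy, self-contained form (names illustrative):

#include <stdbool.h>
#include <stdio.h>

struct obj { int refs; };

static struct obj *obj_get(struct obj *o) { o->refs++; return o; }
static void obj_put(struct obj *o) { o->refs--; }
static int setup(struct obj *o) { (void)o; return 0; }

static bool do_work(struct obj *o, int slots)
{
	bool ok = false;

	if (slots < 0)		/* cheap precondition first: no ref yet */
		return false;

	o = obj_get(o);		/* reference held from here on */
	if (!o)
		return false;

	if (setup(o) != 0)	/* any failure after the get ... */
		goto out;	/* ... must still reach the put */

	ok = true;
out:
	obj_put(o);		/* balanced on every exit path */
	return ok;
}

int main(void)
{
	struct obj o = { .refs = 0 };

	do_work(&o, 1);
	printf("refs after do_work: %d\n", o.refs);	/* 0: balanced */
	return 0;
}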
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index d00ea384dcbfe..58f5dc2f6dd52 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -121,6 +121,12 @@ static const struct dmi_system_id orientation_data[] = {
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
++ }, { /* Asus T103HAF */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
++ },
++ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* GPD MicroPC (generic strings, also match on bios date) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
+index 9af5a08d5490f..d6629fc869f3f 100644
+--- a/drivers/gpu/drm/imx/imx-ldb.c
++++ b/drivers/gpu/drm/imx/imx-ldb.c
+@@ -302,18 +302,19 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
+ {
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
++ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+ int mux, ret;
+
+ drm_panel_disable(imx_ldb_ch->panel);
+
+- if (imx_ldb_ch == &ldb->channel[0])
++ if (imx_ldb_ch == &ldb->channel[0] || dual)
+ ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
+- else if (imx_ldb_ch == &ldb->channel[1])
++ if (imx_ldb_ch == &ldb->channel[1] || dual)
+ ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
+
+ regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
+
+- if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
++ if (dual) {
+ clk_disable_unprepare(ldb->clk[0]);
+ clk_disable_unprepare(ldb->clk[1]);
+ }
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
+index 77c3a3855c682..c05e013bb8e3d 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
+@@ -46,7 +46,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
+ sg_free_table(&bo->sgts[i]);
+ }
+ }
+- kfree(bo->sgts);
++ kvfree(bo->sgts);
+ }
+
+ drm_gem_shmem_free_object(obj);
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 5d75f8cf64776..3dc9b30a64b01 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -486,7 +486,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+ sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
+ if (!pages) {
+- kfree(bo->sgts);
++ kvfree(bo->sgts);
+ bo->sgts = NULL;
+ mutex_unlock(&bo->base.pages_lock);
+ ret = -ENOMEM;
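Both call sites allocate bo->sgts with kvmalloc_array(), which may hand back vmalloc memory for large sizes, so it must be released with kvfree(); plain kfree() cannot handle vmalloc addresses. A user-space analog of the pairing rule (stand-in functions, not the kernel API):

#include <stdlib.h>

/* Stand-ins: a pointer must be released by the free routine that matches
 * its allocator. In the kernel, kvmalloc_array() tries kmalloc and falls
 * back to vmalloc, and only kvfree() can release both kinds of pointer. */
static void *kvmalloc_array_sim(size_t n, size_t size)
{
	return calloc(n, size);
}

static void kvfree_sim(void *p)
{
	free(p);	/* kernel: picks kfree() or vfree() as appropriate */
}

int main(void)
{
	void **sgts = kvmalloc_array_sim(64, sizeof(*sgts));

	/* ... populate and use sgts ... */

	kvfree_sim(sgts);	/* a plain kfree() here is the bug fixed above */
	return 0;
}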
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index f47d5710cc951..33b1519887474 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2666,7 +2666,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+ ++i;
+ }
+
+- if (i != unit) {
++ if (&con->head == &dev_priv->dev->mode_config.connector_list) {
+ DRM_ERROR("Could not find initial display unit.\n");
+ ret = -EINVAL;
+ goto out_unlock;
+@@ -2690,13 +2690,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+ break;
+ }
+
+- if (mode->type & DRM_MODE_TYPE_PREFERRED)
+- *p_mode = mode;
+- else {
++ if (&mode->head == &con->modes) {
+ WARN_ONCE(true, "Could not find initial preferred mode.\n");
+ *p_mode = list_first_entry(&con->modes,
+ struct drm_display_mode,
+ head);
++ } else {
++ *p_mode = mode;
+ }
+
+ out_unlock:
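The corrected tests rely on a property of kernel list iteration: when list_for_each_entry exhausts a circular list, the iterator's embedded list head equals the list's sentinel, which is the only reliable not-found signal once the loop body no longer maintains a counter. The idiom on a hand-rolled circular list:

#include <stdio.h>

struct node { struct node *next; int val; };

/* After the search loop, the iterator points back at the head sentinel
 * when nothing matched; comparing a side counter (as the old code did)
 * breaks as soon as the loop body stops counting. */
static struct node *find(struct node *head, int want)
{
	struct node *pos;

	for (pos = head->next; pos != head; pos = pos->next)
		if (pos->val == want)
			break;

	if (pos == head)	/* list exhausted: the reliable signal */
		return NULL;
	return pos;
}

int main(void)
{
	struct node head, a = { .val = 1 }, b = { .val = 2 };

	head.next = &a; a.next = &b; b.next = &head;	/* circular list */

	printf("find(2) -> %p\n", (void *)find(&head, 2));
	printf("find(9) -> %p\n", (void *)find(&head, 9));	/* (nil) */
	return 0;
}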
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+index 5702219ec38f6..7b54c1f56208f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+@@ -81,7 +81,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
+ struct vmw_legacy_display_unit *entry;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_crtc *crtc = NULL;
+- int i = 0;
++ int i;
+
+ /* If there is no display topology the host just assumes
+ * that the guest will set the same layout as the host.
+@@ -92,12 +92,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
+ crtc = &entry->base.crtc;
+ w = max(w, crtc->x + crtc->mode.hdisplay);
+ h = max(h, crtc->y + crtc->mode.vdisplay);
+- i++;
+ }
+
+ if (crtc == NULL)
+ return 0;
+- fb = entry->base.crtc.primary->state->fb;
++ fb = crtc->primary->state->fb;
+
+ return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
+ fb->format->cpp[0] * 8,
+diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
+index eeca50d9a1ee4..aa1d4b6d278f7 100644
+--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
++++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
+@@ -137,6 +137,17 @@ struct ipu_image_convert_ctx;
+ struct ipu_image_convert_chan;
+ struct ipu_image_convert_priv;
+
++enum eof_irq_mask {
++ EOF_IRQ_IN = BIT(0),
++ EOF_IRQ_ROT_IN = BIT(1),
++ EOF_IRQ_OUT = BIT(2),
++ EOF_IRQ_ROT_OUT = BIT(3),
++};
++
++#define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
++#define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT | \
++ EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)
++
+ struct ipu_image_convert_ctx {
+ struct ipu_image_convert_chan *chan;
+
+@@ -173,6 +184,9 @@ struct ipu_image_convert_ctx {
+ /* where to place converted tile in dest image */
+ unsigned int out_tile_map[MAX_TILES];
+
++ /* mask of completed EOF irqs at every tile conversion */
++ enum eof_irq_mask eof_mask;
++
+ struct list_head list;
+ };
+
+@@ -189,6 +203,8 @@ struct ipu_image_convert_chan {
+ struct ipuv3_channel *rotation_out_chan;
+
+ /* the IPU end-of-frame irqs */
++ int in_eof_irq;
++ int rot_in_eof_irq;
+ int out_eof_irq;
+ int rot_out_eof_irq;
+
+@@ -1380,6 +1396,9 @@ static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
+ dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
+ __func__, chan->ic_task, ctx, run, tile, dst_tile);
+
++ /* clear EOF irq mask */
++ ctx->eof_mask = 0;
++
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* swap width/height for resizer */
+ dest_width = d_image->tile[dst_tile].height;
+@@ -1615,7 +1634,7 @@ static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
+ }
+
+ /* hold irqlock when calling */
+-static irqreturn_t do_irq(struct ipu_image_convert_run *run)
++static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
+ {
+ struct ipu_image_convert_ctx *ctx = run->ctx;
+ struct ipu_image_convert_chan *chan = ctx->chan;
+@@ -1700,6 +1719,7 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run)
+ ctx->cur_buf_num ^= 1;
+ }
+
++ ctx->eof_mask = 0; /* clear EOF irq mask for next tile */
+ ctx->next_tile++;
+ return IRQ_HANDLED;
+ done:
+@@ -1709,13 +1729,15 @@ done:
+ return IRQ_WAKE_THREAD;
+ }
+
+-static irqreturn_t norotate_irq(int irq, void *data)
++static irqreturn_t eof_irq(int irq, void *data)
+ {
+ struct ipu_image_convert_chan *chan = data;
++ struct ipu_image_convert_priv *priv = chan->priv;
+ struct ipu_image_convert_ctx *ctx;
+ struct ipu_image_convert_run *run;
++ irqreturn_t ret = IRQ_HANDLED;
++ bool tile_complete = false;
+ unsigned long flags;
+- irqreturn_t ret;
+
+ spin_lock_irqsave(&chan->irqlock, flags);
+
+@@ -1728,46 +1750,33 @@ static irqreturn_t norotate_irq(int irq, void *data)
+
+ ctx = run->ctx;
+
+- if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+- /* this is a rotation operation, just ignore */
+- spin_unlock_irqrestore(&chan->irqlock, flags);
+- return IRQ_HANDLED;
+- }
+-
+- ret = do_irq(run);
+-out:
+- spin_unlock_irqrestore(&chan->irqlock, flags);
+- return ret;
+-}
+-
+-static irqreturn_t rotate_irq(int irq, void *data)
+-{
+- struct ipu_image_convert_chan *chan = data;
+- struct ipu_image_convert_priv *priv = chan->priv;
+- struct ipu_image_convert_ctx *ctx;
+- struct ipu_image_convert_run *run;
+- unsigned long flags;
+- irqreturn_t ret;
+-
+- spin_lock_irqsave(&chan->irqlock, flags);
+-
+- /* get current run and its context */
+- run = chan->current_run;
+- if (!run) {
++ if (irq == chan->in_eof_irq) {
++ ctx->eof_mask |= EOF_IRQ_IN;
++ } else if (irq == chan->out_eof_irq) {
++ ctx->eof_mask |= EOF_IRQ_OUT;
++ } else if (irq == chan->rot_in_eof_irq ||
++ irq == chan->rot_out_eof_irq) {
++ if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
++ /* this was NOT a rotation op, shouldn't happen */
++ dev_err(priv->ipu->dev,
++ "Unexpected rotation interrupt\n");
++ goto out;
++ }
++ ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
++ EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
++ } else {
++ dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
+ ret = IRQ_NONE;
+ goto out;
+ }
+
+- ctx = run->ctx;
+-
+- if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
+- /* this was NOT a rotation operation, shouldn't happen */
+- dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
+- spin_unlock_irqrestore(&chan->irqlock, flags);
+- return IRQ_HANDLED;
+- }
++ if (ipu_rot_mode_is_irt(ctx->rot_mode))
++ tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
++ else
++ tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);
+
+- ret = do_irq(run);
++ if (tile_complete)
++ ret = do_tile_complete(run);
+ out:
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+ return ret;
+@@ -1801,6 +1810,10 @@ static void force_abort(struct ipu_image_convert_ctx *ctx)
+
+ static void release_ipu_resources(struct ipu_image_convert_chan *chan)
+ {
++ if (chan->in_eof_irq >= 0)
++ free_irq(chan->in_eof_irq, chan);
++ if (chan->rot_in_eof_irq >= 0)
++ free_irq(chan->rot_in_eof_irq, chan);
+ if (chan->out_eof_irq >= 0)
+ free_irq(chan->out_eof_irq, chan);
+ if (chan->rot_out_eof_irq >= 0)
+@@ -1819,7 +1832,27 @@ static void release_ipu_resources(struct ipu_image_convert_chan *chan)
+
+ chan->in_chan = chan->out_chan = chan->rotation_in_chan =
+ chan->rotation_out_chan = NULL;
+- chan->out_eof_irq = chan->rot_out_eof_irq = -1;
++ chan->in_eof_irq = -1;
++ chan->rot_in_eof_irq = -1;
++ chan->out_eof_irq = -1;
++ chan->rot_out_eof_irq = -1;
++}
++
++static int get_eof_irq(struct ipu_image_convert_chan *chan,
++ struct ipuv3_channel *channel)
++{
++ struct ipu_image_convert_priv *priv = chan->priv;
++ int ret, irq;
++
++ irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);
++
++ ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
++ if (ret < 0) {
++ dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
++ return ret;
++ }
++
++ return irq;
+ }
+
+ static int get_ipu_resources(struct ipu_image_convert_chan *chan)
+@@ -1855,31 +1888,33 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan)
+ }
+
+ /* acquire the EOF interrupts */
+- chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+- chan->out_chan,
+- IPU_IRQ_EOF);
++ ret = get_eof_irq(chan, chan->in_chan);
++ if (ret < 0) {
++ chan->in_eof_irq = -1;
++ goto err;
++ }
++ chan->in_eof_irq = ret;
+
+- ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
+- 0, "ipu-ic", chan);
++ ret = get_eof_irq(chan, chan->rotation_in_chan);
+ if (ret < 0) {
+- dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+- chan->out_eof_irq);
+- chan->out_eof_irq = -1;
++ chan->rot_in_eof_irq = -1;
+ goto err;
+ }
++ chan->rot_in_eof_irq = ret;
+
+- chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+- chan->rotation_out_chan,
+- IPU_IRQ_EOF);
++ ret = get_eof_irq(chan, chan->out_chan);
++ if (ret < 0) {
++ chan->out_eof_irq = -1;
++ goto err;
++ }
++ chan->out_eof_irq = ret;
+
+- ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
+- 0, "ipu-ic", chan);
++ ret = get_eof_irq(chan, chan->rotation_out_chan);
+ if (ret < 0) {
+- dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+- chan->rot_out_eof_irq);
+ chan->rot_out_eof_irq = -1;
+ goto err;
+ }
++ chan->rot_out_eof_irq = ret;
+
+ return 0;
+ err:
+@@ -2458,6 +2493,8 @@ int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
+ chan->ic_task = i;
+ chan->priv = priv;
+ chan->dma_ch = &image_convert_dma_chan[i];
++ chan->in_eof_irq = -1;
++ chan->rot_in_eof_irq = -1;
+ chan->out_eof_irq = -1;
+ chan->rot_out_eof_irq = -1;
+
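The rework replaces two independent first-EOF-wins handlers with one accumulate-and-compare mask, so a tile only advances after every channel involved in the conversion has raised EOF. The core bookkeeping in isolation:

#include <stdbool.h>
#include <stdio.h>

enum eof_irq_mask {
	EOF_IRQ_IN      = 1 << 0,
	EOF_IRQ_ROT_IN  = 1 << 1,
	EOF_IRQ_OUT     = 1 << 2,
	EOF_IRQ_ROT_OUT = 1 << 3,
};

#define EOF_IRQ_COMPLETE	(EOF_IRQ_IN | EOF_IRQ_OUT)
#define EOF_IRQ_ROT_COMPLETE	(EOF_IRQ_IN | EOF_IRQ_OUT | \
				 EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)

/* Each irq handler ORs its bit into the per-context mask and only reports
 * completion once every expected bit is present. */
static bool tile_complete(unsigned int *mask, enum eof_irq_mask bit,
			  bool rotating)
{
	*mask |= bit;
	return *mask == (rotating ? EOF_IRQ_ROT_COMPLETE : EOF_IRQ_COMPLETE);
}

int main(void)
{
	unsigned int mask = 0;

	printf("%d\n", tile_complete(&mask, EOF_IRQ_IN, false));	/* 0 */
	printf("%d\n", tile_complete(&mask, EOF_IRQ_OUT, false));	/* 1 */
	return 0;
}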
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index 03475f1799730..dd9661c11782a 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -1037,7 +1037,7 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+ if (!iproc_i2c->slave)
+ return -EINVAL;
+
+- iproc_i2c->slave = NULL;
++ disable_irq(iproc_i2c->irq);
+
+ /* disable all slave interrupts */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+@@ -1050,6 +1050,17 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+ tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
+
++ /* flush TX/RX FIFOs */
++ tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
++ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
++
++ /* clear all pending slave interrupts */
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
++
++ iproc_i2c->slave = NULL;
++
++ enable_irq(iproc_i2c->irq);
++
+ return 0;
+ }
+
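The fix brackets slave teardown with disable_irq()/enable_irq() so the handler can never observe a half-torn-down context, and flushes FIFO and status residue while the source is quiet. The ordering, with toy stand-ins for the irq and MMIO calls:

#include <stdio.h>

struct dev { int irq_enabled; void *slave; };

static void irq_disable(struct dev *d) { d->irq_enabled = 0; }
static void irq_enable(struct dev *d)  { d->irq_enabled = 1; }
static void hw_mask_and_flush(struct dev *d)
{
	(void)d;	/* kernel: mask slave irqs, flush FIFOs, clear status */
}

static void unreg_slave(struct dev *d)
{
	irq_disable(d);		/* no handler can run past this point */
	hw_mask_and_flush(d);
	d->slave = NULL;	/* safe: no handler can observe the NULL */
	irq_enable(d);
}

int main(void)
{
	struct dev d = { .irq_enabled = 1, .slave = &d };

	unreg_slave(&d);
	printf("irq_enabled=%d slave=%p\n", d.irq_enabled, d.slave);
	return 0;
}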
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index 36af8fdb66586..0b90aa0318df3 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -580,13 +580,14 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
+ rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
+ }
+
+- rcar_i2c_write(priv, ICSSR, ~SAR & 0xff);
++ /* Clear SSR, too, because of old STOPs to other clients than us */
++ rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff);
+ }
+
+ /* master sent stop */
+ if (ssr_filtered & SSR) {
+ i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
+- rcar_i2c_write(priv, ICSIER, SAR | SSR);
++ rcar_i2c_write(priv, ICSIER, SAR);
+ rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
+ }
+
+@@ -850,7 +851,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
+ priv->slave = slave;
+ rcar_i2c_write(priv, ICSAR, slave->addr);
+ rcar_i2c_write(priv, ICSSR, 0);
+- rcar_i2c_write(priv, ICSIER, SAR | SSR);
++ rcar_i2c_write(priv, ICSIER, SAR);
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS);
+
+ return 0;
+@@ -862,12 +863,14 @@ static int rcar_unreg_slave(struct i2c_client *slave)
+
+ WARN_ON(!priv->slave);
+
+- /* disable irqs and ensure none is running before clearing ptr */
++ /* ensure no irq is running before clearing ptr */
++ disable_irq(priv->irq);
+ rcar_i2c_write(priv, ICSIER, 0);
+- rcar_i2c_write(priv, ICSCR, 0);
++ rcar_i2c_write(priv, ICSSR, 0);
++ enable_irq(priv->irq);
++ rcar_i2c_write(priv, ICSCR, SDBS);
+ rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+
+- synchronize_irq(priv->irq);
+ priv->slave = NULL;
+
+ pm_runtime_put(rcar_i2c_priv_to_dev(priv));
+diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
+index 2d897e64c6a9e..424922cad1e39 100644
+--- a/drivers/iio/dac/ad5592r-base.c
++++ b/drivers/iio/dac/ad5592r-base.c
+@@ -416,7 +416,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
+ s64 tmp = *val * (3767897513LL / 25LL);
+ *val = div_s64_rem(tmp, 1000000000LL, val2);
+
+- ret = IIO_VAL_INT_PLUS_MICRO;
++ return IIO_VAL_INT_PLUS_MICRO;
+ } else {
+ int mult;
+
+@@ -447,7 +447,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
+ ret = IIO_VAL_INT;
+ break;
+ default:
+- ret = -EINVAL;
++ return -EINVAL;
+ }
+
+ unlock:
+diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
+index 11210bf7fd61b..f454d63008d69 100644
+--- a/drivers/infiniband/core/counters.c
++++ b/drivers/infiniband/core/counters.c
+@@ -284,7 +284,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
+ struct rdma_counter *counter;
+ int ret;
+
+- if (!qp->res.valid)
++ if (!qp->res.valid || rdma_is_kernel_res(&qp->res))
+ return 0;
+
+ if (!rdma_is_port_valid(dev, port))
+@@ -487,7 +487,7 @@ int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
+ goto err;
+ }
+
+- if (counter->res.task != qp->res.task) {
++ if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) {
+ ret = -EINVAL;
+ goto err_task;
+ }
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index e2ddcb0dc4ee3..c398d1a64614c 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -757,6 +757,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
+ mr->uobject = uobj;
+ atomic_inc(&pd->usecnt);
+ mr->res.type = RDMA_RESTRACK_MR;
++ mr->iova = cmd.hca_va;
+ rdma_restrack_uadd(&mr->res);
+
+ uobj->object = mr;
+@@ -847,6 +848,9 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
+ atomic_dec(&old_pd->usecnt);
+ }
+
++ if (cmd.flags & IB_MR_REREG_TRANS)
++ mr->iova = cmd.hca_va;
++
+ memset(&resp, 0, sizeof(resp));
+ resp.lkey = mr->lkey;
+ resp.rkey = mr->rkey;
+diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
+index 35c284af574da..dcb58cef336d9 100644
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -399,7 +399,6 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
+ mmid = stag >> 8;
+ mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+ mhp->ibmr.length = mhp->attr.len;
+- mhp->ibmr.iova = mhp->attr.va_fbo;
+ mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
+ pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
+ return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
+diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
+index 6ae503cfc5264..9114cb7307692 100644
+--- a/drivers/infiniband/hw/mlx4/mr.c
++++ b/drivers/infiniband/hw/mlx4/mr.c
+@@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+ mr->ibmr.length = length;
+- mr->ibmr.iova = virt_addr;
+ mr->ibmr.page_size = 1U << shift;
+
+ return &mr->ibmr;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
+index 0e5f27caf2b2d..50a3557386090 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib.h
++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
+@@ -515,7 +515,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev);
+
+ int ipoib_ib_dev_open_default(struct net_device *dev);
+ int ipoib_ib_dev_open(struct net_device *dev);
+-int ipoib_ib_dev_stop(struct net_device *dev);
++void ipoib_ib_dev_stop(struct net_device *dev);
+ void ipoib_ib_dev_up(struct net_device *dev);
+ void ipoib_ib_dev_down(struct net_device *dev);
+ int ipoib_ib_dev_stop_default(struct net_device *dev);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index da3c5315bbb51..494f413dc3c6c 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -670,13 +670,12 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
+ return rc;
+ }
+
+-static void __ipoib_reap_ah(struct net_device *dev)
++static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
+ {
+- struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_ah *ah, *tah;
+ unsigned long flags;
+
+- netif_tx_lock_bh(dev);
++ netif_tx_lock_bh(priv->dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
+@@ -687,37 +686,37 @@ static void __ipoib_reap_ah(struct net_device *dev)
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+- netif_tx_unlock_bh(dev);
++ netif_tx_unlock_bh(priv->dev);
+ }
+
+ void ipoib_reap_ah(struct work_struct *work)
+ {
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+- struct net_device *dev = priv->dev;
+
+- __ipoib_reap_ah(dev);
++ ipoib_reap_dead_ahs(priv);
+
+ if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ round_jiffies_relative(HZ));
+ }
+
+-static void ipoib_flush_ah(struct net_device *dev)
++static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
+ {
+- struct ipoib_dev_priv *priv = ipoib_priv(dev);
+-
+- cancel_delayed_work(&priv->ah_reap_task);
+- flush_workqueue(priv->wq);
+- ipoib_reap_ah(&priv->ah_reap_task.work);
++ clear_bit(IPOIB_STOP_REAPER, &priv->flags);
++ queue_delayed_work(priv->wq, &priv->ah_reap_task,
++ round_jiffies_relative(HZ));
+ }
+
+-static void ipoib_stop_ah(struct net_device *dev)
++static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
+ {
+- struct ipoib_dev_priv *priv = ipoib_priv(dev);
+-
+ set_bit(IPOIB_STOP_REAPER, &priv->flags);
+- ipoib_flush_ah(dev);
++ cancel_delayed_work(&priv->ah_reap_task);
++ /*
++ * After ipoib_stop_ah_reaper() we always go through
++ * ipoib_reap_dead_ahs() which ensures the work is really stopped and
++	 * does a final flush of the dead_ahs list
++ */
+ }
+
+ static int recvs_pending(struct net_device *dev)
+@@ -846,18 +845,6 @@ timeout:
+ return 0;
+ }
+
+-int ipoib_ib_dev_stop(struct net_device *dev)
+-{
+- struct ipoib_dev_priv *priv = ipoib_priv(dev);
+-
+- priv->rn_ops->ndo_stop(dev);
+-
+- clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+- ipoib_flush_ah(dev);
+-
+- return 0;
+-}
+-
+ int ipoib_ib_dev_open_default(struct net_device *dev)
+ {
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+@@ -901,10 +888,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
+ return -1;
+ }
+
+- clear_bit(IPOIB_STOP_REAPER, &priv->flags);
+- queue_delayed_work(priv->wq, &priv->ah_reap_task,
+- round_jiffies_relative(HZ));
+-
++ ipoib_start_ah_reaper(priv);
+ if (priv->rn_ops->ndo_open(dev)) {
+ pr_warn("%s: Failed to open dev\n", dev->name);
+ goto dev_stop;
+@@ -915,13 +899,20 @@ int ipoib_ib_dev_open(struct net_device *dev)
+ return 0;
+
+ dev_stop:
+- set_bit(IPOIB_STOP_REAPER, &priv->flags);
+- cancel_delayed_work(&priv->ah_reap_task);
+- set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+- ipoib_ib_dev_stop(dev);
++ ipoib_stop_ah_reaper(priv);
+ return -1;
+ }
+
++void ipoib_ib_dev_stop(struct net_device *dev)
++{
++ struct ipoib_dev_priv *priv = ipoib_priv(dev);
++
++ priv->rn_ops->ndo_stop(dev);
++
++ clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
++ ipoib_stop_ah_reaper(priv);
++}
++
+ void ipoib_pkey_dev_check_presence(struct net_device *dev)
+ {
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+@@ -1232,7 +1223,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
+ ipoib_mcast_dev_flush(dev);
+ if (oper_up)
+ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+- ipoib_flush_ah(dev);
++ ipoib_reap_dead_ahs(priv);
+ }
+
+ if (level >= IPOIB_FLUSH_NORMAL)
+@@ -1307,7 +1298,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
+ * the neighbor garbage collection is stopped and reaped.
+ * That should all be done now, so make a final ah flush.
+ */
+- ipoib_stop_ah(dev);
++ ipoib_reap_dead_ahs(priv);
+
+ clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 4fd095fd63b6f..044bcacad6e48 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -1979,6 +1979,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
+
+	/* no more work items on priv->wq */
+ if (priv->wq) {
++ /* See ipoib_mcast_carrier_on_task() */
++ WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
+index e99d9bf1a267d..e78c4c7eda34d 100644
+--- a/drivers/input/mouse/sentelic.c
++++ b/drivers/input/mouse/sentelic.c
+@@ -441,7 +441,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
+
+ fsp_reg_write_enable(psmouse, false);
+
+- return count;
++ return retval;
+ }
+
+ PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg);
+diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
+index 8e19bfa94121e..a99afb5d9011c 100644
+--- a/drivers/iommu/omap-iommu-debug.c
++++ b/drivers/iommu/omap-iommu-debug.c
+@@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
+ mutex_lock(&iommu_debug_lock);
+
+ bytes = omap_iommu_dump_ctx(obj, p, count);
++ if (bytes < 0)
++ goto err;
+ bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
+
++err:
+ mutex_unlock(&iommu_debug_lock);
+ kfree(buf);
+
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 263cf9240b168..7966b19ceba79 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -2581,6 +2581,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ msi_alloc_info_t *info = args;
+ struct its_device *its_dev = info->scratchpad[0].ptr;
+ struct its_node *its = its_dev->its;
++ struct irq_data *irqd;
+ irq_hw_number_t hwirq;
+ int err;
+ int i;
+@@ -2600,7 +2601,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i,
+ hwirq + i, &its_irq_chip, its_dev);
+- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
++ irqd = irq_get_irq_data(virq + i);
++ irqd_set_single_target(irqd);
++ irqd_set_affinity_on_activate(irqd);
+ pr_debug("ID:%d pID:%d vID:%d\n",
+ (int)(hwirq + i - its_dev->event_map.lpi_base),
+ (int)(hwirq + i), virq + i);
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 3d2b63585da95..217c838a1b405 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -264,7 +264,7 @@ struct bcache_device {
+ #define BCACHE_DEV_UNLINK_DONE 2
+ #define BCACHE_DEV_WB_RUNNING 3
+ #define BCACHE_DEV_RATE_DW_RUNNING 4
+- unsigned int nr_stripes;
++ int nr_stripes;
+ unsigned int stripe_size;
+ atomic_t *stripe_sectors_dirty;
+ unsigned long *full_dirty_stripes;
+diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
+index 08768796b5439..fda68c00ddd53 100644
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -321,7 +321,7 @@ int bch_btree_keys_alloc(struct btree_keys *b,
+
+ b->page_order = page_order;
+
+- t->data = (void *) __get_free_pages(gfp, b->page_order);
++ t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
+ if (!t->data)
+ goto err;
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 3c1109fceb2fb..46556bde032e2 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -840,7 +840,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
+ mutex_init(&c->verify_lock);
+
+ c->verify_ondisk = (void *)
+- __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
++ __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c)));
+
+ c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 6730820780b06..8250d2d1d780c 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -1002,8 +1002,8 @@ int bch_journal_alloc(struct cache_set *c)
+ j->w[1].c = c;
+
+ if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
+- !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
+- !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
++ !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
++ !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
+ return -ENOMEM;
+
+ return 0;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 168d647078591..25ad64a3919f6 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1754,7 +1754,7 @@ void bch_cache_set_unregister(struct cache_set *c)
+ }
+
+ #define alloc_bucket_pages(gfp, c) \
+- ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
++ ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
+
+ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ {
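All four bcache hunks above add __GFP_COMP to multi-order __get_free_pages() calls, so the allocation becomes a compound page and code that maps from an arbitrary byte of the buffer back to its head page stays correct. A hedged kernel-context fragment of the idiom:

    /* Fragment (assumes kernel context): allocate a power-of-two,
     * physically contiguous buffer as a compound page. */
    unsigned int order = ilog2(nr_pages);   /* nr_pages: power of two */
    void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP, order);

    if (!buf)
            return -ENOMEM;
    /* ... use buf ... */
    free_pages((unsigned long)buf, order);  /* release with the same order */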
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index d60268fe49e10..0b02210ab4355 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -519,15 +519,19 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
+ uint64_t offset, int nr_sectors)
+ {
+ struct bcache_device *d = c->devices[inode];
+- unsigned int stripe_offset, stripe, sectors_dirty;
++ unsigned int stripe_offset, sectors_dirty;
++ int stripe;
+
+ if (!d)
+ return;
+
++ stripe = offset_to_stripe(d, offset);
++ if (stripe < 0)
++ return;
++
+ if (UUID_FLASH_ONLY(&c->uuids[inode]))
+ atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
+
+- stripe = offset_to_stripe(d, offset);
+ stripe_offset = offset & (d->stripe_size - 1);
+
+ while (nr_sectors) {
+@@ -567,12 +571,12 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+ static void refill_full_stripes(struct cached_dev *dc)
+ {
+ struct keybuf *buf = &dc->writeback_keys;
+- unsigned int start_stripe, stripe, next_stripe;
++ unsigned int start_stripe, next_stripe;
++ int stripe;
+ bool wrapped = false;
+
+ stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
+-
+- if (stripe >= dc->disk.nr_stripes)
++ if (stripe < 0)
+ stripe = 0;
+
+ start_stripe = stripe;
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 4e4c6810dc3c7..c4ff76037227b 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -33,10 +33,22 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
+ return ret;
+ }
+
+-static inline unsigned int offset_to_stripe(struct bcache_device *d,
++static inline int offset_to_stripe(struct bcache_device *d,
+ uint64_t offset)
+ {
+ do_div(offset, d->stripe_size);
++
++ /* d->nr_stripes is in range [1, INT_MAX] */
++ if (unlikely(offset >= d->nr_stripes)) {
++ pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
++ offset, d->nr_stripes);
++ return -EINVAL;
++ }
++
++ /*
++	 * Here offset is definitely smaller than INT_MAX, so
++	 * returning it as an int will never overflow.
++ */
+ return offset;
+ }
+
+@@ -44,7 +56,10 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
+ uint64_t offset,
+ unsigned int nr_sectors)
+ {
+- unsigned int stripe = offset_to_stripe(&dc->disk, offset);
++ int stripe = offset_to_stripe(&dc->disk, offset);
++
++ if (stripe < 0)
++ return false;
+
+ while (1) {
+ if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
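The type change is the core of this fix: offset_to_stripe() now validates against nr_stripes and returns a signed error, so callers can no longer index the stripe arrays with an out-of-range value derived from a corrupted offset. A standalone C model of the bounds-checked conversion (simplified, without do_div()):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified model of the fixed offset_to_stripe(): -1 signals an
     * invalid offset instead of returning an out-of-range index. */
    static int offset_to_stripe(uint64_t offset, uint32_t stripe_size,
                                int nr_stripes)
    {
            offset /= stripe_size;
            if (offset >= (uint64_t)nr_stripes)
                    return -1;          /* caller must bail out */
            /* offset < nr_stripes <= INT_MAX, so the cast cannot overflow */
            return (int)offset;
    }

    int main(void)
    {
            printf("%d\n", offset_to_stripe(4096, 1024, 8));     /* 4 */
            printf("%d\n", offset_to_stripe(1 << 20, 1024, 8));  /* -1 */
            return 0;
    }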
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 3f8577e2c13be..2bd2444ad99c6 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -70,9 +70,6 @@ void dm_start_queue(struct request_queue *q)
+
+ void dm_stop_queue(struct request_queue *q)
+ {
+- if (blk_mq_queue_stopped(q))
+- return;
+-
+ blk_mq_quiesce_queue(q);
+ }
+
+diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
+index 73fd50e779754..d50737ec40394 100644
+--- a/drivers/md/md-cluster.c
++++ b/drivers/md/md-cluster.c
+@@ -1139,6 +1139,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
+ bitmap = get_bitmap_from_slot(mddev, i);
+ if (IS_ERR(bitmap)) {
+ pr_err("can't get bitmap from slot %d\n", i);
++ bitmap = NULL;
+ goto out;
+ }
+ counts = &bitmap->counts;
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index a3cbc9f4fec17..02acd5d5a8488 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3604,6 +3604,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
+ * is missing/faulty, then we need to read everything we can.
+ */
+ if (sh->raid_conf->level != 6 &&
++ sh->raid_conf->rmw_level != PARITY_DISABLE_RMW &&
+ sh->sector < sh->raid_conf->mddev->recovery_cp)
+ /* reconstruct-write isn't being forced */
+ return 0;
+@@ -4839,7 +4840,7 @@ static void handle_stripe(struct stripe_head *sh)
+ * or to load a block that is being partially written.
+ */
+ if (s.to_read || s.non_overwrite
+- || (conf->level == 6 && s.to_write && s.failed)
++ || (s.to_write && s.failed)
+ || (s.syncing && (s.uptodate + s.compute < disks))
+ || s.replacing
+ || s.expanding)
+diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
+index 4be6dcf292fff..aaa96f256356b 100644
+--- a/drivers/media/platform/rockchip/rga/rga-hw.c
++++ b/drivers/media/platform/rockchip/rga/rga-hw.c
+@@ -200,22 +200,25 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
+ dst_info.data.format = ctx->out.fmt->hw_format;
+ dst_info.data.swap = ctx->out.fmt->color_swap;
+
+- if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
+- if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) {
+- switch (ctx->in.colorspace) {
+- case V4L2_COLORSPACE_REC709:
+- src_info.data.csc_mode =
+- RGA_SRC_CSC_MODE_BT709_R0;
+- break;
+- default:
+- src_info.data.csc_mode =
+- RGA_SRC_CSC_MODE_BT601_R0;
+- break;
+- }
++ /*
++ * CSC mode must only be set when the colorspace families differ between
++ * input and output. It must remain unset (zeroed) if both are the same.
++ */
++
++ if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) &&
++ RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) {
++ switch (ctx->in.colorspace) {
++ case V4L2_COLORSPACE_REC709:
++ src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
++ break;
++ default:
++ src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0;
++ break;
+ }
+ }
+
+- if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
++ if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) &&
++ RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) {
+ switch (ctx->out.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
+diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h
+index 96cb0314dfa70..e8917e5630a48 100644
+--- a/drivers/media/platform/rockchip/rga/rga-hw.h
++++ b/drivers/media/platform/rockchip/rga/rga-hw.h
+@@ -95,6 +95,11 @@
+ #define RGA_COLOR_FMT_CP_8BPP 15
+ #define RGA_COLOR_FMT_MASK 15
+
++#define RGA_COLOR_FMT_IS_YUV(fmt) \
++ (((fmt) >= RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP))
++#define RGA_COLOR_FMT_IS_RGB(fmt) \
++ ((fmt) < RGA_COLOR_FMT_YUV422SP)
++
+ #define RGA_COLOR_NONE_SWAP 0
+ #define RGA_COLOR_RB_SWAP 1
+ #define RGA_COLOR_ALPHA_SWAP 2
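The two new macros classify a hardware format by family, so the CSC decision in rga-hw.c reads as intent rather than raw ordering comparisons. A standalone check of the logic, assuming the constant values implied by the header (YUV formats sit between YUV422SP and CP_1BPP):

    #include <stdio.h>

    /* Assumed to mirror the header above: YUV422SP..YUV420P are 8..11,
     * palette formats start at 12. */
    #define RGA_COLOR_FMT_YUV422SP 8
    #define RGA_COLOR_FMT_CP_1BPP  12

    #define RGA_COLOR_FMT_IS_YUV(fmt) \
            (((fmt) >= RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP))
    #define RGA_COLOR_FMT_IS_RGB(fmt) ((fmt) < RGA_COLOR_FMT_YUV422SP)

    int main(void)
    {
            int in = 9, out = 2;    /* hypothetical YUV input, RGB output */

            /* CSC is needed only when the families differ. */
            if (RGA_COLOR_FMT_IS_YUV(in) && RGA_COLOR_FMT_IS_RGB(out))
                    puts("program YUV->RGB CSC");
            return 0;
    }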
+diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
+index d7b43037e500a..e07b135613eb5 100644
+--- a/drivers/media/platform/vsp1/vsp1_dl.c
++++ b/drivers/media/platform/vsp1/vsp1_dl.c
+@@ -431,6 +431,8 @@ vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
+ if (!pool)
+ return NULL;
+
++ pool->vsp1 = vsp1;
++
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free);
+
+diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
+index 4a31907a4525f..3ff872c205eeb 100644
+--- a/drivers/mfd/arizona-core.c
++++ b/drivers/mfd/arizona-core.c
+@@ -1430,6 +1430,15 @@ err_irq:
+ arizona_irq_exit(arizona);
+ err_pm:
+ pm_runtime_disable(arizona->dev);
++
++ switch (arizona->pdata.clk32k_src) {
++ case ARIZONA_32KZ_MCLK1:
++ case ARIZONA_32KZ_MCLK2:
++ arizona_clk32k_disable(arizona);
++ break;
++ default:
++ break;
++ }
+ err_reset:
+ arizona_enable_reset(arizona);
+ regulator_disable(arizona->dcvdd);
+@@ -1452,6 +1461,15 @@ int arizona_dev_exit(struct arizona *arizona)
+ regulator_disable(arizona->dcvdd);
+ regulator_put(arizona->dcvdd);
+
++ switch (arizona->pdata.clk32k_src) {
++ case ARIZONA_32KZ_MCLK1:
++ case ARIZONA_32KZ_MCLK2:
++ arizona_clk32k_disable(arizona);
++ break;
++ default:
++ break;
++ }
++
+ mfd_remove_devices(arizona->dev);
+ arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona);
+ arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona);
+diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
+index 4faa8d2e5d045..707f4287ab4a0 100644
+--- a/drivers/mfd/dln2.c
++++ b/drivers/mfd/dln2.c
+@@ -287,7 +287,11 @@ static void dln2_rx(struct urb *urb)
+ len = urb->actual_length - sizeof(struct dln2_header);
+
+ if (handle == DLN2_HANDLE_EVENT) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&dln2->event_cb_lock, flags);
+ dln2_run_event_callbacks(dln2, id, echo, data, len);
++ spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
+ } else {
+ /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
+ if (dln2_transfer_complete(dln2, urb, handle, echo))
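The dln2 fix takes the driver's existing event_cb_lock around callback dispatch so an event cannot race with callback unregistration. A sketch of the general shape, with hypothetical structure fields:

    /* Sketch: dispatch under a lock usable from interrupt context so
     * unregister cannot free a callback mid-call. Fields are illustrative. */
    static void dispatch_event(struct my_dev *dev, unsigned int id,
                               void *data, int len)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->event_cb_lock, flags);
            if (id < MY_MAX_EVENTS && dev->event_cb[id])
                    dev->event_cb[id](dev, data, len);
            spin_unlock_irqrestore(&dev->event_cb_lock, flags);
    }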
+diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+index a66f8d6d61d1b..cb89f0578d425 100644
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+@@ -229,15 +229,12 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
+ DTRAN_CTRL_DM_START);
+ }
+
+-static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
++static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
+ {
+- struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+ enum dma_data_direction dir;
+
+- spin_lock_irq(&host->lock);
+-
+ if (!host->data)
+- goto out;
++ return false;
+
+ if (host->data->flags & MMC_DATA_READ)
+ dir = DMA_FROM_DEVICE;
+@@ -250,6 +247,17 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
+ if (dir == DMA_FROM_DEVICE)
+ clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+
++ return true;
++}
++
++static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
++{
++ struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
++
++ spin_lock_irq(&host->lock);
++ if (!renesas_sdhi_internal_dmac_complete(host))
++ goto out;
++
+ tmio_mmc_do_data_irq(host);
+ out:
+ spin_unlock_irq(&host->lock);
+diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
+index 1054cc070747e..20b0ee174dc61 100644
+--- a/drivers/mtd/nand/raw/fsl_upm.c
++++ b/drivers/mtd/nand/raw/fsl_upm.c
+@@ -62,7 +62,6 @@ static int fun_chip_ready(struct nand_chip *chip)
+ static void fun_wait_rnb(struct fsl_upm_nand *fun)
+ {
+ if (fun->rnb_gpio[fun->mchip_number] >= 0) {
+- struct mtd_info *mtd = nand_to_mtd(&fun->chip);
+ int cnt = 1000000;
+
+ while (--cnt && !fun_chip_ready(&fun->chip))
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
+index 413c3f254cf85..c881a573da662 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
+@@ -43,7 +43,7 @@ struct qmem {
+ void *base;
+ dma_addr_t iova;
+ int alloc_sz;
+- u8 entry_sz;
++ u16 entry_sz;
+ u8 align;
+ u32 qsize;
+ };
+diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
+index c84ab052ef265..3eee8df359a12 100644
+--- a/drivers/net/ethernet/qualcomm/emac/emac.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac.c
+@@ -485,13 +485,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
+ if (ret)
+- return ret;
++ goto disable_clk_axi;
+
+ ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
+ if (ret)
+- return ret;
++ goto disable_clk_cfg_ahb;
++
++ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
++ if (ret)
++ goto disable_clk_cfg_ahb;
+
+- return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
++ return 0;
++
++disable_clk_cfg_ahb:
++ clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
++disable_clk_axi:
++ clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);
++
++ return ret;
+ }
+
+ /* Enable clocks; needs emac_clks_phase1_init to be called before */
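The emac change converts early returns into the standard goto-unwind idiom, so every clock enabled earlier in the function is disabled again on any later failure. A minimal sketch with hypothetical clock fields:

    static int clks_init(struct my_adapter *adpt)    /* illustrative type */
    {
            int ret;

            ret = clk_prepare_enable(adpt->clk_axi);
            if (ret)
                    return ret;                 /* nothing to undo yet */

            ret = clk_prepare_enable(adpt->clk_ahb);
            if (ret)
                    goto disable_axi;

            ret = clk_prepare_enable(adpt->clk_hs);
            if (ret)
                    goto disable_ahb;

            return 0;

    disable_ahb:
            clk_disable_unprepare(adpt->clk_ahb);
    disable_axi:
            clk_disable_unprepare(adpt->clk_axi);
            return ret;
    }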
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+index 4d75158c64b29..826626e870d5c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+@@ -350,6 +350,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
+ plat_dat->has_gmac = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
++ plat_dat->multicast_filter_bins = 0;
+
+ err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (err)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+index bc9b01376e807..1d0b64bd1e1a9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -166,6 +166,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
+ value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ value = GMAC_FRAME_FILTER_PM; /* pass all multi */
++ } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) {
++ /* Fall back to all multicast if we've no filter */
++ value = GMAC_FRAME_FILTER_PM;
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+
+diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
+index 89b85970912db..35d265014e1ec 100644
+--- a/drivers/nvdimm/security.c
++++ b/drivers/nvdimm/security.c
+@@ -450,14 +450,19 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
+ else
+ dev_dbg(&nvdimm->dev, "overwrite completed\n");
+
+- if (nvdimm->sec.overwrite_state)
+- sysfs_notify_dirent(nvdimm->sec.overwrite_state);
++ /*
++ * Mark the overwrite work done and update dimm security flags,
++ * then send a sysfs event notification to wake up userspace
++	 * poll threads to pick up the changed state.
++ */
+ nvdimm->sec.overwrite_tmo = 0;
+ clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
+ clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
+- put_device(&nvdimm->dev);
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+- nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
++ nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
++ if (nvdimm->sec.overwrite_state)
++ sysfs_notify_dirent(nvdimm->sec.overwrite_state);
++ put_device(&nvdimm->dev);
+ }
+
+ void nvdimm_security_overwrite_query(struct work_struct *work)
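The reordering enforces a publish-then-notify rule: all flag updates land before sysfs_notify_dirent() wakes pollers, and put_device() moves last so the object cannot be released while it is still being written. A sketch of the ordering, with hypothetical names:

    /* Sketch: finish every state update, then notify, then drop the
     * reference. my_nvdimm and read_security_flags() are illustrative. */
    static void overwrite_done(struct my_nvdimm *nd)
    {
            nd->overwrite_tmo = 0;
            clear_bit(MY_WORK_PENDING, &nd->flags);
            nd->sec_flags = read_security_flags(nd);

            if (nd->overwrite_state)
                    sysfs_notify_dirent(nd->overwrite_state);
            put_device(nd->dev);            /* may free nd; must be last */
    }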
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index 8e40b3e6da77d..3cef835b375fd 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -322,12 +322,8 @@ void pci_bus_add_device(struct pci_dev *dev)
+
+ dev->match_driver = true;
+ retval = device_attach(&dev->dev);
+- if (retval < 0 && retval != -EPROBE_DEFER) {
++ if (retval < 0 && retval != -EPROBE_DEFER)
+ pci_warn(dev, "device attach failed (%d)\n", retval);
+- pci_proc_detach_device(dev);
+- pci_remove_sysfs_dev_files(dev);
+- return;
+- }
+
+ pci_dev_assign_added(dev, true);
+ }
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 70ded8900e285..270d502b8cd50 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -45,7 +45,13 @@
+ #define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
+
+ #define PCIE20_PARF_PHY_CTRL 0x40
++#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
++#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
++
+ #define PCIE20_PARF_PHY_REFCLK 0x4C
++#define PHY_REFCLK_SSP_EN BIT(16)
++#define PHY_REFCLK_USE_PAD BIT(12)
++
+ #define PCIE20_PARF_DBI_BASE_ADDR 0x168
+ #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
+ #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
+@@ -76,6 +82,18 @@
+ #define DBI_RO_WR_EN 1
+
+ #define PERST_DELAY_US 1000
++/* PARF registers */
++#define PCIE20_PARF_PCS_DEEMPH 0x34
++#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16)
++#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8)
++#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0)
++
++#define PCIE20_PARF_PCS_SWING 0x38
++#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8)
++#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0)
++
++#define PCIE20_PARF_CONFIG_BITS 0x50
++#define PHY_RX0_EQ(x) ((x) << 24)
+
+ #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
+ #define SLV_ADDR_SPACE_SZ 0x10000000
+@@ -275,6 +293,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
++ struct device_node *node = dev->of_node;
+ u32 val;
+ int ret;
+
+@@ -319,9 +338,29 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
++ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
++ writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
++ PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
++ PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
++ pcie->parf + PCIE20_PARF_PCS_DEEMPH);
++ writel(PCS_SWING_TX_SWING_FULL(120) |
++ PCS_SWING_TX_SWING_LOW(120),
++ pcie->parf + PCIE20_PARF_PCS_SWING);
++ writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
++ }
++
++ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
++ /* set TX termination offset */
++ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
++ val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
++ val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
++ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
++ }
++
+ /* enable external reference clock */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+- val |= BIT(16);
++ val &= ~PHY_REFCLK_USE_PAD;
++ val |= PHY_REFCLK_SSP_EN;
+ writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
+ ret = reset_control_deassert(res->phy_reset);
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index b3869951c0eb7..6e60b4b1bf53b 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -122,13 +122,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
+ struct acpiphp_context *context;
+
+ acpi_lock_hp_context();
++
+ context = acpiphp_get_context(adev);
+- if (!context || context->func.parent->is_going_away) {
+- acpi_unlock_hp_context();
+- return NULL;
++ if (!context)
++ goto unlock;
++
++ if (context->func.parent->is_going_away) {
++ acpiphp_put_context(context);
++ context = NULL;
++ goto unlock;
+ }
++
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
++
++unlock:
+ acpi_unlock_hp_context();
+ return context;
+ }
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 9bc0f321aaf0e..c98067579e9f3 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5208,7 +5208,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
+ */
+ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
+ {
+- if (pdev->device == 0x7340 && pdev->revision != 0xc5)
++ if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
++ (pdev->device == 0x7340 && pdev->revision != 0xc5))
+ return;
+
+ pci_info(pdev, "disabling ATS\n");
+@@ -5219,6 +5220,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
+ /* AMD Iceland dGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
++/* AMD Navi10 dGPU */
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
+ /* AMD Navi14 dGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
+index 6e2683016c1f0..8bd0a078bfc47 100644
+--- a/drivers/pinctrl/pinctrl-ingenic.c
++++ b/drivers/pinctrl/pinctrl-ingenic.c
+@@ -1500,9 +1500,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
+ */
+ high = ingenic_gpio_get_value(jzgc, irq);
+ if (high)
+- irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING);
++ irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_LOW);
+ else
+- irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
++ irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
+ }
+
+ if (jzgc->jzpc->version >= ID_JZ4760)
+@@ -1538,7 +1538,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
+ */
+ bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq);
+
+- type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
++ type = high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
+ }
+
+ irq_set_type(jzgc, irqd->hwirq, type);
+@@ -1644,7 +1644,8 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+ unsigned int pin = gc->base + offset;
+
+ if (jzpc->version >= ID_JZ4760)
+- return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
++ return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
++ ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
+
+ if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
+ return true;
+diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
+index 25ca2c894b4de..ab0662a33b41a 100644
+--- a/drivers/platform/chrome/cros_ec_ishtp.c
++++ b/drivers/platform/chrome/cros_ec_ishtp.c
+@@ -645,8 +645,10 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device)
+
+	/* Register cros_ec_dev mfd */
+ rv = cros_ec_dev_init(client_data);
+- if (rv)
++ if (rv) {
++ down_write(&init_lock);
+ goto end_cros_ec_dev_init_error;
++ }
+
+ return 0;
+
+diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
+index 1f829edd8ee70..d392a828fc493 100644
+--- a/drivers/pwm/pwm-bcm-iproc.c
++++ b/drivers/pwm/pwm-bcm-iproc.c
+@@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ u64 tmp, multi, rate;
+ u32 value, prescale;
+
+- rate = clk_get_rate(ip->clk);
+-
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm)))
+@@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ else
+ state->polarity = PWM_POLARITY_INVERSED;
+
++ rate = clk_get_rate(ip->clk);
++ if (rate == 0) {
++ state->period = 0;
++ state->duty_cycle = 0;
++ return;
++ }
++
+ value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
+ prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
+ prescale &= IPROC_PWM_PRESCALE_MAX;
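Moving the clk_get_rate() call after the enabled check, and bailing out when it reads zero, removes a divide-by-zero in the period computation. A standalone model of the guard:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified model: a clock rate can legitimately read back as 0,
     * so the conversion must not divide by it. */
    static uint64_t ticks_to_period_ns(uint64_t ticks, uint64_t rate_hz)
    {
            if (rate_hz == 0)
                    return 0;           /* report an unknown/idle state */
            return ticks * 1000000000ULL / rate_hz;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)ticks_to_period_ns(1000, 1000000));
            printf("%llu\n", (unsigned long long)ticks_to_period_ns(1000, 0));
            return 0;
    }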
+diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
+index cb0f4a0be0322..eaeb6aee6da5c 100644
+--- a/drivers/remoteproc/qcom_q6v5.c
++++ b/drivers/remoteproc/qcom_q6v5.c
+@@ -151,6 +151,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
+ {
+ int ret;
+
++ q6v5->running = false;
++
+ qcom_smem_state_update_bits(q6v5->state,
+ BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
+
+diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
+index d84e9f306086b..a67c55785b4de 100644
+--- a/drivers/remoteproc/qcom_q6v5_mss.c
++++ b/drivers/remoteproc/qcom_q6v5_mss.c
+@@ -381,6 +381,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
+ {
+ struct q6v5 *qproc = rproc->priv;
+
++ /* MBA is restricted to a maximum size of 1M */
++ if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
++ dev_err(qproc->dev, "MBA firmware load failed\n");
++ return -EINVAL;
++ }
++
+ memcpy(qproc->mba_region, fw->data, fw->size);
+
+ return 0;
+@@ -1028,15 +1034,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
+ } else if (phdr->p_filesz) {
+ /* Replace "xxx.xxx" with "xxx.bxx" */
+ sprintf(fw_name + fw_name_len - 3, "b%02d", i);
+- ret = request_firmware(&seg_fw, fw_name, qproc->dev);
++ ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
++ ptr, phdr->p_filesz);
+ if (ret) {
+ dev_err(qproc->dev, "failed to load %s\n", fw_name);
+ iounmap(ptr);
+ goto release_firmware;
+ }
+
+- memcpy(ptr, seg_fw->data, seg_fw->size);
+-
+ release_firmware(seg_fw);
+ }
+
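Two hardening steps here: the MBA image is size-checked against its target buffer before the memcpy(), and the per-segment loads switch to request_firmware_into_buf() so the firmware core enforces the destination size and skips an intermediate copy. A hedged sketch of the direct, size-bounded load (the helper name is illustrative):

    /* Sketch: load a blob straight into a pre-sized region; the core
     * rejects images larger than region_size. */
    static int load_segment(struct device *dev, const char *name,
                            void *region, size_t region_size)
    {
            const struct firmware *fw;
            int ret;

            ret = request_firmware_into_buf(&fw, name, dev,
                                            region, region_size);
            if (ret)
                    return ret;         /* not found or too large */

            release_firmware(fw);       /* data already in region */
            return 0;
    }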
+diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
+index 9884228800a50..f14394ab0e037 100644
+--- a/drivers/scsi/lpfc/lpfc_nvmet.c
++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
+@@ -1923,7 +1923,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
+ }
+ tgtp->tport_unreg_cmp = &tport_unreg_cmp;
+ nvmet_fc_unregister_targetport(phba->targetport);
+- if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
++ if (!wait_for_completion_timeout(&tport_unreg_cmp,
+ msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6179 Unreg targetport x%px timeout "
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 9ad44a96dfe3a..33f1cca7eaa61 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -2480,12 +2480,11 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
+ #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE)
+
+ static int ftdi_process_packet(struct usb_serial_port *port,
+- struct ftdi_private *priv, char *packet, int len)
++ struct ftdi_private *priv, unsigned char *buf, int len)
+ {
++ unsigned char status;
+ int i;
+- char status;
+ char flag;
+- char *ch;
+
+ if (len < 2) {
+ dev_dbg(&port->dev, "malformed packet\n");
+@@ -2495,7 +2494,7 @@ static int ftdi_process_packet(struct usb_serial_port *port,
+	/* Compare new line status to the old one, signal if different.
+ N.B. packet may be processed more than once, but differences
+ are only processed once. */
+- status = packet[0] & FTDI_STATUS_B0_MASK;
++ status = buf[0] & FTDI_STATUS_B0_MASK;
+ if (status != priv->prev_status) {
+ char diff_status = status ^ priv->prev_status;
+
+@@ -2521,13 +2520,12 @@ static int ftdi_process_packet(struct usb_serial_port *port,
+ }
+
+ /* save if the transmitter is empty or not */
+- if (packet[1] & FTDI_RS_TEMT)
++ if (buf[1] & FTDI_RS_TEMT)
+ priv->transmit_empty = 1;
+ else
+ priv->transmit_empty = 0;
+
+- len -= 2;
+- if (!len)
++ if (len == 2)
+ return 0; /* status only */
+
+ /*
+@@ -2535,40 +2533,41 @@ static int ftdi_process_packet(struct usb_serial_port *port,
+ * data payload to avoid over-reporting.
+ */
+ flag = TTY_NORMAL;
+- if (packet[1] & FTDI_RS_ERR_MASK) {
++ if (buf[1] & FTDI_RS_ERR_MASK) {
+ /* Break takes precedence over parity, which takes precedence
+ * over framing errors */
+- if (packet[1] & FTDI_RS_BI) {
++ if (buf[1] & FTDI_RS_BI) {
+ flag = TTY_BREAK;
+ port->icount.brk++;
+ usb_serial_handle_break(port);
+- } else if (packet[1] & FTDI_RS_PE) {
++ } else if (buf[1] & FTDI_RS_PE) {
+ flag = TTY_PARITY;
+ port->icount.parity++;
+- } else if (packet[1] & FTDI_RS_FE) {
++ } else if (buf[1] & FTDI_RS_FE) {
+ flag = TTY_FRAME;
+ port->icount.frame++;
+ }
+ /* Overrun is special, not associated with a char */
+- if (packet[1] & FTDI_RS_OE) {
++ if (buf[1] & FTDI_RS_OE) {
+ port->icount.overrun++;
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
+ }
+ }
+
+- port->icount.rx += len;
+- ch = packet + 2;
++ port->icount.rx += len - 2;
+
+ if (port->port.console && port->sysrq) {
+- for (i = 0; i < len; i++, ch++) {
+- if (!usb_serial_handle_sysrq_char(port, *ch))
+- tty_insert_flip_char(&port->port, *ch, flag);
++ for (i = 2; i < len; i++) {
++ if (usb_serial_handle_sysrq_char(port, buf[i]))
++ continue;
++ tty_insert_flip_char(&port->port, buf[i], flag);
+ }
+ } else {
+- tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len);
++ tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag,
++ len - 2);
+ }
+
+- return len;
++ return len - 2;
+ }
+
+ static void ftdi_process_read_urb(struct urb *urb)
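The rewrite keeps the two status bytes in place and indexes the payload from offset 2, rather than adjusting len and walking a secondary pointer, and status becomes unsigned char so bit tests on values above 0x7f are well defined. A standalone model of the same parsing shape:

    #include <stdio.h>

    #define STATUS_B0_MASK 0xf0     /* placeholder mask for the sketch */

    /* Simplified model: bytes 0-1 are status, payload starts at 2;
     * returns the payload length or -1 for a malformed packet. */
    static int process_packet(const unsigned char *buf, int len)
    {
            unsigned char status;
            int i;

            if (len < 2)
                    return -1;

            status = buf[0] & STATUS_B0_MASK;   /* unsigned: no sign traps */
            (void)status;                       /* a real driver compares it */

            if (len == 2)
                    return 0;                   /* status only */

            for (i = 2; i < len; i++)
                    putchar(buf[i]);            /* hand payload onward */

            return len - 2;
    }

    int main(void)
    {
            const unsigned char pkt[] = { 0x31, 0x60, 'h', 'i', '\n' };

            return process_packet(pkt, sizeof(pkt)) < 0;
    }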
+diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
+index e46104c2fd94e..893cef70c1599 100644
+--- a/drivers/watchdog/f71808e_wdt.c
++++ b/drivers/watchdog/f71808e_wdt.c
+@@ -689,9 +689,9 @@ static int __init watchdog_init(int sioaddr)
+ * into the module have been registered yet.
+ */
+ watchdog.sioaddr = sioaddr;
+- watchdog.ident.options = WDIOC_SETTIMEOUT
+- | WDIOF_MAGICCLOSE
+- | WDIOF_KEEPALIVEPING;
++ watchdog.ident.options = WDIOF_MAGICCLOSE
++ | WDIOF_KEEPALIVEPING
++ | WDIOF_CARDRESET;
+
+ snprintf(watchdog.ident.identity,
+ sizeof(watchdog.ident.identity), "%s watchdog",
+@@ -705,6 +705,13 @@ static int __init watchdog_init(int sioaddr)
+ wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
+ watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
+
++ /*
++ * We don't want WDTMOUT_STS to stick around till regular reboot.
++ * Write 1 to the bit to clear it to zero.
++ */
++ superio_outb(sioaddr, F71808FG_REG_WDT_CONF,
++ wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS));
++
+ superio_exit(sioaddr);
+
+ err = watchdog_set_timeout(timeout);
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index c4147e93aa7d4..3729f99fd8eca 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -974,6 +974,15 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ if (IS_ERR_OR_NULL(watchdog_kworker))
+ return -ENODEV;
+
++ device_initialize(&wd_data->dev);
++ wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
++ wd_data->dev.class = &watchdog_class;
++ wd_data->dev.parent = wdd->parent;
++ wd_data->dev.groups = wdd->groups;
++ wd_data->dev.release = watchdog_core_data_release;
++ dev_set_drvdata(&wd_data->dev, wdd);
++ dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
++
+ kthread_init_work(&wd_data->work, watchdog_ping_work);
+ hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+ wd_data->timer.function = watchdog_timer_expired;
+@@ -994,15 +1003,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ }
+ }
+
+- device_initialize(&wd_data->dev);
+- wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
+- wd_data->dev.class = &watchdog_class;
+- wd_data->dev.parent = wdd->parent;
+- wd_data->dev.groups = wdd->groups;
+- wd_data->dev.release = watchdog_core_data_release;
+- dev_set_drvdata(&wd_data->dev, wdd);
+- dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
+-
+ /* Fill in the data structures */
+ cdev_init(&wd_data->cdev, &watchdog_fops);
+
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 36cd210ee2ef7..2374f3f6f3b70 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -990,8 +990,10 @@ struct btrfs_root {
+ wait_queue_head_t log_writer_wait;
+ wait_queue_head_t log_commit_wait[2];
+ struct list_head log_ctxs[2];
++ /* Used only for log trees of subvolumes, not for the log root tree */
+ atomic_t log_writers;
+ atomic_t log_commit[2];
++ /* Used only for log trees of subvolumes, not for the log root tree */
+ atomic_t log_batch;
+ int log_transid;
+ /* No matter the commit succeeds or not*/
+@@ -3164,7 +3166,7 @@ do { \
+ /* Report first abort since mount */ \
+ if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
+ &((trans)->fs_info->fs_state))) { \
+- if ((errno) != -EIO) { \
++ if ((errno) != -EIO && (errno) != -EROFS) { \
+ WARN(1, KERN_DEBUG \
+ "BTRFS: Transaction aborted (error %d)\n", \
+ (errno)); \
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 273d1ccdd45df..ad1c8e3b8133a 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1475,9 +1475,16 @@ int btrfs_init_fs_root(struct btrfs_root *root)
+ spin_lock_init(&root->ino_cache_lock);
+ init_waitqueue_head(&root->ino_cache_wait);
+
+- ret = get_anon_bdev(&root->anon_dev);
+- if (ret)
+- goto fail;
++ /*
++	 * Don't assign an anonymous block device to roots that are not exposed
++	 * to userspace; the id pool is limited to 1M.
++ */
++ if (is_fstree(root->root_key.objectid) &&
++ btrfs_root_refs(&root->root_item) > 0) {
++ ret = get_anon_bdev(&root->anon_dev);
++ if (ret)
++ goto fail;
++ }
+
+ mutex_lock(&root->objectid_mutex);
+ ret = btrfs_find_highest_objectid(root,
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 47ecf7216b3e5..739332b462059 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5221,7 +5221,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
+ goto out;
+ }
+
+- trans = btrfs_start_transaction(tree_root, 0);
++ /*
++ * Use join to avoid potential EINTR from transaction start. See
++ * wait_reserve_ticket and the whole reservation callchain.
++ */
++ if (for_reloc)
++ trans = btrfs_join_transaction(tree_root);
++ else
++ trans = btrfs_start_transaction(tree_root, 0);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ goto out_free;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 99dcb38976592..035ea5bc692ad 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4467,15 +4467,25 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
+ free_extent_map(em);
+ break;
+ }
+- if (!test_range_bit(tree, em->start,
+- extent_map_end(em) - 1,
+- EXTENT_LOCKED, 0, NULL)) {
++ if (test_range_bit(tree, em->start,
++ extent_map_end(em) - 1,
++ EXTENT_LOCKED, 0, NULL))
++ goto next;
++ /*
++ * If it's not in the list of modified extents, used
++ * by a fast fsync, we can remove it. If it's being
++ * logged we can safely remove it since fsync took an
++ * extra reference on the em.
++ */
++ if (list_empty(&em->list) ||
++ test_bit(EXTENT_FLAG_LOGGING, &em->flags)) {
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &btrfs_inode->runtime_flags);
+ remove_extent_mapping(map, em);
+ /* once for the rb tree */
+ free_extent_map(em);
+ }
++next:
+ start = extent_map_end(em);
+ write_unlock(&map->lock);
+
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index d86ada9c3c541..8bfc0f348ad55 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2166,7 +2166,7 @@ out:
+ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info, bool update_stat)
+ {
+- struct btrfs_free_space *left_info;
++ struct btrfs_free_space *left_info = NULL;
+ struct btrfs_free_space *right_info;
+ bool merged = false;
+ u64 offset = info->offset;
+@@ -2181,7 +2181,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
+ if (right_info && rb_prev(&right_info->offset_index))
+ left_info = rb_entry(rb_prev(&right_info->offset_index),
+ struct btrfs_free_space, offset_index);
+- else
++ else if (!right_info)
+ left_info = tree_search_offset(ctl, offset - 1, 0, 0);
+
+ if (right_info && !right_info->bitmap) {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index e408181a5eba3..fa7f3a59813ea 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -641,12 +641,18 @@ cont:
+ page_error_op |
+ PAGE_END_WRITEBACK);
+
+- for (i = 0; i < nr_pages; i++) {
+- WARN_ON(pages[i]->mapping);
+- put_page(pages[i]);
++ /*
++ * Ensure we only free the compressed pages if we have
++ * them allocated, as we can still reach here with
++ * inode_need_compress() == false.
++ */
++ if (pages) {
++ for (i = 0; i < nr_pages; i++) {
++ WARN_ON(pages[i]->mapping);
++ put_page(pages[i]);
++ }
++ kfree(pages);
+ }
+- kfree(pages);
+-
+ return 0;
+ }
+ }
+@@ -4681,6 +4687,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
+ }
+ }
+
++ free_anon_bdev(dest->anon_dev);
++ dest->anon_dev = 0;
+ out_end_trans:
+ trans->block_rsv = NULL;
+ trans->bytes_reserved = 0;
+@@ -7186,7 +7194,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
+ extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ /* Only regular file could have regular/prealloc extent */
+ if (!S_ISREG(inode->vfs_inode.i_mode)) {
+- ret = -EUCLEAN;
++ err = -EUCLEAN;
+ btrfs_crit(fs_info,
+ "regular/prealloc extent found for non-regular inode %llu",
+ btrfs_ino(inode));
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index d88b8d8897cc5..88745b5182126 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -167,8 +167,11 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
+ return 0;
+ }
+
+-/* Check if @flags are a supported and valid set of FS_*_FL flags */
+-static int check_fsflags(unsigned int flags)
++/*
++ * Check if @flags are a supported and valid set of FS_*_FL flags and that
++ * the old and new flags are not conflicting
++ */
++static int check_fsflags(unsigned int old_flags, unsigned int flags)
+ {
+ if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
+ FS_NOATIME_FL | FS_NODUMP_FL | \
+@@ -177,9 +180,19 @@ static int check_fsflags(unsigned int flags)
+ FS_NOCOW_FL))
+ return -EOPNOTSUPP;
+
++ /* COMPR and NOCOMP on new/old are valid */
+ if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
+ return -EINVAL;
+
++ if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL))
++ return -EINVAL;
++
++ /* NOCOW and compression options are mutually exclusive */
++ if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
++ return -EINVAL;
++ if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
++ return -EINVAL;
++
+ return 0;
+ }
+
+@@ -193,7 +206,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
+ unsigned int fsflags, old_fsflags;
+ int ret;
+ const char *comp = NULL;
+- u32 binode_flags = binode->flags;
++ u32 binode_flags;
+
+ if (!inode_owner_or_capable(inode))
+ return -EPERM;
+@@ -204,22 +217,23 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
+ if (copy_from_user(&fsflags, arg, sizeof(fsflags)))
+ return -EFAULT;
+
+- ret = check_fsflags(fsflags);
+- if (ret)
+- return ret;
+-
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+-
+ fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
+ old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
++
+ ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
+ if (ret)
+ goto out_unlock;
+
++ ret = check_fsflags(old_fsflags, fsflags);
++ if (ret)
++ goto out_unlock;
++
++ binode_flags = binode->flags;
+ if (fsflags & FS_SYNC_FL)
+ binode_flags |= BTRFS_INODE_SYNC;
+ else
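Passing the old flags lets check_fsflags() reject transitions, not just end states: NOCOW and the compression options are mutually exclusive in either direction. A standalone model of the conflict rules (flag values are illustrative, not the real FS_*_FL bits):

    #include <stdio.h>

    #define FL_COMPR  0x1
    #define FL_NOCOMP 0x2
    #define FL_NOCOW  0x4

    static int check_flags(unsigned int old_flags, unsigned int flags)
    {
            if ((flags & FL_NOCOMP) && (flags & FL_COMPR))
                    return -1;          /* self-contradictory request */
            if ((flags & FL_COMPR) && (flags & FL_NOCOW))
                    return -1;          /* conflicting new state */
            /* NOCOW and compression must not be combined across the change */
            if ((old_flags & FL_NOCOW) && (flags & (FL_COMPR | FL_NOCOMP)))
                    return -1;
            if ((flags & FL_NOCOW) && (old_flags & (FL_COMPR | FL_NOCOMP)))
                    return -1;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_flags(FL_NOCOW, FL_COMPR));    /* -1 */
            printf("%d\n", check_flags(0, FL_COMPR));           /* 0 */
            return 0;
    }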
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index 454a1015d026b..9a2f15f4c80e0 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -286,6 +286,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
+ exist_re = insert_root_entry(&exist->roots, re);
+ if (exist_re)
+ kfree(re);
++ } else {
++ kfree(re);
+ }
+ kfree(be);
+ return exist;
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 1b087ee338ccb..af3605a0bf2e0 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -2312,12 +2312,20 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
+ btrfs_unlock_up_safe(path, 0);
+ }
+
+- min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
++ /*
++ * In merge_reloc_root(), we modify the upper level pointer to swap the
++ * tree blocks between reloc tree and subvolume tree. Thus for tree
++ * block COW, we COW at most from level 1 to root level for each tree.
++ *
++	 * So the needed metadata size is at most root_level * nodesize,
++	 * doubled since we have two trees to COW.
++ */
++ min_reserved = fs_info->nodesize * btrfs_root_level(root_item) * 2;
+ memset(&next_key, 0, sizeof(next_key));
+
+ while (1) {
+ ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
+- BTRFS_RESERVE_FLUSH_ALL);
++ BTRFS_RESERVE_FLUSH_LIMIT);
+ if (ret) {
+ err = ret;
+ goto out;
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index aea24202cd355..4b0ee34aa65d5 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -435,6 +435,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ char *compress_type;
+ bool compress_force = false;
+ enum btrfs_compression_type saved_compress_type;
++ int saved_compress_level;
+ bool saved_compress_force;
+ int no_compress = 0;
+
+@@ -517,6 +518,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ info->compress_type : BTRFS_COMPRESS_NONE;
+ saved_compress_force =
+ btrfs_test_opt(info, FORCE_COMPRESS);
++ saved_compress_level = info->compress_level;
+ if (token == Opt_compress ||
+ token == Opt_compress_force ||
+ strncmp(args[0].from, "zlib", 4) == 0) {
+@@ -561,6 +563,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ no_compress = 0;
+ } else if (strncmp(args[0].from, "no", 2) == 0) {
+ compress_type = "no";
++ info->compress_level = 0;
++ info->compress_type = 0;
+ btrfs_clear_opt(info->mount_opt, COMPRESS);
+ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
+ compress_force = false;
+@@ -581,11 +585,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ */
+ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
+ }
+- if ((btrfs_test_opt(info, COMPRESS) &&
+- (info->compress_type != saved_compress_type ||
+- compress_force != saved_compress_force)) ||
+- (!btrfs_test_opt(info, COMPRESS) &&
+- no_compress == 1)) {
++ if (no_compress == 1) {
++ btrfs_info(info, "use no compression");
++ } else if ((info->compress_type != saved_compress_type) ||
++ (compress_force != saved_compress_force) ||
++ (info->compress_level != saved_compress_level)) {
+ btrfs_info(info, "%s %s compression, level %d",
+ (compress_force) ? "force" : "use",
+ compress_type, info->compress_level);
+@@ -1848,6 +1852,12 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+ set_bit(BTRFS_FS_OPEN, &fs_info->flags);
+ }
+ out:
++ /*
++	 * We need to set SB_I_VERSION here, otherwise it'll get cleared by VFS,
++ * since the absence of the flag means it can be toggled off by remount.
++ */
++ *flags |= SB_I_VERSION;
++
+ wake_up_process(fs_info->transaction_kthread);
+ btrfs_remount_cleanup(fs_info, old_opts);
+ return 0;
+@@ -2254,9 +2264,7 @@ static int btrfs_unfreeze(struct super_block *sb)
+ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
+ {
+ struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
+- struct btrfs_fs_devices *cur_devices;
+ struct btrfs_device *dev, *first_dev = NULL;
+- struct list_head *head;
+
+ /*
+ * Lightweight locking of the devices. We should not need
+@@ -2266,18 +2274,13 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
+ * least until the rcu_read_unlock.
+ */
+ rcu_read_lock();
+- cur_devices = fs_info->fs_devices;
+- while (cur_devices) {
+- head = &cur_devices->devices;
+- list_for_each_entry_rcu(dev, head, dev_list) {
+- if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
+- continue;
+- if (!dev->name)
+- continue;
+- if (!first_dev || dev->devid < first_dev->devid)
+- first_dev = dev;
+- }
+- cur_devices = cur_devices->seed;
++ list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) {
++ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
++ continue;
++ if (!dev->name)
++ continue;
++ if (!first_dev || dev->devid < first_dev->devid)
++ first_dev = dev;
+ }
+
+ if (first_dev)
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index f6d3c80f2e289..5c299e1f2297e 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -975,7 +975,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
+ {
+ int error = 0;
+ struct btrfs_device *dev;
++ unsigned int nofs_flag;
+
++ nofs_flag = memalloc_nofs_save();
+ list_for_each_entry(dev, &fs_devices->devices, dev_list) {
+ struct hd_struct *disk;
+ struct kobject *disk_kobj;
+@@ -994,6 +996,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
+ if (error)
+ break;
+ }
++ memalloc_nofs_restore(nofs_flag);
+
+ return error;
+ }
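Wrapping the sysfs link creation in memalloc_nofs_save()/memalloc_nofs_restore() makes every allocation inside the section behave as GFP_NOFS, so memory reclaim cannot re-enter the filesystem while it holds its own locks. A kernel-context fragment of the scoped idiom (the worker function is hypothetical):

    /* Fragment: allocations inside the scope implicitly drop __GFP_FS,
     * with no need to thread gfp flags through callees. */
    unsigned int nofs_flag = memalloc_nofs_save();

    do_allocating_work();       /* hypothetical; may allocate internally */

    memalloc_nofs_restore(nofs_flag);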
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index f46afbff668eb..3c090549ed07d 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3140,29 +3140,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ btrfs_init_log_ctx(&root_log_ctx, NULL);
+
+ mutex_lock(&log_root_tree->log_mutex);
+- atomic_inc(&log_root_tree->log_batch);
+- atomic_inc(&log_root_tree->log_writers);
+
+ index2 = log_root_tree->log_transid % 2;
+ list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
+ root_log_ctx.log_transid = log_root_tree->log_transid;
+
+- mutex_unlock(&log_root_tree->log_mutex);
+-
+- mutex_lock(&log_root_tree->log_mutex);
+-
+ /*
+ * Now we are safe to update the log_root_tree because we're under the
+ * log_mutex, and we're a current writer so we're holding the commit
+ * open until we drop the log_mutex.
+ */
+ ret = update_log_root(trans, log, &new_root_item);
+-
+- if (atomic_dec_and_test(&log_root_tree->log_writers)) {
+- /* atomic_dec_and_test implies a barrier */
+- cond_wake_up_nomb(&log_root_tree->log_writer_wait);
+- }
+-
+ if (ret) {
+ if (!list_empty(&root_log_ctx.list))
+ list_del_init(&root_log_ctx.list);
+@@ -3208,8 +3196,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ root_log_ctx.log_transid - 1);
+ }
+
+- wait_for_writer(log_root_tree);
+-
+ /*
+ * now that we've moved on to the tree of log tree roots,
+ * check the full commit flag again
+@@ -4054,11 +4040,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
+ fs_info->csum_root,
+ ds + cs, ds + cs + cl - 1,
+ &ordered_sums, 0);
+- if (ret) {
+- btrfs_release_path(dst_path);
+- kfree(ins_data);
+- return ret;
+- }
++ if (ret)
++ break;
+ }
+ }
+ }
+@@ -4071,7 +4054,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
+ * we have to do this after the loop above to avoid changing the
+ * log tree while trying to change the log tree.
+ */
+- ret = 0;
+ while (!list_empty(&ordered_sums)) {
+ struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
+ struct btrfs_ordered_sum,
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 1e6e3c1d97dfa..196ddbcd29360 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -219,7 +219,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
+ *
+ * global::fs_devs - add, remove, updates to the global list
+ *
+- * does not protect: manipulation of the fs_devices::devices list!
++ * does not protect: manipulation of the fs_devices::devices list in general
++ * but in mount context it could be used to exclude list modifications
++ * by e.g. the scan ioctl
+ *
+ * btrfs_device::name - renames (write side), read is RCU
+ *
+@@ -232,6 +234,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
+ * may be used to exclude some operations from running concurrently without any
+ * modifications to the list (see write_all_supers)
+ *
++ * It is not required at mount and close times, because our device list is
++ * protected by the uuid_mutex at that point.
++ *
+ * balance_mutex
+ * -------------
+ * protects balance structures (status, state) and context accessed from
+@@ -778,6 +783,11 @@ static int btrfs_free_stale_devices(const char *path,
+ return ret;
+ }
+
++/*
++ * This is only used on mount, and we are protected from competing things
++ * messing with our fs_devices by the uuid_mutex, thus we do not need the
++ * fs_devices->device_list_mutex here.
++ */
+ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
+ struct btrfs_device *device, fmode_t flags,
+ void *holder)
+@@ -1418,8 +1428,14 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+ int ret;
+
+ lockdep_assert_held(&uuid_mutex);
++ /*
++ * The device_list_mutex cannot be taken here in case opening the
++ * underlying device takes further locks like bd_mutex.
++ *
++ * We also don't need the lock here as this is called during mount and
++	 * exclusion is provided by uuid_mutex.
++ */
+
+- mutex_lock(&fs_devices->device_list_mutex);
+ if (fs_devices->opened) {
+ fs_devices->opened++;
+ ret = 0;
+@@ -1427,7 +1443,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+ list_sort(NULL, &fs_devices->devices, devid_cmp);
+ ret = open_fs_devices(fs_devices, flags, holder);
+ }
+- mutex_unlock(&fs_devices->device_list_mutex);
+
+ return ret;
+ }
+@@ -3283,7 +3298,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
+ if (!path)
+ return -ENOMEM;
+
+- trans = btrfs_start_transaction(root, 0);
++ trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
+ if (IS_ERR(trans)) {
+ btrfs_free_path(path);
+ return PTR_ERR(trans);
+@@ -4246,7 +4261,22 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
+ mutex_lock(&fs_info->balance_mutex);
+ if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
+ btrfs_info(fs_info, "balance: paused");
+- else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
++ /*
++ * Balance can be canceled by:
++ *
++ * - Regular cancel request
++ * Then ret == -ECANCELED and balance_cancel_req > 0
++ *
++ * - Fatal signal to "btrfs" process
++	 *   Either the signal is caught by wait_reserve_ticket() and callers
++	 *   get -EINTR, or it is caught by btrfs_should_cancel_balance() and
++	 *   they get -ECANCELED.
++ * Either way, in this case balance_cancel_req = 0, and
++ * ret == -EINTR or ret == -ECANCELED.
++ *
++ * So here we only check the return value to catch canceled balance.
++ */
++ else if (ret == -ECANCELED || ret == -EINTR)
+ btrfs_info(fs_info, "balance: canceled");
+ else
+ btrfs_info(fs_info, "balance: ended with status: %d", ret);
+@@ -7267,7 +7297,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
+ * otherwise we don't need it.
+ */
+ mutex_lock(&uuid_mutex);
+- mutex_lock(&fs_info->chunk_mutex);
+
+ /*
+ * It is possible for mount and umount to race in such a way that
+@@ -7312,7 +7341,9 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
+ } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
+ struct btrfs_chunk *chunk;
+ chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
++ mutex_lock(&fs_info->chunk_mutex);
+ ret = read_one_chunk(&found_key, leaf, chunk);
++ mutex_unlock(&fs_info->chunk_mutex);
+ if (ret)
+ goto error;
+ }
+@@ -7342,7 +7373,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
+ }
+ ret = 0;
+ error:
+- mutex_unlock(&fs_info->chunk_mutex);
+ mutex_unlock(&uuid_mutex);
+
+ btrfs_free_path(path);
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index 2e4764fd18727..3367a8194f24b 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -920,6 +920,10 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
+ req->r_num_caps = 2;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+ req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
++ if (as_ctx.pagelist) {
++ req->r_pagelist = as_ctx.pagelist;
++ as_ctx.pagelist = NULL;
++ }
+ err = ceph_mdsc_do_request(mdsc, dir, req);
+ if (!err && !req->r_reply_info.head->is_dentry)
+ err = ceph_handle_notrace_create(dir, dentry);
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index b79fe6549df6f..701bc3f4d4ba1 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -3091,8 +3091,10 @@ static void handle_session(struct ceph_mds_session *session,
+ goto bad;
+ /* version >= 3, feature bits */
+ ceph_decode_32_safe(&p, end, len, bad);
+- ceph_decode_64_safe(&p, end, features, bad);
+- p += len - sizeof(features);
++ if (len) {
++ ceph_decode_64_safe(&p, end, features, bad);
++ p += len - sizeof(features);
++ }
+ }
+
+ mutex_lock(&mdsc->mutex);
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 14265b4bbcc00..2fc96f7923ee5 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -509,15 +509,31 @@ cifs_ses_oplock_break(struct work_struct *work)
+ kfree(lw);
+ }
+
++static void
++smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key,
++ __le32 new_lease_state)
++{
++ struct smb2_lease_break_work *lw;
++
++ lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
++ if (!lw) {
++ cifs_put_tlink(tlink);
++ return;
++ }
++
++ INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
++ lw->tlink = tlink;
++ lw->lease_state = new_lease_state;
++ memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE);
++ queue_work(cifsiod_wq, &lw->lease_break);
++}
++
+ static bool
+-smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
+- struct smb2_lease_break_work *lw)
++smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
+ {
+- bool found;
+ __u8 lease_state;
+ struct list_head *tmp;
+ struct cifsFileInfo *cfile;
+- struct cifs_pending_open *open;
+ struct cifsInodeInfo *cinode;
+ int ack_req = le32_to_cpu(rsp->Flags &
+ SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
+@@ -556,22 +572,29 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
+ &cinode->flags);
+
+ cifs_queue_oplock_break(cfile);
+- kfree(lw);
+ return true;
+ }
+
+- found = false;
++ return false;
++}
++
++static struct cifs_pending_open *
++smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
++ struct smb2_lease_break *rsp)
++{
++ __u8 lease_state = le32_to_cpu(rsp->NewLeaseState);
++ int ack_req = le32_to_cpu(rsp->Flags &
++ SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
++ struct cifs_pending_open *open;
++ struct cifs_pending_open *found = NULL;
++
+ list_for_each_entry(open, &tcon->pending_opens, olist) {
+ if (memcmp(open->lease_key, rsp->LeaseKey,
+ SMB2_LEASE_KEY_SIZE))
+ continue;
+
+ if (!found && ack_req) {
+- found = true;
+- memcpy(lw->lease_key, open->lease_key,
+- SMB2_LEASE_KEY_SIZE);
+- lw->tlink = cifs_get_tlink(open->tlink);
+- queue_work(cifsiod_wq, &lw->lease_break);
++ found = open;
+ }
+
+ cifs_dbg(FYI, "found in the pending open list\n");
+@@ -592,14 +615,7 @@ smb2_is_valid_lease_break(char *buffer)
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+- struct smb2_lease_break_work *lw;
+-
+- lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
+- if (!lw)
+- return false;
+-
+- INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
+- lw->lease_state = rsp->NewLeaseState;
++ struct cifs_pending_open *open;
+
+ cifs_dbg(FYI, "Checking for lease break\n");
+
+@@ -617,11 +633,27 @@ smb2_is_valid_lease_break(char *buffer)
+ spin_lock(&tcon->open_file_lock);
+ cifs_stats_inc(
+ &tcon->stats.cifs_stats.num_oplock_brks);
+- if (smb2_tcon_has_lease(tcon, rsp, lw)) {
++ if (smb2_tcon_has_lease(tcon, rsp)) {
+ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
++ open = smb2_tcon_find_pending_open_lease(tcon,
++ rsp);
++ if (open) {
++ __u8 lease_key[SMB2_LEASE_KEY_SIZE];
++ struct tcon_link *tlink;
++
++ tlink = cifs_get_tlink(open->tlink);
++ memcpy(lease_key, open->lease_key,
++ SMB2_LEASE_KEY_SIZE);
++ spin_unlock(&tcon->open_file_lock);
++ spin_unlock(&cifs_tcp_ses_lock);
++ smb2_queue_pending_open_break(tlink,
++ lease_key,
++ rsp->NewLeaseState);
++ return true;
++ }
+ spin_unlock(&tcon->open_file_lock);
+
+ if (tcon->crfid.is_valid &&
+@@ -639,7 +671,6 @@ smb2_is_valid_lease_break(char *buffer)
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+- kfree(lw);
+ cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
+ return false;
+ }
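Aside: the refactor above copies what it needs (lease key, tlink) while the spinlocks are held, drops them, and only then performs the GFP_KERNEL allocation and queue_work() — sleeping allocations are not allowed under a spinlock. A userspace-flavoured sketch of the same copy-unlock-allocate shape (hypothetical names, pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define KEY_SIZE 16

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct work { unsigned char key[KEY_SIZE]; };

static void queue_break_work(const unsigned char *key)
{
	struct work *w = malloc(sizeof(*w));	/* may sleep: do it unlocked */
	if (!w)
		return;
	memcpy(w->key, key, KEY_SIZE);
	/* ... hand w off to a worker thread ... */
}

static void handle_break(const unsigned char *wire_key)
{
	unsigned char key[KEY_SIZE];

	pthread_mutex_lock(&list_lock);
	memcpy(key, wire_key, KEY_SIZE);	/* copy under the lock */
	pthread_mutex_unlock(&list_lock);

	queue_break_work(key);			/* allocate after unlock */
}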
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 06b1a86d76b18..7ff05c06f2a4c 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1323,6 +1323,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+ spnego_key = cifs_get_spnego_key(ses);
+ if (IS_ERR(spnego_key)) {
+ rc = PTR_ERR(spnego_key);
++ if (rc == -ENOKEY)
++ cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
+ spnego_key = NULL;
+ goto out;
+ }
+diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
+index fda7d3f5b4be5..432c3febea6df 100644
+--- a/fs/ext2/ialloc.c
++++ b/fs/ext2/ialloc.c
+@@ -80,6 +80,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
+ if (dir)
+ le16_add_cpu(&desc->bg_used_dirs_count, -1);
+ spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
++ percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter);
+ if (dir)
+ percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
+ mark_buffer_dirty(bh);
+@@ -528,7 +529,7 @@ got:
+ goto fail;
+ }
+
+- percpu_counter_add(&sbi->s_freeinodes_counter, -1);
++ percpu_counter_dec(&sbi->s_freeinodes_counter);
+ if (S_ISDIR(mode))
+ percpu_counter_inc(&sbi->s_dirs_counter);
+
+diff --git a/fs/minix/inode.c b/fs/minix/inode.c
+index 0dd929346f3f3..7b09a9158e401 100644
+--- a/fs/minix/inode.c
++++ b/fs/minix/inode.c
+@@ -150,8 +150,10 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
+ return 0;
+ }
+
+-static bool minix_check_superblock(struct minix_sb_info *sbi)
++static bool minix_check_superblock(struct super_block *sb)
+ {
++ struct minix_sb_info *sbi = minix_sb(sb);
++
+ if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
+ return false;
+
+@@ -161,7 +163,7 @@ static bool minix_check_superblock(struct minix_sb_info *sbi)
+ * of indirect blocks which places the limit well above U32_MAX.
+ */
+ if (sbi->s_version == MINIX_V1 &&
+- sbi->s_max_size > (7 + 512 + 512*512) * BLOCK_SIZE)
++ sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE)
+ return false;
+
+ return true;
+@@ -202,7 +204,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
+ sbi->s_zmap_blocks = ms->s_zmap_blocks;
+ sbi->s_firstdatazone = ms->s_firstdatazone;
+ sbi->s_log_zone_size = ms->s_log_zone_size;
+- sbi->s_max_size = ms->s_max_size;
++ s->s_maxbytes = ms->s_max_size;
+ s->s_magic = ms->s_magic;
+ if (s->s_magic == MINIX_SUPER_MAGIC) {
+ sbi->s_version = MINIX_V1;
+@@ -233,7 +235,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
+ sbi->s_zmap_blocks = m3s->s_zmap_blocks;
+ sbi->s_firstdatazone = m3s->s_firstdatazone;
+ sbi->s_log_zone_size = m3s->s_log_zone_size;
+- sbi->s_max_size = m3s->s_max_size;
++ s->s_maxbytes = m3s->s_max_size;
+ sbi->s_ninodes = m3s->s_ninodes;
+ sbi->s_nzones = m3s->s_zones;
+ sbi->s_dirsize = 64;
+@@ -245,7 +247,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
+ } else
+ goto out_no_fs;
+
+- if (!minix_check_superblock(sbi))
++ if (!minix_check_superblock(s))
+ goto out_illegal_sb;
+
+ /*
+diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c
+index 046cc96ee7adb..1fed906042aa8 100644
+--- a/fs/minix/itree_v1.c
++++ b/fs/minix/itree_v1.c
+@@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
+ if (block < 0) {
+ printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
+ block, inode->i_sb->s_bdev);
+- } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
+- if (printk_ratelimit())
+- printk("MINIX-fs: block_to_path: "
+- "block %ld too big on dev %pg\n",
+- block, inode->i_sb->s_bdev);
+- } else if (block < 7) {
++ return 0;
++ }
++ if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes)
++ return 0;
++
++ if (block < 7) {
+ offsets[n++] = block;
+ } else if ((block -= 7) < 512) {
+ offsets[n++] = 7;
+diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c
+index f7fc7eccccccd..9d00f31a2d9d1 100644
+--- a/fs/minix/itree_v2.c
++++ b/fs/minix/itree_v2.c
+@@ -32,13 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
+ if (block < 0) {
+ printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
+ block, sb->s_bdev);
+- } else if ((u64)block * (u64)sb->s_blocksize >=
+- minix_sb(sb)->s_max_size) {
+- if (printk_ratelimit())
+- printk("MINIX-fs: block_to_path: "
+- "block %ld too big on dev %pg\n",
+- block, sb->s_bdev);
+- } else if (block < DIRCOUNT) {
++ return 0;
++ }
++ if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes)
++ return 0;
++
++ if (block < DIRCOUNT) {
+ offsets[n++] = block;
+ } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) {
+ offsets[n++] = DIRCOUNT;
+diff --git a/fs/minix/minix.h b/fs/minix/minix.h
+index df081e8afcc3c..168d45d3de73e 100644
+--- a/fs/minix/minix.h
++++ b/fs/minix/minix.h
+@@ -32,7 +32,6 @@ struct minix_sb_info {
+ unsigned long s_zmap_blocks;
+ unsigned long s_firstdatazone;
+ unsigned long s_log_zone_size;
+- unsigned long s_max_size;
+ int s_dirsize;
+ int s_namelen;
+ struct buffer_head ** s_imap;
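Aside: both minix block_to_path() fixes compare the byte offset as a 64-bit product, so a huge on-disk block number cannot wrap a narrower multiply and slip past the limit. A standalone sketch of the check:

#include <stdbool.h>
#include <stdint.h>

static bool block_in_range(long block, unsigned int blocksize,
			   uint64_t maxbytes)
{
	if (block < 0)
		return false;
	/* Widen before multiplying so the product cannot overflow. */
	return (uint64_t)block * blocksize < maxbytes;
}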
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 95dc90570786c..387a2cfa7e172 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -140,6 +140,7 @@ static int
+ nfs_file_flush(struct file *file, fl_owner_t id)
+ {
+ struct inode *inode = file_inode(file);
++ errseq_t since;
+
+ dprintk("NFS: flush(%pD2)\n", file);
+
+@@ -148,7 +149,9 @@ nfs_file_flush(struct file *file, fl_owner_t id)
+ return 0;
+
+ /* Flush writes to the server and return any errors */
+- return nfs_wb_all(inode);
++ since = filemap_sample_wb_err(file->f_mapping);
++ nfs_wb_all(inode);
++ return filemap_check_wb_err(file->f_mapping, since);
+ }
+
+ ssize_t
+@@ -580,12 +583,14 @@ static const struct vm_operations_struct nfs_file_vm_ops = {
+ .page_mkwrite = nfs_vm_page_mkwrite,
+ };
+
+-static int nfs_need_check_write(struct file *filp, struct inode *inode)
++static int nfs_need_check_write(struct file *filp, struct inode *inode,
++ int error)
+ {
+ struct nfs_open_context *ctx;
+
+ ctx = nfs_file_open_context(filp);
+- if (nfs_ctx_key_to_expire(ctx, inode))
++ if (nfs_error_is_fatal_on_server(error) ||
++ nfs_ctx_key_to_expire(ctx, inode))
+ return 1;
+ return 0;
+ }
+@@ -596,6 +601,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
+ struct inode *inode = file_inode(file);
+ unsigned long written = 0;
+ ssize_t result;
++ errseq_t since;
++ int error;
+
+ result = nfs_key_timeout_notify(file, inode);
+ if (result)
+@@ -620,6 +627,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
+ if (iocb->ki_pos > i_size_read(inode))
+ nfs_revalidate_mapping(inode, file->f_mapping);
+
++ since = filemap_sample_wb_err(file->f_mapping);
+ nfs_start_io_write(inode);
+ result = generic_write_checks(iocb, from);
+ if (result > 0) {
+@@ -638,7 +646,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
+ goto out;
+
+ /* Return error values */
+- if (nfs_need_check_write(file, inode)) {
++ error = filemap_check_wb_err(file->f_mapping, since);
++ if (nfs_need_check_write(file, inode, error)) {
+ int err = nfs_wb_all(inode);
+ if (err < 0)
+ result = err;
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index fb55c04cdc6bd..534b6fd70ffdb 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -109,6 +109,7 @@ static int
+ nfs4_file_flush(struct file *file, fl_owner_t id)
+ {
+ struct inode *inode = file_inode(file);
++ errseq_t since;
+
+ dprintk("NFS: flush(%pD2)\n", file);
+
+@@ -124,7 +125,9 @@ nfs4_file_flush(struct file *file, fl_owner_t id)
+ return filemap_fdatawrite(file->f_mapping);
+
+ /* Flush writes to the server and return any errors */
+- return nfs_wb_all(inode);
++ since = filemap_sample_wb_err(file->f_mapping);
++ nfs_wb_all(inode);
++ return filemap_check_wb_err(file->f_mapping, since);
+ }
+
+ #ifdef CONFIG_NFS_V4_2
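Aside: the two NFS flush paths above use the errseq_t sample/check idiom — filemap_sample_wb_err() records a point-in-time cookie and filemap_check_wb_err() reports only errors raised since, so one opener cannot consume a writeback error another opener still needs to see. A simplified userspace analogue with a sequence-stamped error slot (this deliberately elides the atomicity the kernel's errseq_t provides):

struct wb_err { int err; unsigned long seq; };

static unsigned long sample_wb_err(const struct wb_err *e)
{
	return e->seq;				/* remember "now" */
}

static int check_wb_err(const struct wb_err *e, unsigned long since)
{
	return e->seq > since ? e->err : 0;	/* only newer errors */
}

static void record_wb_err(struct wb_err *e, int err)
{
	e->err = err;
	e->seq++;				/* each new error bumps the stamp */
}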
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 1a1bd2fe6e98d..d0cb827b72cfa 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5811,8 +5811,6 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
+ return ret;
+ if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
+ return -ENOENT;
+- if (buflen < label.len)
+- return -ERANGE;
+ return 0;
+ }
+
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 7c0ff1a3b5914..677751bc3a334 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -4169,7 +4169,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
+ return -EIO;
+ if (len < NFS4_MAXLABELLEN) {
+ if (label) {
+- memcpy(label->label, p, len);
++ if (label->len) {
++ if (label->len < len)
++ return -ERANGE;
++ memcpy(label->label, p, len);
++ }
+ label->len = len;
+ label->pi = pi;
+ label->lfs = lfs;
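Aside: the paired nfs4proc/nfs4xdr changes move the -ERANGE check into the decoder and adopt a buffer-length protocol: a zero-length caller buffer means "just report the size", otherwise copy only if it fits. A sketch of that protocol with a hypothetical helper:

#include <errno.h>
#include <string.h>

static int copy_label(char *dst, size_t *dst_len,
		      const char *src, size_t src_len)
{
	if (*dst_len) {			/* caller supplied a buffer */
		if (*dst_len < src_len)
			return -ERANGE;	/* too small: report, don't truncate */
		memcpy(dst, src, src_len);
	}
	*dst_len = src_len;		/* always report the real size */
	return 0;
}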
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index 9461bd3e1c0c8..0a8cd8e59a92c 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -326,8 +326,8 @@ struct ocfs2_super
+ spinlock_t osb_lock;
+ u32 s_next_generation;
+ unsigned long osb_flags;
+- s16 s_inode_steal_slot;
+- s16 s_meta_steal_slot;
++ u16 s_inode_steal_slot;
++ u16 s_meta_steal_slot;
+ atomic_t s_num_inodes_stolen;
+ atomic_t s_num_meta_stolen;
+
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index 503e724d39f53..5e0eaea474055 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -879,9 +879,9 @@ static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
+ {
+ spin_lock(&osb->osb_lock);
+ if (type == INODE_ALLOC_SYSTEM_INODE)
+- osb->s_inode_steal_slot = slot;
++ osb->s_inode_steal_slot = (u16)slot;
+ else if (type == EXTENT_ALLOC_SYSTEM_INODE)
+- osb->s_meta_steal_slot = slot;
++ osb->s_meta_steal_slot = (u16)slot;
+ spin_unlock(&osb->osb_lock);
+ }
+
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index c81e86c623807..70d8857b161df 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -78,7 +78,7 @@ struct mount_options
+ unsigned long commit_interval;
+ unsigned long mount_opt;
+ unsigned int atime_quantum;
+- signed short slot;
++ unsigned short slot;
+ int localalloc_opt;
+ unsigned int resv_level;
+ int dir_resv_level;
+@@ -1334,7 +1334,7 @@ static int ocfs2_parse_options(struct super_block *sb,
+ goto bail;
+ }
+ if (option)
+- mopt->slot = (s16)option;
++ mopt->slot = (u16)option;
+ break;
+ case Opt_commit:
+ if (match_int(&args[0], &option)) {
+diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
+index a5612abc09363..bcd4fd5ad1751 100644
+--- a/fs/orangefs/file.c
++++ b/fs/orangefs/file.c
+@@ -311,23 +311,8 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb,
+ struct iov_iter *iter)
+ {
+ int ret;
+- struct orangefs_read_options *ro;
+-
+ orangefs_stats.reads++;
+
+- /*
+- * Remember how they set "count" in read(2) or pread(2) or whatever -
+- * users can use count as a knob to control orangefs io size and later
+- * we can try to help them fill as many pages as possible in readpage.
+- */
+- if (!iocb->ki_filp->private_data) {
+- iocb->ki_filp->private_data = kmalloc(sizeof *ro, GFP_KERNEL);
+- if (!iocb->ki_filp->private_data)
+- return(ENOMEM);
+- ro = iocb->ki_filp->private_data;
+- ro->blksiz = iter->count;
+- }
+-
+ down_read(&file_inode(iocb->ki_filp)->i_rwsem);
+ ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp));
+ if (ret)
+@@ -615,12 +600,6 @@ static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
+ return rc;
+ }
+
+-static int orangefs_file_open(struct inode * inode, struct file *file)
+-{
+- file->private_data = NULL;
+- return generic_file_open(inode, file);
+-}
+-
+ static int orangefs_flush(struct file *file, fl_owner_t id)
+ {
+ /*
+@@ -634,9 +613,6 @@ static int orangefs_flush(struct file *file, fl_owner_t id)
+ struct inode *inode = file->f_mapping->host;
+ int r;
+
+- kfree(file->private_data);
+- file->private_data = NULL;
+-
+ if (inode->i_state & I_DIRTY_TIME) {
+ spin_lock(&inode->i_lock);
+ inode->i_state &= ~I_DIRTY_TIME;
+@@ -659,7 +635,7 @@ const struct file_operations orangefs_file_operations = {
+ .lock = orangefs_lock,
+ .unlocked_ioctl = orangefs_ioctl,
+ .mmap = orangefs_file_mmap,
+- .open = orangefs_file_open,
++ .open = generic_file_open,
+ .flush = orangefs_flush,
+ .release = orangefs_file_release,
+ .fsync = orangefs_fsync,
+diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
+index efb12197da181..636892ffec0ba 100644
+--- a/fs/orangefs/inode.c
++++ b/fs/orangefs/inode.c
+@@ -259,46 +259,19 @@ static int orangefs_readpage(struct file *file, struct page *page)
+ pgoff_t index; /* which page */
+ struct page *next_page;
+ char *kaddr;
+- struct orangefs_read_options *ro = file->private_data;
+ loff_t read_size;
+- loff_t roundedup;
+ int buffer_index = -1; /* orangefs shared memory slot */
+ int slot_index; /* index into slot */
+ int remaining;
+
+ /*
+- * If they set some miniscule size for "count" in read(2)
+- * (for example) then let's try to read a page, or the whole file
+- * if it is smaller than a page. Once "count" goes over a page
+- * then lets round up to the highest page size multiple that is
+- * less than or equal to "count" and do that much orangefs IO and
+- * try to fill as many pages as we can from it.
+- *
+- * "count" should be represented in ro->blksiz.
+- *
+- * inode->i_size = file size.
++ * Get up to this many bytes from Orangefs at a time and try
++ * to fill them into the page cache at once. Tests with dd made
++ * this seem like a reasonable static number; if there were
++ * interest, this number could be made settable through
++ * sysfs...
+ */
+- if (ro) {
+- if (ro->blksiz < PAGE_SIZE) {
+- if (inode->i_size < PAGE_SIZE)
+- read_size = inode->i_size;
+- else
+- read_size = PAGE_SIZE;
+- } else {
+- roundedup = ((PAGE_SIZE - 1) & ro->blksiz) ?
+- ((ro->blksiz + PAGE_SIZE) & ~(PAGE_SIZE -1)) :
+- ro->blksiz;
+- if (roundedup > inode->i_size)
+- read_size = inode->i_size;
+- else
+- read_size = roundedup;
+-
+- }
+- } else {
+- read_size = PAGE_SIZE;
+- }
+- if (!read_size)
+- read_size = PAGE_SIZE;
++ read_size = 524288;
+
+ if (PageDirty(page))
+ orangefs_launder_page(page);
+diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
+index 34a6c99fa29bd..3003007681a05 100644
+--- a/fs/orangefs/orangefs-kernel.h
++++ b/fs/orangefs/orangefs-kernel.h
+@@ -239,10 +239,6 @@ struct orangefs_write_range {
+ kgid_t gid;
+ };
+
+-struct orangefs_read_options {
+- ssize_t blksiz;
+-};
+-
+ extern struct orangefs_stats orangefs_stats;
+
+ /*
+diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
+index 826dad0243dcc..a6ae2428e4c96 100644
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -539,7 +539,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
+ const struct fscrypt_name *nm, const struct inode *inode,
+ int deletion, int xent)
+ {
+- int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
++ int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0;
+ int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
+ int last_reference = !!(deletion && inode->i_nlink == 0);
+ struct ubifs_inode *ui = ubifs_inode(inode);
+@@ -630,6 +630,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
+ goto out_finish;
+ }
+ ui->del_cmtno = c->cmt_no;
++ orphan_added = 1;
+ }
+
+ err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
+@@ -702,7 +703,7 @@ out_release:
+ kfree(dent);
+ out_ro:
+ ubifs_ro_mode(c, err);
+- if (last_reference)
++ if (orphan_added)
+ ubifs_delete_orphan(c, inode->i_ino);
+ finish_reservation(c);
+ return err;
+@@ -1217,7 +1218,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
+ void *p;
+ union ubifs_key key;
+ struct ubifs_dent_node *dent, *dent2;
+- int err, dlen1, dlen2, ilen, lnum, offs, len;
++ int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0;
+ int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
+ int last_reference = !!(new_inode && new_inode->i_nlink == 0);
+ int move = (old_dir != new_dir);
+@@ -1333,6 +1334,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
+ goto out_finish;
+ }
+ new_ui->del_cmtno = c->cmt_no;
++ orphan_added = 1;
+ }
+
+ err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
+@@ -1414,7 +1416,7 @@ out_release:
+ release_head(c, BASEHD);
+ out_ro:
+ ubifs_ro_mode(c, err);
+- if (last_reference)
++ if (orphan_added)
+ ubifs_delete_orphan(c, new_inode->i_ino);
+ out_finish:
+ finish_reservation(c);
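Aside: both UBIFS fixes replace a derived predicate (last_reference) with an explicit orphan_added flag, so the error path only undoes work that actually happened. A distilled sketch of the idiom (stub helpers are hypothetical):

static int need_orphan(void) { return 1; }
static int add_orphan(void) { return 0; }
static int write_journal(void) { return 0; }
static void delete_orphan(void) { }

static int do_update(void)
{
	int err = 0, orphan_added = 0;

	if (need_orphan()) {
		err = add_orphan();
		if (err)
			goto out;
		orphan_added = 1;	/* record the side effect itself */
	}

	err = write_journal();
	if (err)
		goto out_undo;
	return 0;

out_undo:
	if (orphan_added)		/* not "if (need_orphan())" */
		delete_orphan();
out:
	return err;
}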
+diff --git a/fs/ufs/super.c b/fs/ufs/super.c
+index 1da0be667409b..e3b69fb280e8c 100644
+--- a/fs/ufs/super.c
++++ b/fs/ufs/super.c
+@@ -101,7 +101,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene
+ struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+ struct inode *inode;
+
+- if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg)
++ if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg)
+ return ERR_PTR(-ESTALE);
+
+ inode = ufs_iget(sb, ino);
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index 864849e942c45..c1a8d4a41bb16 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -135,6 +135,7 @@ struct af_alg_async_req {
+ * SG?
+ * @enc: Cryptographic operation to be performed when
+ * recvmsg is invoked.
++ * @init: True if metadata has been sent.
+ * @len: Length of memory allocated for this data structure.
+ */
+ struct af_alg_ctx {
+@@ -151,6 +152,7 @@ struct af_alg_ctx {
+ bool more;
+ bool merge;
+ bool enc;
++ bool init;
+
+ unsigned int len;
+ };
+@@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
+ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+ size_t dst_offset);
+ void af_alg_wmem_wakeup(struct sock *sk);
+-int af_alg_wait_for_data(struct sock *sk, unsigned flags);
++int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
+ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ unsigned int ivsize);
+ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
+diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
+index 1e5dad8b8e59b..ed870da78326b 100644
+--- a/include/linux/intel-iommu.h
++++ b/include/linux/intel-iommu.h
+@@ -359,8 +359,8 @@ enum {
+
+ #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
+ #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
+-#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
+-#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
++#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1)
++#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
+ #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
+ #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
+ #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
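Aside: the macro fix above masks each operand to its field width before shifting, so an oversized PASID or glob bit cannot smear into neighbouring fields of the 64-bit descriptor. A standalone sketch of the mask-before-shift pattern (field layout is illustrative only):

#include <stdint.h>

#define FIELD_GLOB(g)	((uint64_t)(g) & 0x1)
#define FIELD_PASID(p)	((uint64_t)((p) & 0xfffff) << 32)
#define FIELD_SID(sid)	((uint64_t)((sid) & 0xffff) << 16)

static uint64_t build_desc(unsigned int glob, unsigned int pasid,
			   unsigned int sid)
{
	/* Each operand is clamped to its field before it is placed. */
	return FIELD_GLOB(glob) | FIELD_PASID(pasid) | FIELD_SID(sid);
}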
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index f8755e5fcd742..e9e69c511ea92 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -211,6 +211,8 @@ struct irq_data {
+ * IRQD_CAN_RESERVE - Can use reservation mode
+ * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
+ * required
++ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
++ * irq_chip::irq_set_affinity() when deactivated.
+ */
+ enum {
+ IRQD_TRIGGER_MASK = 0xf,
+@@ -234,6 +236,7 @@ enum {
+ IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
+ IRQD_CAN_RESERVE = (1 << 26),
+ IRQD_MSI_NOMASK_QUIRK = (1 << 27),
++ IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
+ };
+
+ #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+@@ -408,6 +411,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+ return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+ }
+
++static inline void irqd_set_affinity_on_activate(struct irq_data *d)
++{
++ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
++}
++
++static inline bool irqd_affinity_on_activate(struct irq_data *d)
++{
++ return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
++}
++
+ #undef __irqd_to_state
+
+ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 8263bbf756a22..6d9c1131fe5c8 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -849,6 +849,8 @@ static inline int sk_memalloc_socks(void)
+ {
+ return static_branch_unlikely(&memalloc_socks_key);
+ }
++
++void __receive_sock(struct file *file);
+ #else
+
+ static inline int sk_memalloc_socks(void)
+@@ -856,6 +858,8 @@ static inline int sk_memalloc_socks(void)
+ return 0;
+ }
+
++static inline void __receive_sock(struct file *file)
++{ }
+ #endif
+
+ static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index df73685de1144..3b1d0a4725a49 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -281,12 +281,16 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
+ struct irq_desc *desc = irq_data_to_desc(data);
+
+ /*
++ * Correctly handle irq chips which can set the affinity only
++ * in the activated state
++ *
+ * If the interrupt is not yet activated, just store the affinity
+ * mask and do not call the chip driver at all. On activation the
+ * driver has to make sure anyway that the interrupt is in a
+ * useable state so startup works.
+ */
+- if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data))
++ if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
++ irqd_is_activated(data) || !irqd_affinity_on_activate(data))
+ return false;
+
+ cpumask_copy(desc->irq_common_data.affinity, mask);
+diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
+index 8f557fa1f4fe4..c6c7e187ae748 100644
+--- a/kernel/irq/pm.c
++++ b/kernel/irq/pm.c
+@@ -185,14 +185,18 @@ void rearm_wake_irq(unsigned int irq)
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+
+- if (!desc || !(desc->istate & IRQS_SUSPENDED) ||
+- !irqd_is_wakeup_set(&desc->irq_data))
++ if (!desc)
+ return;
+
++ if (!(desc->istate & IRQS_SUSPENDED) ||
++ !irqd_is_wakeup_set(&desc->irq_data))
++ goto unlock;
++
+ desc->istate &= ~IRQS_SUSPENDED;
+ irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
+ __enable_irq(desc);
+
++unlock:
+ irq_put_desc_busunlock(desc, flags);
+ }
+
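Aside: the rearm_wake_irq() fix is the classic "goto unlock" repair — once the descriptor lookup (which takes the bus lock) succeeds, every exit path must release it, so the early bail-outs branch to a single unlock label instead of returning. A userspace-flavoured sketch:

#include <pthread.h>

static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;

struct desc { int suspended; int wake_armed; };

static void rearm(struct desc *d)
{
	if (!d)				/* nothing locked yet: plain return */
		return;

	pthread_mutex_lock(&desc_lock);
	if (!d->suspended)
		goto unlock;		/* NOT "return": the lock is held */

	d->suspended = 0;
	d->wake_armed = 1;
unlock:
	pthread_mutex_unlock(&desc_lock);
}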
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 0a967db226d8a..bbff4bccb885d 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2104,6 +2104,13 @@ static void kill_kprobe(struct kprobe *p)
+ * the original probed function (which will be freed soon) any more.
+ */
+ arch_remove_kprobe(p);
++
++ /*
++ * The module is going away. We should disarm the kprobe which
++ * is using ftrace.
++ */
++ if (kprobe_ftrace(p))
++ disarm_kprobe_ftrace(p);
+ }
+
+ /* Disable one kprobe */
+diff --git a/kernel/module.c b/kernel/module.c
+index 6baa1080cdb76..819c5d3b4c295 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -1517,18 +1517,34 @@ struct module_sect_attrs {
+ struct module_sect_attr attrs[0];
+ };
+
++#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
+ static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *battr,
+ char *buf, loff_t pos, size_t count)
+ {
+ struct module_sect_attr *sattr =
+ container_of(battr, struct module_sect_attr, battr);
++ char bounce[MODULE_SECT_READ_SIZE + 1];
++ size_t wrote;
+
+ if (pos != 0)
+ return -EINVAL;
+
+- return sprintf(buf, "0x%px\n",
+- kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL);
++ /*
++ * Since we're a binary read handler, we must account for the
++ * trailing NUL byte that sprintf will write: if "buf" is
++ * too small to hold the NUL, or the NUL is exactly the last
++ * byte, the read will look like it got truncated by one byte.
++ * Since there is no way to ask sprintf nicely to not write
++ * the NUL, we have to use a bounce buffer.
++ */
++ wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n",
++ kallsyms_show_value(file->f_cred)
++ ? (void *)sattr->address : NULL);
++ count = min(count, wrote);
++ memcpy(buf, bounce, count);
++
++ return count;
+ }
+
+ static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
+@@ -1577,7 +1593,7 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
+ goto out;
+ sect_attrs->nsections++;
+ sattr->battr.read = module_sect_read;
+- sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4);
++ sattr->battr.size = MODULE_SECT_READ_SIZE;
+ sattr->battr.attr.mode = 0400;
+ *(gattr++) = &(sattr++)->battr;
+ }
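Aside: the module_sect_read() comment describes a subtle pitfall worth a standalone sketch: sprintf() always writes a trailing NUL, so formatting straight into a binary read buffer sized exactly for the payload either overruns it or truncates the read by one byte. Formatting into a local bounce buffer and copying min(count, wrote) avoids both:

#include <stdio.h>
#include <string.h>

static size_t sect_read(char *buf, size_t count, unsigned long addr)
{
	char bounce[3 + 2 * sizeof(unsigned long) + 1]; /* "0x", "\n", NUL */
	size_t wrote;

	/* snprintf NUL-terminates bounce; buf never sees the NUL. */
	wrote = (size_t)snprintf(bounce, sizeof(bounce), "0x%lx\n", addr);
	if (wrote > count)
		wrote = count;
	memcpy(buf, bounce, wrote);
	return wrote;
}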
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 15160d707da45..705852c1724aa 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5699,8 +5699,11 @@ static int referenced_filters(struct dyn_ftrace *rec)
+ int cnt = 0;
+
+ for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+- if (ops_references_rec(ops, rec))
+- cnt++;
++ if (ops_references_rec(ops, rec)) {
++ cnt++;
++ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
++ rec->flags |= FTRACE_FL_REGS;
++ }
+ }
+
+ return cnt;
+@@ -5877,8 +5880,8 @@ void ftrace_module_enable(struct module *mod)
+ if (ftrace_start_up)
+ cnt += referenced_filters(rec);
+
+- /* This clears FTRACE_FL_DISABLED */
+- rec->flags = cnt;
++ rec->flags &= ~FTRACE_FL_DISABLED;
++ rec->flags += cnt;
+
+ if (ftrace_start_up && cnt) {
+ int failed = __ftrace_replace_code(rec, 1);
+@@ -6459,12 +6462,12 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
+ if (enable) {
+ register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
+ tr);
+- register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
++ register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
+ tr);
+ } else {
+ unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
+ tr);
+- unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
++ unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
+ tr);
+ }
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 721947b9962db..f9c2bdbbd8936 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5686,7 +5686,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ }
+
+ /* If trace pipe files are being read, we can't change the tracer */
+- if (tr->current_trace->ref) {
++ if (tr->trace_ref) {
+ ret = -EBUSY;
+ goto out;
+ }
+@@ -5902,7 +5902,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
+
+ nonseekable_open(inode, filp);
+
+- tr->current_trace->ref++;
++ tr->trace_ref++;
+ out:
+ mutex_unlock(&trace_types_lock);
+ return ret;
+@@ -5921,7 +5921,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
+
+ mutex_lock(&trace_types_lock);
+
+- tr->current_trace->ref--;
++ tr->trace_ref--;
+
+ if (iter->trace->pipe_close)
+ iter->trace->pipe_close(iter);
+@@ -7230,7 +7230,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
+
+ filp->private_data = info;
+
+- tr->current_trace->ref++;
++ tr->trace_ref++;
+
+ mutex_unlock(&trace_types_lock);
+
+@@ -7331,7 +7331,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
+
+ mutex_lock(&trace_types_lock);
+
+- iter->tr->current_trace->ref--;
++ iter->tr->trace_ref--;
+
+ __trace_array_put(iter->tr);
+
+@@ -8470,7 +8470,7 @@ static int __remove_instance(struct trace_array *tr)
+ {
+ int i;
+
+- if (tr->ref || (tr->current_trace && tr->current_trace->ref))
++ if (tr->ref || (tr->current_trace && tr->trace_ref))
+ return -EBUSY;
+
+ list_del(&tr->list);
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index a3c29d5fcc616..4055158c1dd25 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -309,6 +309,7 @@ struct trace_array {
+ struct trace_event_file *trace_marker_file;
+ cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
+ int ref;
++ int trace_ref;
+ #ifdef CONFIG_FUNCTION_TRACER
+ struct ftrace_ops *ops;
+ struct trace_pid_list __rcu *function_pids;
+@@ -498,7 +499,6 @@ struct tracer {
+ struct tracer *next;
+ struct tracer_flags *flags;
+ int enabled;
+- int ref;
+ bool print_max;
+ bool allow_instances;
+ #ifdef CONFIG_TRACER_MAX_TRACE
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 995061bb2deca..ed9eb97b64b47 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -527,12 +527,12 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable)
+ if (enable) {
+ register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
+ tr, INT_MIN);
+- register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
++ register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
+ tr, INT_MAX);
+ } else {
+ unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
+ tr);
+- unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
++ unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
+ tr);
+ }
+ }
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index 862f4b0139fcb..35512ed26d9ff 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -270,6 +270,7 @@ static bool disable_migrate;
+ static void move_to_next_cpu(void)
+ {
+ struct cpumask *current_mask = &save_cpumask;
++ struct trace_array *tr = hwlat_trace;
+ int next_cpu;
+
+ if (disable_migrate)
+@@ -283,7 +284,7 @@ static void move_to_next_cpu(void)
+ goto disable;
+
+ get_online_cpus();
+- cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
++ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+ next_cpu = cpumask_next(smp_processor_id(), current_mask);
+ put_online_cpus();
+
+@@ -360,7 +361,7 @@ static int start_kthread(struct trace_array *tr)
+ /* Just pick the first CPU on first iteration */
+ current_mask = &save_cpumask;
+ get_online_cpus();
+- cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
++ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+ put_online_cpus();
+ next_cpu = cpumask_first(current_mask);
+
+diff --git a/lib/devres.c b/lib/devres.c
+index 17624d35e82d4..77c80ca9e4856 100644
+--- a/lib/devres.c
++++ b/lib/devres.c
+@@ -155,6 +155,7 @@ void __iomem *devm_ioremap_resource(struct device *dev,
+ {
+ resource_size_t size;
+ void __iomem *dest_ptr;
++ char *pretty_name;
+
+ BUG_ON(!dev);
+
+@@ -165,7 +166,15 @@ void __iomem *devm_ioremap_resource(struct device *dev,
+
+ size = resource_size(res);
+
+- if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) {
++ if (res->name)
++ pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
++ dev_name(dev), res->name);
++ else
++ pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
++ if (!pretty_name)
++ return IOMEM_ERR_PTR(-ENOMEM);
++
++ if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return IOMEM_ERR_PTR(-EBUSY);
+ }
+diff --git a/lib/test_kmod.c b/lib/test_kmod.c
+index 9cf77628fc913..87a0cc750ea23 100644
+--- a/lib/test_kmod.c
++++ b/lib/test_kmod.c
+@@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
+ break;
+ case TEST_KMOD_FS_TYPE:
+ kfree_const(config->test_fs);
+- config->test_driver = NULL;
++ config->test_fs = NULL;
+ copied = config_copy_test_fs(config, test_str,
+ strlen(test_str));
+ break;
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 5977f7824a9ac..719f49d1fba2f 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1294,7 +1294,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+ {
+ unsigned long haddr = addr & HPAGE_PMD_MASK;
+ struct vm_area_struct *vma = find_vma(mm, haddr);
+- struct page *hpage = NULL;
++ struct page *hpage;
+ pte_t *start_pte, *pte;
+ pmd_t *pmd, _pmd;
+ spinlock_t *ptl;
+@@ -1314,9 +1314,17 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+ if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
+ return;
+
++ hpage = find_lock_page(vma->vm_file->f_mapping,
++ linear_page_index(vma, haddr));
++ if (!hpage)
++ return;
++
++ if (!PageHead(hpage))
++ goto drop_hpage;
++
+ pmd = mm_find_pmd(mm, haddr);
+ if (!pmd)
+- return;
++ goto drop_hpage;
+
+ start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
+
+@@ -1335,30 +1343,11 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+
+ page = vm_normal_page(vma, addr, *pte);
+
+- if (!page || !PageCompound(page))
+- goto abort;
+-
+- if (!hpage) {
+- hpage = compound_head(page);
+- /*
+- * The mapping of the THP should not change.
+- *
+- * Note that uprobe, debugger, or MAP_PRIVATE may
+- * change the page table, but the new page will
+- * not pass PageCompound() check.
+- */
+- if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
+- goto abort;
+- }
+-
+ /*
+- * Confirm the page maps to the correct subpage.
+- *
+- * Note that uprobe, debugger, or MAP_PRIVATE may change
+- * the page table, but the new page will not pass
+- * PageCompound() check.
++ * Note that uprobe, debugger, or MAP_PRIVATE may change the
++ * page table, but the new page will not be a subpage of hpage.
+ */
+- if (WARN_ON(hpage + i != page))
++ if (hpage + i != page)
+ goto abort;
+ count++;
+ }
+@@ -1377,21 +1366,26 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+ pte_unmap_unlock(start_pte, ptl);
+
+ /* step 3: set proper refcount and mm_counters. */
+- if (hpage) {
++ if (count) {
+ page_ref_sub(hpage, count);
+ add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
+ }
+
+ /* step 4: collapse pmd */
+ ptl = pmd_lock(vma->vm_mm, pmd);
+- _pmd = pmdp_collapse_flush(vma, addr, pmd);
++ _pmd = pmdp_collapse_flush(vma, haddr, pmd);
+ spin_unlock(ptl);
+ mm_dec_nr_ptes(mm);
+ pte_free(mm, pmd_pgtable(_pmd));
++
++drop_hpage:
++ unlock_page(hpage);
++ put_page(hpage);
+ return;
+
+ abort:
+ pte_unmap_unlock(start_pte, ptl);
++ goto drop_hpage;
+ }
+
+ static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
+@@ -1420,6 +1414,7 @@ out:
+ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
+ {
+ struct vm_area_struct *vma;
++ struct mm_struct *mm;
+ unsigned long addr;
+ pmd_t *pmd, _pmd;
+
+@@ -1448,7 +1443,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
+ continue;
+ if (vma->vm_end < addr + HPAGE_PMD_SIZE)
+ continue;
+- pmd = mm_find_pmd(vma->vm_mm, addr);
++ mm = vma->vm_mm;
++ pmd = mm_find_pmd(mm, addr);
+ if (!pmd)
+ continue;
+ /*
+@@ -1458,17 +1454,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
+ * mmap_sem while holding page lock. Fault path does it in
+ * reverse order. Trylock is a way to avoid deadlock.
+ */
+- if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
+- spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
+- /* assume page table is clear */
+- _pmd = pmdp_collapse_flush(vma, addr, pmd);
+- spin_unlock(ptl);
+- up_write(&vma->vm_mm->mmap_sem);
+- mm_dec_nr_ptes(vma->vm_mm);
+- pte_free(vma->vm_mm, pmd_pgtable(_pmd));
++ if (down_write_trylock(&mm->mmap_sem)) {
++ if (!khugepaged_test_exit(mm)) {
++ spinlock_t *ptl = pmd_lock(mm, pmd);
++ /* assume page table is clear */
++ _pmd = pmdp_collapse_flush(vma, addr, pmd);
++ spin_unlock(ptl);
++ mm_dec_nr_ptes(mm);
++ pte_free(mm, pmd_pgtable(_pmd));
++ }
++ up_write(&mm->mmap_sem);
+ } else {
+ /* Try again later */
+- khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
++ khugepaged_add_pte_mapped_thp(mm, addr);
+ }
+ }
+ i_mmap_unlock_write(mapping);
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index c054945a9a742..3128d95847125 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1751,7 +1751,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
+ */
+ rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
+ if (rc)
+- goto done;
++ return rc;
+
+ /* remove memmap entry */
+ firmware_map_remove(start, start + size, "System RAM");
+@@ -1771,9 +1771,8 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
+
+ try_offline_node(nid);
+
+-done:
+ mem_hotplug_done();
+- return rc;
++ return 0;
+ }
+
+ /**
+diff --git a/mm/page_counter.c b/mm/page_counter.c
+index de31470655f66..147ff99187b81 100644
+--- a/mm/page_counter.c
++++ b/mm/page_counter.c
+@@ -77,7 +77,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
+ long new;
+
+ new = atomic_long_add_return(nr_pages, &c->usage);
+- propagate_protected_usage(counter, new);
++ propagate_protected_usage(c, new);
+ /*
+ * This is indeed racy, but we can live with some
+ * inaccuracy in the watermark.
+@@ -121,7 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter,
+ new = atomic_long_add_return(nr_pages, &c->usage);
+ if (new > c->max) {
+ atomic_long_sub(nr_pages, &c->usage);
+- propagate_protected_usage(counter, new);
++ propagate_protected_usage(c, new);
+ /*
+ * This is racy, but we can live with some
+ * inaccuracy in the failcnt.
+@@ -130,7 +130,7 @@ bool page_counter_try_charge(struct page_counter *counter,
+ *fail = c;
+ goto failed;
+ }
+- propagate_protected_usage(counter, new);
++ propagate_protected_usage(c, new);
+ /*
+ * Just like with failcnt, we can live with some
+ * inaccuracy in the watermark.
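Aside: the page_counter fix is a loop-variable bug — the hierarchy walk must propagate against the current node c, not the counter the walk started from. A distilled sketch of the corrected shape (hypothetical types):

struct counter { struct counter *parent; long usage; long protected_usage; };

static void propagate(struct counter *c, long new)
{
	c->protected_usage = new;
}

static void charge(struct counter *counter, long nr_pages)
{
	for (struct counter *c = counter; c; c = c->parent) {
		long new = (c->usage += nr_pages);
		propagate(c, new);   /* was propagate(counter, new): wrong node */
	}
}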
+diff --git a/net/compat.c b/net/compat.c
+index 0f7ded26059ec..c848bcb517f3e 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -291,6 +291,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
+ break;
+ }
+ /* Bump the usage count and install the file. */
++ __receive_sock(fp[i]);
+ fd_install(new_fd, get_file(fp[i]));
+ }
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 991ab80234cec..919f1a1739e90 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2736,6 +2736,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *
+ }
+ EXPORT_SYMBOL(sock_no_mmap);
+
++/*
++ * When a file is received (via SCM_RIGHTS, etc), we must bump the
++ * various sock-based usage counts.
++ */
++void __receive_sock(struct file *file)
++{
++ struct socket *sock;
++ int error;
++
++ /*
++ * The resulting value of "error" is ignored here since we only
++ * need to take action when the file is a socket and testing
++ * "sock" for NULL is sufficient.
++ */
++ sock = sock_from_file(file, &error);
++ if (sock) {
++ sock_update_netprioidx(&sock->sk->sk_cgrp_data);
++ sock_update_classid(&sock->sk->sk_cgrp_data);
++ }
++}
++
+ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
+ {
+ ssize_t res;
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index b1669f0244706..f5d96107af6de 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1033,7 +1033,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
+ might_sleep();
+ lockdep_assert_held(&local->sta_mtx);
+
+- while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
++ if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+ ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+ WARN_ON_ONCE(ret);
+ }
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index e59022b3f1254..b9c2ee7ab43fa 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -42,6 +42,8 @@
+ #define R_ARM_THM_CALL 10
+ #define R_ARM_CALL 28
+
++#define R_AARCH64_CALL26 283
++
+ static int fd_map; /* File descriptor for file being modified. */
+ static int mmap_failed; /* Boolean flag. */
+ static char gpfx; /* prefix for global symbol name (sometimes '_') */
+diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
+index ca9125726be24..8596ae4c2bdef 100644
+--- a/sound/pci/echoaudio/echoaudio.c
++++ b/sound/pci/echoaudio/echoaudio.c
+@@ -2198,7 +2198,6 @@ static int snd_echo_resume(struct device *dev)
+ if (err < 0) {
+ kfree(commpage_bak);
+ dev_err(dev, "resume init_hw err=%d\n", err);
+- snd_echo_free(chip);
+ return err;
+ }
+
+@@ -2225,7 +2224,6 @@ static int snd_echo_resume(struct device *dev)
+ if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, chip)) {
+ dev_err(chip->card->dev, "cannot grab irq\n");
+- snd_echo_free(chip);
+ return -EBUSY;
+ }
+ chip->irq = pci->irq;
+diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
+index 8a19753cc26aa..8c6e1ea67f213 100644
+--- a/tools/build/Makefile.feature
++++ b/tools/build/Makefile.feature
+@@ -8,7 +8,7 @@ endif
+
+ feature_check = $(eval $(feature_check_code))
+ define feature_check_code
+- feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
++ feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC="$(CC)" CXX="$(CXX)" CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
+ endef
+
+ feature_set = $(eval $(feature_set_code))
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index 8499385365c02..054e09ab4a9e4 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -70,8 +70,6 @@ FILES= \
+
+ FILES := $(addprefix $(OUTPUT),$(FILES))
+
+-CC ?= $(CROSS_COMPILE)gcc
+-CXX ?= $(CROSS_COMPILE)g++
+ PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
+ LLVM_CONFIG ?= llvm-config
+
+diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
+index 9235b76501be8..19d45c377ac18 100644
+--- a/tools/perf/bench/mem-functions.c
++++ b/tools/perf/bench/mem-functions.c
+@@ -223,12 +223,8 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *
+ return 0;
+ }
+
+-static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
++static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst)
+ {
+- u64 cycle_start = 0ULL, cycle_end = 0ULL;
+- memcpy_t fn = r->fn.memcpy;
+- int i;
+-
+ /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
+ memset(src, 0, size);
+
+@@ -237,6 +233,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, vo
+ * to not measure page fault overhead:
+ */
+ fn(dst, src, size);
++}
++
++static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
++{
++ u64 cycle_start = 0ULL, cycle_end = 0ULL;
++ memcpy_t fn = r->fn.memcpy;
++ int i;
++
++ memcpy_prefault(fn, size, src, dst);
+
+ cycle_start = get_cycles();
+ for (i = 0; i < nr_loops; ++i)
+@@ -252,11 +257,7 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size, void
+ memcpy_t fn = r->fn.memcpy;
+ int i;
+
+- /*
+- * We prefault the freshly allocated memory range here,
+- * to not measure page fault overhead:
+- */
+- fn(dst, src, size);
++ memcpy_prefault(fn, size, src, dst);
+
+ BUG_ON(gettimeofday(&tv_start, NULL));
+ for (i = 0; i < nr_loops; ++i)
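Aside: the perf bench change extracts the shared "prefault before timing" step into one helper so the cycles and gettimeofday paths cannot drift apart. A minimal sketch of the same refactor:

#include <string.h>

typedef void *(*memcpy_t)(void *, const void *, size_t);

static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst)
{
	memset(src, 0, size);	/* touch every source page */
	fn(dst, src, size);	/* fault in dst before measuring */
}

static void bench_one(memcpy_t fn, size_t size, void *src, void *dst,
		      int nr_loops)
{
	memcpy_prefault(fn, size, src, dst);
	for (int i = 0; i < nr_loops; i++)	/* timed region */
		fn(dst, src, size);
}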
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index f8ccfd6be0eee..7ffcbd6fcd1ae 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1164,6 +1164,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
+ return 0;
+ if (err == -EAGAIN ||
+ intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
++ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+ if (intel_pt_fup_event(decoder))
+ return 0;
+ return -EAGAIN;
+@@ -1942,17 +1943,13 @@ next:
+ }
+ if (decoder->set_fup_mwait)
+ no_tip = true;
++ if (no_tip)
++ decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP;
++ else
++ decoder->pkt_state = INTEL_PT_STATE_FUP;
+ err = intel_pt_walk_fup(decoder);
+- if (err != -EAGAIN) {
+- if (err)
+- return err;
+- if (no_tip)
+- decoder->pkt_state =
+- INTEL_PT_STATE_FUP_NO_TIP;
+- else
+- decoder->pkt_state = INTEL_PT_STATE_FUP;
+- return 0;
+- }
++ if (err != -EAGAIN)
++ return err;
+ if (no_tip) {
+ no_tip = false;
+ break;
+@@ -1980,8 +1977,10 @@ next:
+ * possibility of another CBR change that gets caught up
+ * in the PSB+.
+ */
+- if (decoder->cbr != decoder->cbr_seen)
++ if (decoder->cbr != decoder->cbr_seen) {
++ decoder->state.type = 0;
+ return 0;
++ }
+ break;
+
+ case INTEL_PT_PIP:
+@@ -2022,8 +2021,10 @@ next:
+
+ case INTEL_PT_CBR:
+ intel_pt_calc_cbr(decoder);
+- if (decoder->cbr != decoder->cbr_seen)
++ if (decoder->cbr != decoder->cbr_seen) {
++ decoder->state.type = 0;
+ return 0;
++ }
+ break;
+
+ case INTEL_PT_MODE_EXEC:
+@@ -2599,15 +2600,11 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
+ err = intel_pt_walk_tip(decoder);
+ break;
+ case INTEL_PT_STATE_FUP:
+- decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+ err = intel_pt_walk_fup(decoder);
+ if (err == -EAGAIN)
+ err = intel_pt_walk_fup_tip(decoder);
+- else if (!err)
+- decoder->pkt_state = INTEL_PT_STATE_FUP;
+ break;
+ case INTEL_PT_STATE_FUP_NO_TIP:
+- decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+ err = intel_pt_walk_fup(decoder);
+ if (err == -EAGAIN)
+ err = intel_pt_walk_trace(decoder);
+diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
+index 8cb3469dd11f2..48bbe8e0ce48d 100644
+--- a/tools/testing/selftests/bpf/test_progs.c
++++ b/tools/testing/selftests/bpf/test_progs.c
+@@ -7,6 +7,8 @@
+ #include <argp.h>
+ #include <string.h>
+
++#define EXIT_NO_TEST 2
++
+ /* defined in test_progs.h */
+ struct test_env env;
+
+@@ -584,5 +586,8 @@ int main(int argc, char **argv)
+ free(env.test_selector.num_set);
+ free(env.subtest_selector.num_set);
+
++ if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
++ return EXIT_NO_TEST;
++
+ return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
+ }
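Aside: the selftest change reserves a third exit code so "zero tests executed" (usually a filter typo or a build problem) is distinguishable from success in CI. Sketch:

#include <stdlib.h>

#define EXIT_NO_TEST 2

static int finish(int succ, int fail, int skip)
{
	if (succ + fail + skip == 0)
		return EXIT_NO_TEST;	/* nothing ran: flag it */
	return fail ? EXIT_FAILURE : EXIT_SUCCESS;
}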
+diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
+index bdbbbe8431e03..3694613f418f6 100644
+--- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
++++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
+@@ -44,7 +44,7 @@ struct shared_info {
+ unsigned long amr2;
+
+ /* AMR value that ptrace should refuse to write to the child. */
+- unsigned long amr3;
++ unsigned long invalid_amr;
+
+ /* IAMR value the parent expects to read from the child. */
+ unsigned long expected_iamr;
+@@ -57,8 +57,8 @@ struct shared_info {
+ * (even though they're valid ones) because userspace doesn't have
+ * access to those registers.
+ */
+- unsigned long new_iamr;
+- unsigned long new_uamor;
++ unsigned long invalid_iamr;
++ unsigned long invalid_uamor;
+ };
+
+ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
+@@ -66,11 +66,6 @@ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
+ return syscall(__NR_pkey_alloc, flags, init_access_rights);
+ }
+
+-static int sys_pkey_free(int pkey)
+-{
+- return syscall(__NR_pkey_free, pkey);
+-}
+-
+ static int child(struct shared_info *info)
+ {
+ unsigned long reg;
+@@ -100,28 +95,32 @@ static int child(struct shared_info *info)
+
+ info->amr1 |= 3ul << pkeyshift(pkey1);
+ info->amr2 |= 3ul << pkeyshift(pkey2);
+- info->amr3 |= info->amr2 | 3ul << pkeyshift(pkey3);
++ /*
++ * Invalid AMR value where we try to force-write
++ * bits which are denied by the UAMOR setting.
++ */
++ info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor);
+
++ /*
++ * If PKEY_DISABLE_EXECUTE succeeded, we should update expected_iamr.
++ */
+ if (disable_execute)
+ info->expected_iamr |= 1ul << pkeyshift(pkey1);
+ else
+ info->expected_iamr &= ~(1ul << pkeyshift(pkey1));
+
+- info->expected_iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3));
+-
+- info->expected_uamor |= 3ul << pkeyshift(pkey1) |
+- 3ul << pkeyshift(pkey2);
+- info->new_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2);
+- info->new_uamor |= 3ul << pkeyshift(pkey1);
++ /*
++ * We allocated pkey2 and pkey3 above. Clear their IAMR bits.
++ */
++ info->expected_iamr &= ~(1ul << pkeyshift(pkey2));
++ info->expected_iamr &= ~(1ul << pkeyshift(pkey3));
+
+ /*
+- * We won't use pkey3. We just want a plausible but invalid key to test
+- * whether ptrace will let us write to AMR bits we are not supposed to.
+- *
+- * This also tests whether the kernel restores the UAMOR permissions
+- * after a key is freed.
++ * Create an IAMR value different from the expected value.
++ * The kernel will reject the IAMR and UAMOR changes.
+ */
+- sys_pkey_free(pkey3);
++ info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2));
++ info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1));
+
+ printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n",
+ user_write, info->amr1, pkey1, pkey2, pkey3);
+@@ -196,9 +195,9 @@ static int parent(struct shared_info *info, pid_t pid)
+ PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync);
+ PARENT_FAIL_IF(ret, &info->child_sync);
+
+- info->amr1 = info->amr2 = info->amr3 = regs[0];
+- info->expected_iamr = info->new_iamr = regs[1];
+- info->expected_uamor = info->new_uamor = regs[2];
++ info->amr1 = info->amr2 = regs[0];
++ info->expected_iamr = regs[1];
++ info->expected_uamor = regs[2];
+
+ /* Wake up child so that it can set itself up. */
+ ret = prod_child(&info->child_sync);
+@@ -234,10 +233,10 @@ static int parent(struct shared_info *info, pid_t pid)
+ return ret;
+
+ /* Write invalid AMR value in child. */
+- ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr3, 1);
++ ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1);
+ PARENT_FAIL_IF(ret, &info->child_sync);
+
+- printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr3);
++ printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr);
+
+ /* Wake up child so that it can verify it didn't change. */
+ ret = prod_child(&info->child_sync);
+@@ -249,7 +248,7 @@ static int parent(struct shared_info *info, pid_t pid)
+
+ /* Try to write to IAMR. */
+ regs[0] = info->amr1;
+- regs[1] = info->new_iamr;
++ regs[1] = info->invalid_iamr;
+ ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2);
+ PARENT_FAIL_IF(!ret, &info->child_sync);
+
+@@ -257,7 +256,7 @@ static int parent(struct shared_info *info, pid_t pid)
+ ptrace_write_running, regs[0], regs[1]);
+
+ /* Try to write to IAMR and UAMOR. */
+- regs[2] = info->new_uamor;
++ regs[2] = info->invalid_uamor;
+ ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3);
+ PARENT_FAIL_IF(!ret, &info->child_sync);
+