author    Mike Pagano <mpagano@gentoo.org>    2020-06-17 12:41:08 -0400
committer Mike Pagano <mpagano@gentoo.org>    2020-06-17 12:41:08 -0400
commit    0ac4bed9e42b4d0585db323da6203141d38adbc5 (patch)
tree      f16e7a2ddeceb912a7ea8936dd8b576b48aae4c5
parent    Linux patch 5.6.18 (diff)
Linux patch 5.6.19
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              4
-rw-r--r--  1018_linux-5.6.19.patch  5871
2 files changed, 5875 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index fd785d47..f3eae126 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch: 1017_linux-5.6.18.patch
From: http://www.kernel.org
Desc: Linux 5.6.18
+Patch: 1018_linux-5.6.19.patch
+From: http://www.kernel.org
+Desc: Linux 5.6.19
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1018_linux-5.6.19.patch b/1018_linux-5.6.19.patch
new file mode 100644
index 00000000..db84ad4b
--- /dev/null
+++ b/1018_linux-5.6.19.patch
@@ -0,0 +1,5871 @@
+diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
+index ca983328976b..f65b51523014 100644
+--- a/Documentation/lzo.txt
++++ b/Documentation/lzo.txt
+@@ -159,11 +159,15 @@ Byte sequences
+ distance = 16384 + (H << 14) + D
+ state = S (copy S literals after this block)
+ End of stream is reached if distance == 16384
++ In version 1 only, to prevent ambiguity with the RLE case when
++ ((distance & 0x803f) == 0x803f) && (261 <= length <= 264), the
++ compressor must not emit block copies where distance and length
++ meet these conditions.
+
+ In version 1 only, this instruction is also used to encode a run of
+- zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1.
++ zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1.
+ In this case, it is followed by a fourth byte, X.
+- run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4.
++ run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4
+
+ 0 0 1 L L L L L (32..63)
+ Copy of small block within 16kB distance (preferably less than 34B)
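[The lzo.txt hunk above quotes two decoding formulas for the version-1 bitstream. A minimal C sketch of the same arithmetic (hypothetical helper names, not part of the patch), assuming the H, D, L and X fields have already been extracted from the instruction bytes:

    #include <stdint.h>

    /* distance = 16384 + (H << 14) + D; end of stream when distance == 16384 */
    static uint32_t lzo_v1_distance(uint32_t h, uint32_t d)
    {
            return 16384 + (h << 14) + d;
    }

    /* run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4 */
    static uint32_t lzo_v1_run_length(uint8_t x, uint8_t lll)
    {
            return (((uint32_t)x << 3) | (lll & 7)) + 4;
    }

The run-of-zeros case uses distance = 0xbfff (H = 1, all D bits set), which is why version 1 needs the compressor-side restriction the hunk adds.]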
+diff --git a/Makefile b/Makefile
+index 2948731a235c..f927a4fc7fae 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 6
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+index ba7f3e646c26..1333a68b9373 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+@@ -125,8 +125,6 @@
+ bus-width = <8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdmmc0_default>;
+- non-removable;
+- mmc-ddr-1_8v;
+ status = "okay";
+ };
+
+diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
+index 3944305e81df..b26c1aaf1e3c 100644
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -367,6 +367,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+ }
+ }
+
+-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
++static inline bool vcpu_has_ptrauth(struct kvm_vcpu *vcpu) { return false; }
++static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) { }
+
+ #endif /* __ARM_KVM_EMULATE_H__ */
+diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
+index a827b4d60d38..03932e172730 100644
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -453,4 +453,6 @@ static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
+ return true;
+ }
+
++#define kvm_arm_vcpu_loaded(vcpu) (false)
++
+ #endif /* __ARM_KVM_HOST_H__ */
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index b606cded90cd..4cc6a7eff635 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -219,8 +219,8 @@ static struct undef_hook arm_break_hook = {
+ };
+
+ static struct undef_hook thumb_break_hook = {
+- .instr_mask = 0xffff,
+- .instr_val = 0xde01,
++ .instr_mask = 0xffffffff,
++ .instr_val = 0x0000de01,
+ .cpsr_mask = PSR_T_BIT,
+ .cpsr_val = PSR_T_BIT,
+ .fn = break_trap,
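[The ptrace.c hunk widens thumb_break_hook's match from 16 to 32 bits. An undef hook matches on the masked instruction value, roughly as in this standalone sketch (illustrative, not the kernel's exact dispatch code):

    #include <stdint.h>

    static int hook_matches(uint32_t instr, uint32_t mask, uint32_t val)
    {
            return (instr & mask) == val;
    }

With instr_mask = 0xffff, a 32-bit Thumb-2 instruction that merely contains 0xde01 in one halfword could also match; requiring the upper 16 bits to be zero restricts the hook to the 16-bit breakpoint encoding itself.]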
+diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
+index b263e239cb59..a45366c3909b 100644
+--- a/arch/arm64/include/asm/acpi.h
++++ b/arch/arm64/include/asm/acpi.h
+@@ -12,6 +12,7 @@
+ #include <linux/efi.h>
+ #include <linux/memblock.h>
+ #include <linux/psci.h>
++#include <linux/stddef.h>
+
+ #include <asm/cputype.h>
+ #include <asm/io.h>
+@@ -31,14 +32,14 @@
+ * is therefore used to delimit the MADT GICC structure minimum length
+ * appropriately.
+ */
+-#define ACPI_MADT_GICC_MIN_LENGTH ACPI_OFFSET( \
++#define ACPI_MADT_GICC_MIN_LENGTH offsetof( \
+ struct acpi_madt_generic_interrupt, efficiency_class)
+
+ #define BAD_MADT_GICC_ENTRY(entry, end) \
+ (!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \
+ (unsigned long)(entry) + (entry)->header.length > (end))
+
+-#define ACPI_MADT_GICC_SPE (ACPI_OFFSET(struct acpi_madt_generic_interrupt, \
++#define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \
+ spe_interrupt) + sizeof(u16))
+
+ /* Basic configuration for ACPI */
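[The acpi.h hunk replaces ACPICA's ACPI_OFFSET() with the standard offsetof() from <linux/stddef.h>. A standalone illustration with a hypothetical stand-in struct (the real struct acpi_madt_generic_interrupt has many more fields):

    #include <stddef.h>
    #include <stdint.h>

    struct madt_gicc_example {          /* hypothetical layout */
            uint8_t  header[4];
            uint16_t spe_interrupt;
            uint8_t  efficiency_class;
    };

    /* Minimum length = byte offset of the first optional field, the same
     * shape as the ACPI_MADT_GICC_MIN_LENGTH definition above. */
    #define EXAMPLE_MIN_LENGTH \
            offsetof(struct madt_gicc_example, efficiency_class)
]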
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index f658dda12364..0ab02e5ff712 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -111,12 +111,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
+ vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
+ }
+
+-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
+-{
+- if (vcpu_has_ptrauth(vcpu))
+- vcpu_ptrauth_disable(vcpu);
+-}
+-
+ static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
+ {
+ return vcpu->arch.vsesr_el2;
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 57fd46acd058..584d9792cbfe 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -404,8 +404,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
+ * CP14 and CP15 live in the same array, as they are backed by the
+ * same system registers.
+ */
+-#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
+-#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
++#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
++
++#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
++#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
+
+ struct kvm_vm_stat {
+ ulong remote_tlb_flush;
+@@ -683,4 +685,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
+ #define kvm_arm_vcpu_sve_finalized(vcpu) \
+ ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+
++#define kvm_arm_vcpu_loaded(vcpu) ((vcpu)->arch.sysregs_loaded_on_cpu)
++
+ #endif /* __ARM64_KVM_HOST_H__ */
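[The CPx_BIAS change accounts for endianness: the 32-bit copro[] view aliases an array of 64-bit system registers, and on a big-endian kernel the two 32-bit halves of each u64 sit in the opposite order. A minimal sketch of why XOR-ing the index with 1 compensates, assuming the overlay below matches how copro[] aliases the u64 storage:

    #include <stdint.h>

    union sysreg {
            uint64_t r64;
            uint32_t r32[2];    /* r32[0] is the low half only on LE */
    };

    /* On big endian, flip the low index bit so logical word n of a
     * register pair still lands on the architecturally correct half. */
    static uint32_t read_copro_word(union sysreg *r, unsigned int idx,
                                    int big_endian)
    {
            return r->r32[idx ^ (big_endian ? 1 : 0)];
    }
]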
+diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
+index aacfc55de44c..e0a4bcdb9451 100644
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ return 1;
+ }
+
+-#define __ptrauth_save_key(regs, key) \
+-({ \
+- regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+- regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+-})
+-
+ /*
+ * Handle the guest trying to use a ptrauth instruction, or trying to access a
+ * ptrauth register.
+ */
+ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
+ {
+- struct kvm_cpu_context *ctxt;
+-
+- if (vcpu_has_ptrauth(vcpu)) {
++ if (vcpu_has_ptrauth(vcpu))
+ vcpu_ptrauth_enable(vcpu);
+- ctxt = vcpu->arch.host_cpu_context;
+- __ptrauth_save_key(ctxt->sys_regs, APIA);
+- __ptrauth_save_key(ctxt->sys_regs, APIB);
+- __ptrauth_save_key(ctxt->sys_regs, APDA);
+- __ptrauth_save_key(ctxt->sys_regs, APDB);
+- __ptrauth_save_key(ctxt->sys_regs, APGA);
+- } else {
++ else
+ kvm_inject_undefined(vcpu);
+- }
+ }
+
+ /*
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 3e909b117f0c..c3d15eaa9ae6 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1280,10 +1280,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+ {
++ int reg = r->reg;
++
++ /* See the 32bit mapping in kvm_host.h */
++ if (p->is_aarch32)
++ reg = r->reg / 2;
++
+ if (p->is_write)
+- vcpu_write_sys_reg(vcpu, p->regval, r->reg);
++ vcpu_write_sys_reg(vcpu, p->regval, reg);
+ else
+- p->regval = vcpu_read_sys_reg(vcpu, r->reg);
++ p->regval = vcpu_read_sys_reg(vcpu, reg);
+ return true;
+ }
+
+diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
+index 9023828ede97..ac8f65a3e75a 100644
+--- a/arch/csky/abiv2/inc/abi/entry.h
++++ b/arch/csky/abiv2/inc/abi/entry.h
+@@ -13,6 +13,8 @@
+ #define LSAVE_A1 28
+ #define LSAVE_A2 32
+ #define LSAVE_A3 36
++#define LSAVE_A4 40
++#define LSAVE_A5 44
+
+ #define KSPTOUSP
+ #define USPTOKSP
+diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
+index 9718388448a4..ff908d28f0a0 100644
+--- a/arch/csky/kernel/entry.S
++++ b/arch/csky/kernel/entry.S
+@@ -170,8 +170,10 @@ csky_syscall_trace:
+ ldw a3, (sp, LSAVE_A3)
+ #if defined(__CSKYABIV2__)
+ subi sp, 8
+- stw r5, (sp, 0x4)
+- stw r4, (sp, 0x0)
++ ldw r9, (sp, LSAVE_A4)
++ stw r9, (sp, 0x0)
++ ldw r9, (sp, LSAVE_A5)
++ stw r9, (sp, 0x4)
+ #else
+ ldw r6, (sp, LSAVE_A4)
+ ldw r7, (sp, LSAVE_A5)
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index 41204a49cf95..7b47a323dc23 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -274,8 +274,12 @@ enum emulation_result {
+ #define MIPS3_PG_SHIFT 6
+ #define MIPS3_PG_FRAME 0x3fffffc0
+
++#if defined(CONFIG_64BIT)
++#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
++#else
+ #define VPN2_MASK 0xffffe000
+-#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
++#endif
++#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data)
+ #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
+ #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
+ #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
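[The VPN2_MASK change sizes the mask from the CPU's actual virtual address width instead of hard-coding 0xffffe000. GENMASK() builds a contiguous bit mask; a sketch of its semantics for 64-bit values (the real kernel macro lives in <linux/bits.h>):

    /* Bits h..l set, everything else clear. */
    #define GENMASK_ULL_SKETCH(h, l) \
            ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    /* Example: GENMASK_ULL_SKETCH(39, 13) == 0x000000ffffffe000,
     * i.e. VPN2 bits for a CPU with 40 virtual address bits. */
]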
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index a32d478a7f41..b4c89a1acebb 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -303,12 +303,6 @@ SECTIONS
+ *(.branch_lt)
+ }
+
+-#ifdef CONFIG_DEBUG_INFO_BTF
+- .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
+- *(.BTF)
+- }
+-#endif
+-
+ .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
+ __start_opd = .;
+ KEEP(*(.opd))
+diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
+index 206156255247..7f3faf0dea25 100644
+--- a/arch/powerpc/mm/ptdump/ptdump.c
++++ b/arch/powerpc/mm/ptdump/ptdump.c
+@@ -60,6 +60,7 @@ struct pg_state {
+ unsigned long start_address;
+ unsigned long start_pa;
+ unsigned long last_pa;
++ unsigned long page_size;
+ unsigned int level;
+ u64 current_flags;
+ bool check_wx;
+@@ -157,9 +158,9 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
+ #endif
+
+ pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
+- if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) {
++ if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) {
+ pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
+- delta = PAGE_SIZE >> 10;
++ delta = st->page_size >> 10;
+ } else {
+ pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
+ delta = (addr - st->start_address) >> 10;
+@@ -190,7 +191,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
+ }
+
+ static void note_page(struct pg_state *st, unsigned long addr,
+- unsigned int level, u64 val)
++ unsigned int level, u64 val, unsigned long page_size)
+ {
+ u64 flag = val & pg_level[level].mask;
+ u64 pa = val & PTE_RPN_MASK;
+@@ -202,6 +203,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
+ st->start_address = addr;
+ st->start_pa = pa;
+ st->last_pa = pa;
++ st->page_size = page_size;
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ /*
+ * Dump the section of virtual memory when:
+@@ -213,7 +215,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
+ */
+ } else if (flag != st->current_flags || level != st->level ||
+ addr >= st->marker[1].start_address ||
+- (pa != st->last_pa + PAGE_SIZE &&
++ (pa != st->last_pa + st->page_size &&
+ (pa != st->start_pa || st->start_pa != st->last_pa))) {
+
+ /* Check the PTE flags */
+@@ -241,6 +243,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
+ st->start_address = addr;
+ st->start_pa = pa;
+ st->last_pa = pa;
++ st->page_size = page_size;
+ st->current_flags = flag;
+ st->level = level;
+ } else {
+@@ -256,7 +259,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+
+ for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+ addr = start + i * PAGE_SIZE;
+- note_page(st, addr, 4, pte_val(*pte));
++ note_page(st, addr, 4, pte_val(*pte), PAGE_SIZE);
+
+ }
+ }
+@@ -273,7 +276,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+ /* pmd exists */
+ walk_pte(st, pmd, addr);
+ else
+- note_page(st, addr, 3, pmd_val(*pmd));
++ note_page(st, addr, 3, pmd_val(*pmd), PMD_SIZE);
+ }
+ }
+
+@@ -289,7 +292,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+ /* pud exists */
+ walk_pmd(st, pud, addr);
+ else
+- note_page(st, addr, 2, pud_val(*pud));
++ note_page(st, addr, 2, pud_val(*pud), PUD_SIZE);
+ }
+ }
+
+@@ -308,7 +311,7 @@ static void walk_pagetables(struct pg_state *st)
+ /* pgd exists */
+ walk_pud(st, pgd, addr);
+ else
+- note_page(st, addr, 1, pgd_val(*pgd));
++ note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE);
+ }
+ }
+
+@@ -363,7 +366,7 @@ static int ptdump_show(struct seq_file *m, void *v)
+
+ /* Traverse kernel page tables */
+ walk_pagetables(&st);
+- note_page(&st, 0, 0, 0);
++ note_page(&st, 0, 0, 0, 0);
+ return 0;
+ }
+
+diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
+index fe8d396e2301..16df9cc8f360 100644
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -19,6 +19,7 @@
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ #include <linux/msi.h>
++#include <linux/vmalloc.h>
+
+ #include <asm/prom.h>
+ #include <asm/io.h>
+@@ -1013,12 +1014,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq);
+ void xive_cleanup_irq_data(struct xive_irq_data *xd)
+ {
+ if (xd->eoi_mmio) {
++ unmap_kernel_range((unsigned long)xd->eoi_mmio,
++ 1u << xd->esb_shift);
+ iounmap(xd->eoi_mmio);
+ if (xd->eoi_mmio == xd->trig_mmio)
+ xd->trig_mmio = NULL;
+ xd->eoi_mmio = NULL;
+ }
+ if (xd->trig_mmio) {
++ unmap_kernel_range((unsigned long)xd->trig_mmio,
++ 1u << xd->esb_shift);
+ iounmap(xd->trig_mmio);
+ xd->trig_mmio = NULL;
+ }
+diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
+index 0d3d8f170ea4..25208fa95426 100644
+--- a/arch/s390/pci/pci_clp.c
++++ b/arch/s390/pci/pci_clp.c
+@@ -309,14 +309,13 @@ out:
+
+ int clp_disable_fh(struct zpci_dev *zdev)
+ {
+- u32 fh = zdev->fh;
+ int rc;
+
+ if (!zdev_enabled(zdev))
+ return 0;
+
+ rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
+- zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
++ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
+ return rc;
+ }
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index dff6623804c2..dae71ebfa709 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -1892,8 +1892,8 @@ static __initconst const u64 tnt_hw_cache_extra_regs
+
+ static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
+ /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+- INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0),
+- INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1),
++ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
++ INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
+ EVENT_EXTRA_END
+ };
+
+diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
+index 64c3dce374e5..688d6f1e7a63 100644
+--- a/arch/x86/include/asm/set_memory.h
++++ b/arch/x86/include/asm/set_memory.h
+@@ -83,28 +83,35 @@ int set_direct_map_default_noflush(struct page *page);
+ extern int kernel_set_to_readonly;
+
+ #ifdef CONFIG_X86_64
+-static inline int set_mce_nospec(unsigned long pfn)
++/*
++ * Prevent speculative access to the page by either unmapping
++ * it (if we do not require access to any part of the page) or
++ * marking it uncacheable (if we want to try to retrieve data
++ * from non-poisoned lines in the page).
++ */
++static inline int set_mce_nospec(unsigned long pfn, bool unmap)
+ {
+ unsigned long decoy_addr;
+ int rc;
+
+ /*
+- * Mark the linear address as UC to make sure we don't log more
+- * errors because of speculative access to the page.
+ * We would like to just call:
+- * set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1);
++ * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
+ * but doing that would radically increase the odds of a
+ * speculative access to the poison page because we'd have
+ * the virtual address of the kernel 1:1 mapping sitting
+ * around in registers.
+ * Instead we get tricky. We create a non-canonical address
+ * that looks just like the one we want, but has bit 63 flipped.
+- * This relies on set_memory_uc() properly sanitizing any __pa()
++ * This relies on set_memory_XX() properly sanitizing any __pa()
+ * results with __PHYSICAL_MASK or PTE_PFN_MASK.
+ */
+ decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
+
+- rc = set_memory_uc(decoy_addr, 1);
++ if (unmap)
++ rc = set_memory_np(decoy_addr, 1);
++ else
++ rc = set_memory_uc(decoy_addr, 1);
+ if (rc)
+ pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
+ return rc;
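[The comment block above describes the decoy-address trick. A standalone worked version (PAGE_OFFSET_X here is an illustrative x86-64 direct-map base, not a value taken from this patch):

    #include <stdint.h>

    #define PAGE_SHIFT_X  12
    #define PAGE_OFFSET_X 0xffff888000000000ULL   /* illustrative value */

    /* Same linear address with bit 63 flipped: non-canonical, so the real
     * 1:1-map pointer never sits in a register inviting speculation. */
    static uint64_t decoy_addr(uint64_t pfn)
    {
            return (pfn << PAGE_SHIFT_X) + (PAGE_OFFSET_X ^ (1ULL << 63));
    }

set_memory_np()/set_memory_uc() then sanitize the address back through __pa(), as the comment notes.]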
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 1f875fbe1384..f04cc01e629e 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1111,8 +1111,7 @@ static const int amd_erratum_383[] =
+
+ /* #1054: Instructions Retired Performance Counter May Be Inaccurate */
+ static const int amd_erratum_1054[] =
+- AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+-
++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 56978cb06149..b53dcff21438 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -588,7 +588,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
+ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+ SPECTRE_V2_NONE;
+
+-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
++static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
++ SPECTRE_V2_USER_NONE;
++static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
+ SPECTRE_V2_USER_NONE;
+
+ #ifdef CONFIG_RETPOLINE
+@@ -734,15 +736,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+ break;
+ }
+
+- /*
+- * At this point, an STIBP mode other than "off" has been set.
+- * If STIBP support is not being forced, check if STIBP always-on
+- * is preferred.
+- */
+- if (mode != SPECTRE_V2_USER_STRICT &&
+- boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+- mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+-
+ /* Initialize Indirect Branch Prediction Barrier */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+@@ -765,23 +758,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+ pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+ static_key_enabled(&switch_mm_always_ibpb) ?
+ "always-on" : "conditional");
++
++ spectre_v2_user_ibpb = mode;
+ }
+
+- /* If enhanced IBRS is enabled no STIBP required */
+- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++ /*
++ * If enhanced IBRS is enabled or SMT impossible, STIBP is not
++ * required.
++ */
++ if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return;
+
+ /*
+- * If SMT is not possible or STIBP is not available clear the STIBP
+- * mode.
++ * At this point, an STIBP mode other than "off" has been set.
++ * If STIBP support is not being forced, check if STIBP always-on
++ * is preferred.
+ */
+- if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
++ if (mode != SPECTRE_V2_USER_STRICT &&
++ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
++ mode = SPECTRE_V2_USER_STRICT_PREFERRED;
++
++ /*
++ * If STIBP is not available, clear the STIBP mode.
++ */
++ if (!boot_cpu_has(X86_FEATURE_STIBP))
+ mode = SPECTRE_V2_USER_NONE;
++
++ spectre_v2_user_stibp = mode;
++
+ set_mode:
+- spectre_v2_user = mode;
+- /* Only print the STIBP mode when SMT possible */
+- if (smt_possible)
+- pr_info("%s\n", spectre_v2_user_strings[mode]);
++ pr_info("%s\n", spectre_v2_user_strings[mode]);
+ }
+
+ static const char * const spectre_v2_strings[] = {
+@@ -1014,7 +1020,7 @@ void cpu_bugs_smt_update(void)
+ {
+ mutex_lock(&spec_ctrl_mutex);
+
+- switch (spectre_v2_user) {
++ switch (spectre_v2_user_stibp) {
+ case SPECTRE_V2_USER_NONE:
+ break;
+ case SPECTRE_V2_USER_STRICT:
+@@ -1257,14 +1263,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+ switch (ctrl) {
+ case PR_SPEC_ENABLE:
+- if (spectre_v2_user == SPECTRE_V2_USER_NONE)
++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+ return 0;
+ /*
+ * Indirect branch speculation is always disabled in strict
+- * mode.
++ * mode. It can neither be enabled if it was force-disabled
++ * by a previous prctl call.
++
+ */
+- if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+- spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
++ task_spec_ib_force_disable(task))
+ return -EPERM;
+ task_clear_spec_ib_disable(task);
+ task_update_spec_tif(task);
+@@ -1275,10 +1286,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ * Indirect branch speculation is always allowed when
+ * mitigation is force disabled.
+ */
+- if (spectre_v2_user == SPECTRE_V2_USER_NONE)
++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+ return -EPERM;
+- if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+- spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+ return 0;
+ task_set_spec_ib_disable(task);
+ if (ctrl == PR_SPEC_FORCE_DISABLE)
+@@ -1309,7 +1322,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
+ {
+ if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+- if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
++ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
+ ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ }
+ #endif
+@@ -1340,22 +1354,24 @@ static int ib_prctl_get(struct task_struct *task)
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ return PR_SPEC_NOT_AFFECTED;
+
+- switch (spectre_v2_user) {
+- case SPECTRE_V2_USER_NONE:
++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+ return PR_SPEC_ENABLE;
+- case SPECTRE_V2_USER_PRCTL:
+- case SPECTRE_V2_USER_SECCOMP:
++ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
++ return PR_SPEC_DISABLE;
++ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
++ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
++ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
++ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
+ if (task_spec_ib_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+ if (task_spec_ib_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+- case SPECTRE_V2_USER_STRICT:
+- case SPECTRE_V2_USER_STRICT_PREFERRED:
+- return PR_SPEC_DISABLE;
+- default:
++ } else
+ return PR_SPEC_NOT_AFFECTED;
+- }
+ }
+
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+@@ -1594,7 +1610,7 @@ static char *stibp_state(void)
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return "";
+
+- switch (spectre_v2_user) {
++ switch (spectre_v2_user_stibp) {
+ case SPECTRE_V2_USER_NONE:
+ return ", STIBP: disabled";
+ case SPECTRE_V2_USER_STRICT:
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 2c4f949611e4..410d3868bf33 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -527,6 +527,13 @@ bool mce_is_memory_error(struct mce *m)
+ }
+ EXPORT_SYMBOL_GPL(mce_is_memory_error);
+
++static bool whole_page(struct mce *m)
++{
++ if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
++ return true;
++ return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
++}
++
+ bool mce_is_correctable(struct mce *m)
+ {
+ if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
+@@ -598,7 +605,7 @@ static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
+
+ pfn = mce->addr >> PAGE_SHIFT;
+ if (!memory_failure(pfn, 0))
+- set_mce_nospec(pfn);
++ set_mce_nospec(pfn, whole_page(mce));
+
+ return NOTIFY_OK;
+ }
+@@ -1096,7 +1103,7 @@ static int do_memory_failure(struct mce *m)
+ if (ret)
+ pr_err("Memory error not recovered");
+ else
+- set_mce_nospec(m->addr >> PAGE_SHIFT);
++ set_mce_nospec(m->addr >> PAGE_SHIFT, whole_page(m));
+ return ret;
+ }
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 9898f672b81d..3d88300ec306 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -546,28 +546,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
+
+ lockdep_assert_irqs_disabled();
+
+- /*
+- * If TIF_SSBD is different, select the proper mitigation
+- * method. Note that if SSBD mitigation is disabled or permanentely
+- * enabled this branch can't be taken because nothing can set
+- * TIF_SSBD.
+- */
+- if (tif_diff & _TIF_SSBD) {
+- if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
++ /* Handle change of TIF_SSBD depending on the mitigation method. */
++ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
++ if (tif_diff & _TIF_SSBD)
+ amd_set_ssb_virt_state(tifn);
+- } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
++ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
++ if (tif_diff & _TIF_SSBD)
+ amd_set_core_ssb_state(tifn);
+- } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+- static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+- msr |= ssbd_tif_to_spec_ctrl(tifn);
+- updmsr = true;
+- }
++ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ updmsr |= !!(tif_diff & _TIF_SSBD);
++ msr |= ssbd_tif_to_spec_ctrl(tifn);
+ }
+
+- /*
+- * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
+- * otherwise avoid the MSR write.
+- */
++ /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ static_branch_unlikely(&switch_to_cond_stibp)) {
+ updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 0cc7c0b106bb..762f5c1465a6 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
+ },
+ },
++ { /* Handle problems with rebooting on Apple MacBook6,1 */
++ .callback = set_pci_reboot,
++ .ident = "Apple MacBook6,1",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
++ },
++ },
+ { /* Handle problems with rebooting on Apple MacBookPro5 */
+ .callback = set_pci_reboot,
+ .ident = "Apple MacBookPro5",
+diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
+index d8673d8a779b..36a585b80d9e 100644
+--- a/arch/x86/kernel/time.c
++++ b/arch/x86/kernel/time.c
+@@ -25,10 +25,6 @@
+ #include <asm/hpet.h>
+ #include <asm/time.h>
+
+-#ifdef CONFIG_X86_64
+-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
+-#endif
+-
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+ unsigned long pc = instruction_pointer(regs);
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index e3296aa028fe..ccb2dec210ef 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -39,13 +39,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)
+ #ifdef CONFIG_X86_32
+ OUTPUT_ARCH(i386)
+ ENTRY(phys_startup_32)
+-jiffies = jiffies_64;
+ #else
+ OUTPUT_ARCH(i386:x86-64)
+ ENTRY(phys_startup_64)
+-jiffies_64 = jiffies;
+ #endif
+
++jiffies = jiffies_64;
++
+ #if defined(CONFIG_X86_64)
+ /*
+ * On 64-bit, align RODATA to 2MB so we retain large page mappings for
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 87e9ba27ada1..ea6fa05e2fd9 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -343,6 +343,8 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
+ {
+ BUG_ON((u64)(unsigned)access_mask != access_mask);
+ BUG_ON((mmio_mask & mmio_value) != mmio_value);
++ WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
++ WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
+ shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
+ shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
+ shadow_mmio_access_mask = access_mask;
+@@ -591,16 +593,15 @@ static void kvm_mmu_reset_all_pte_masks(void)
+ * the most significant bits of legal physical address space.
+ */
+ shadow_nonpresent_or_rsvd_mask = 0;
+- low_phys_bits = boot_cpu_data.x86_cache_bits;
+- if (boot_cpu_data.x86_cache_bits <
+- 52 - shadow_nonpresent_or_rsvd_mask_len) {
++ low_phys_bits = boot_cpu_data.x86_phys_bits;
++ if (boot_cpu_has_bug(X86_BUG_L1TF) &&
++ !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
++ 52 - shadow_nonpresent_or_rsvd_mask_len)) {
++ low_phys_bits = boot_cpu_data.x86_cache_bits
++ - shadow_nonpresent_or_rsvd_mask_len;
+ shadow_nonpresent_or_rsvd_mask =
+- rsvd_bits(boot_cpu_data.x86_cache_bits -
+- shadow_nonpresent_or_rsvd_mask_len,
+- boot_cpu_data.x86_cache_bits - 1);
+- low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+- } else
+- WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
++ rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
++ }
+
+ shadow_nonpresent_or_rsvd_lower_gfn_mask =
+ GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
+@@ -6131,25 +6132,16 @@ static void kvm_set_mmio_spte_mask(void)
+ u64 mask;
+
+ /*
+- * Set the reserved bits and the present bit of an paging-structure
+- * entry to generate page fault with PFER.RSV = 1.
+- */
+-
+- /*
+- * Mask the uppermost physical address bit, which would be reserved as
+- * long as the supported physical address width is less than 52.
++ * Set a reserved PA bit in MMIO SPTEs to generate page faults with
++ * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT
++ * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
++ * 52-bit physical addresses then there are no reserved PA bits in the
++ * PTEs and so the reserved PA approach must be disabled.
+ */
+- mask = 1ull << 51;
+-
+- /* Set the present bit. */
+- mask |= 1ull;
+-
+- /*
+- * If reserved bit is not supported, clear the present bit to disable
+- * mmio page fault.
+- */
+- if (shadow_phys_bits == 52)
+- mask &= ~1ull;
++ if (shadow_phys_bits < 52)
++ mask = BIT_ULL(51) | PT_PRESENT_MASK;
++ else
++ mask = 0;
+
+ kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
+ }
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index c974c49221eb..eee7cb0e1d95 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3236,8 +3236,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
+ return NESTED_EXIT_HOST;
+ break;
+ case SVM_EXIT_EXCP_BASE + PF_VECTOR:
+- /* When we're shadowing, trap PFs, but not async PF */
+- if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
++ /* Trap async PF even if not shadowing */
++ if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason)
+ return NESTED_EXIT_HOST;
+ break;
+ default:
+@@ -3326,7 +3326,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
+ dst->iopm_base_pa = from->iopm_base_pa;
+ dst->msrpm_base_pa = from->msrpm_base_pa;
+ dst->tsc_offset = from->tsc_offset;
+- dst->asid = from->asid;
++ /* asid not copied, it is handled manually for svm->vmcb. */
+ dst->tlb_ctl = from->tlb_ctl;
+ dst->int_ctl = from->int_ctl;
+ dst->int_vector = from->int_vector;
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 3a2f05ef51fa..a03db4a75977 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -303,7 +303,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
+ cpu = get_cpu();
+ prev = vmx->loaded_vmcs;
+ vmx->loaded_vmcs = vmcs;
+- vmx_vcpu_load_vmcs(vcpu, cpu);
++ vmx_vcpu_load_vmcs(vcpu, cpu, prev);
+ vmx_sync_vmcs_host_state(vmx, prev);
+ put_cpu();
+
+@@ -5562,7 +5562,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
+ vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
+ KVM_ISA_VMX);
+
+- switch (exit_reason) {
++ switch ((u16)exit_reason) {
+ case EXIT_REASON_EXCEPTION_NMI:
+ if (is_nmi(intr_info))
+ return false;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index a83c94a971ee..b29902c521f2 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1314,10 +1314,12 @@ after_clear_sn:
+ pi_set_on(pi_desc);
+ }
+
+-void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
++void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
++ struct loaded_vmcs *buddy)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
++ struct vmcs *prev;
+
+ if (!already_loaded) {
+ loaded_vmcs_clear(vmx->loaded_vmcs);
+@@ -1336,10 +1338,18 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
+ local_irq_enable();
+ }
+
+- if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
++ prev = per_cpu(current_vmcs, cpu);
++ if (prev != vmx->loaded_vmcs->vmcs) {
+ per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
+ vmcs_load(vmx->loaded_vmcs->vmcs);
+- indirect_branch_prediction_barrier();
++
++ /*
++ * No indirect branch prediction barrier needed when switching
++ * the active VMCS within a guest, e.g. on nested VM-Enter.
++ * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
++ */
++ if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
++ indirect_branch_prediction_barrier();
+ }
+
+ if (!already_loaded) {
+@@ -1376,7 +1386,7 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+- vmx_vcpu_load_vmcs(vcpu, cpu);
++ vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
+
+ vmx_vcpu_pi_load(vcpu, cpu);
+
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index e64da06c7009..ff7361aa824c 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -320,7 +320,8 @@ struct kvm_vmx {
+ };
+
+ bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
+-void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
++void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
++ struct loaded_vmcs *buddy);
+ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+ int allocate_vpid(void);
+ void free_vpid(int vpid);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 7f3371a39ed0..4b4a8a4e0251 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4568,7 +4568,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+
+ if (kvm_state.flags &
+ ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
+- | KVM_STATE_NESTED_EVMCS))
++ | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING))
+ break;
+
+ /* nested_run_pending implies guest_mode. */
+@@ -6908,7 +6908,7 @@ restart:
+ if (!ctxt->have_exception ||
+ exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+ kvm_rip_write(vcpu, ctxt->eip);
+- if (r && ctxt->tf)
++ if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+ r = kvm_vcpu_do_singlestep(vcpu);
+ if (kvm_x86_ops->update_emulated_instruction)
+ kvm_x86_ops->update_emulated_instruction(vcpu);
+@@ -8115,9 +8115,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
+ kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+ }
+
+-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+- unsigned long start, unsigned long end,
+- bool blockable)
++void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
++ unsigned long start, unsigned long end)
+ {
+ unsigned long apic_address;
+
+@@ -8128,8 +8127,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+ if (start <= apic_address && apic_address < end)
+ kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+-
+- return 0;
+ }
+
+ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
+index 69309cd56fdf..33093fdedb02 100644
+--- a/arch/x86/mm/dump_pagetables.c
++++ b/arch/x86/mm/dump_pagetables.c
+@@ -249,10 +249,22 @@ static void note_wx(struct pg_state *st, unsigned long addr)
+ (void *)st->start_address);
+ }
+
+-static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
++static void effective_prot(struct ptdump_state *pt_st, int level, u64 val)
+ {
+- return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
+- ((prot1 | prot2) & _PAGE_NX);
++ struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
++ pgprotval_t prot = val & PTE_FLAGS_MASK;
++ pgprotval_t effective;
++
++ if (level > 0) {
++ pgprotval_t higher_prot = st->prot_levels[level - 1];
++
++ effective = (higher_prot & prot & (_PAGE_USER | _PAGE_RW)) |
++ ((higher_prot | prot) & _PAGE_NX);
++ } else {
++ effective = prot;
++ }
++
++ st->prot_levels[level] = effective;
+ }
+
+ /*
+@@ -270,16 +282,10 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
+ struct seq_file *m = st->seq;
+
+ new_prot = val & PTE_FLAGS_MASK;
+-
+- if (level > 0) {
+- new_eff = effective_prot(st->prot_levels[level - 1],
+- new_prot);
+- } else {
+- new_eff = new_prot;
+- }
+-
+- if (level >= 0)
+- st->prot_levels[level] = new_eff;
++ if (!val)
++ new_eff = 0;
++ else
++ new_eff = st->prot_levels[level];
+
+ /*
+ * If we have a "break" in the series, we need to flush the state that
+@@ -374,6 +380,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m,
+ struct pg_state st = {
+ .ptdump = {
+ .note_page = note_page,
++ .effective_prot = effective_prot,
+ .range = ptdump_ranges
+ },
+ .level = -1,
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index e723559c386a..0c67a5a94de3 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
+
+ /*
+ * Device [1022:7808]
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index f8b4dc161c02..f1e6ccaff853 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -403,7 +403,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
+ err = wait_for_completion_killable(&larval->completion);
+ WARN_ON(err);
+ if (!err)
+- crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval);
++ crypto_notify(CRYPTO_MSG_ALG_LOADED, larval);
+
+ out:
+ crypto_larval_kill(&larval->alg);
+diff --git a/crypto/drbg.c b/crypto/drbg.c
+index b6929eb5f565..04379ca624cd 100644
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1294,8 +1294,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
+ if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
+ drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags),
+ GFP_KERNEL);
+- if (!drbg->prev)
++ if (!drbg->prev) {
++ ret = -ENOMEM;
+ goto fini;
++ }
+ drbg->fips_primed = false;
+ }
+
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index a1a858ad4d18..f9b1a2abdbe2 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -865,6 +865,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
+ "acpi_cppc");
+ if (ret) {
+ per_cpu(cpc_desc_ptr, pr->id) = NULL;
++ kobject_put(&cpc_ptr->kobj);
+ goto out_free;
+ }
+
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index 5832bc10aca8..95e200b618bd 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -186,7 +186,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
+ * possibly drop references to the power resources in use.
+ */
+ state = ACPI_STATE_D3_HOT;
+- /* If _PR3 is not available, use D3hot as the target state. */
++ /* If D3cold is not supported, use D3hot as the target state. */
+ if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid)
+ target_state = state;
+ } else if (!device->power.states[state].flags.valid) {
+diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
+index aba0d0027586..6d7a522952bf 100644
+--- a/drivers/acpi/evged.c
++++ b/drivers/acpi/evged.c
+@@ -79,6 +79,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
+ struct resource r;
+ struct acpi_resource_irq *p = &ares->data.irq;
+ struct acpi_resource_extended_irq *pext = &ares->data.extended_irq;
++ char ev_name[5];
++ u8 trigger;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_END_TAG)
+ return AE_OK;
+@@ -87,14 +89,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
+ dev_err(dev, "unable to parse IRQ resource\n");
+ return AE_ERROR;
+ }
+- if (ares->type == ACPI_RESOURCE_TYPE_IRQ)
++ if (ares->type == ACPI_RESOURCE_TYPE_IRQ) {
+ gsi = p->interrupts[0];
+- else
++ trigger = p->triggering;
++ } else {
+ gsi = pext->interrupts[0];
++ trigger = p->triggering;
++ }
+
+ irq = r.start;
+
+- if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) {
++ switch (gsi) {
++ case 0 ... 255:
++ sprintf(ev_name, "_%c%02hhX",
++ trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
++
++ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
++ break;
++ /* fall through */
++ default:
++ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
++ break;
++
+ dev_err(dev, "cannot locate _EVT method\n");
+ return AE_ERROR;
+ }
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 6d3448895382..1b255e98de4d 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -919,12 +919,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
+
+ if (buffer.length && package
+ && package->type == ACPI_TYPE_PACKAGE
+- && package->package.count) {
+- int err = acpi_extract_power_resources(package, 0,
+- &ps->resources);
+- if (!err)
+- device->power.flags.power_resources = 1;
+- }
++ && package->package.count)
++ acpi_extract_power_resources(package, 0, &ps->resources);
++
+ ACPI_FREE(buffer.pointer);
+ }
+
+@@ -971,14 +968,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
+ acpi_bus_init_power_state(device, i);
+
+ INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
+- if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
+- device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
+
+- /* Set defaults for D0 and D3hot states (always valid) */
++ /* Set the defaults for D0 and D3hot (always supported). */
+ device->power.states[ACPI_STATE_D0].flags.valid = 1;
+ device->power.states[ACPI_STATE_D0].power = 100;
+ device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
+
++ /*
++ * Use power resources only if the D0 list of them is populated, because
++ * some platforms may provide _PR3 only to indicate D3cold support and
++ * in those cases the power resources list returned by it may be bogus.
++ */
++ if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
++ device->power.flags.power_resources = 1;
++ /*
++ * D3cold is supported if the D3hot list of power resources is
++ * not empty.
++ */
++ if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
++ device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
++ }
++
+ if (acpi_bus_init_power(device))
+ device->flags.power_manageable = 0;
+ }
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index c60d2c6d31d6..3a89909b50a6 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -993,8 +993,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
+
+ error = kobject_init_and_add(&hotplug->kobj,
+ &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
+- if (error)
++ if (error) {
++ kobject_put(&hotplug->kobj);
+ goto err_out;
++ }
+
+ kobject_uevent(&hotplug->kobj, KOBJ_ADD);
+ return;
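[This hunk, like the cppc_acpi.c hunk earlier and the efivars.c hunk later in this patch, fixes the same leak pattern: kobject_init_and_add() takes a reference even when it fails, so the error path must drop it with kobject_put() (which frees through the ktype's release method) rather than bailing out directly. The shape of the fix, as a fragment with placeholder names:

    error = kobject_init_and_add(&obj->kobj, &obj_ktype, parent, "%s", name);
    if (error) {
            kobject_put(&obj->kobj);    /* drops the ref taken by init */
            return error;
    }
]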
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 68277687c160..3c4ecb824247 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -643,9 +643,17 @@ static void device_links_missing_supplier(struct device *dev)
+ {
+ struct device_link *link;
+
+- list_for_each_entry(link, &dev->links.suppliers, c_node)
+- if (link->status == DL_STATE_CONSUMER_PROBE)
++ list_for_each_entry(link, &dev->links.suppliers, c_node) {
++ if (link->status != DL_STATE_CONSUMER_PROBE)
++ continue;
++
++ if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
++ } else {
++ WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
++ WRITE_ONCE(link->status, DL_STATE_DORMANT);
++ }
++ }
+ }
+
+ /**
+@@ -684,11 +692,11 @@ int device_links_check_suppliers(struct device *dev)
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+- if (!(link->flags & DL_FLAG_MANAGED) ||
+- link->flags & DL_FLAG_SYNC_STATE_ONLY)
++ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+- if (link->status != DL_STATE_AVAILABLE) {
++ if (link->status != DL_STATE_AVAILABLE &&
++ !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ device_links_missing_supplier(dev);
+ ret = -EPROBE_DEFER;
+ break;
+@@ -949,11 +957,21 @@ static void __device_links_no_driver(struct device *dev)
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
++ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ device_link_drop_managed(link);
+- else if (link->status == DL_STATE_CONSUMER_PROBE ||
+- link->status == DL_STATE_ACTIVE)
++ continue;
++ }
++
++ if (link->status != DL_STATE_CONSUMER_PROBE &&
++ link->status != DL_STATE_ACTIVE)
++ continue;
++
++ if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
++ } else {
++ WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
++ WRITE_ONCE(link->status, DL_STATE_DORMANT);
++ }
+ }
+
+ dev->links.status = DL_DEV_NO_DRIVER;
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 8ef65c085640..c31ea3d18c8b 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -2902,17 +2902,17 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
+ (unsigned long long) current_req->cmd_flags))
+ return BLK_STS_IOERR;
+
+- spin_lock_irq(&floppy_lock);
+- list_add_tail(&bd->rq->queuelist, &floppy_reqs);
+- spin_unlock_irq(&floppy_lock);
+-
+ if (test_and_set_bit(0, &fdc_busy)) {
+ /* fdc busy, this new request will be treated when the
+ current one is done */
+ is_alive(__func__, "old request running");
+- return BLK_STS_OK;
++ return BLK_STS_RESOURCE;
+ }
+
++ spin_lock_irq(&floppy_lock);
++ list_add_tail(&bd->rq->queuelist, &floppy_reqs);
++ spin_unlock_irq(&floppy_lock);
++
+ command_status = FD_COMMAND_NONE;
+ __reschedule_timeout(MAXTIMEOUT, "fd_request");
+ set_fdc(0);
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index 66a62d17a3f5..3d42fc4290bc 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr,
+ unsigned int flags)
+ {
+ intel_private.driver->write_entry(addr, pg, flags);
++ readl(intel_private.gtt + pg);
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
+ }
+@@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
+ j++;
+ }
+ }
+- wmb();
++ readl(intel_private.gtt + j - 1);
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
+ }
+@@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void)
+
+ static void i9xx_chipset_flush(void)
+ {
++ wmb();
+ if (intel_private.i9xx_flush_page)
+ writel(1, intel_private.i9xx_flush_page);
+ }
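[The intel-gtt change swaps a CPU-side wmb() for a read-back of the just-written GTT entry. A wmb() only orders the CPU's own stores; a read from the same aperture additionally forces earlier posted writes to reach the device before the chipset flush runs. A simplified sketch of the pattern (the driver's write_entry() hook ultimately performs an MMIO store like this):

    writel(pte, intel_private.gtt + pg);        /* posted write */
    readl(intel_private.gtt + pg);              /* read-back drains posting */
]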
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index f22b7aed6e64..006c58e32a5c 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -114,7 +114,11 @@ static int clk_pm_runtime_get(struct clk_core *core)
+ return 0;
+
+ ret = pm_runtime_get_sync(core->dev);
+- return ret < 0 ? ret : 0;
++ if (ret < 0) {
++ pm_runtime_put_noidle(core->dev);
++ return ret;
++ }
++ return 0;
+ }
+
+ static void clk_pm_runtime_put(struct clk_core *core)
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 808874bccf4a..347ea1ed260c 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2515,26 +2515,27 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+ static int cpufreq_boost_set_sw(int state)
+ {
+ struct cpufreq_policy *policy;
+- int ret = -EINVAL;
+
+ for_each_active_policy(policy) {
++ int ret;
++
+ if (!policy->freq_table)
+- continue;
++ return -ENXIO;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy,
+ policy->freq_table);
+ if (ret) {
+ pr_err("%s: Policy frequency update failed\n",
+ __func__);
+- break;
++ return ret;
+ }
+
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+- break;
++ return ret;
+ }
+
+- return ret;
++ return 0;
+ }
+
+ int cpufreq_boost_trigger_state(int state)
+diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
+index c4632d84c9a1..637be2f903d3 100644
+--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
+@@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
+
+ struct nitrox_device *nitrox_get_first_device(void)
+ {
+- struct nitrox_device *ndev = NULL;
++ struct nitrox_device *ndev;
+
+ mutex_lock(&devlist_lock);
+ list_for_each_entry(ndev, &ndevlist, list) {
+@@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void)
+ break;
+ }
+ mutex_unlock(&devlist_lock);
+- if (!ndev)
++ if (&ndev->list == &ndevlist)
+ return NULL;
+
+ refcount_inc(&ndev->refcnt);
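[The nitrox change leans on a list_for_each_entry() property: when the loop runs to completion without a break, the cursor ends up as the container computed from the list head itself, so it cannot be tested with a NULL check. Comparing the embedded list_head against the head detects "not found" instead; a fragment of the idiom (pos, head and match() are placeholders):

    list_for_each_entry(pos, &head, list) {
            if (match(pos))
                    break;          /* pos is a real element */
    }
    if (&pos->list == &head)
            return NULL;            /* loop completed: nothing matched */
]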
+diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
+index fd045e64972a..cb8a6ea2a4bc 100644
+--- a/drivers/crypto/virtio/virtio_crypto_algs.c
++++ b/drivers/crypto/virtio/virtio_crypto_algs.c
+@@ -350,13 +350,18 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+ int err;
+ unsigned long flags;
+ struct scatterlist outhdr, iv_sg, status_sg, **sgs;
+- int i;
+ u64 dst_len;
+ unsigned int num_out = 0, num_in = 0;
+ int sg_total;
+ uint8_t *iv;
++ struct scatterlist *sg;
+
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
++ if (src_nents < 0) {
++ pr_err("Invalid number of src SG.\n");
++ return src_nents;
++ }
++
+ dst_nents = sg_nents(req->dst);
+
+ pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
+@@ -402,6 +407,7 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+ goto free;
+ }
+
++ dst_len = min_t(unsigned int, req->cryptlen, dst_len);
+ pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
+ req->cryptlen, dst_len);
+
+@@ -442,12 +448,12 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+ vc_sym_req->iv = iv;
+
+ /* Source data */
+- for (i = 0; i < src_nents; i++)
+- sgs[num_out++] = &req->src[i];
++ for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
++ sgs[num_out++] = sg;
+
+ /* Destination data */
+- for (i = 0; i < dst_nents; i++)
+- sgs[num_out + num_in++] = &req->dst[i];
++ for (sg = req->dst; sg; sg = sg_next(sg))
++ sgs[num_out + num_in++] = sg;
+
+ /* Status */
+ sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
+@@ -577,10 +583,11 @@ static void virtio_crypto_skcipher_finalize_req(
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+- crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
+- req, err);
+ kzfree(vc_sym_req->iv);
+ virtcrypto_clear_request(&vc_sym_req->base);
++
++ crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
++ req, err);
+ }
+
+ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
+index 059eccf0582b..50995f4c57a2 100644
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -161,7 +161,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
+ mtr, mcddrtcfg, imc->mc, i, j);
+
+ if (IS_DIMM_PRESENT(mtr))
+- ndimms += skx_get_dimm_info(mtr, 0, dimm,
++ ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
+ imc, i, j);
+ else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
+ ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
+diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
+index 83545b4facb7..7469650877aa 100644
+--- a/drivers/edac/skx_base.c
++++ b/drivers/edac/skx_base.c
+@@ -163,27 +163,23 @@ static const struct x86_cpu_id skx_cpuids[] = {
+ };
+ MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
+
+-#define SKX_GET_MTMTR(dev, reg) \
+- pci_read_config_dword((dev), 0x87c, &(reg))
+-
+-static bool skx_check_ecc(struct pci_dev *pdev)
++static bool skx_check_ecc(u32 mcmtr)
+ {
+- u32 mtmtr;
+-
+- SKX_GET_MTMTR(pdev, mtmtr);
+-
+- return !!GET_BITFIELD(mtmtr, 2, 2);
++ return !!GET_BITFIELD(mcmtr, 2, 2);
+ }
+
+ static int skx_get_dimm_config(struct mem_ctl_info *mci)
+ {
+ struct skx_pvt *pvt = mci->pvt_info;
++ u32 mtr, mcmtr, amap, mcddrtcfg;
+ struct skx_imc *imc = pvt->imc;
+- u32 mtr, amap, mcddrtcfg;
+ struct dimm_info *dimm;
+ int i, j;
+ int ndimms;
+
++ /* Only the mcmtr on the first channel is effective */
++ pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
++
+ for (i = 0; i < SKX_NUM_CHANNELS; i++) {
+ ndimms = 0;
+ pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
+@@ -193,14 +189,14 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci)
+ pci_read_config_dword(imc->chan[i].cdev,
+ 0x80 + 4 * j, &mtr);
+ if (IS_DIMM_PRESENT(mtr)) {
+- ndimms += skx_get_dimm_info(mtr, amap, dimm, imc, i, j);
++ ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j);
+ } else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) {
+ ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
+ EDAC_MOD_STR);
+ nvdimm_count++;
+ }
+ }
+- if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
++ if (ndimms && !skx_check_ecc(mcmtr)) {
+ skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
+ return -ENODEV;
+ }
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index 99bbaf629b8d..412c651bef26 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -304,7 +304,7 @@ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
+ #define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows")
+ #define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols")
+
+-int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
++int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
+ struct skx_imc *imc, int chan, int dimmno)
+ {
+ int banks = 16, ranks, rows, cols, npages;
+@@ -324,8 +324,8 @@ int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+ imc->mc, chan, dimmno, size, npages,
+ banks, 1 << ranks, rows, cols);
+
+- imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
+- imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
++ imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mcmtr, 0, 0);
++ imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mcmtr, 9, 9);
+ imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
+ imc->chan[chan].dimms[dimmno].rowbits = rows;
+ imc->chan[chan].dimms[dimmno].colbits = cols;
+diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
+index 60d1ea669afd..319f9b2f1f89 100644
+--- a/drivers/edac/skx_common.h
++++ b/drivers/edac/skx_common.h
+@@ -128,7 +128,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type,
+
+ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm);
+
+-int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
++int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
+ struct skx_imc *imc, int chan, int dimmno);
+
+ int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
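The EDAC changes above read MCMTR once from channel 0 and thread it through to skx_get_dimm_info(), because close_pg (bit 0), the ECC enable (bit 2) and bank_xor_enable (bit 9) live in that per-controller register rather than in the per-DIMM MTR. A minimal userspace sketch of the bitfield decode; the macro mirrors the kernel GET_BITFIELD semantics and the register value is invented:

	#include <stdio.h>
	#include <stdint.h>

	#define GET_BITFIELD(v, lo, hi) \
		(((v) >> (lo)) & ((1ULL << ((hi) - (lo) + 1)) - 1))

	int main(void)
	{
		uint32_t mcmtr = 0x00000205;	/* hypothetical MCMTR value */

		printf("close_pg        = %llu\n", GET_BITFIELD(mcmtr, 0, 0));
		printf("ecc_enabled     = %llu\n", GET_BITFIELD(mcmtr, 2, 2));
		printf("bank_xor_enable = %llu\n", GET_BITFIELD(mcmtr, 9, 9));
		return 0;
	}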
+diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
+index aff3dfb4d7ba..d187585db97a 100644
+--- a/drivers/firmware/efi/efivars.c
++++ b/drivers/firmware/efi/efivars.c
+@@ -522,8 +522,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
+ ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
+ NULL, "%s", short_name);
+ kfree(short_name);
+- if (ret)
++ if (ret) {
++ kobject_put(&new_var->kobj);
+ return ret;
++ }
+
+ kobject_uevent(&new_var->kobj, KOBJ_ADD);
+ if (efivar_entry_add(new_var, &efivar_sysfs_list)) {
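The efivars fix follows the kobject rule that a failed kobject_init_and_add() must be answered with kobject_put(), so the reference taken at init time is dropped and the release handler runs. A plain-C analogue of that refcounting contract (an illustration only, not the kobject API):

	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int refs;
		void (*release)(struct obj *);
	};

	static void obj_put(struct obj *o)
	{
		if (--o->refs == 0)
			o->release(o);
	}

	static void release(struct obj *o)
	{
		printf("release()\n");
		free(o);
	}

	static int init_and_add(struct obj *o, int fail)
	{
		o->refs = 1;		/* init takes the first reference */
		o->release = release;
		return fail ? -1 : 0;	/* the "add" step may still fail */
	}

	int main(void)
	{
		struct obj *o = malloc(sizeof(*o));
		int ret = init_and_add(o, 1);

		if (ret) {
			obj_put(o);	/* the fix: put, don't just return */
			return 1;
		}
		obj_put(o);
		return 0;
	}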
+diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
+index f71eaa5bf52d..b3da2e193ad2 100644
+--- a/drivers/firmware/imx/imx-scu.c
++++ b/drivers/firmware/imx/imx-scu.c
+@@ -38,6 +38,7 @@ struct imx_sc_ipc {
+ struct device *dev;
+ struct mutex lock;
+ struct completion done;
++ bool fast_ipc;
+
+ /* temporarily store the SCU msg */
+ u32 *msg;
+@@ -115,6 +116,7 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
+ struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc;
+ struct imx_sc_rpc_msg *hdr;
+ u32 *data = msg;
++ int i;
+
+ if (!sc_ipc->msg) {
+ dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n",
+@@ -122,6 +124,19 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
+ return;
+ }
+
++ if (sc_ipc->fast_ipc) {
++ hdr = msg;
++ sc_ipc->rx_size = hdr->size;
++ sc_ipc->msg[0] = *data++;
++
++ for (i = 1; i < sc_ipc->rx_size; i++)
++ sc_ipc->msg[i] = *data++;
++
++ complete(&sc_ipc->done);
++
++ return;
++ }
++
+ if (sc_chan->idx == 0) {
+ hdr = msg;
+ sc_ipc->rx_size = hdr->size;
+@@ -143,20 +158,22 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
+
+ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
+ {
+- struct imx_sc_rpc_msg *hdr = msg;
++ struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg;
+ struct imx_sc_chan *sc_chan;
+ u32 *data = msg;
+ int ret;
++ int size;
+ int i;
+
+ /* Check size */
+- if (hdr->size > IMX_SC_RPC_MAX_MSG)
++ if (hdr.size > IMX_SC_RPC_MAX_MSG)
+ return -EINVAL;
+
+- dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr->svc,
+- hdr->func, hdr->size);
++ dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc,
++ hdr.func, hdr.size);
+
+- for (i = 0; i < hdr->size; i++) {
++ size = sc_ipc->fast_ipc ? 1 : hdr.size;
++ for (i = 0; i < size; i++) {
+ sc_chan = &sc_ipc->chans[i % 4];
+
+ /*
+@@ -168,8 +185,10 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
+ * Wait for tx_done before every send to ensure that no
+ * queueing happens at the mailbox channel level.
+ */
+- wait_for_completion(&sc_chan->tx_done);
+- reinit_completion(&sc_chan->tx_done);
++ if (!sc_ipc->fast_ipc) {
++ wait_for_completion(&sc_chan->tx_done);
++ reinit_completion(&sc_chan->tx_done);
++ }
+
+ ret = mbox_send_message(sc_chan->ch, &data[i]);
+ if (ret < 0)
+@@ -246,6 +265,8 @@ static int imx_scu_probe(struct platform_device *pdev)
+ struct imx_sc_chan *sc_chan;
+ struct mbox_client *cl;
+ char *chan_name;
++ struct of_phandle_args args;
++ int num_channel;
+ int ret;
+ int i;
+
+@@ -253,11 +274,20 @@ static int imx_scu_probe(struct platform_device *pdev)
+ if (!sc_ipc)
+ return -ENOMEM;
+
+- for (i = 0; i < SCU_MU_CHAN_NUM; i++) {
+- if (i < 4)
++ ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
++ "#mbox-cells", 0, &args);
++ if (ret)
++ return ret;
++
++ sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
++
++ num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
++ for (i = 0; i < num_channel; i++) {
++ if (i < num_channel / 2)
+ chan_name = kasprintf(GFP_KERNEL, "tx%d", i);
+ else
+- chan_name = kasprintf(GFP_KERNEL, "rx%d", i - 4);
++ chan_name = kasprintf(GFP_KERNEL, "rx%d",
++ i - num_channel / 2);
+
+ if (!chan_name)
+ return -ENOMEM;
+@@ -269,13 +299,15 @@ static int imx_scu_probe(struct platform_device *pdev)
+ cl->knows_txdone = true;
+ cl->rx_callback = imx_scu_rx_callback;
+
+- /* Initial tx_done completion as "done" */
+- cl->tx_done = imx_scu_tx_done;
+- init_completion(&sc_chan->tx_done);
+- complete(&sc_chan->tx_done);
++ if (!sc_ipc->fast_ipc) {
++ /* Initial tx_done completion as "done" */
++ cl->tx_done = imx_scu_tx_done;
++ init_completion(&sc_chan->tx_done);
++ complete(&sc_chan->tx_done);
++ }
+
+ sc_chan->sc_ipc = sc_ipc;
+- sc_chan->idx = i % 4;
++ sc_chan->idx = i % (num_channel / 2);
+ sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ if (IS_ERR(sc_chan->ch)) {
+ ret = PTR_ERR(sc_chan->ch);
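The imx-scu change adds a second wire format: legacy SCU firmware receives one 32-bit word per mailbox channel, round-robin across four TX channels, while "fast IPC" firmware (fsl,imx8-mu-scu) takes the whole message through a single channel, so only one send is issued and no per-channel tx_done tracking is needed. A toy illustration of the two send loops (message contents invented):

	#include <stdio.h>

	int main(void)
	{
		unsigned int msg[5] = { 0x05010203, 1, 2, 3, 4 };	/* word 0 is the header, hdr.size = 5 */
		int fast_ipc = 0;			/* flip to 1 for the fsl,imx8-mu-scu path */
		int size = fast_ipc ? 1 : 5;		/* fast IPC: a single send moves the message */
		int i;

		for (i = 0; i < size; i++)
			printf("send word %d (0x%08x) on tx%d\n", i, msg[i], i % 4);
		return 0;
	}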
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 32a07665863f..fff95e6b46c7 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1362,10 +1362,24 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
+ return (result == DC_OK);
+ }
+
+-bool dc_is_hw_initialized(struct dc *dc)
++static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
+ {
+- struct dc_bios *dcb = dc->ctx->dc_bios;
+- return dcb->funcs->is_accelerated_mode(dcb);
++ int i;
++ struct pipe_ctx *pipe;
++
++ for (i = 0; i < MAX_PIPES; i++) {
++ pipe = &context->res_ctx.pipe_ctx[i];
++
++ if (!pipe->plane_state)
++ continue;
++
++ /* Must set to false to start with, due to OR in update function */
++ pipe->plane_state->status.is_flip_pending = false;
++ dc->hwss.update_pending_status(pipe);
++ if (pipe->plane_state->status.is_flip_pending)
++ return true;
++ }
++ return false;
+ }
+
+ bool dc_post_update_surfaces_to_stream(struct dc *dc)
+@@ -1378,6 +1392,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
+
+ post_surface_trace(dc);
+
++ if (is_flip_pending_in_pipes(dc, context))
++ return true;
++
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+ context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 8ff25b5dd2f6..e8d126890d7e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -1075,7 +1075,6 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
+ unsigned int dc_get_target_backlight_pwm(struct dc *dc);
+
+ bool dc_is_dmcu_initialized(struct dc *dc);
+-bool dc_is_hw_initialized(struct dc *dc);
+
+ enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
+ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+index 580319b7bf1a..0bf3cb239bf0 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+@@ -600,6 +600,14 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+ GFP_KERNEL |
+ __GFP_NORETRY |
+ __GFP_NOWARN);
++ /*
++ * Using __get_user_pages_fast() with a read-only
++ * access is questionable. A read-only page may be
++ * COW-broken, and then this might end up giving
++ * the wrong side of the COW..
++ *
++ * We may or may not care.
++ */
+ if (pvec) /* defer to worker if malloc fails */
+ pinned = __get_user_pages_fast(obj->userptr.ptr,
+ num_pages,
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
+index 7d52e24564db..7fe2edd4d009 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.h
++++ b/drivers/gpu/drm/vkms/vkms_drv.h
+@@ -121,11 +121,6 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+ enum drm_plane_type type, int index);
+
+ /* Gem stuff */
+-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+- struct drm_file *file,
+- u32 *handle,
+- u64 size);
+-
+ vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
+
+ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
+diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
+index 2e01186fb943..c541fec57566 100644
+--- a/drivers/gpu/drm/vkms/vkms_gem.c
++++ b/drivers/gpu/drm/vkms/vkms_gem.c
+@@ -97,10 +97,10 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
+ return ret;
+ }
+
+-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+- struct drm_file *file,
+- u32 *handle,
+- u64 size)
++static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
++ struct drm_file *file,
++ u32 *handle,
++ u64 size)
+ {
+ struct vkms_gem_object *obj;
+ int ret;
+@@ -113,7 +113,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ return ERR_CAST(obj);
+
+ ret = drm_gem_handle_create(file, &obj->gem, handle);
+- drm_gem_object_put_unlocked(&obj->gem);
+ if (ret)
+ return ERR_PTR(ret);
+
+@@ -142,6 +141,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
+ args->size = gem_obj->size;
+ args->pitch = pitch;
+
++ drm_gem_object_put_unlocked(gem_obj);
++
+ DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
+
+ return 0;
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 1bab8de14757..b94572e9c24f 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -296,6 +296,8 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
+ spin_lock_irq(&ev_queue->lock);
+ if (!list_empty(&ev_queue->event_list))
+ pollflags = EPOLLIN | EPOLLRDNORM;
++ else if (ev_queue->is_closed)
++ pollflags = EPOLLERR;
+ spin_unlock_irq(&ev_queue->lock);
+
+ return pollflags;
+diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
+index c8f87df93a50..9c6386b2af33 100644
+--- a/drivers/input/misc/axp20x-pek.c
++++ b/drivers/input/misc/axp20x-pek.c
+@@ -205,8 +205,11 @@ ATTRIBUTE_GROUPS(axp20x);
+
+ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
+ {
+- struct input_dev *idev = pwr;
+- struct axp20x_pek *axp20x_pek = input_get_drvdata(idev);
++ struct axp20x_pek *axp20x_pek = pwr;
++ struct input_dev *idev = axp20x_pek->input;
++
++ if (!idev)
++ return IRQ_HANDLED;
+
+ /*
+ * The power-button is connected to ground so a falling edge (dbf)
+@@ -225,22 +228,9 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
+ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
+ struct platform_device *pdev)
+ {
+- struct axp20x_dev *axp20x = axp20x_pek->axp20x;
+ struct input_dev *idev;
+ int error;
+
+- axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
+- if (axp20x_pek->irq_dbr < 0)
+- return axp20x_pek->irq_dbr;
+- axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc,
+- axp20x_pek->irq_dbr);
+-
+- axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
+- if (axp20x_pek->irq_dbf < 0)
+- return axp20x_pek->irq_dbf;
+- axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc,
+- axp20x_pek->irq_dbf);
+-
+ axp20x_pek->input = devm_input_allocate_device(&pdev->dev);
+ if (!axp20x_pek->input)
+ return -ENOMEM;
+@@ -255,24 +245,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
+
+ input_set_drvdata(idev, axp20x_pek);
+
+- error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
+- axp20x_pek_irq, 0,
+- "axp20x-pek-dbr", idev);
+- if (error < 0) {
+- dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
+- axp20x_pek->irq_dbr, error);
+- return error;
+- }
+-
+- error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
+- axp20x_pek_irq, 0,
+- "axp20x-pek-dbf", idev);
+- if (error < 0) {
+- dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
+- axp20x_pek->irq_dbf, error);
+- return error;
+- }
+-
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(&pdev->dev, "Can't register input device: %d\n",
+@@ -280,8 +252,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
+ return error;
+ }
+
+- device_init_wakeup(&pdev->dev, true);
+-
+ return 0;
+ }
+
+@@ -339,6 +309,18 @@ static int axp20x_pek_probe(struct platform_device *pdev)
+
+ axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
+
++ axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
++ if (axp20x_pek->irq_dbr < 0)
++ return axp20x_pek->irq_dbr;
++ axp20x_pek->irq_dbr = regmap_irq_get_virq(
++ axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr);
++
++ axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
++ if (axp20x_pek->irq_dbf < 0)
++ return axp20x_pek->irq_dbf;
++ axp20x_pek->irq_dbf = regmap_irq_get_virq(
++ axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf);
++
+ if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
+ error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
+ if (error)
+@@ -347,6 +329,26 @@ static int axp20x_pek_probe(struct platform_device *pdev)
+
+ axp20x_pek->info = (struct axp20x_info *)match->driver_data;
+
++ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
++ axp20x_pek_irq, 0,
++ "axp20x-pek-dbr", axp20x_pek);
++ if (error < 0) {
++ dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
++ axp20x_pek->irq_dbr, error);
++ return error;
++ }
++
++ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
++ axp20x_pek_irq, 0,
++ "axp20x-pek-dbf", axp20x_pek);
++ if (error < 0) {
++ dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
++ axp20x_pek->irq_dbf, error);
++ return error;
++ }
++
++ device_init_wakeup(&pdev->dev, true);
++
+ platform_set_drvdata(pdev, axp20x_pek);
+
+ return 0;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 4d2036209b45..758dae8d6500 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN005b", /* P50 */
+ "LEN005e", /* T560 */
+ "LEN006c", /* T470s */
++ "LEN007a", /* T470s */
+ "LEN0071", /* T480 */
+ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+ "LEN0073", /* X1 Carbon G5 (Elantech) */
+diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
+index 69c6d559eeb0..2ef1adaed9af 100644
+--- a/drivers/input/touchscreen/mms114.c
++++ b/drivers/input/touchscreen/mms114.c
+@@ -91,15 +91,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg,
+ if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL)
+ BUG();
+
+- /* Write register: use repeated start */
++ /* Write register */
+ xfer[0].addr = client->addr;
+- xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART;
++ xfer[0].flags = client->flags & I2C_M_TEN;
+ xfer[0].len = 1;
+ xfer[0].buf = &buf;
+
+ /* Read data */
+ xfer[1].addr = client->addr;
+- xfer[1].flags = I2C_M_RD;
++ xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD;
+ xfer[1].len = len;
+ xfer[1].buf = val;
+
+@@ -428,10 +428,8 @@ static int mms114_probe(struct i2c_client *client,
+ const void *match_data;
+ int error;
+
+- if (!i2c_check_functionality(client->adapter,
+- I2C_FUNC_PROTOCOL_MANGLING)) {
+- dev_err(&client->dev,
+- "Need i2c bus that supports protocol mangling\n");
++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
++ dev_err(&client->dev, "Not supported I2C adapter\n");
+ return -ENODEV;
+ }
+
+diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+index d0c9dffe49e5..a26d43aa7595 100644
+--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
++++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+@@ -726,9 +726,8 @@ EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
+ int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
+ {
+ if (!dev->dma_parms) {
+- dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+- if (!dev->dma_parms)
+- return -ENOMEM;
++ dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
++ return -ENODEV;
+ }
+ if (dma_get_max_seg_size(dev) < size)
+ return dma_set_max_seg_size(dev, size);
+@@ -737,21 +736,6 @@ int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
+ }
+ EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
+
+-/*
+- * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
+- * @dev: device for configuring DMA parameters
+- *
+- * This function releases resources allocated to configure DMA parameters
+- * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
+- * device drivers on driver remove.
+- */
+-void vb2_dma_contig_clear_max_seg_size(struct device *dev)
+-{
+- kfree(dev->dma_parms);
+- dev->dma_parms = NULL;
+-}
+-EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
+-
+ MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
+ MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index ebb387aa5158..20eed28ea60d 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -584,7 +584,7 @@ try_again:
+ */
+ err = mmc_send_io_op_cond(host, ocr, &rocr);
+ if (err)
+- goto err;
++ return err;
+
+ /*
+ * For SPI, enable CRC as appropriate.
+@@ -592,17 +592,15 @@ try_again:
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+- goto err;
++ return err;
+ }
+
+ /*
+ * Allocate card structure.
+ */
+ card = mmc_alloc_card(host, NULL);
+- if (IS_ERR(card)) {
+- err = PTR_ERR(card);
+- goto err;
+- }
++ if (IS_ERR(card))
++ return PTR_ERR(card);
+
+ if ((rocr & R4_MEMORY_PRESENT) &&
+ mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
+@@ -610,19 +608,15 @@ try_again:
+
+ if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
+ memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
+- mmc_remove_card(card);
+- pr_debug("%s: Perhaps the card was replaced\n",
+- mmc_hostname(host));
+- return -ENOENT;
++ err = -ENOENT;
++ goto mismatch;
+ }
+ } else {
+ card->type = MMC_TYPE_SDIO;
+
+ if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
+- mmc_remove_card(card);
+- pr_debug("%s: Perhaps the card was replaced\n",
+- mmc_hostname(host));
+- return -ENOENT;
++ err = -ENOENT;
++ goto mismatch;
+ }
+ }
+
+@@ -677,7 +671,7 @@ try_again:
+ if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
+ err = mmc_sd_get_csd(host, card);
+ if (err)
+- return err;
++ goto remove;
+
+ mmc_decode_cid(card);
+ }
+@@ -704,7 +698,12 @@ try_again:
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+ }
+
+- goto finish;
++ if (oldcard)
++ mmc_remove_card(card);
++ else
++ host->card = card;
++
++ return 0;
+ }
+
+ /*
+@@ -718,9 +717,8 @@ try_again:
+ /* Retry init sequence, but without R4_18V_PRESENT. */
+ retries = 0;
+ goto try_again;
+- } else {
+- goto remove;
+ }
++ return err;
+ }
+
+ /*
+@@ -731,16 +729,14 @@ try_again:
+ goto remove;
+
+ if (oldcard) {
+- int same = (card->cis.vendor == oldcard->cis.vendor &&
+- card->cis.device == oldcard->cis.device);
+- mmc_remove_card(card);
+- if (!same) {
+- pr_debug("%s: Perhaps the card was replaced\n",
+- mmc_hostname(host));
+- return -ENOENT;
++ if (card->cis.vendor == oldcard->cis.vendor &&
++ card->cis.device == oldcard->cis.device) {
++ mmc_remove_card(card);
++ card = oldcard;
++ } else {
++ err = -ENOENT;
++ goto mismatch;
+ }
+-
+- card = oldcard;
+ }
+ card->ocr = ocr_card;
+ mmc_fixup_device(card, sdio_fixup_methods);
+@@ -801,16 +797,15 @@ try_again:
+ err = -EINVAL;
+ goto remove;
+ }
+-finish:
+- if (!oldcard)
+- host->card = card;
++
++ host->card = card;
+ return 0;
+
++mismatch:
++ pr_debug("%s: Perhaps the card was replaced\n", mmc_hostname(host));
+ remove:
+- if (!oldcard)
++ if (oldcard != card)
+ mmc_remove_card(card);
+-
+-err:
+ return err;
+ }
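The SDIO rework above converges all failure paths on two labels, mismatch and remove, and frees the newly allocated card only when it is not the oldcard being re-probed. The same goto-unwind shape in a self-contained sketch (the struct and error values are stand-ins):

	#include <stdio.h>
	#include <stdlib.h>

	struct card { int id; };

	static int reinit(struct card *oldcard, struct card **out)
	{
		struct card *card = malloc(sizeof(*card));
		int err;

		if (!card)
			return -1;
		card->id = 42;		/* pretend this was read from hardware */

		if (oldcard && oldcard->id != card->id) {
			err = -2;	/* stands in for -ENOENT */
			goto mismatch;
		}
		if (oldcard) {		/* same card: keep the old struct */
			free(card);
			card = oldcard;
		}

		*out = card;
		return 0;

	mismatch:
		printf("perhaps the card was replaced\n");
		if (oldcard != card)
			free(card);
		return err;
	}

	int main(void)
	{
		struct card old = { 7 }, *cur = NULL;

		printf("reinit -> %d\n", reinit(&old, &cur));
		printf("fresh  -> %d\n", reinit(NULL, &cur));
		free(cur);
		return 0;
	}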
+
+diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
+index 01f222758910..966303291b8f 100644
+--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
++++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
+@@ -162,6 +162,9 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
+ static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
+ {
+ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
++
++ if (!data->host_cookie)
++ sdmmc_idma_unprep_data(host, data, 0);
+ }
+
+ static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index b68dcd1b0d50..ab358d8e82fa 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -1117,6 +1117,12 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ /* Clock-Data-Recovery used to dynamically adjust RX sampling point */
+ msm_host->use_cdr = true;
+
++ /*
++ * Clear tuning_done flag before tuning to ensure proper
++ * HS400 settings.
++ */
++ msm_host->tuning_done = 0;
++
+ /*
+ * For HS400 tuning in HS200 timing requires:
+ * - select MCLK/2 in VENDOR_SPEC
+diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
+index fcef5c0d0908..b6cb205d2d95 100644
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -136,9 +136,12 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
+ || mmc_gpio_get_cd(host->mmc) >= 0)
+ sdhci_at91_set_force_card_detect(host);
+
+- if (priv->cal_always_on && (mask & SDHCI_RESET_ALL))
+- sdhci_writel(host, SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
++ if (priv->cal_always_on && (mask & SDHCI_RESET_ALL)) {
++ u32 calcr = sdhci_readl(host, SDMMC_CALCR);
++
++ sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
+ SDMMC_CALCR);
++ }
+ }
+
+ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
+diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
+index 1e424bcdbd5f..735941f81b95 100644
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -1286,12 +1286,14 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
+ cancel_work_sync(&host->done);
+ cancel_delayed_work_sync(&host->delayed_reset_work);
+ tmio_mmc_release_dma(host);
++ tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
+
+- pm_runtime_dont_use_autosuspend(&pdev->dev);
+ if (host->native_hotplug)
+ pm_runtime_put_noidle(&pdev->dev);
+- pm_runtime_put_sync(&pdev->dev);
++
+ pm_runtime_disable(&pdev->dev);
++ pm_runtime_dont_use_autosuspend(&pdev->dev);
++ pm_runtime_put_noidle(&pdev->dev);
+ }
+ EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
+
+diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
+index a1683c49cb90..f82baf99fd69 100644
+--- a/drivers/mmc/host/uniphier-sd.c
++++ b/drivers/mmc/host/uniphier-sd.c
+@@ -610,11 +610,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
+ }
+ }
+
+- ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
+- dev_name(dev), host);
+- if (ret)
+- goto free_host;
+-
+ if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
+ host->dma_ops = &uniphier_sd_internal_dma_ops;
+ else
+@@ -642,8 +637,15 @@ static int uniphier_sd_probe(struct platform_device *pdev)
+ if (ret)
+ goto free_host;
+
++ ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
++ dev_name(dev), host);
++ if (ret)
++ goto remove_host;
++
+ return 0;
+
++remove_host:
++ tmio_mmc_host_remove(host);
+ free_host:
+ tmio_mmc_host_free(host);
+
+diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
+index 9f4205b4439b..d2b5ab403e06 100644
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -1079,8 +1079,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
+ if (id != QCA8K_ID_QCA8337)
+ return -ENODEV;
+
+- priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds),
+- QCA8K_NUM_PORTS);
++ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index cada6e7e30f4..5f6892aa6588 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -358,7 +358,7 @@ error_unmap_dma:
+ ena_unmap_tx_buff(xdp_ring, tx_info);
+ tx_info->xdpf = NULL;
+ error_drop_packet:
+-
++ __free_page(tx_info->xdp_rx_page);
+ return NETDEV_TX_OK;
+ }
+
+@@ -1642,11 +1642,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ &next_to_clean);
+
+ if (unlikely(!skb)) {
+- if (xdp_verdict == XDP_TX) {
++ if (xdp_verdict == XDP_TX)
+ ena_free_rx_page(rx_ring,
+ &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
+- res_budget--;
+- }
+ for (i = 0; i < ena_rx_ctx.descs; i++) {
+ rx_ring->free_ids[next_to_clean] =
+ rx_ring->ena_bufs[i].req_id;
+@@ -1654,8 +1652,10 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ ENA_RX_RING_IDX_NEXT(next_to_clean,
+ rx_ring->ring_size);
+ }
+- if (xdp_verdict == XDP_TX || xdp_verdict == XDP_DROP)
++ if (xdp_verdict != XDP_PASS) {
++ res_budget--;
+ continue;
++ }
+ break;
+ }
+
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index f42382c2ecd0..9067b413d6b7 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -2545,19 +2545,21 @@ static int macb_open(struct net_device *dev)
+
+ err = macb_phylink_connect(bp);
+ if (err)
+- goto pm_exit;
++ goto napi_exit;
+
+ netif_tx_start_all_queues(dev);
+
+ if (bp->ptp_info)
+ bp->ptp_info->ptp_init(dev);
+
+-pm_exit:
+- if (err) {
+- pm_runtime_put_sync(&bp->pdev->dev);
+- return err;
+- }
+ return 0;
++
++napi_exit:
++ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
++ napi_disable(&queue->napi);
++pm_exit:
++ pm_runtime_put_sync(&bp->pdev->dev);
++ return err;
+ }
+
+ static int macb_close(struct net_device *dev)
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 3de549c6c693..197dc5b2c090 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -4678,12 +4678,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
+ dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
+ break;
+ }
+- dev_info(dev, "Partner protocol version is %d\n",
+- crq->version_exchange_rsp.version);
+- if (be16_to_cpu(crq->version_exchange_rsp.version) <
+- ibmvnic_version)
+- ibmvnic_version =
++ ibmvnic_version =
+ be16_to_cpu(crq->version_exchange_rsp.version);
++ dev_info(dev, "Partner protocol version is %d\n",
++ ibmvnic_version);
+ send_cap_queries(adapter);
+ break;
+ case QUERY_CAPABILITY_RSP:
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 11babc79dc6c..14318dca6921 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -418,11 +418,17 @@ struct mvneta_pcpu_port {
+ u32 cause_rx_tx;
+ };
+
++enum {
++ __MVNETA_DOWN,
++};
++
+ struct mvneta_port {
+ u8 id;
+ struct mvneta_pcpu_port __percpu *ports;
+ struct mvneta_pcpu_stats __percpu *stats;
+
++ unsigned long state;
++
+ int pkt_size;
+ void __iomem *base;
+ struct mvneta_rx_queue *rxqs;
+@@ -2066,6 +2072,9 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
+ int i, drops = 0;
+ u32 ret;
+
++ if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
++ return -ENETDOWN;
++
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+@@ -3489,12 +3498,16 @@ static void mvneta_start_dev(struct mvneta_port *pp)
+
+ phylink_start(pp->phylink);
+ netif_tx_start_all_queues(pp->dev);
++
++ clear_bit(__MVNETA_DOWN, &pp->state);
+ }
+
+ static void mvneta_stop_dev(struct mvneta_port *pp)
+ {
+ unsigned int cpu;
+
++ set_bit(__MVNETA_DOWN, &pp->state);
++
+ phylink_stop(pp->phylink);
+
+ if (!pp->neta_armada3700) {
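The mvneta fix introduces a down flag that is set before phylink_stop() and cleared after start, so ndo_xdp_xmit callers arriving while the port is torn down get -ENETDOWN instead of touching dead queues. A compilable sketch of the same gate using a C11 atomic in place of the kernel's set_bit/test_bit:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_bool port_down = true;

	static int xdp_xmit(int num_frames)
	{
		/* Same guard as the new __MVNETA_DOWN test above. */
		if (atomic_load(&port_down))
			return -1;		/* stands in for -ENETDOWN */
		return num_frames;		/* pretend all frames were sent */
	}

	int main(void)
	{
		printf("while down: %d\n", xdp_xmit(4));
		atomic_store(&port_down, false);	/* start_dev() clears the bit last */
		printf("while up:   %d\n", xdp_xmit(4));
		atomic_store(&port_down, true);		/* stop_dev() sets it first */
		return 0;
	}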
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+index 184c3eaefbcb..c190eb267f3c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+@@ -256,7 +256,6 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
+ goto params_reg_err;
+ mlx5_devlink_set_params_init_values(devlink);
+ devlink_params_publish(devlink);
+- devlink_reload_enable(devlink);
+ return 0;
+
+ params_reg_err:
+@@ -266,7 +265,6 @@ params_reg_err:
+
+ void mlx5_devlink_unregister(struct devlink *devlink)
+ {
+- devlink_reload_disable(devlink);
+ devlink_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
+ devlink_unregister(devlink);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+index c28cbae42331..2c80205dc939 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+@@ -152,6 +152,10 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
+ mlx5e_close_cq(&c->xskicosq.cq);
+ mlx5e_close_xdpsq(&c->xsksq);
+ mlx5e_close_cq(&c->xsksq.cq);
++
++ memset(&c->xskrq, 0, sizeof(c->xskrq));
++ memset(&c->xsksq, 0, sizeof(c->xsksq));
++ memset(&c->xskicosq, 0, sizeof(c->xskicosq));
+ }
+
+ void mlx5e_activate_xsk(struct mlx5e_channel *c)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index 68e7ef7ca52d..ffb360fe44d3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -193,15 +193,23 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
+
+ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
+ {
++ bool err_detected = false;
++
++ /* Mark the device as fatal in order to abort FW commands */
++ if ((check_fatal_sensors(dev) || force) &&
++ dev->state == MLX5_DEVICE_STATE_UP) {
++ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
++ err_detected = true;
++ }
+ mutex_lock(&dev->intf_state_mutex);
+- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+- goto unlock;
++ if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
++ goto unlock; /* a previous error is still being handled */
+ if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) {
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ goto unlock;
+ }
+
+- if (check_fatal_sensors(dev) || force) {
++ if (check_fatal_sensors(dev) || force) { /* protected state setting */
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ mlx5_cmd_flush(dev);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 20e12e14cfa8..743491babf88 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -794,6 +794,11 @@ err_disable:
+
+ static void mlx5_pci_close(struct mlx5_core_dev *dev)
+ {
++ /* health work might still be active, and it needs pci bar in
++ * order to know the NIC state. Therefore, drain the health WQ
++ * before removing the pci bars
++ */
++ mlx5_drain_health_wq(dev);
+ iounmap(dev->iseg);
+ pci_clear_master(dev->pdev);
+ release_bar(dev->pdev);
+@@ -1366,6 +1371,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
+
+ pci_save_state(pdev);
++ devlink_reload_enable(devlink);
+ return 0;
+
+ err_load_one:
+@@ -1383,6 +1389,7 @@ static void remove_one(struct pci_dev *pdev)
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
+
++ devlink_reload_disable(devlink);
+ mlx5_crdump_disable(dev);
+ mlx5_devlink_unregister(devlink);
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+index ce0a6837daa3..05f8d5a92862 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+@@ -391,8 +391,7 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
+ static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev,
+ int trip, enum thermal_trend *trend)
+ {
+- struct mlxsw_thermal_module *tz = tzdev->devdata;
+- struct mlxsw_thermal *thermal = tz->parent;
++ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+@@ -593,6 +592,22 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
+ return 0;
+ }
+
++static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev,
++ int trip, enum thermal_trend *trend)
++{
++ struct mlxsw_thermal_module *tz = tzdev->devdata;
++ struct mlxsw_thermal *thermal = tz->parent;
++
++ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
++ return -EINVAL;
++
++ if (tzdev == thermal->tz_highest_dev)
++ return 1;
++
++ *trend = THERMAL_TREND_STABLE;
++ return 0;
++}
++
+ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+ .bind = mlxsw_thermal_module_bind,
+ .unbind = mlxsw_thermal_module_unbind,
+@@ -604,7 +619,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+ .set_trip_temp = mlxsw_thermal_module_trip_temp_set,
+ .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
+ .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
+- .get_trend = mlxsw_thermal_trend_get,
++ .get_trend = mlxsw_thermal_module_trend_get,
+ };
+
+ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
+@@ -643,7 +658,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+ .set_trip_temp = mlxsw_thermal_module_trip_temp_set,
+ .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
+ .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
+- .get_trend = mlxsw_thermal_trend_get,
++ .get_trend = mlxsw_thermal_module_trend_get,
+ };
+
+ static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
+diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
+index b16a1221d19b..fb182bec8f06 100644
+--- a/drivers/net/net_failover.c
++++ b/drivers/net/net_failover.c
+@@ -61,7 +61,8 @@ static int net_failover_open(struct net_device *dev)
+ return 0;
+
+ err_standby_open:
+- dev_close(primary_dev);
++ if (primary_dev)
++ dev_close(primary_dev);
+ err_primary_open:
+ netif_tx_disable(dev);
+ return err;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 3063f2c9fa63..d720f15cb1dc 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1908,8 +1908,11 @@ drop:
+ skb->dev = tun->dev;
+ break;
+ case IFF_TAP:
+- if (!frags)
+- skb->protocol = eth_type_trans(skb, tun->dev);
++ if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
++ err = -ENOMEM;
++ goto drop;
++ }
++ skb->protocol = eth_type_trans(skb, tun->dev);
+ break;
+ }
+
+@@ -1966,9 +1969,12 @@ drop:
+ }
+
+ if (frags) {
++ u32 headlen;
++
+ /* Exercise flow dissector code path. */
+- u32 headlen = eth_get_headlen(tun->dev, skb->data,
+- skb_headlen(skb));
++ skb_push(skb, ETH_HLEN);
++ headlen = eth_get_headlen(tun->dev, skb->data,
++ skb_headlen(skb));
+
+ if (unlikely(headlen > skb_headlen(skb))) {
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index a5b415fed11e..779e56c43d27 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1924,6 +1924,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
+ ns_olen = request->len - skb_network_offset(request) -
+ sizeof(struct ipv6hdr) - sizeof(*ns);
+ for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
++ if (!ns->opt[i + 1]) {
++ kfree_skb(reply);
++ return NULL;
++ }
+ if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
+ daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
+ break;
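The vxlan change guards the neighbor-solicitation option walk: ns->opt[i + 1] is the option length in 8-byte units, and a zero length would leave the loop index stuck forever on a malformed packet. A self-contained sketch of the same TLV walk and guard (the option buffer contents are invented):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	static int find_opt(const uint8_t *opt, size_t len, uint8_t want)
	{
		size_t i = 0;

		while (i + 1 < len) {
			if (!opt[i + 1])
				return -1;	/* malformed: would loop forever */
			if (opt[i] == want)
				return (int)i;
			i += opt[i + 1] << 3;	/* length is in 8-byte units */
		}
		return -1;
	}

	int main(void)
	{
		const uint8_t good[] = { 2, 1, 0, 0, 0, 0, 0, 0,
					 1, 1, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
		const uint8_t bad[]  = { 2, 0 };	/* zero-length option */

		printf("good: option 1 at offset %d\n", find_opt(good, sizeof(good), 1));
		printf("bad:  option 1 at offset %d\n", find_opt(bad, sizeof(bad), 1));
		return 0;
	}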
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index dd0c32379375..4ed21dad6a8e 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -612,6 +612,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ hif_dev->remain_skb = nskb;
+ spin_unlock(&hif_dev->rx_lock);
+ } else {
++ if (pool_index == MAX_PKT_NUM_IN_TRANSFER) {
++ dev_err(&hif_dev->udev->dev,
++ "ath9k_htc: over RX MAX_PKT_NUM\n");
++ goto err;
++ }
+ nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+@@ -638,9 +643,9 @@ err:
+
+ static void ath9k_hif_usb_rx_cb(struct urb *urb)
+ {
+- struct sk_buff *skb = (struct sk_buff *) urb->context;
+- struct hif_device_usb *hif_dev =
+- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
++ struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
++ struct hif_device_usb *hif_dev = rx_buf->hif_dev;
++ struct sk_buff *skb = rx_buf->skb;
+ int ret;
+
+ if (!skb)
+@@ -680,14 +685,15 @@ resubmit:
+ return;
+ free:
+ kfree_skb(skb);
++ kfree(rx_buf);
+ }
+
+ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ {
+- struct sk_buff *skb = (struct sk_buff *) urb->context;
++ struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
++ struct hif_device_usb *hif_dev = rx_buf->hif_dev;
++ struct sk_buff *skb = rx_buf->skb;
+ struct sk_buff *nskb;
+- struct hif_device_usb *hif_dev =
+- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+@@ -745,6 +751,7 @@ resubmit:
+ return;
+ free:
+ kfree_skb(skb);
++ kfree(rx_buf);
+ urb->context = NULL;
+ }
+
+@@ -790,7 +797,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
+ init_usb_anchor(&hif_dev->mgmt_submitted);
+
+ for (i = 0; i < MAX_TX_URB_NUM; i++) {
+- tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
++ tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL);
+ if (!tx_buf)
+ goto err;
+
+@@ -827,8 +834,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
+
+ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
+ {
+- struct urb *urb = NULL;
++ struct rx_buf *rx_buf = NULL;
+ struct sk_buff *skb = NULL;
++ struct urb *urb = NULL;
+ int i, ret;
+
+ init_usb_anchor(&hif_dev->rx_submitted);
+@@ -836,6 +844,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
+
+ for (i = 0; i < MAX_RX_URB_NUM; i++) {
+
++ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
++ if (!rx_buf) {
++ ret = -ENOMEM;
++ goto err_rxb;
++ }
++
+ /* Allocate URB */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL) {
+@@ -850,11 +864,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
+ goto err_skb;
+ }
+
++ rx_buf->hif_dev = hif_dev;
++ rx_buf->skb = skb;
++
+ usb_fill_bulk_urb(urb, hif_dev->udev,
+ usb_rcvbulkpipe(hif_dev->udev,
+ USB_WLAN_RX_PIPE),
+ skb->data, MAX_RX_BUF_SIZE,
+- ath9k_hif_usb_rx_cb, skb);
++ ath9k_hif_usb_rx_cb, rx_buf);
+
+ /* Anchor URB */
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+@@ -880,6 +897,8 @@ err_submit:
+ err_skb:
+ usb_free_urb(urb);
+ err_urb:
++ kfree(rx_buf);
++err_rxb:
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+ return ret;
+ }
+@@ -891,14 +910,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
+
+ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
+ {
+- struct urb *urb = NULL;
++ struct rx_buf *rx_buf = NULL;
+ struct sk_buff *skb = NULL;
++ struct urb *urb = NULL;
+ int i, ret;
+
+ init_usb_anchor(&hif_dev->reg_in_submitted);
+
+ for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
+
++ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
++ if (!rx_buf) {
++ ret = -ENOMEM;
++ goto err_rxb;
++ }
++
+ /* Allocate URB */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL) {
+@@ -913,11 +939,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
+ goto err_skb;
+ }
+
++ rx_buf->hif_dev = hif_dev;
++ rx_buf->skb = skb;
++
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev,
+ USB_REG_IN_PIPE),
+ skb->data, MAX_REG_IN_BUF_SIZE,
+- ath9k_hif_usb_reg_in_cb, skb, 1);
++ ath9k_hif_usb_reg_in_cb, rx_buf, 1);
+
+ /* Anchor URB */
+ usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
+@@ -943,6 +972,8 @@ err_submit:
+ err_skb:
+ usb_free_urb(urb);
+ err_urb:
++ kfree(rx_buf);
++err_rxb:
+ ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
+ return ret;
+ }
+@@ -973,7 +1004,7 @@ err:
+ return -ENOMEM;
+ }
+
+-static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
++void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
+ {
+ usb_kill_anchored_urbs(&hif_dev->regout_submitted);
+ ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
+@@ -1341,8 +1372,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
+
+ if (hif_dev->flags & HIF_USB_READY) {
+ ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
+- ath9k_htc_hw_free(hif_dev->htc_handle);
+ ath9k_hif_usb_dev_deinit(hif_dev);
++ ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv);
++ ath9k_htc_hw_free(hif_dev->htc_handle);
+ }
+
+ usb_set_intfdata(interface, NULL);
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
+index 7846916aa01d..5985aa15ca93 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
+@@ -86,6 +86,11 @@ struct tx_buf {
+ struct list_head list;
+ };
+
++struct rx_buf {
++ struct sk_buff *skb;
++ struct hif_device_usb *hif_dev;
++};
++
+ #define HIF_USB_TX_STOP BIT(0)
+ #define HIF_USB_TX_FLUSH BIT(1)
+
+@@ -133,5 +138,6 @@ struct hif_device_usb {
+
+ int ath9k_hif_usb_init(void);
+ void ath9k_hif_usb_exit(void);
++void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev);
+
+ #endif /* HTC_USB_H */
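The ath9k changes stop deriving hif_dev from the USB interface inside the URB completion handlers, which races with disconnect, and instead make each URB carry an explicit rx_buf context holding both the device and the skb. A generic sketch of that callback-context pattern in plain C (the struct names echo the patch; the callback plumbing is invented):

	#include <stdio.h>
	#include <stdlib.h>

	struct device { const char *name; };
	struct buffer { int len; };

	struct rx_buf {
		struct device *dev;	/* everything the callback needs... */
		struct buffer *buf;	/* ...travels in one context object */
	};

	static void rx_complete(void *context)
	{
		struct rx_buf *rx = context;

		printf("%s: completed %d bytes\n", rx->dev->name, rx->buf->len);
		free(rx->buf);
		free(rx);
	}

	int main(void)
	{
		static struct device dev = { "hif0" };
		struct rx_buf *rx = malloc(sizeof(*rx));

		rx->dev = &dev;
		rx->buf = malloc(sizeof(*rx->buf));
		rx->buf->len = 64;
		rx_complete(rx);	/* the USB core would invoke this */
		return 0;
	}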
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index d961095ab01f..40a065028ebe 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -931,8 +931,9 @@ err_init:
+ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid, char *product, u32 drv_info)
+ {
+- struct ieee80211_hw *hw;
++ struct hif_device_usb *hif_dev;
+ struct ath9k_htc_priv *priv;
++ struct ieee80211_hw *hw;
+ int ret;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
+@@ -967,7 +968,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ return 0;
+
+ err_init:
+- ath9k_deinit_wmi(priv);
++ ath9k_stop_wmi(priv);
++ hif_dev = (struct hif_device_usb *)htc_handle->hif_dev;
++ ath9k_hif_usb_dealloc_urbs(hif_dev);
++ ath9k_destoy_wmi(priv);
+ err_free:
+ ieee80211_free_hw(hw);
+ return ret;
+@@ -982,7 +986,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
+ htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED;
+
+ ath9k_deinit_device(htc_handle->drv_priv);
+- ath9k_deinit_wmi(htc_handle->drv_priv);
++ ath9k_stop_wmi(htc_handle->drv_priv);
+ ieee80211_free_hw(htc_handle->drv_priv->hw);
+ }
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index 9cec5c216e1f..118e5550b10c 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
+ * which are not PHY_ERROR (short radar pulses have a length of 3)
+ */
+ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
+- ath_warn(common,
+- "Short RX data len, dropping (dlen: %d)\n",
+- rs_datalen);
++ ath_dbg(common, ANY,
++ "Short RX data len, dropping (dlen: %d)\n",
++ rs_datalen);
+ goto rx_next;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index d091c8ebdcf0..d2e062eaf561 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target,
+
+ if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
+ epid = svc_rspmsg->endpoint_id;
++ if (epid < 0 || epid >= ENDPOINT_MAX)
++ return;
++
+ service_id = be16_to_cpu(svc_rspmsg->service_id);
+ max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
+ endpoint = &target->endpoint[epid];
+@@ -170,7 +173,6 @@ static int htc_config_pipe_credits(struct htc_target *target)
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC credit config timeout\n");
+- kfree_skb(skb);
+ return -ETIMEDOUT;
+ }
+
+@@ -206,7 +208,6 @@ static int htc_setup_complete(struct htc_target *target)
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC start timeout\n");
+- kfree_skb(skb);
+ return -ETIMEDOUT;
+ }
+
+@@ -279,7 +280,6 @@ int htc_connect_service(struct htc_target *target,
+ if (!time_left) {
+ dev_err(target->dev, "Service connection timeout for: %d\n",
+ service_connreq->service_id);
+- kfree_skb(skb);
+ return -ETIMEDOUT;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
+index cdc146091194..e7a3127395be 100644
+--- a/drivers/net/wireless/ath/ath9k/wmi.c
++++ b/drivers/net/wireless/ath/ath9k/wmi.c
+@@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
+ return wmi;
+ }
+
+-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
++void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
+ {
+ struct wmi *wmi = priv->wmi;
+
+ mutex_lock(&wmi->op_mutex);
+ wmi->stopped = true;
+ mutex_unlock(&wmi->op_mutex);
++}
+
++void ath9k_destoy_wmi(struct ath9k_htc_priv *priv)
++{
+ kfree(priv->wmi);
+ }
+
+@@ -336,7 +339,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
+ wmi_cmd_to_name(cmd_id));
+ mutex_unlock(&wmi->op_mutex);
+- kfree_skb(skb);
+ return -ETIMEDOUT;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
+index 380175d5ecd7..d8b912206232 100644
+--- a/drivers/net/wireless/ath/ath9k/wmi.h
++++ b/drivers/net/wireless/ath/ath9k/wmi.h
+@@ -179,7 +179,6 @@ struct wmi {
+ };
+
+ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
+-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
+ int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ enum htc_endpoint_id *wmi_ctrl_epid);
+ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+@@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ void ath9k_wmi_event_tasklet(unsigned long data);
+ void ath9k_fatal_work(struct work_struct *work);
+ void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv);
++void ath9k_stop_wmi(struct ath9k_htc_priv *priv);
++void ath9k_destoy_wmi(struct ath9k_htc_priv *priv);
+
+ #define WMI_CMD(_wmi_cmd) \
+ do { \
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index d828ca835a98..fe9fbb74ce72 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4616,10 +4616,10 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
+
+ /*
+ * Some controllers might not implement link active reporting. In this
+- * case, we wait for 1000 + 100 ms.
++ * case, we wait for 1000 ms + any delay requested by the caller.
+ */
+ if (!pdev->link_active_reporting) {
+- msleep(1100);
++ msleep(timeout + delay);
+ return true;
+ }
+
+diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
+index fb088dd8529e..32fa60feaadb 100644
+--- a/drivers/platform/x86/sony-laptop.c
++++ b/drivers/platform/x86/sony-laptop.c
+@@ -757,33 +757,6 @@ static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
+ return result;
+ }
+
+-static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
+- int *result)
+-{
+- union acpi_object *object = NULL;
+- if (value) {
+- u64 v = *value;
+- object = __call_snc_method(handle, name, &v);
+- } else
+- object = __call_snc_method(handle, name, NULL);
+-
+- if (!object)
+- return -EINVAL;
+-
+- if (object->type != ACPI_TYPE_INTEGER) {
+- pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+- ACPI_TYPE_INTEGER, object->type);
+- kfree(object);
+- return -EINVAL;
+- }
+-
+- if (result)
+- *result = object->integer.value;
+-
+- kfree(object);
+- return 0;
+-}
+-
+ #define MIN(a, b) (a > b ? b : a)
+ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+ void *buffer, size_t buflen)
+@@ -795,17 +768,20 @@ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+ if (!object)
+ return -EINVAL;
+
+- if (object->type == ACPI_TYPE_BUFFER) {
++ if (!buffer) {
++ /* do nothing */
++ } else if (object->type == ACPI_TYPE_BUFFER) {
+ len = MIN(buflen, object->buffer.length);
++ memset(buffer, 0, buflen);
+ memcpy(buffer, object->buffer.pointer, len);
+
+ } else if (object->type == ACPI_TYPE_INTEGER) {
+ len = MIN(buflen, sizeof(object->integer.value));
++ memset(buffer, 0, buflen);
+ memcpy(buffer, &object->integer.value, len);
+
+ } else {
+- pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+- ACPI_TYPE_BUFFER, object->type);
++ pr_warn("Unexpected acpi_object: 0x%x\n", object->type);
+ ret = -EINVAL;
+ }
+
+@@ -813,6 +789,23 @@ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+ return ret;
+ }
+
++static int sony_nc_int_call(acpi_handle handle, char *name, int *value, int
++ *result)
++{
++ int ret;
++
++ if (value) {
++ u64 v = *value;
++
++ ret = sony_nc_buffer_call(handle, name, &v, result,
++ sizeof(*result));
++ } else {
++ ret = sony_nc_buffer_call(handle, name, NULL, result,
++ sizeof(*result));
++ }
++ return ret;
++}
++
+ struct sony_nc_handles {
+ u16 cap[0x10];
+ struct device_attribute devattr;
+@@ -2295,7 +2288,12 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd)
+ #ifdef CONFIG_PM_SLEEP
+ static void sony_nc_thermal_resume(void)
+ {
+- unsigned int status = sony_nc_thermal_mode_get();
++ int status;
++
++ if (!th_handle)
++ return;
++
++ status = sony_nc_thermal_mode_get();
+
+ if (status != th_handle->mode)
+ sony_nc_thermal_mode_set(th_handle->mode);
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index 097f33e4f1f3..ba18f32bd0c4 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -510,7 +510,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
+
+ /* Initialise vdev subdevice */
+ snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
+- rvdev->dev.parent = rproc->dev.parent;
++ rvdev->dev.parent = &rproc->dev;
+ rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset;
+ rvdev->dev.release = rproc_rvdev_release;
+ dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
+diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
+index 31a62a0b470e..380d52672035 100644
+--- a/drivers/remoteproc/remoteproc_virtio.c
++++ b/drivers/remoteproc/remoteproc_virtio.c
+@@ -375,6 +375,18 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
+ goto out;
+ }
+ }
++ } else {
++ struct device_node *np = rproc->dev.parent->of_node;
++
++ /*
++ * If we don't have dedicated buffer, just attempt to re-assign
++ * the reserved memory from our parent. A default memory-region
++ * at index 0 from the parent's memory-regions is assigned for
++ * the rvdev dev to allocate from. Failure is non-critical and
++ * the allocations will fall back to global pools, so don't
++ * check return value either.
++ */
++ of_reserved_mem_device_init_by_idx(dev, np, 0);
+ }
+
+ /* Allocate virtio device */
+diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
+index 58b35a1442c1..001b319a30ee 100644
+--- a/drivers/scsi/lpfc/lpfc_ct.c
++++ b/drivers/scsi/lpfc/lpfc_ct.c
+@@ -462,7 +462,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
+ struct lpfc_nodelist *ndlp;
+
+ if ((vport->port_type != LPFC_NPIV_PORT) ||
+- (fc4_type == FC_TYPE_FCP) ||
+ !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
+
+ ndlp = lpfc_setup_disc_node(vport, Did);
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index 83d8c4cb1ad5..98827363bc49 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -511,7 +511,7 @@ union MR_PROGRESS {
+ */
+ struct MR_PD_PROGRESS {
+ struct {
+-#ifndef MFI_BIG_ENDIAN
++#ifndef __BIG_ENDIAN_BITFIELD
+ u32 rbld:1;
+ u32 patrol:1;
+ u32 clear:1;
+@@ -537,7 +537,7 @@ struct MR_PD_PROGRESS {
+ };
+
+ struct {
+-#ifndef MFI_BIG_ENDIAN
++#ifndef __BIG_ENDIAN_BITFIELD
+ u32 rbld:1;
+ u32 patrol:1;
+ u32 clear:1;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index b2ad96564484..03a6c86475c8 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -4238,6 +4238,7 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd_mfi;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
++ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
+ u16 smid;
+ bool refire_cmd = 0;
+ u8 result;
+@@ -4305,6 +4306,11 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
+ result = COMPLETE_CMD;
+ }
+
++ scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
++ cmd_fusion->io_request;
++ if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
++ result = RETURN_CMD;
++
+ switch (result) {
+ case REFIRE_CMD:
+ megasas_fire_cmd_fusion(instance, req_desc);
+@@ -4533,7 +4539,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
+ if (!timeleft) {
+ dev_err(&instance->pdev->dev,
+ "task mgmt type 0x%x timed out\n", type);
+- cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
+ rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
+index d57ecc7f88d8..30de4b01f703 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
+@@ -774,7 +774,7 @@ struct MR_SPAN_BLOCK_INFO {
+ struct MR_CPU_AFFINITY_MASK {
+ union {
+ struct {
+-#ifndef MFI_BIG_ENDIAN
++#ifndef __BIG_ENDIAN_BITFIELD
+ u8 hw_path:1;
+ u8 cpu0:1;
+ u8 cpu1:1;
+@@ -866,7 +866,7 @@ struct MR_LD_RAID {
+ __le16 seqNum;
+
+ struct {
+-#ifndef MFI_BIG_ENDIAN
++#ifndef __BIG_ENDIAN_BITFIELD
+ u32 ldSyncRequired:1;
+ u32 regTypeReqOnReadIsValid:1;
+ u32 isEPD:1;
+@@ -889,7 +889,7 @@ struct {
+ /* 0x30 - 0x33, Logical block size for the LD */
+ u32 logical_block_length;
+ struct {
+-#ifndef MFI_BIG_ENDIAN
++#ifndef __BIG_ENDIAN_BITFIELD
+ /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
+ u32 ld_pi_exp:4;
+ /* 0x34, LOGICAL BLOCKS PER PHYSICAL
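The megaraid header changes swap the guard macro from the driver-local MFI_BIG_ENDIAN to the kernel's __BIG_ENDIAN_BITFIELD, since compilers allocate bitfield members in opposite directions on little- and big-endian targets. A userspace sketch of the same idea using the GCC/Clang byte-order predefines in place of the kernel macro, assuming the usual Linux ABIs where bitfield allocation follows byte order:

	#include <stdio.h>
	#include <string.h>

	struct progress {
	#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		unsigned int rbld:1;		/* LSB first on little endian */
		unsigned int patrol:1;
		unsigned int clear:1;
		unsigned int reserved:29;
	#else
		unsigned int reserved:29;	/* MSB first on big endian */
		unsigned int clear:1;
		unsigned int patrol:1;
		unsigned int rbld:1;
	#endif
	};

	int main(void)
	{
		struct progress p = { .rbld = 1 };
		unsigned int raw;

		memcpy(&raw, &p, sizeof(raw));
		/* With the matching member order, rbld lands in bit 0 of
		 * the register image on either endianness. */
		printf("raw = 0x%08x\n", raw);
		return 0;
	}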
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index 23d295f36c80..c64be5e8fb8a 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -670,7 +670,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
+ if (buf)
+ buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
+ dev_dbg(&qspi->pdev->dev, "RD %02x\n",
+- buf ? buf[tp.byte] : 0xff);
++ buf ? buf[tp.byte] : 0x0);
+ } else {
+ u16 *buf = tp.trans->rx_buf;
+
+@@ -678,7 +678,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
+ buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
+ slot);
+ dev_dbg(&qspi->pdev->dev, "RD %04x\n",
+- buf ? buf[tp.byte] : 0xffff);
++ buf ? buf[tp.byte / 2] : 0x0);
+ }
+
+ update_qspi_trans_byte_count(qspi, &tp,
+@@ -733,13 +733,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
+ while (!tstatus && slot < MSPI_NUM_CDRAM) {
+ if (tp.trans->bits_per_word <= 8) {
+ const u8 *buf = tp.trans->tx_buf;
+- u8 val = buf ? buf[tp.byte] : 0xff;
++ u8 val = buf ? buf[tp.byte] : 0x00;
+
+ write_txram_slot_u8(qspi, slot, val);
+ dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
+ } else {
+ const u16 *buf = tp.trans->tx_buf;
+- u16 val = buf ? buf[tp.byte / 2] : 0xffff;
++ u16 val = buf ? buf[tp.byte / 2] : 0x0000;
+
+ write_txram_slot_u16(qspi, slot, val);
+ dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
+@@ -1222,6 +1222,11 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ }
+
+ qspi = spi_master_get_devdata(master);
++
++ qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
++ if (IS_ERR(qspi->clk))
++ return PTR_ERR(qspi->clk);
++
+ qspi->pdev = pdev;
+ qspi->trans_pos.trans = NULL;
+ qspi->trans_pos.byte = 0;
+@@ -1335,13 +1340,6 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ qspi->soc_intc = NULL;
+ }
+
+- qspi->clk = devm_clk_get(&pdev->dev, NULL);
+- if (IS_ERR(qspi->clk)) {
+- dev_warn(dev, "unable to get clock\n");
+- ret = PTR_ERR(qspi->clk);
+- goto qspi_probe_err;
+- }
+-
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
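The spi-bcm-qspi hunks above do two things: they move the clock lookup to the top of probe, before other resources are claimed, and they switch from devm_clk_get() to devm_clk_get_optional(), which returns NULL rather than an error when the device has no clock described. A minimal sketch of that pattern; example_probe() is hypothetical, while devm_clk_get_optional() and clk_prepare_enable() are the real APIs, and clk_prepare_enable(NULL) is defined to succeed without doing anything:

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)	/* hypothetical */
	{
		struct clk *clk;

		clk = devm_clk_get_optional(&pdev->dev, NULL);
		if (IS_ERR(clk))	/* a real failure, e.g. -EPROBE_DEFER */
			return PTR_ERR(clk);

		/* A NULL clk (no clock in the DT node) is a silent no-op here. */
		return clk_prepare_enable(clk);
	}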
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index 11c235879bb7..fd887a6492f4 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -1347,7 +1347,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
+ goto out_dma_release;
+ }
+
+- err = devm_spi_register_controller(&pdev->dev, ctlr);
++ err = spi_register_controller(ctlr);
+ if (err) {
+ dev_err(&pdev->dev, "could not register SPI controller: %d\n",
+ err);
+@@ -1374,6 +1374,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
+
+ bcm2835_debugfs_remove(bs);
+
++ spi_unregister_controller(ctlr);
++
+ /* Clear FIFOs, and disable the HW block */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
+index a2162ff56a12..c331efd6e86b 100644
+--- a/drivers/spi/spi-bcm2835aux.c
++++ b/drivers/spi/spi-bcm2835aux.c
+@@ -569,7 +569,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
+ goto out_clk_disable;
+ }
+
+- err = devm_spi_register_master(&pdev->dev, master);
++ err = spi_register_master(master);
+ if (err) {
+ dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+ goto out_clk_disable;
+@@ -593,6 +593,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev)
+
+ bcm2835aux_debugfs_remove(bs);
+
++ spi_unregister_master(master);
++
+ bcm2835aux_spi_reset_hw(bs);
+
+ /* disable the HW block by releasing the clock */
+diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
+index 31e3f866d11a..dbf9b8d5cebe 100644
+--- a/drivers/spi/spi-dw.c
++++ b/drivers/spi/spi-dw.c
+@@ -128,12 +128,20 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
+ {
+ struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
+ struct chip_data *chip = spi_get_ctldata(spi);
++ bool cs_high = !!(spi->mode & SPI_CS_HIGH);
+
+ /* Chip select logic is inverted from spi_set_cs() */
+ if (chip && chip->cs_control)
+ chip->cs_control(!enable);
+
+- if (!enable)
++ /*
++ * DW SPI controller demands any native CS being set in order to
++ * proceed with data transfer. So in order to activate the SPI
++ * communications we must set a corresponding bit in the Slave
++ * Enable register no matter whether the SPI core is configured to
++ * support active-high or active-low CS level.
++ */
++ if (cs_high == enable)
+ dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
+ else if (dws->cs_override)
+ dw_writel(dws, DW_SPI_SER, 0);
+@@ -526,7 +534,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
+ }
+ }
+
+- ret = devm_spi_register_controller(dev, master);
++ ret = spi_register_controller(master);
+ if (ret) {
+ dev_err(&master->dev, "problem registering spi master\n");
+ goto err_dma_exit;
+@@ -550,6 +558,8 @@ void dw_spi_remove_host(struct dw_spi *dws)
+ {
+ dw_spi_debugfs_remove(dws);
+
++ spi_unregister_controller(dws->master);
++
+ if (dws->dma_ops && dws->dma_ops->dma_exit)
+ dws->dma_ops->dma_exit(dws);
+
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index 2e318158fca9..5f8eb2589595 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1879,7 +1879,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
+
+ /* Register with the SPI framework */
+ platform_set_drvdata(pdev, drv_data);
+- status = devm_spi_register_controller(&pdev->dev, controller);
++ status = spi_register_controller(controller);
+ if (status != 0) {
+ dev_err(&pdev->dev, "problem registering spi controller\n");
+ goto out_error_pm_runtime_enabled;
+@@ -1888,7 +1888,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
+ return status;
+
+ out_error_pm_runtime_enabled:
+- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ out_error_clock_enabled:
+@@ -1915,6 +1914,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
+
+ pm_runtime_get_sync(&pdev->dev);
+
++ spi_unregister_controller(drv_data->controller);
++
+ /* Disable the SSP at the peripheral and SOC level */
+ pxa2xx_spi_write(drv_data, SSCR0, 0);
+ clk_disable_unprepare(ssp->clk);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 755221bc3745..1fc29a665a4a 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -2768,6 +2768,8 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ struct spi_controller *found;
+ int id = ctlr->bus_num;
+
++ device_for_each_child(&ctlr->dev, NULL, __unregister);
++
+ /* First make sure that this controller was ever added */
+ mutex_lock(&board_lock);
+ found = idr_find(&spi_master_idr, id);
+@@ -2780,7 +2782,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ list_del(&ctlr->list);
+ mutex_unlock(&board_lock);
+
+- device_for_each_child(&ctlr->dev, NULL, __unregister);
+ device_unregister(&ctlr->dev);
+ /* free bus id */
+ mutex_lock(&board_lock);
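The drivers/spi changes above are all one fix: devm_spi_register_controller() deferred unregistration until after each driver's remove() callback had already disabled clocks and DMA, so in-flight transfers from child devices could touch dead hardware. The drivers therefore register non-devm and unregister explicitly at the top of remove(), and spi_unregister_controller() itself now unbinds the child spi devices before dropping the controller from the idr. A rough sketch of the remove() ordering the drivers now share, with example_hw_shutdown() standing in for each driver's hardware teardown:

	static int example_spi_remove(struct platform_device *pdev)	/* hypothetical */
	{
		struct spi_controller *ctlr = platform_get_drvdata(pdev);

		/* Unbind child spi devices while the controller still works. */
		spi_unregister_controller(ctlr);

		/* Only now is it safe to quiesce clocks, DMA, and the block. */
		example_hw_shutdown(ctlr);			/* hypothetical */
		return 0;
	}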
+diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
+index a1dafec0890a..6eb7436af462 100644
+--- a/drivers/staging/mt7621-pci/pci-mt7621.c
++++ b/drivers/staging/mt7621-pci/pci-mt7621.c
+@@ -479,17 +479,25 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
+
+ mt7621_perst_gpio_pcie_deassert(pcie);
+
++ tmp = NULL;
+ list_for_each_entry(port, &pcie->ports, list) {
+ u32 slot = port->slot;
+
+ if (!mt7621_pcie_port_is_linkup(port)) {
+ dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
+ slot);
+- if (slot != 1)
+- phy_power_off(port->phy);
+ mt7621_control_assert(port);
+ mt7621_pcie_port_clk_disable(port);
+ port->enabled = false;
++
++ if (slot == 0) {
++ tmp = port;
++ continue;
++ }
++
++ if (slot == 1 && tmp && !tmp->enabled)
++ phy_power_off(tmp->phy);
++
+ }
+ }
+
+diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
+index 76b2ff7fc7fe..2c757b81efa9 100644
+--- a/drivers/staging/wfx/main.c
++++ b/drivers/staging/wfx/main.c
+@@ -466,7 +466,6 @@ int wfx_probe(struct wfx_dev *wdev)
+
+ err2:
+ ieee80211_unregister_hw(wdev->hw);
+- ieee80211_free_hw(wdev->hw);
+ err1:
+ wfx_bh_unregister(wdev);
+ return err;
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 9fc7e374a29b..59379d662626 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4301,30 +4301,37 @@ int iscsit_close_connection(
+ if (!atomic_read(&sess->session_reinstatement) &&
+ atomic_read(&sess->session_fall_back_to_erl0)) {
+ spin_unlock_bh(&sess->conn_lock);
++ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess);
+
+ return 0;
+ } else if (atomic_read(&sess->session_logout)) {
+ pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+ sess->session_state = TARG_SESS_STATE_FREE;
+- spin_unlock_bh(&sess->conn_lock);
+
+- if (atomic_read(&sess->sleep_on_sess_wait_comp))
+- complete(&sess->session_wait_comp);
++ if (atomic_read(&sess->session_close)) {
++ spin_unlock_bh(&sess->conn_lock);
++ complete_all(&sess->session_wait_comp);
++ iscsit_close_session(sess);
++ } else {
++ spin_unlock_bh(&sess->conn_lock);
++ }
+
+ return 0;
+ } else {
+ pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+ sess->session_state = TARG_SESS_STATE_FAILED;
+
+- if (!atomic_read(&sess->session_continuation)) {
+- spin_unlock_bh(&sess->conn_lock);
++ if (!atomic_read(&sess->session_continuation))
+ iscsit_start_time2retain_handler(sess);
+- } else
+- spin_unlock_bh(&sess->conn_lock);
+
+- if (atomic_read(&sess->sleep_on_sess_wait_comp))
+- complete(&sess->session_wait_comp);
++ if (atomic_read(&sess->session_close)) {
++ spin_unlock_bh(&sess->conn_lock);
++ complete_all(&sess->session_wait_comp);
++ iscsit_close_session(sess);
++ } else {
++ spin_unlock_bh(&sess->conn_lock);
++ }
+
+ return 0;
+ }
+@@ -4429,9 +4436,9 @@ static void iscsit_logout_post_handler_closesession(
+ complete(&conn->conn_logout_comp);
+
+ iscsit_dec_conn_usage_count(conn);
++ atomic_set(&sess->session_close, 1);
+ iscsit_stop_session(sess, sleep, sleep);
+ iscsit_dec_session_usage_count(sess);
+- iscsit_close_session(sess);
+ }
+
+ static void iscsit_logout_post_handler_samecid(
+@@ -4566,49 +4573,6 @@ void iscsit_fail_session(struct iscsi_session *sess)
+ sess->session_state = TARG_SESS_STATE_FAILED;
+ }
+
+-int iscsit_free_session(struct iscsi_session *sess)
+-{
+- u16 conn_count = atomic_read(&sess->nconn);
+- struct iscsi_conn *conn, *conn_tmp = NULL;
+- int is_last;
+-
+- spin_lock_bh(&sess->conn_lock);
+- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+-
+- list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+- conn_list) {
+- if (conn_count == 0)
+- break;
+-
+- if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+- is_last = 1;
+- } else {
+- iscsit_inc_conn_usage_count(conn_tmp);
+- is_last = 0;
+- }
+- iscsit_inc_conn_usage_count(conn);
+-
+- spin_unlock_bh(&sess->conn_lock);
+- iscsit_cause_connection_reinstatement(conn, 1);
+- spin_lock_bh(&sess->conn_lock);
+-
+- iscsit_dec_conn_usage_count(conn);
+- if (is_last == 0)
+- iscsit_dec_conn_usage_count(conn_tmp);
+-
+- conn_count--;
+- }
+-
+- if (atomic_read(&sess->nconn)) {
+- spin_unlock_bh(&sess->conn_lock);
+- wait_for_completion(&sess->session_wait_comp);
+- } else
+- spin_unlock_bh(&sess->conn_lock);
+-
+- iscsit_close_session(sess);
+- return 0;
+-}
+-
+ void iscsit_stop_session(
+ struct iscsi_session *sess,
+ int session_sleep,
+@@ -4619,8 +4583,6 @@ void iscsit_stop_session(
+ int is_last;
+
+ spin_lock_bh(&sess->conn_lock);
+- if (session_sleep)
+- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
+ if (connection_sleep) {
+ list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+@@ -4678,12 +4640,15 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
++ atomic_read(&sess->session_close) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
+ continue;
+ }
++ iscsit_inc_session_usage_count(sess);
+ atomic_set(&sess->session_reinstatement, 1);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
++ atomic_set(&sess->session_close, 1);
+ spin_unlock(&sess->conn_lock);
+
+ list_move_tail(&se_sess->sess_list, &free_list);
+@@ -4693,7 +4658,9 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+- iscsit_free_session(sess);
++ list_del_init(&se_sess->sess_list);
++ iscsit_stop_session(sess, 1, 1);
++ iscsit_dec_session_usage_count(sess);
+ session_count++;
+ }
+
+diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
+index c95f56a3ce31..7409ce2a6607 100644
+--- a/drivers/target/iscsi/iscsi_target.h
++++ b/drivers/target/iscsi/iscsi_target.h
+@@ -43,7 +43,6 @@ extern int iscsi_target_rx_thread(void *);
+ extern int iscsit_close_connection(struct iscsi_conn *);
+ extern int iscsit_close_session(struct iscsi_session *);
+ extern void iscsit_fail_session(struct iscsi_session *);
+-extern int iscsit_free_session(struct iscsi_session *);
+ extern void iscsit_stop_session(struct iscsi_session *, int, int);
+ extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
+
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 42b369fc415e..0fa1d57b26fa 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1476,20 +1476,23 @@ static void lio_tpg_close_session(struct se_session *se_sess)
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
++ atomic_read(&sess->session_close) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
+ spin_unlock_bh(&se_tpg->session_lock);
+ return;
+ }
++ iscsit_inc_session_usage_count(sess);
+ atomic_set(&sess->session_reinstatement, 1);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
++ atomic_set(&sess->session_close, 1);
+ spin_unlock(&sess->conn_lock);
+
+ iscsit_stop_time2retain_timer(sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ iscsit_stop_session(sess, 1, 1);
+- iscsit_close_session(sess);
++ iscsit_dec_session_usage_count(sess);
+ }
+
+ static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index f53330813207..731ee67fe914 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -156,6 +156,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+ spin_lock(&sess_p->conn_lock);
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
++ atomic_read(&sess_p->session_close) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess_p->conn_lock);
+ continue;
+@@ -166,6 +167,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+ (sess_p->sess_ops->SessionType == sessiontype))) {
+ atomic_set(&sess_p->session_reinstatement, 1);
+ atomic_set(&sess_p->session_fall_back_to_erl0, 1);
++ atomic_set(&sess_p->session_close, 1);
+ spin_unlock(&sess_p->conn_lock);
+ iscsit_inc_session_usage_count(sess_p);
+ iscsit_stop_time2retain_timer(sess_p);
+@@ -190,7 +192,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+ if (sess->session_state == TARG_SESS_STATE_FAILED) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_dec_session_usage_count(sess);
+- iscsit_close_session(sess);
+ return 0;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+@@ -198,7 +199,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+
+- iscsit_close_session(sess);
+ return 0;
+ }
+
+@@ -486,6 +486,7 @@ static int iscsi_login_non_zero_tsih_s2(
+ sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
++ atomic_read(&sess_p->session_close) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
+ continue;
+ if (!memcmp(sess_p->isid, pdu->isid, 6) &&
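The iscsi hunks above all repeat one pattern around the new session_close flag: under conn_lock, a session already flagged for logout, reinstatement, or close is skipped, and otherwise the caller pins the session with a usage count before claiming the close, so iscsit_close_session() runs exactly once from the final reference drop instead of racing between the logout path and the configfs/tpg teardown paths. A condensed sketch of that claim step, using only the helpers visible in the diff (fragment, not a standalone function):

	spin_lock(&sess->conn_lock);
	if (atomic_read(&sess->session_logout) ||
	    atomic_read(&sess->session_close)) {	/* teardown already owned */
		spin_unlock(&sess->conn_lock);
		return;
	}
	iscsit_inc_session_usage_count(sess);		/* pin across the stop */
	atomic_set(&sess->session_reinstatement, 1);
	atomic_set(&sess->session_close, 1);		/* claim the close */
	spin_unlock(&sess->conn_lock);

	iscsit_stop_session(sess, 1, 1);
	iscsit_dec_session_usage_count(sess);		/* last put frees the session */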
+diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
+index f744479dc7df..c61476247ba8 100644
+--- a/drivers/video/fbdev/vt8500lcdfb.c
++++ b/drivers/video/fbdev/vt8500lcdfb.c
+@@ -230,6 +230,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
+ info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
+ for (i = 0; i < 256; i++)
+ vt8500lcd_setcolreg(i, 0, 0, 0, 0, info);
++ fallthrough;
+ case FB_BLANK_UNBLANK:
+ if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
+ info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
+diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
+index ad26cbffbc6f..0c2c0963aeb8 100644
+--- a/drivers/video/fbdev/w100fb.c
++++ b/drivers/video/fbdev/w100fb.c
+@@ -588,6 +588,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
+ memsize=par->mach->mem->size;
+ memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
+ vfree(par->saved_extmem);
++ par->saved_extmem = NULL;
+ }
+ if (par->saved_intmem) {
+ memsize=MEM_INT_SIZE;
+@@ -596,6 +597,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
+ else
+ memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
+ vfree(par->saved_intmem);
++ par->saved_intmem = NULL;
+ }
+ }
+
+diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
+index 8ed89f032ebf..e0e62149a6f4 100644
+--- a/drivers/watchdog/imx_sc_wdt.c
++++ b/drivers/watchdog/imx_sc_wdt.c
+@@ -177,6 +177,11 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
+ wdog->timeout = DEFAULT_TIMEOUT;
+
+ watchdog_init_timeout(wdog, 0, dev);
++
++ ret = imx_sc_wdt_set_timeout(wdog, wdog->timeout);
++ if (ret)
++ return ret;
++
+ watchdog_stop_on_reboot(wdog);
+ watchdog_stop_on_unregister(wdog);
+
+diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
+index c57c71b7d53d..ffe9bd843922 100644
+--- a/drivers/xen/pvcalls-back.c
++++ b/drivers/xen/pvcalls-back.c
+@@ -1087,7 +1087,8 @@ static void set_backend_state(struct xenbus_device *dev,
+ case XenbusStateInitialised:
+ switch (state) {
+ case XenbusStateConnected:
+- backend_connect(dev);
++ if (backend_connect(dev))
++ return;
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+ case XenbusStateClosing:
+diff --git a/fs/aio.c b/fs/aio.c
+index 5f3d3d814928..6483f9274d5e 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -176,6 +176,7 @@ struct fsync_iocb {
+ struct file *file;
+ struct work_struct work;
+ bool datasync;
++ struct cred *creds;
+ };
+
+ struct poll_iocb {
+@@ -1589,8 +1590,11 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb,
+ static void aio_fsync_work(struct work_struct *work)
+ {
+ struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
++ const struct cred *old_cred = override_creds(iocb->fsync.creds);
+
+ iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
++ revert_creds(old_cred);
++ put_cred(iocb->fsync.creds);
+ iocb_put(iocb);
+ }
+
+@@ -1604,6 +1608,10 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
+ if (unlikely(!req->file->f_op->fsync))
+ return -EINVAL;
+
++ req->creds = prepare_creds();
++ if (!req->creds)
++ return -ENOMEM;
++
+ req->datasync = datasync;
+ INIT_WORK(&req->work, aio_fsync_work);
+ schedule_work(&req->work);
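The fs/aio.c change exists because aio_fsync_work() runs from a kworker, which otherwise carries the worker's credentials rather than the submitting task's; the fix snapshots the submitter's creds with prepare_creds() at submission time and swaps them in around vfs_fsync(). The override/revert pairing in isolation, assuming a previously prepared struct cred *creds as in the hunk and a placeholder do_the_work():

	const struct cred *old_cred = override_creds(creds);	/* become the submitter */

	do_the_work();						/* hypothetical */

	revert_creds(old_cred);					/* back to kworker creds */
	put_cred(creds);					/* drop the snapshot */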
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index fa77fe5258b0..82d5ea522c33 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -621,7 +621,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
+
+ if (tcon->ses->chan_max > 1)
+- seq_printf(s, ",multichannel,max_channel=%zu",
++ seq_printf(s, ",multichannel,max_channels=%zu",
+ tcon->ses->chan_max);
+
+ return 0;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 28c0be5e69b7..d9160eaa9e32 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2868,7 +2868,9 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ * response size smaller.
+ */
+ req->MaxOutputResponse = cpu_to_le32(max_response_size);
+-
++ req->sync_hdr.CreditCharge =
++ cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
++ SMB2_MAX_BUFFER_SIZE));
+ if (is_fsctl)
+ req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
+ else
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c
+index 71946da84388..bf8e04e25f35 100644
+--- a/fs/fat/inode.c
++++ b/fs/fat/inode.c
+@@ -1520,6 +1520,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b,
+ goto out;
+ }
+
++ if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) {
++ if (!silent)
++ fat_msg(sb, KERN_ERR, "bogus number of FAT sectors");
++ goto out;
++ }
++
+ error = 0;
+
+ out:
+diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
+index 3a020bdc358c..966ed37c9acd 100644
+--- a/fs/gfs2/lops.c
++++ b/fs/gfs2/lops.c
+@@ -505,12 +505,12 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
+ unsigned int bsize = sdp->sd_sb.sb_bsize, off;
+ unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
+ unsigned int shift = PAGE_SHIFT - bsize_shift;
+- unsigned int max_bio_size = 2 * 1024 * 1024;
++ unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
+ struct gfs2_journal_extent *je;
+ int sz, ret = 0;
+ struct bio *bio = NULL;
+ struct page *page = NULL;
+- bool bio_chained = false, done = false;
++ bool done = false;
+ errseq_t since;
+
+ memset(head, 0, sizeof(*head));
+@@ -533,10 +533,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
+ off = 0;
+ }
+
+- if (!bio || (bio_chained && !off) ||
+- bio->bi_iter.bi_size >= max_bio_size) {
+- /* start new bio */
+- } else {
++ if (bio && (off || block < blocks_submitted + max_blocks)) {
+ sector_t sector = dblock << sdp->sd_fsb2bb_shift;
+
+ if (bio_end_sector(bio) == sector) {
+@@ -549,19 +546,17 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
+ (PAGE_SIZE - off) >> bsize_shift;
+
+ bio = gfs2_chain_bio(bio, blocks);
+- bio_chained = true;
+ goto add_block_to_new_bio;
+ }
+ }
+
+ if (bio) {
+- blocks_submitted = block + 1;
++ blocks_submitted = block;
+ submit_bio(bio);
+ }
+
+ bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
+ bio->bi_opf = REQ_OP_READ;
+- bio_chained = false;
+ add_block_to_new_bio:
+ sz = bio_add_page(bio, page, bsize, off);
+ BUG_ON(sz != bsize);
+@@ -569,7 +564,7 @@ block_added:
+ off += bsize;
+ if (off == PAGE_SIZE)
+ page = NULL;
+- if (blocks_submitted < 2 * max_bio_size >> bsize_shift) {
++ if (blocks_submitted <= blocks_read + max_blocks) {
+ /* Keep at least one bio in flight */
+ continue;
+ }
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index c6e1f76a6ee0..8276c3c42894 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -6254,8 +6254,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
+
+ ret = 0;
+ if (!pages || nr_pages > got_pages) {
+- kfree(vmas);
+- kfree(pages);
++ kvfree(vmas);
++ kvfree(pages);
+ pages = kvmalloc_array(nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ vmas = kvmalloc_array(nr_pages,
+@@ -6488,11 +6488,9 @@ static int io_uring_release(struct inode *inode, struct file *file)
+ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+ {
+- struct io_kiocb *req;
+- DEFINE_WAIT(wait);
+-
+ while (!list_empty_careful(&ctx->inflight_list)) {
+- struct io_kiocb *cancel_req = NULL;
++ struct io_kiocb *cancel_req = NULL, *req;
++ DEFINE_WAIT(wait);
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
+@@ -6531,7 +6529,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ * all we had, then we're done with this request.
+ */
+ if (refcount_sub_and_test(2, &cancel_req->refs)) {
+- io_put_req(cancel_req);
++ io_free_req(cancel_req);
++ finish_wait(&ctx->inflight_wait, &wait);
+ continue;
+ }
+ }
+@@ -6539,8 +6538,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
+ io_put_req(cancel_req);
+ schedule();
++ finish_wait(&ctx->inflight_wait, &wait);
+ }
+- finish_wait(&ctx->inflight_wait, &wait);
+ }
+
+ static int io_uring_flush(struct file *file, void *data)
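The io_uring_cancel_files() hunk narrows DEFINE_WAIT(wait) to a single loop iteration and makes every way out of an iteration, the early continue as well as the schedule() path, call finish_wait(), where previously a single finish_wait() after the loop could leave a stale wait entry queued across iterations. The shape of the corrected loop; the request selection and the prepare_to_wait() call live in parts of the function this hunk does not show, so their placement here is an assumption:

	while (!list_empty_careful(&ctx->inflight_list)) {
		DEFINE_WAIT(wait);	/* fresh wait entry every pass */

		/* ... pick cancel_req; prepare_to_wait(&ctx->inflight_wait, &wait, ...); ... */

		if (request_already_gone) {	/* hypothetical fast path */
			finish_wait(&ctx->inflight_wait, &wait);
			continue;	/* never loop with a queued wait entry */
		}

		schedule();
		finish_wait(&ctx->inflight_wait, &wait);
	}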
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 445eef41bfaf..91b58c897f92 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2780,6 +2780,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
+ if (!nilfs->ns_writer)
+ return -ENOMEM;
+
++ inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
++
+ err = nilfs_segctor_start_thread(nilfs->ns_writer);
+ if (err) {
+ kfree(nilfs->ns_writer);
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index deb13f0a0f7d..d24548ed31b9 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -171,6 +171,10 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
+ if (!fsnotify_iter_should_report_type(iter_info, type))
+ continue;
+ mark = iter_info->marks[type];
++
++ /* Apply ignore mask regardless of ISDIR and ON_CHILD flags */
++ marks_ignored_mask |= mark->ignored_mask;
++
+ /*
+ * If the event is on dir and this mark doesn't care about
+ * events on dir, don't send it!
+@@ -188,7 +192,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
+ continue;
+
+ marks_mask |= mark->mask;
+- marks_ignored_mask |= mark->ignored_mask;
+ }
+
+ test_mask = event_mask & marks_mask & ~marks_ignored_mask;
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 9fc47c2e078d..3190dac8f330 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -40,7 +40,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
+ {
+ ssize_t list_size, size, value_size = 0;
+ char *buf, *name, *value = NULL;
+- int uninitialized_var(error);
++ int error = 0;
+ size_t slen;
+
+ if (!(old->d_inode->i_opflags & IOP_XATTR) ||
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 3d3f2b8bdae5..c2424330209a 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -339,6 +339,9 @@ int ovl_check_fb_len(struct ovl_fb *fb, int fb_len);
+
+ static inline int ovl_check_fh_len(struct ovl_fh *fh, int fh_len)
+ {
++ if (fh_len < sizeof(struct ovl_fh))
++ return -EINVAL;
++
+ return ovl_check_fb_len(&fh->fb, fh_len - OVL_FH_WIRE_OFFSET);
+ }
+
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 6da18316d209..36b6819f12fe 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -448,7 +448,7 @@ const struct inode_operations proc_link_inode_operations = {
+
+ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ {
+- struct inode *inode = new_inode_pseudo(sb);
++ struct inode *inode = new_inode(sb);
+
+ if (inode) {
+ inode->i_ino = de->low_ino;
+diff --git a/fs/proc/self.c b/fs/proc/self.c
+index 57c0a1047250..32af065397f8 100644
+--- a/fs/proc/self.c
++++ b/fs/proc/self.c
+@@ -43,7 +43,7 @@ int proc_setup_self(struct super_block *s)
+ inode_lock(root_inode);
+ self = d_alloc_name(s->s_root, "self");
+ if (self) {
+- struct inode *inode = new_inode_pseudo(s);
++ struct inode *inode = new_inode(s);
+ if (inode) {
+ inode->i_ino = self_inum;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
+index f61ae53533f5..fac9e50b33a6 100644
+--- a/fs/proc/thread_self.c
++++ b/fs/proc/thread_self.c
+@@ -43,7 +43,7 @@ int proc_setup_thread_self(struct super_block *s)
+ inode_lock(root_inode);
+ thread_self = d_alloc_name(s->s_root, "thread-self");
+ if (thread_self) {
+- struct inode *inode = new_inode_pseudo(s);
++ struct inode *inode = new_inode(s);
+ if (inode) {
+ inode->i_ino = thread_self_inum;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index e00f41aa8ec4..39da8d8b561d 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -535,6 +535,7 @@
+ \
+ RO_EXCEPTION_TABLE \
+ NOTES \
++ BTF \
+ \
+ . = ALIGN((align)); \
+ __end_rodata = .;
+@@ -621,6 +622,20 @@
+ __stop___ex_table = .; \
+ }
+
++/*
++ * .BTF
++ */
++#ifdef CONFIG_DEBUG_INFO_BTF
++#define BTF \
++ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
++ __start_BTF = .; \
++ *(.BTF) \
++ __stop_BTF = .; \
++ }
++#else
++#define BTF
++#endif
++
+ /*
+ * Init task
+ */
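The BTF linker-script macro above is what defines the __start_BTF/__stop_BTF bracket that the kernel/bpf hunks further down switch to; consumers treat the pair purely as address markers around the raw .BTF payload. A sketch of the consuming side, matching the later btf.c and sysfs_btf.c changes:

	/* Weak: the symbols are absent when CONFIG_DEBUG_INFO_BTF is off. */
	extern char __weak __start_BTF[];
	extern char __weak __stop_BTF[];

	static size_t btf_section_size(void)
	{
		if (!__start_BTF)	/* no .BTF linked into this image */
			return 0;
		return __stop_BTF - __start_BTF;
	}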
+diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
+index f236f5b931b2..7fdd7f355b52 100644
+--- a/include/linux/elfnote.h
++++ b/include/linux/elfnote.h
+@@ -54,7 +54,7 @@
+ .popsection ;
+
+ #define ELFNOTE(name, type, desc) \
+- ELFNOTE_START(name, type, "") \
++ ELFNOTE_START(name, type, "a") \
+ desc ; \
+ ELFNOTE_END
+
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index b2a7159f66da..67b65176b5f2 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -1394,8 +1394,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ }
+ #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
+
+-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+- unsigned long start, unsigned long end, bool blockable);
++void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
++ unsigned long start, unsigned long end);
+
+ #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
+ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 96deeecd9179..9b9f48489576 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -669,6 +669,7 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
+ }
+
+ extern void kvfree(const void *addr);
++extern void kvfree_sensitive(const void *addr, size_t len);
+
+ /*
+ * Mapcount of compound page as a whole, does not include mapped sub-pages.
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index a0d8b41850b2..693cae9bfe66 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -139,7 +139,8 @@ struct padata_shell {
+ /**
+ * struct padata_instance - The overall control structure.
+ *
+- * @node: Used by CPU hotplug.
++ * @cpu_online_node: Linkage for CPU online callback.
++ * @cpu_dead_node: Linkage for CPU offline callback.
+ * @parallel_wq: The workqueue used for parallel work.
+ * @serial_wq: The workqueue used for serial work.
+ * @pslist: List of padata_shell objects attached to this instance.
+@@ -150,7 +151,8 @@ struct padata_shell {
+ * @flags: padata flags.
+ */
+ struct padata_instance {
+- struct hlist_node node;
++ struct hlist_node cpu_online_node;
++ struct hlist_node cpu_dead_node;
+ struct workqueue_struct *parallel_wq;
+ struct workqueue_struct *serial_wq;
+ struct list_head pslist;
+diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h
+index a67065c403c3..ac01502763bf 100644
+--- a/include/linux/ptdump.h
++++ b/include/linux/ptdump.h
+@@ -14,6 +14,7 @@ struct ptdump_state {
+ /* level is 0:PGD to 4:PTE, or -1 if unknown */
+ void (*note_page)(struct ptdump_state *st, unsigned long addr,
+ int level, unsigned long val);
++ void (*effective_prot)(struct ptdump_state *st, int level, u64 val);
+ const struct ptdump_range *range;
+ };
+
+diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
+index 86281ac7c305..860e0f843c12 100644
+--- a/include/linux/set_memory.h
++++ b/include/linux/set_memory.h
+@@ -26,7 +26,7 @@ static inline int set_direct_map_default_noflush(struct page *page)
+ #endif
+
+ #ifndef set_mce_nospec
+-static inline int set_mce_nospec(unsigned long pfn)
++static inline int set_mce_nospec(unsigned long pfn, bool unmap)
+ {
+ return 0;
+ }
+diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h
+index 5604818d137e..5be313cbf7d7 100644
+--- a/include/media/videobuf2-dma-contig.h
++++ b/include/media/videobuf2-dma-contig.h
+@@ -25,7 +25,7 @@ vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
+ }
+
+ int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size);
+-void vb2_dma_contig_clear_max_seg_size(struct device *dev);
++static inline void vb2_dma_contig_clear_max_seg_size(struct device *dev) { }
+
+ extern const struct vb2_mem_ops vb2_dma_contig_memops;
+
+diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
+index d0019d3395cf..59802eb8d2cc 100644
+--- a/include/net/inet_hashtables.h
++++ b/include/net/inet_hashtables.h
+@@ -185,6 +185,12 @@ static inline spinlock_t *inet_ehash_lockp(
+
+ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
+
++static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
++{
++ kfree(h->lhash2);
++ h->lhash2 = NULL;
++}
++
+ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
+ {
+ kvfree(hashinfo->ehash_locks);
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index a49d37140a64..591cd9e4692c 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -676,7 +676,7 @@ struct iscsi_session {
+ atomic_t session_logout;
+ atomic_t session_reinstatement;
+ atomic_t session_stop_active;
+- atomic_t sleep_on_sess_wait_comp;
++ atomic_t session_close;
+ /* connection list */
+ struct list_head sess_conn_list;
+ struct list_head cr_active_list;
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 7787bdcb5d68..ff04f60c78d1 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -3477,8 +3477,8 @@ errout:
+ return ERR_PTR(err);
+ }
+
+-extern char __weak _binary__btf_vmlinux_bin_start[];
+-extern char __weak _binary__btf_vmlinux_bin_end[];
++extern char __weak __start_BTF[];
++extern char __weak __stop_BTF[];
+ extern struct btf *btf_vmlinux;
+
+ #define BPF_MAP_TYPE(_id, _ops)
+@@ -3605,9 +3605,8 @@ struct btf *btf_parse_vmlinux(void)
+ }
+ env->btf = btf;
+
+- btf->data = _binary__btf_vmlinux_bin_start;
+- btf->data_size = _binary__btf_vmlinux_bin_end -
+- _binary__btf_vmlinux_bin_start;
++ btf->data = __start_BTF;
++ btf->data_size = __stop_BTF - __start_BTF;
+
+ err = btf_parse_hdr(env);
+ if (err)
+diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
+index 7ae5dddd1fe6..3b495773de5a 100644
+--- a/kernel/bpf/sysfs_btf.c
++++ b/kernel/bpf/sysfs_btf.c
+@@ -9,15 +9,15 @@
+ #include <linux/sysfs.h>
+
+ /* See scripts/link-vmlinux.sh, gen_btf() func for details */
+-extern char __weak _binary__btf_vmlinux_bin_start[];
+-extern char __weak _binary__btf_vmlinux_bin_end[];
++extern char __weak __start_BTF[];
++extern char __weak __stop_BTF[];
+
+ static ssize_t
+ btf_vmlinux_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t len)
+ {
+- memcpy(buf, _binary__btf_vmlinux_bin_start + off, len);
++ memcpy(buf, __start_BTF + off, len);
+ return len;
+ }
+
+@@ -30,15 +30,14 @@ static struct kobject *btf_kobj;
+
+ static int __init btf_vmlinux_init(void)
+ {
+- if (!_binary__btf_vmlinux_bin_start)
++ if (!__start_BTF)
+ return 0;
+
+ btf_kobj = kobject_create_and_add("btf", kernel_kobj);
+ if (!btf_kobj)
+ return -ENOMEM;
+
+- bin_attr_btf_vmlinux.size = _binary__btf_vmlinux_bin_end -
+- _binary__btf_vmlinux_bin_start;
++ bin_attr_btf_vmlinux.size = __stop_BTF - __start_BTF;
+
+ return sysfs_create_bin_file(btf_kobj, &bin_attr_btf_vmlinux);
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 29ace472f916..ce9fd7605190 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -93,11 +93,11 @@ static void remote_function(void *data)
+ * @info: the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+- * be on the current CPU, which just calls the function directly
++ * be on the current CPU, which just calls the function directly. This will
++ * retry due to any failures in smp_call_function_single(), such as if the
++ * task_cpu() goes offline concurrently.
+ *
+- * returns: @func return value, or
+- * -ESRCH - when the process isn't running
+- * -EAGAIN - when the process moved away
++ * returns @func return value or -ESRCH when the process isn't running
+ */
+ static int
+ task_function_call(struct task_struct *p, remote_function_f func, void *info)
+@@ -110,11 +110,16 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info)
+ };
+ int ret;
+
+- do {
+- ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
+- if (!ret)
+- ret = data.ret;
+- } while (ret == -EAGAIN);
++ for (;;) {
++ ret = smp_call_function_single(task_cpu(p), remote_function,
++ &data, 1);
++ ret = !ret ? data.ret : -EAGAIN;
++
++ if (ret != -EAGAIN)
++ break;
++
++ cond_resched();
++ }
+
+ return ret;
+ }
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 62082597d4a2..fee14ae90d96 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -703,7 +703,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
+ struct padata_instance *pinst;
+ int ret;
+
+- pinst = hlist_entry_safe(node, struct padata_instance, node);
++ pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
+ if (!pinst_has_cpu(pinst, cpu))
+ return 0;
+
+@@ -718,7 +718,7 @@ static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
+ struct padata_instance *pinst;
+ int ret;
+
+- pinst = hlist_entry_safe(node, struct padata_instance, node);
++ pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
+ if (!pinst_has_cpu(pinst, cpu))
+ return 0;
+
+@@ -734,8 +734,9 @@ static enum cpuhp_state hp_online;
+ static void __padata_free(struct padata_instance *pinst)
+ {
+ #ifdef CONFIG_HOTPLUG_CPU
+- cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node);
+- cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
++ &pinst->cpu_dead_node);
++ cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
+ #endif
+
+ WARN_ON(!list_empty(&pinst->pslist));
+@@ -939,9 +940,10 @@ static struct padata_instance *padata_alloc(const char *name,
+ mutex_init(&pinst->lock);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+- cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
++ cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
++ &pinst->cpu_online_node);
+ cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
+- &pinst->node);
++ &pinst->cpu_dead_node);
+ #endif
+
+ put_online_cpus();
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 603d3d3cbf77..efb15f0f464b 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2682,7 +2682,7 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr)
+ /*
+ * We don't care about NUMA placement if we don't have memory.
+ */
+- if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
++ if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
+ return;
+
+ /*
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 89260aa342d6..972eb01f4d0b 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -740,8 +740,9 @@ int bitmap_parse(const char *start, unsigned int buflen,
+ int chunks = BITS_TO_U32(nmaskbits);
+ u32 *bitmap = (u32 *)maskp;
+ int unset_bit;
++ int chunk;
+
+- while (1) {
++ for (chunk = 0; ; chunk++) {
+ end = bitmap_find_region_reverse(start, end);
+ if (start > end)
+ break;
+@@ -749,7 +750,11 @@ int bitmap_parse(const char *start, unsigned int buflen,
+ if (!chunks--)
+ return -EOVERFLOW;
+
+- end = bitmap_get_x32_reverse(start, end, bitmap++);
++#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
++ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk ^ 1]);
++#else
++ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk]);
++#endif
+ if (IS_ERR(end))
+ return PTR_ERR(end);
+ }
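The bitmap_parse() hunk writes each parsed 32-bit chunk to bitmap[chunk ^ 1] on 64-bit big-endian kernels because the bitmap is really an array of unsigned long: on BE64 the half holding bits 0..31 of each word is the second u32 of the pair, the reverse of the order the parser produces chunks in. A worked illustration of the layout (the result is host-endianness dependent, so this is explanatory only):

	#include <stdint.h>
	#include <string.h>

	static void show_word_halves(void)
	{
		uint64_t word = 0x00000001ffffffffULL;	/* bits 0..31 and bit 32 set */
		uint32_t halves[2];

		memcpy(halves, &word, sizeof(word));
		/*
		 * big-endian 64-bit:  halves[0] == 0x00000001 (bits 32..63)
		 *                     halves[1] == 0xffffffff (bits  0..31)
		 * little-endian:      the opposite, so no swap is needed,
		 * which is why the chunk ^ 1 index is guarded by
		 * CONFIG_64BIT && __BIG_ENDIAN.
		 */
	}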
+diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
+index 717c940112f9..8ad5ba2b86e2 100644
+--- a/lib/lzo/lzo1x_compress.c
++++ b/lib/lzo/lzo1x_compress.c
+@@ -268,6 +268,19 @@ m_len_done:
+ *op++ = (M4_MARKER | ((m_off >> 11) & 8)
+ | (m_len - 2));
+ else {
++ if (unlikely(((m_off & 0x403f) == 0x403f)
++ && (m_len >= 261)
++ && (m_len <= 264))
++ && likely(bitstream_version)) {
++ // Under lzo-rle, block copies
++ // for 261 <= length <= 264 and
++			// (distance & 0x803f) == 0x803f
++ // can result in ambiguous
++ // output. Adjust length
++ // to 260 to prevent ambiguity.
++ ip -= m_len - 260;
++ m_len = 260;
++ }
+ m_len -= M4_MAX_LEN;
+ *op++ = (M4_MARKER | ((m_off >> 11) & 8));
+ while (unlikely(m_len > 255)) {
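The guard added above is the compressor half of the lzo.txt clarification earlier in this patch: under lzo-rle (bitstream_version != 0), an M4 copy whose encoded distance satisfies (distance & 0x803f) == 0x803f with length 261..264 decodes the same way as a run of zeros, so the compressor must never emit one. Spelling out the clamp with the hunk's own ip/m_len variables, for a match that lands in the ambiguous window (illustrative arithmetic only, as a fragment):

	/* Suppose m_len == 263 and m_off matches the ambiguous pattern. */
	ip -= m_len - 260;	/* give 3 input bytes back to be matched later */
	m_len = 260;		/* the longest length below the 261..264 window */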
+diff --git a/mm/gup.c b/mm/gup.c
+index 1b521e0ac1de..b6a214e405f6 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -176,13 +176,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
+ }
+
+ /*
+- * FOLL_FORCE can write to even unwritable pte's, but only
+- * after we've gone through a COW cycle and they are dirty.
++ * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
++ * but only after we've gone through a COW cycle and they are dirty.
+ */
+ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+ {
+- return pte_write(pte) ||
+- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
++ return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
++}
++
++/*
++ * A (separate) COW fault might break the page the other way and
++ * get_user_pages() would return the page from what is now the wrong
++ * VM. So we need to force a COW break at GUP time even for reads.
++ */
++static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
++{
++ return is_cow_mapping(vma->vm_flags) && (flags & (FOLL_GET | FOLL_PIN));
+ }
+
+ static struct page *follow_page_pte(struct vm_area_struct *vma,
+@@ -848,12 +857,18 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ goto out;
+ }
+ if (is_vm_hugetlb_page(vma)) {
++ if (should_force_cow_break(vma, foll_flags))
++ foll_flags |= FOLL_WRITE;
+ i = follow_hugetlb_page(mm, vma, pages, vmas,
+ &start, &nr_pages, i,
+- gup_flags, nonblocking);
++ foll_flags, nonblocking);
+ continue;
+ }
+ }
++
++ if (should_force_cow_break(vma, foll_flags))
++ foll_flags |= FOLL_WRITE;
++
+ retry:
+ /*
+ * If we have a pending SIGKILL, don't keep faulting pages and
+@@ -2364,6 +2379,10 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end)
+ *
+ * If the architecture does not support this function, simply return with no
+ * pages pinned.
++ *
++ * Careful, careful! COW breaking can go either way, so a non-write
++ * access can get ambiguous page results. If you call this function without
++ * 'write' set, you'd better be sure that you're ok with that ambiguity.
+ */
+ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+@@ -2391,6 +2410,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ *
+ * We do not adopt an rcu_read_lock(.) here as we also want to
+ * block IPIs that come from THPs splitting.
++ *
++ * NOTE! We allow read-only gup_fast() here, but you'd better be
++ * careful about possible COW pages. You'll get _a_ COW page, but
++ * not necessarily the one you intended to get depending on what
++ * COW event happens after this. COW may break the page copy in a
++ * random direction.
+ */
+
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
+@@ -2448,10 +2473,17 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
+ if (unlikely(!access_ok((void __user *)start, len)))
+ return -EFAULT;
+
++ /*
++ * The FAST_GUP case requires FOLL_WRITE even for pure reads,
++ * because get_user_pages() may need to cause an early COW in
++ * order to avoid confusing the normal COW routines. So only
++ * targets that are already writable are safe to do by just
++ * looking at the page tables.
++ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
+ gup_fast_permitted(start, end)) {
+ local_irq_disable();
+- gup_pgd_range(addr, end, gup_flags, pages, &nr);
++ gup_pgd_range(addr, end, gup_flags | FOLL_WRITE, pages, &nr);
+ local_irq_enable();
+ ret = nr;
+ }
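Both can_follow_write_pte() above and can_follow_write_pmd() in the next file drop their FOLL_FORCE condition because the new should_force_cow_break() makes the break unconditional for pins: any FOLL_GET or FOLL_PIN of a private (COW-able) mapping is upgraded to a write fault up front, so the caller always gets its own copy and a later COW in either direction cannot swap the page out from under it. The per-VMA decision in isolation, as a fragment assuming a vma in scope:

	/* Sketch: what __get_user_pages() now does before faulting a VMA. */
	unsigned int foll_flags = FOLL_GET;		/* a plain read-only pin */

	if (should_force_cow_break(vma, foll_flags))	/* private mapping + pin */
		foll_flags |= FOLL_WRITE;		/* break COW eagerly */
	/* Shared mappings take no extra fault: there is nothing to break. */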
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 24ad53b4dfc0..4ffaeb9dd4af 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1465,13 +1465,12 @@ out_unlock:
+ }
+
+ /*
+- * FOLL_FORCE can write to even unwritable pmd's, but only
+- * after we've gone through a COW cycle and they are dirty.
++ * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
++ * but only after we've gone through a COW cycle and they are dirty.
+ */
+ static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+ {
+- return pmd_write(pmd) ||
+- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
++ return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
+ }
+
+ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+diff --git a/mm/ptdump.c b/mm/ptdump.c
+index 26208d0d03b7..f4ce916f5602 100644
+--- a/mm/ptdump.c
++++ b/mm/ptdump.c
+@@ -36,6 +36,9 @@ static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
+ return note_kasan_page_table(walk, addr);
+ #endif
+
++ if (st->effective_prot)
++ st->effective_prot(st, 0, pgd_val(val));
++
+ if (pgd_leaf(val))
+ st->note_page(st, addr, 0, pgd_val(val));
+
+@@ -53,6 +56,9 @@ static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
+ return note_kasan_page_table(walk, addr);
+ #endif
+
++ if (st->effective_prot)
++ st->effective_prot(st, 1, p4d_val(val));
++
+ if (p4d_leaf(val))
+ st->note_page(st, addr, 1, p4d_val(val));
+
+@@ -70,6 +76,9 @@ static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
+ return note_kasan_page_table(walk, addr);
+ #endif
+
++ if (st->effective_prot)
++ st->effective_prot(st, 2, pud_val(val));
++
+ if (pud_leaf(val))
+ st->note_page(st, addr, 2, pud_val(val));
+
+@@ -87,6 +96,8 @@ static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
+ return note_kasan_page_table(walk, addr);
+ #endif
+
++ if (st->effective_prot)
++ st->effective_prot(st, 3, pmd_val(val));
+ if (pmd_leaf(val))
+ st->note_page(st, addr, 3, pmd_val(val));
+
+@@ -97,8 +108,12 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+ {
+ struct ptdump_state *st = walk->private;
++ pte_t val = READ_ONCE(*pte);
++
++ if (st->effective_prot)
++ st->effective_prot(st, 4, pte_val(val));
+
+- st->note_page(st, addr, 4, pte_val(READ_ONCE(*pte)));
++ st->note_page(st, addr, 4, pte_val(val));
+
+ return 0;
+ }
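The new optional effective_prot() hook fires once per entry at every level (0:PGD through 4:PTE, matching the note_page() comment in the header) before the corresponding note_page() call, so an architecture can fold each level's permission bits into an effective value as the walk descends. A hypothetical hook user, showing the container_of() wiring the callback signature implies; the accumulation rule here is only illustrative:

	struct my_ptdump_state {		/* hypothetical wrapper */
		struct ptdump_state ptdump;
		u64 entry[5];			/* last-seen entry per level */
	};

	static void my_effective_prot(struct ptdump_state *st, int level, u64 val)
	{
		struct my_ptdump_state *ms =
			container_of(st, struct my_ptdump_state, ptdump);

		/* An arch would typically combine the permission bits of
		 * levels 0..level here to compute effective rights. */
		ms->entry[level] = val;
	}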
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 1907cb2903c7..4b045f12177f 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -1303,7 +1303,8 @@ void __init create_kmalloc_caches(slab_flags_t flags)
+ kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
+ kmalloc_info[i].name[KMALLOC_DMA],
+ kmalloc_info[i].size,
+- SLAB_CACHE_DMA | flags, 0, 0);
++ SLAB_CACHE_DMA | flags, 0,
++ kmalloc_info[i].size);
+ }
+ }
+ #endif
+diff --git a/mm/slub.c b/mm/slub.c
+index 3b17e774831a..fd886d24ee29 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -5778,8 +5778,10 @@ static int sysfs_slab_add(struct kmem_cache *s)
+
+ s->kobj.kset = kset;
+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
+- if (err)
++ if (err) {
++ kobject_put(&s->kobj);
+ goto out;
++ }
+
+ err = sysfs_create_group(&s->kobj, &slab_attr_group);
+ if (err)
+diff --git a/mm/util.c b/mm/util.c
+index 988d11e6c17c..dc1c877d5481 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -604,6 +604,24 @@ void kvfree(const void *addr)
+ }
+ EXPORT_SYMBOL(kvfree);
+
++/**
++ * kvfree_sensitive - Free a data object containing sensitive information.
++ * @addr: address of the data object to be freed.
++ * @len: length of the data object.
++ *
++ * Use the special memzero_explicit() function to clear the content of a
++ * kvmalloc'ed object containing sensitive data to make sure that the
++ * compiler won't optimize out the data clearing.
++ */
++void kvfree_sensitive(const void *addr, size_t len)
++{
++ if (likely(!ZERO_OR_NULL_PTR(addr))) {
++ memzero_explicit((void *)addr, len);
++ kvfree(addr);
++ }
++}
++EXPORT_SYMBOL(kvfree_sensitive);
++
+ static inline void *__page_rmapping(struct page *page)
+ {
+ unsigned long mapping;
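kvfree_sensitive() above packages the memzero_explicit() plus kvfree() sequence that the security/keys hunks below previously open-coded, and that internal.h's now-deleted __kvzfree() duplicated with a plain memset(). A typical call site, mirroring the keyctl usage; use_secret() is hypothetical:

	static long use_secret(size_t plen)
	{
		void *payload = kvmalloc(plen, GFP_KERNEL);

		if (!payload)
			return -ENOMEM;

		/* ... copy the secret in and use it ... */

		/* Zeroes plen bytes, then frees; a NULL pointer is ignored,
		 * same as kvfree(). */
		kvfree_sensitive(payload, plen);
		return 0;
	}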
+diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
+index 37908561a64b..b18cdf03edb3 100644
+--- a/net/bridge/br_arp_nd_proxy.c
++++ b/net/bridge/br_arp_nd_proxy.c
+@@ -276,6 +276,10 @@ static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p,
+ ns_olen = request->len - (skb_network_offset(request) +
+ sizeof(struct ipv6hdr)) - sizeof(*ns);
+ for (i = 0; i < ns_olen - 1; i += (ns->opt[i + 1] << 3)) {
++ if (!ns->opt[i + 1]) {
++ kfree_skb(reply);
++ return;
++ }
+ if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
+ daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
+ break;
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 4af8a98fe784..c13b6609474b 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -1139,14 +1139,14 @@ static int __init dccp_init(void)
+ inet_hashinfo_init(&dccp_hashinfo);
+ rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
+ if (rc)
+- goto out_fail;
++ goto out_free_percpu;
+ rc = -ENOBUFS;
+ dccp_hashinfo.bind_bucket_cachep =
+ kmem_cache_create("dccp_bind_bucket",
+ sizeof(struct inet_bind_bucket), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!dccp_hashinfo.bind_bucket_cachep)
+- goto out_free_percpu;
++ goto out_free_hashinfo2;
+
+ /*
+ * Size and allocate the main established and bind bucket
+@@ -1242,6 +1242,8 @@ out_free_dccp_ehash:
+ free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
+ out_free_bind_bucket_cachep:
+ kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
++out_free_hashinfo2:
++ inet_hashinfo2_free_mod(&dccp_hashinfo);
+ out_free_percpu:
+ percpu_counter_destroy(&dccp_orphan_count);
+ out_fail:
+@@ -1265,6 +1267,7 @@ static void __exit dccp_fini(void)
+ kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
+ dccp_ackvec_exit();
+ dccp_sysctl_exit();
++ inet_hashinfo2_free_mod(&dccp_hashinfo);
+ percpu_counter_destroy(&dccp_orphan_count);
+ }
+
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 18d05403d3b5..5af97b4f5df3 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -183,14 +183,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ retv = -EBUSY;
+ break;
+ }
+- }
+- if (sk->sk_protocol == IPPROTO_TCP &&
+- sk->sk_prot != &tcpv6_prot) {
+- retv = -EBUSY;
++ } else if (sk->sk_protocol == IPPROTO_TCP) {
++ if (sk->sk_prot != &tcpv6_prot) {
++ retv = -EBUSY;
++ break;
++ }
++ } else {
+ break;
+ }
+- if (sk->sk_protocol != IPPROTO_TCP)
+- break;
++
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ retv = -ENOTCONN;
+ break;
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 9f357aa22b94..bcbba0bef1c2 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -513,15 +513,58 @@ static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
+ kfree(attrbuf);
+ }
+
+-static int genl_lock_start(struct netlink_callback *cb)
++struct genl_start_context {
++ const struct genl_family *family;
++ struct nlmsghdr *nlh;
++ struct netlink_ext_ack *extack;
++ const struct genl_ops *ops;
++ int hdrlen;
++};
++
++static int genl_start(struct netlink_callback *cb)
+ {
+- const struct genl_ops *ops = genl_dumpit_info(cb)->ops;
++ struct genl_start_context *ctx = cb->data;
++ const struct genl_ops *ops = ctx->ops;
++ struct genl_dumpit_info *info;
++ struct nlattr **attrs = NULL;
+ int rc = 0;
+
++ if (ops->validate & GENL_DONT_VALIDATE_DUMP)
++ goto no_attrs;
++
++ if (ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
++ return -EINVAL;
++
++ attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
++ ops, ctx->hdrlen,
++ GENL_DONT_VALIDATE_DUMP_STRICT,
++ true);
++ if (IS_ERR(attrs))
++ return PTR_ERR(attrs);
++
++no_attrs:
++ info = genl_dumpit_info_alloc();
++ if (!info) {
++ kfree(attrs);
++ return -ENOMEM;
++ }
++ info->family = ctx->family;
++ info->ops = ops;
++ info->attrs = attrs;
++
++ cb->data = info;
+ if (ops->start) {
+- genl_lock();
++ if (!ctx->family->parallel_ops)
++ genl_lock();
+ rc = ops->start(cb);
+- genl_unlock();
++ if (!ctx->family->parallel_ops)
++ genl_unlock();
++ }
++
++ if (rc) {
++ kfree(attrs);
++ genl_dumpit_info_free(info);
++ cb->data = NULL;
+ }
+ return rc;
+ }
+@@ -548,7 +591,7 @@ static int genl_lock_done(struct netlink_callback *cb)
+ rc = ops->done(cb);
+ genl_unlock();
+ }
+- genl_family_rcv_msg_attrs_free(info->family, info->attrs, true);
++ genl_family_rcv_msg_attrs_free(info->family, info->attrs, false);
+ genl_dumpit_info_free(info);
+ return rc;
+ }
+@@ -573,43 +616,23 @@ static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
+ const struct genl_ops *ops,
+ int hdrlen, struct net *net)
+ {
+- struct genl_dumpit_info *info;
+- struct nlattr **attrs = NULL;
++ struct genl_start_context ctx;
+ int err;
+
+ if (!ops->dumpit)
+ return -EOPNOTSUPP;
+
+- if (ops->validate & GENL_DONT_VALIDATE_DUMP)
+- goto no_attrs;
+-
+- if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+- return -EINVAL;
+-
+- attrs = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
+- ops, hdrlen,
+- GENL_DONT_VALIDATE_DUMP_STRICT,
+- true);
+- if (IS_ERR(attrs))
+- return PTR_ERR(attrs);
+-
+-no_attrs:
+- /* Allocate dumpit info. It is going to be freed by done() callback. */
+- info = genl_dumpit_info_alloc();
+- if (!info) {
+- genl_family_rcv_msg_attrs_free(family, attrs, true);
+- return -ENOMEM;
+- }
+-
+- info->family = family;
+- info->ops = ops;
+- info->attrs = attrs;
++ ctx.family = family;
++ ctx.nlh = nlh;
++ ctx.extack = extack;
++ ctx.ops = ops;
++ ctx.hdrlen = hdrlen;
+
+ if (!family->parallel_ops) {
+ struct netlink_dump_control c = {
+ .module = family->module,
+- .data = info,
+- .start = genl_lock_start,
++ .data = &ctx,
++ .start = genl_start,
+ .dump = genl_lock_dumpit,
+ .done = genl_lock_done,
+ };
+@@ -617,12 +640,11 @@ no_attrs:
+ genl_unlock();
+ err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+ genl_lock();
+-
+ } else {
+ struct netlink_dump_control c = {
+ .module = family->module,
+- .data = info,
+- .start = ops->start,
++ .data = &ctx,
++ .start = genl_start,
+ .dump = ops->dumpit,
+ .done = genl_parallel_done,
+ };
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 0d515d20b056..bf17b13009d1 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -221,7 +221,7 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
+ accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
+ total = accounted;
+
+- while (rem) {
++ do {
+ if (!skb || skb->len >= mss) {
+ prev = skb;
+ skb = tipc_buf_acquire(mss, GFP_KERNEL);
+@@ -249,7 +249,7 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
+ skb_put(skb, cpy);
+ rem -= cpy;
+ total += msg_blocks(hdr) - curr;
+- }
++ } while (rem);
+ return total - accounted;
+ }
+
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index ac569e197bfa..d09ab4afbda4 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -113,9 +113,6 @@ vmlinux_link()
+ gen_btf()
+ {
+ local pahole_ver
+- local bin_arch
+- local bin_format
+- local bin_file
+
+ if ! [ -x "$(command -v ${PAHOLE})" ]; then
+ echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available"
+@@ -133,17 +130,16 @@ gen_btf()
+ info "BTF" ${2}
+ LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1}
+
+- # dump .BTF section into raw binary file to link with final vmlinux
+- bin_arch=$(LANG=C ${OBJDUMP} -f ${1} | grep architecture | \
+- cut -d, -f1 | cut -d' ' -f2)
+- bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \
+- awk '{print $4}')
+- bin_file=.btf.vmlinux.bin
+- ${OBJCOPY} --change-section-address .BTF=0 \
+- --set-section-flags .BTF=alloc -O binary \
+- --only-section=.BTF ${1} $bin_file
+- ${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \
+- --rename-section .data=.BTF $bin_file ${2}
++ # Create ${2} which contains just .BTF section but no symbols. Add
++ # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
++ # deletes all symbols including __start_BTF and __stop_BTF, which will
++ # be redefined in the linker script. Add 2>/dev/null to suppress GNU
++ # objcopy warnings: "empty loadable segment detected at ..."
++ ${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \
++ --strip-all ${1} ${2} 2>/dev/null
++ # Change e_type to ET_REL so that it can be used to link final vmlinux.
++ # Unlike GNU ld, lld does not allow an ET_EXEC input.
++ printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none
+ }
+
+ # Create ${2} .o file with all symbols from the ${1} object file
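The gen_btf() rewrite above ends by patching a single byte at offset 16 of the output object. That offset is e_type in the ELF header (it sits right after the 16-byte e_ident array), and the byte value 1 is ET_REL, which turns the objcopy output into a relocatable object that lld will accept as linker input. A small compile-time check of those two facts:

	#include <elf.h>
	#include <stddef.h>

	/* e_ident[EI_NIDENT] is 16 bytes, so e_type lives at offset 16 ... */
	_Static_assert(offsetof(Elf64_Ehdr, e_type) == 16, "e_type offset");
	/* ... and the \1 written by dd is ET_REL (on little-endian targets,
	 * where the low byte of the 16-bit e_type field comes first). */
	_Static_assert(ET_REL == 1, "ET_REL value");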
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index 6d0ca48ae9a5..153d35c20d3d 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -350,15 +350,4 @@ static inline void key_check(const struct key *key)
+ #define key_check(key) do {} while(0)
+
+ #endif
+-
+-/*
+- * Helper function to clear and free a kvmalloc'ed memory object.
+- */
+-static inline void __kvzfree(const void *addr, size_t len)
+-{
+- if (addr) {
+- memset((void *)addr, 0, len);
+- kvfree(addr);
+- }
+-}
+ #endif /* _INTERNAL_H */
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 5e01192e222a..edde63a63007 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -142,10 +142,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
+
+ key_ref_put(keyring_ref);
+ error3:
+- if (payload) {
+- memzero_explicit(payload, plen);
+- kvfree(payload);
+- }
++ kvfree_sensitive(payload, plen);
+ error2:
+ kfree(description);
+ error:
+@@ -360,7 +357,7 @@ long keyctl_update_key(key_serial_t id,
+
+ key_ref_put(key_ref);
+ error2:
+- __kvzfree(payload, plen);
++ kvfree_sensitive(payload, plen);
+ error:
+ return ret;
+ }
+@@ -914,7 +911,7 @@ can_read_key:
+ */
+ if (ret > key_data_len) {
+ if (unlikely(key_data))
+- __kvzfree(key_data, key_data_len);
++ kvfree_sensitive(key_data, key_data_len);
+ key_data_len = ret;
+ continue; /* Allocate buffer */
+ }
+@@ -923,7 +920,7 @@ can_read_key:
+ ret = -EFAULT;
+ break;
+ }
+- __kvzfree(key_data, key_data_len);
++ kvfree_sensitive(key_data, key_data_len);
+
+ key_put_out:
+ key_put(key);
+@@ -1225,10 +1222,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
+ keyctl_change_reqkey_auth(NULL);
+
+ error2:
+- if (payload) {
+- memzero_explicit(payload, plen);
+- kvfree(payload);
+- }
++ kvfree_sensitive(payload, plen);
+ error:
+ return ret;
+ }
+diff --git a/security/smack/smack.h b/security/smack/smack.h
+index 62529f382942..335d2411abe4 100644
+--- a/security/smack/smack.h
++++ b/security/smack/smack.h
+@@ -148,7 +148,6 @@ struct smk_net4addr {
+ struct smack_known *smk_label; /* label */
+ };
+
+-#if IS_ENABLED(CONFIG_IPV6)
+ /*
+ * An entry in the table identifying IPv6 hosts.
+ */
+@@ -159,9 +158,7 @@ struct smk_net6addr {
+ int smk_masks; /* mask size */
+ struct smack_known *smk_label; /* label */
+ };
+-#endif /* CONFIG_IPV6 */
+
+-#ifdef SMACK_IPV6_PORT_LABELING
+ /*
+ * An entry in the table identifying ports.
+ */
+@@ -174,7 +171,6 @@ struct smk_port_label {
+ short smk_sock_type; /* Socket type */
+ short smk_can_reuse;
+ };
+-#endif /* SMACK_IPV6_PORT_LABELING */
+
+ struct smack_known_list_elem {
+ struct list_head list;
+@@ -335,9 +331,7 @@ extern struct smack_known smack_known_web;
+ extern struct mutex smack_known_lock;
+ extern struct list_head smack_known_list;
+ extern struct list_head smk_net4addr_list;
+-#if IS_ENABLED(CONFIG_IPV6)
+ extern struct list_head smk_net6addr_list;
+-#endif /* CONFIG_IPV6 */
+
+ extern struct mutex smack_onlycap_lock;
+ extern struct list_head smack_onlycap_list;
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 8c61d175e195..14bf2f4aea3b 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -50,10 +50,8 @@
+ #define SMK_RECEIVING 1
+ #define SMK_SENDING 2
+
+-#ifdef SMACK_IPV6_PORT_LABELING
+-DEFINE_MUTEX(smack_ipv6_lock);
++static DEFINE_MUTEX(smack_ipv6_lock);
+ static LIST_HEAD(smk_ipv6_port_list);
+-#endif
+ static struct kmem_cache *smack_inode_cache;
+ struct kmem_cache *smack_rule_cache;
+ int smack_enabled;
+@@ -2320,7 +2318,6 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip)
+ return NULL;
+ }
+
+-#if IS_ENABLED(CONFIG_IPV6)
+ /*
+ * smk_ipv6_localhost - Check for local ipv6 host address
+ * @sip: the address
+@@ -2388,7 +2385,6 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
+
+ return NULL;
+ }
+-#endif /* CONFIG_IPV6 */
+
+ /**
+ * smack_netlabel - Set the secattr on a socket
+@@ -2477,7 +2473,6 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
+ return smack_netlabel(sk, sk_lbl);
+ }
+
+-#if IS_ENABLED(CONFIG_IPV6)
+ /**
+ * smk_ipv6_check - check Smack access
+ * @subject: subject Smack label
+@@ -2510,7 +2505,6 @@ static int smk_ipv6_check(struct smack_known *subject,
+ rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc);
+ return rc;
+ }
+-#endif /* CONFIG_IPV6 */
+
+ #ifdef SMACK_IPV6_PORT_LABELING
+ /**
+@@ -2599,6 +2593,7 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
+ mutex_unlock(&smack_ipv6_lock);
+ return;
+ }
++#endif
+
+ /**
+ * smk_ipv6_port_check - check Smack port access
+@@ -2661,7 +2656,6 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
+
+ return smk_ipv6_check(skp, object, address, act);
+ }
+-#endif /* SMACK_IPV6_PORT_LABELING */
+
+ /**
+ * smack_inode_setsecurity - set smack xattrs
+@@ -2836,24 +2830,21 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
+ return 0;
+ if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
+-#ifdef SMACK_IPV6_SECMARK_LABELING
+- struct smack_known *rsp;
+-#endif
++ struct smack_known *rsp = NULL;
+
+ if (addrlen < SIN6_LEN_RFC2133)
+ return 0;
+-#ifdef SMACK_IPV6_SECMARK_LABELING
+- rsp = smack_ipv6host_label(sip);
++ if (__is_defined(SMACK_IPV6_SECMARK_LABELING))
++ rsp = smack_ipv6host_label(sip);
+ if (rsp != NULL) {
+ struct socket_smack *ssp = sock->sk->sk_security;
+
+ rc = smk_ipv6_check(ssp->smk_out, rsp, sip,
+ SMK_CONNECTING);
+ }
+-#endif
+-#ifdef SMACK_IPV6_PORT_LABELING
+- rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
+-#endif
++ if (__is_defined(SMACK_IPV6_PORT_LABELING))
++ rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
++
+ return rc;
+ }
+ if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in))
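The Smack hunks convert #ifdef-guarded code into always-compiled branches tested with __is_defined(), the kernel's kconfig.h idiom for turning a preprocessor symbol into a 0/1 expression. The payoff is that both arms are parsed and type-checked in every configuration, and the disabled arm is discarded as dead code. A userspace analog of the idiom, with CONFIG_FOO as a placeholder:

    /* Userspace analog of the if (__is_defined(...)) pattern above: both
     * branches always compile, so a bit-rotted branch breaks the build
     * instead of hiding behind an #ifdef. CONFIG_FOO is a placeholder. */
    #include <stdio.h>

    #define CONFIG_FOO 1

    int main(void)
    {
            if (CONFIG_FOO)
                    printf("foo path\n");   /* kept by the optimizer */
            else
                    printf("base path\n");  /* compiled, then discarded */
            return 0;
    }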
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index e3e05c04dbd1..c21b656b3263 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -878,11 +878,21 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ else
+ rule += strlen(skp->smk_known) + 1;
+
++ if (rule > data + count) {
++ rc = -EOVERFLOW;
++ goto out;
++ }
++
+ ret = sscanf(rule, "%d", &maplevel);
+ if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
+ goto out;
+
+ rule += SMK_DIGITLEN;
++ if (rule > data + count) {
++ rc = -EOVERFLOW;
++ goto out;
++ }
++
+ ret = sscanf(rule, "%d", &catlen);
+ if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM)
+ goto out;
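The two checks added to smk_set_cipso() guard a cursor that is advanced by fixed-width fields over a user-supplied buffer; without them, a short write could push the cursor past the end of the copied data before sscanf() dereferences it. The pattern in isolation, with illustrative names and field widths:

    /* Sketch of the bounds check added above: validate the parse cursor
     * against the end of the user-copied, NUL-terminated buffer before
     * each fixed-width read. Names and the width of 4 are illustrative. */
    #include <stdio.h>

    static int parse_levels(const char *data, size_t count, int *lvl, int *cat)
    {
            const char *rule = data;

            if (rule > data + count || sscanf(rule, "%d", lvl) != 1)
                    return -1;
            rule += 4;                  /* fixed-width numeric field */
            if (rule > data + count || sscanf(rule, "%d", cat) != 1)
                    return -1;
            return 0;
    }

    int main(void)
    {
            int lvl, cat;
            const char buf[] = "2   5   ";

            if (!parse_levels(buf, sizeof(buf) - 1, &lvl, &cat))
                    printf("maplevel=%d catlen=%d\n", lvl, cat);
            return 0;
    }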
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index d5443eeb8b63..c936976e0e7b 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -138,6 +138,16 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
+ }
+ EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
+
++static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_group *group = &substream->self_group;
++
++ if (substream->pcm->nonatomic)
++ mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING);
++ else
++ spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
++}
++
+ /**
+ * snd_pcm_stream_unlock_irq - Unlock the PCM stream
+ * @substream: PCM substream
+@@ -2163,6 +2173,12 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
+ }
+ pcm_file = f.file->private_data;
+ substream1 = pcm_file->substream;
++
++ if (substream == substream1) {
++ res = -EINVAL;
++ goto _badf;
++ }
++
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group) {
+ res = -ENOMEM;
+@@ -2191,7 +2207,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
+ snd_pcm_stream_unlock_irq(substream);
+
+ snd_pcm_group_lock_irq(target_group, nonatomic);
+- snd_pcm_stream_lock(substream1);
++ snd_pcm_stream_lock_nested(substream1);
+ snd_pcm_group_assign(substream1, target_group);
+ refcount_inc(&target_group->refs);
+ snd_pcm_stream_unlock(substream1);
+@@ -2207,7 +2223,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
+
+ static void relink_to_local(struct snd_pcm_substream *substream)
+ {
+- snd_pcm_stream_lock(substream);
++ snd_pcm_stream_lock_nested(substream);
+ snd_pcm_group_assign(substream, &substream->self_group);
+ snd_pcm_stream_unlock(substream);
+ }
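snd_pcm_stream_lock_nested() exists to appease lockdep: linking and unlinking briefly hold two stream locks of the same lock class, which lockdep would otherwise flag as a potential self-deadlock. Passing SINGLE_DEPTH_NESTING to mutex_lock_nested()/spin_lock_nested() declares the inner acquisition as intentional one-level nesting. A kernel-style fragment of the idiom (illustrative, not the driver code):

    /* Two locks of the same class held at once: annotate the inner one
     * so lockdep treats it as deliberate single-depth nesting rather
     * than a recursive acquisition of the same lock. */
    mutex_lock(&group_a->mutex);                              /* outer */
    mutex_lock_nested(&group_b->mutex, SINGLE_DEPTH_NESTING); /* inner */
    /* ... move the substream from group_b into group_a ... */
    mutex_unlock(&group_b->mutex);
    mutex_unlock(&group_a->mutex);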
+diff --git a/sound/firewire/fireface/ff-protocol-latter.c b/sound/firewire/fireface/ff-protocol-latter.c
+index 0e4c3a9ed5e4..76ae568489ef 100644
+--- a/sound/firewire/fireface/ff-protocol-latter.c
++++ b/sound/firewire/fireface/ff-protocol-latter.c
+@@ -107,18 +107,18 @@ static int latter_allocate_resources(struct snd_ff *ff, unsigned int rate)
+ int err;
+
+ // Set the number of data blocks transferred in a second.
+- if (rate % 32000 == 0)
+- code = 0x00;
++ if (rate % 48000 == 0)
++ code = 0x04;
+ else if (rate % 44100 == 0)
+ code = 0x02;
+- else if (rate % 48000 == 0)
+- code = 0x04;
++ else if (rate % 32000 == 0)
++ code = 0x00;
+ else
+ return -EINVAL;
+
+ if (rate >= 64000 && rate < 128000)
+ code |= 0x08;
+- else if (rate >= 128000 && rate < 192000)
++ else if (rate >= 128000)
+ code |= 0x10;
+
+ reg = cpu_to_le32(code);
+@@ -140,7 +140,7 @@ static int latter_allocate_resources(struct snd_ff *ff, unsigned int rate)
+ if (curr_rate == rate)
+ break;
+ }
+- if (count == 10)
++ if (count > 10)
+ return -ETIMEDOUT;
+
+ for (i = 0; i < ARRAY_SIZE(amdtp_rate_table); ++i) {
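The reordering in latter_allocate_resources() matters because 96000 and 192000 are multiples of both 32000 and 48000: with the 32 kHz test first, 48 kHz-family rates were tagged with the wrong base code. A standalone sketch of the corrected mapping; the 0x08/0x10 values are read off the hunk, and their interpretation as double/quad speed bits is an inference:

    #include <stdio.h>

    /* Rate-code mapping as implied by the hunk: base code by rate
     * family (48 kHz checked first), plus a speed bit for rates at or
     * above 64 kHz. Sketch only. */
    static int rate_code(unsigned int rate)
    {
            int code;

            if (rate % 48000 == 0)
                    code = 0x04;
            else if (rate % 44100 == 0)
                    code = 0x02;
            else if (rate % 32000 == 0)
                    code = 0x00;
            else
                    return -1;

            if (rate >= 64000 && rate < 128000)
                    code |= 0x08;
            else if (rate >= 128000)
                    code |= 0x10;
            return code;
    }

    int main(void)
    {
            printf("96000  -> 0x%02x\n", rate_code(96000));  /* 0x0c; old order gave 0x08 */
            printf("192000 -> 0x%02x\n", rate_code(192000)); /* 0x14 */
            return 0;
    }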
+diff --git a/sound/firewire/fireface/ff-stream.c b/sound/firewire/fireface/ff-stream.c
+index 63b79c4a5405..5452115c0ef9 100644
+--- a/sound/firewire/fireface/ff-stream.c
++++ b/sound/firewire/fireface/ff-stream.c
+@@ -184,7 +184,6 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
+ */
+ if (!amdtp_stream_running(&ff->rx_stream)) {
+ int spd = fw_parent_device(ff->unit)->max_speed;
+- unsigned int ir_delay_cycle;
+
+ err = ff->spec->protocol->begin_session(ff, rate);
+ if (err < 0)
+@@ -200,14 +199,7 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
+ if (err < 0)
+ goto error;
+
+- // The device postpones start of transmission mostly for several
+- // cycles after receiving packets firstly.
+- if (ff->spec->protocol == &snd_ff_protocol_ff800)
+- ir_delay_cycle = 800; // = 100 msec
+- else
+- ir_delay_cycle = 16; // = 2 msec
+-
+- err = amdtp_domain_start(&ff->domain, ir_delay_cycle);
++ err = amdtp_domain_start(&ff->domain, 0);
+ if (err < 0)
+ goto error;
+
+diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c
+index ff3a05ad99c0..64610571a5e1 100644
+--- a/sound/isa/es1688/es1688.c
++++ b/sound/isa/es1688/es1688.c
+@@ -267,8 +267,10 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard,
+ return error;
+ }
+ error = snd_es1688_probe(card, dev);
+- if (error < 0)
++ if (error < 0) {
++ snd_card_free(card);
+ return error;
++ }
+ pnp_set_card_drvdata(pcard, card);
+ snd_es968_pnp_is_probed = 1;
+ return 0;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 8b015b27e9c7..29da0b03b895 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2659,6 +2659,9 @@ static const struct pci_device_id azx_ids[] = {
+ { PCI_DEVICE(0x1002, 0xab20),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ AZX_DCAPS_PM_RUNTIME },
++ { PCI_DEVICE(0x1002, 0xab28),
++ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
++ AZX_DCAPS_PM_RUNTIME },
+ { PCI_DEVICE(0x1002, 0xab38),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ AZX_DCAPS_PM_RUNTIME },
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e62d58872b6e..2c4575909441 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -8124,6 +8124,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60130},
+ {0x17, 0x90170110}),
++ SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC,
++ {0x14, 0x01014010},
++ {0x17, 0x90170120},
++ {0x18, 0x02a11030},
++ {0x19, 0x02a1103f},
++ {0x21, 0x0221101f}),
+ {}
+ };
+
+diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c
+index 8600c5439e1e..2e4aa23b5a60 100644
+--- a/sound/soc/codecs/max9867.c
++++ b/sound/soc/codecs/max9867.c
+@@ -46,13 +46,13 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(max9867_micboost_tlv,
+
+ static const struct snd_kcontrol_new max9867_snd_controls[] = {
+ SOC_DOUBLE_R_TLV("Master Playback Volume", MAX9867_LEFTVOL,
+- MAX9867_RIGHTVOL, 0, 41, 1, max9867_master_tlv),
++ MAX9867_RIGHTVOL, 0, 40, 1, max9867_master_tlv),
+ SOC_DOUBLE_R_TLV("Line Capture Volume", MAX9867_LEFTLINELVL,
+ MAX9867_RIGHTLINELVL, 0, 15, 1, max9867_line_tlv),
+ SOC_DOUBLE_R_TLV("Mic Capture Volume", MAX9867_LEFTMICGAIN,
+ MAX9867_RIGHTMICGAIN, 0, 20, 1, max9867_mic_tlv),
+ SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", MAX9867_LEFTMICGAIN,
+- MAX9867_RIGHTMICGAIN, 5, 4, 0, max9867_micboost_tlv),
++ MAX9867_RIGHTMICGAIN, 5, 3, 0, max9867_micboost_tlv),
+ SOC_SINGLE("Digital Sidetone Volume", MAX9867_SIDETONE, 0, 31, 1),
+ SOC_SINGLE_TLV("Digital Playback Volume", MAX9867_DACLEVEL, 0, 15, 1,
+ max9867_dac_tlv),
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 827fb0bc8b56..8f559b505bb7 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -813,9 +813,6 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+ if (chip == (void *)-1L)
+ return 0;
+
+- chip->autosuspended = !!PMSG_IS_AUTO(message);
+- if (!chip->autosuspended)
+- snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
+ if (!chip->num_suspended_intf++) {
+ list_for_each_entry(as, &chip->pcm_list, list) {
+ snd_usb_pcm_suspend(as);
+@@ -828,6 +825,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+ snd_usb_mixer_suspend(mixer);
+ }
+
++ if (!PMSG_IS_AUTO(message) && !chip->system_suspend) {
++ snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
++ chip->system_suspend = chip->num_suspended_intf;
++ }
++
+ return 0;
+ }
+
+@@ -841,10 +843,10 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+
+ if (chip == (void *)-1L)
+ return 0;
+- if (--chip->num_suspended_intf)
+- return 0;
+
+ atomic_inc(&chip->active); /* avoid autopm */
++ if (chip->num_suspended_intf > 1)
++ goto out;
+
+ list_for_each_entry(as, &chip->pcm_list, list) {
+ err = snd_usb_pcm_resume(as);
+@@ -866,9 +868,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+ snd_usbmidi_resume(p);
+ }
+
+- if (!chip->autosuspended)
++ out:
++ if (chip->num_suspended_intf == chip->system_suspend) {
+ snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
+- chip->autosuspended = 0;
++ chip->system_suspend = 0;
++ }
++ chip->num_suspended_intf--;
+
+ err_out:
+ atomic_dec(&chip->active); /* allow autopm after this point */
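The suspend/resume rework replaces the single autosuspended bit with a counter handshake: num_suspended_intf counts suspended USB interfaces, and system_suspend latches that count at the moment a non-auto (system) sleep arrives, so the card is flipped back to D0 exactly once, on the matching resume. A userspace model of the bookkeeping, with names mirrored from the hunks:

    /* Model of the counters above, sketch only: the D0 transition fires
     * exactly once per system sleep, when the interface whose suspend
     * latched the count is the one resuming. */
    #include <stdbool.h>
    #include <stdio.h>

    static int num_suspended_intf, system_suspend;

    static void suspend(bool auto_pm)
    {
            num_suspended_intf++;
            if (!auto_pm && !system_suspend)
                    system_suspend = num_suspended_intf;  /* latch */
    }

    static void resume(void)
    {
            if (num_suspended_intf == system_suspend) {
                    printf("power -> D0\n");              /* once per sleep */
                    system_suspend = 0;
            }
            num_suspended_intf--;
    }

    int main(void)
    {
            suspend(true);   /* autosuspend of one interface */
            suspend(false);  /* system sleep arrives */
            resume();        /* -> D0 announced here */
            resume();
            return 0;
    }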
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index bbae11605a4c..042a5e8eb79d 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -25,6 +25,26 @@
+ .idProduct = prod, \
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC
+
++/* HP Thunderbolt Dock Audio Headset */
++{
++ USB_DEVICE(0x03f0, 0x0269),
++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ .vendor_name = "HP",
++ .product_name = "Thunderbolt Dock Audio Headset",
++ .profile_name = "HP-Thunderbolt-Dock-Audio-Headset",
++ .ifnum = QUIRK_NO_INTERFACE
++ }
++},
++/* HP Thunderbolt Dock Audio Module */
++{
++ USB_DEVICE(0x03f0, 0x0567),
++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ .vendor_name = "HP",
++ .product_name = "Thunderbolt Dock Audio Module",
++ .profile_name = "HP-Thunderbolt-Dock-Audio-Module",
++ .ifnum = QUIRK_NO_INTERFACE
++ }
++},
+ /* FTDI devices */
+ {
+ USB_DEVICE(0x0403, 0xb8d8),
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 6fe3ab582ec6..a42d021624dc 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -26,7 +26,7 @@ struct snd_usb_audio {
+ struct usb_interface *pm_intf;
+ u32 usb_id;
+ struct mutex mutex;
+- unsigned int autosuspended:1;
++ unsigned int system_suspend;
+ atomic_t active;
+ atomic_t shutdown;
+ atomic_t usage_count;
+diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
+index eea132f512b0..c6bcf5709564 100644
+--- a/tools/perf/util/probe-event.c
++++ b/tools/perf/util/probe-event.c
+@@ -1765,8 +1765,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
+ fmt1_str = strtok_r(argv0_str, ":", &fmt);
+ fmt2_str = strtok_r(NULL, "/", &fmt);
+ fmt3_str = strtok_r(NULL, " \t", &fmt);
+- if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL
+- || fmt3_str == NULL) {
++ if (fmt1_str == NULL || fmt2_str == NULL || fmt3_str == NULL) {
+ semantic_error("Failed to parse event name: %s\n", argv[0]);
+ ret = -EINVAL;
+ goto out;
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
+index 021c03fd885d..23465823532b 100644
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
+@@ -14,6 +14,8 @@ if [ ! -f set_event ]; then
+ exit_unsupported
+ fi
+
++[ -f error_log ] || exit_unsupported
++
+ ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter'
+
+ exit 0
+diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+index 6dee9e636a95..422e7761254d 100644
+--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
++++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+@@ -115,6 +115,7 @@ static struct option long_options[] = {
+ { "tcp", no_argument, 0, 't' },
+ { "udp", no_argument, 0, 'u' },
+ { "ip", no_argument, 0, 'i' },
++ { NULL, 0, NULL, 0 },
+ };
+
+ static int next_port = 19999;
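The one-line rxtimestamp fix adds the sentinel that getopt_long(3) requires: the parser scans the option table until it reaches an element with a null name, so an unterminated array sends it reading past the end. Minimal sketch:

    /* Why the added row matters: getopt_long() stops at the first
     * all-zero element, so the table must end with this sentinel. */
    #include <getopt.h>
    #include <stdio.h>

    static struct option opts[] = {
            { "tcp", no_argument, 0, 't' },
            { "udp", no_argument, 0, 'u' },
            { NULL, 0, NULL, 0 },           /* required terminator */
    };

    int main(int argc, char **argv)
    {
            int c;

            while ((c = getopt_long(argc, argv, "tu", opts, NULL)) != -1)
                    printf("option: %c\n", c);
            return 0;
    }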
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+index 8877f7b2b809..12aa4bc1f6a0 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+@@ -32,7 +32,7 @@
+ "setup": [
+ "$TC qdisc add dev $DEV2 ingress"
+ ],
+- "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 parent ffff: handle 0xffffffff flower action ok",
++ "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 ingress handle 0xffffffff flower action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower.*handle 0xffffffff",
+@@ -77,9 +77,9 @@
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV2 ingress",
+- "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop"
++ "$TC filter add dev $DEV2 protocol ip prio 1 ingress flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop"
+ ],
+- "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop",
++ "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip prio 1 ingress flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py
+index 6a2bd2cf528e..995f66ce43eb 100755
+--- a/tools/testing/selftests/tc-testing/tdc_batch.py
++++ b/tools/testing/selftests/tc-testing/tdc_batch.py
+@@ -72,21 +72,21 @@ mac_prefix = args.mac_prefix
+
+ def format_add_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+- return ("filter add dev {} {} protocol ip parent ffff: handle {} "
++ return ("filter add dev {} {} protocol ip ingress handle {} "
+ " flower {} src_mac {} dst_mac {} action drop {}".format(
+ device, prio, handle, skip, src_mac, dst_mac, share_action))
+
+
+ def format_rep_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+- return ("filter replace dev {} {} protocol ip parent ffff: handle {} "
++ return ("filter replace dev {} {} protocol ip ingress handle {} "
+ " flower {} src_mac {} dst_mac {} action drop {}".format(
+ device, prio, handle, skip, src_mac, dst_mac, share_action))
+
+
+ def format_del_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+- return ("filter del dev {} {} protocol ip parent ffff: handle {} "
++ return ("filter del dev {} {} protocol ip ingress handle {} "
+ "flower".format(device, prio, handle))
+
+
+diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
+index 0a356aa91aa1..f2047fc69006 100644
+--- a/virt/kvm/arm/aarch32.c
++++ b/virt/kvm/arm/aarch32.c
+@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
+ [7] = { 4, 4 }, /* FIQ, unused */
+ };
+
++static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
++{
++ preempt_disable();
++ if (kvm_arm_vcpu_loaded(vcpu)) {
++ kvm_arch_vcpu_put(vcpu);
++ return true;
++ }
++
++ preempt_enable();
++ return false;
++}
++
++static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
++{
++ if (loaded) {
++ kvm_arch_vcpu_load(vcpu, smp_processor_id());
++ preempt_enable();
++ }
++}
++
+ /*
+ * When an exception is taken, most CPSR fields are left unchanged in the
+ * handler. However, some are explicitly overridden (e.g. M[4:0]).
+@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+
+ void kvm_inject_undef32(struct kvm_vcpu *vcpu)
+ {
++ bool loaded = pre_fault_synchronize(vcpu);
++
+ prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
++ post_fault_synchronize(vcpu, loaded);
+ }
+
+ /*
+@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
+ u32 vect_offset;
+ u32 *far, *fsr;
+ bool is_lpae;
++ bool loaded;
++
++ loaded = pre_fault_synchronize(vcpu);
+
+ if (is_pabt) {
+ vect_offset = 12;
+@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
+ /* no need to shuffle FS[4] into DFSR[10] as its 0 */
+ *fsr = DFSR_FSC_EXTABT_nLPAE;
+ }
++
++ post_fault_synchronize(vcpu, loaded);
+ }
+
+ void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
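pre_fault_synchronize()/post_fault_synchronize() bracket the 32-bit fault injection so that, when the vCPU's register state is live on this CPU, it is flushed back to the in-memory context before being edited and reloaded afterwards. Note the asymmetric preemption handling: preempt_enable() is deferred to the post hook whenever the put actually happened, so the vCPU cannot migrate while memory holds the authoritative copy. Usage shape (illustrative fragment, not the kernel code):

    /* Shape of the bracketing above: if the state was loaded, preemption
     * stays disabled across the put/modify/load window. */
    bool loaded = pre_fault_synchronize(vcpu);   /* may put state, keep preempt off */
    prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4); /* edit the in-memory context */
    post_fault_synchronize(vcpu, loaded);        /* reload + preempt_enable() */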
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
+index eda7b624eab8..0aca5514a58b 100644
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -332,6 +332,16 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+ preempt_enable();
+ }
+
++#ifdef CONFIG_ARM64
++#define __ptrauth_save_key(regs, key) \
++({ \
++ regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
++ regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
++})
++#else
++#define __ptrauth_save_key(regs, key) do { } while (0)
++#endif
++
+ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+ int *last_ran;
+@@ -365,7 +375,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ else
+ vcpu_set_wfx_traps(vcpu);
+
+- vcpu_ptrauth_setup_lazy(vcpu);
++ if (vcpu_has_ptrauth(vcpu)) {
++ struct kvm_cpu_context __maybe_unused *ctxt = vcpu->arch.host_cpu_context;
++
++ __ptrauth_save_key(ctxt->sys_regs, APIA);
++ __ptrauth_save_key(ctxt->sys_regs, APIB);
++ __ptrauth_save_key(ctxt->sys_regs, APDA);
++ __ptrauth_save_key(ctxt->sys_regs, APDB);
++ __ptrauth_save_key(ctxt->sys_regs, APGA);
++
++ vcpu_ptrauth_disable(vcpu);
++ }
+ }
+
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
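__ptrauth_save_key() leans on token pasting: the single key argument expands into both the sys_regs array index and the SYS_-prefixed register encoding, so each of the five keys costs one macro invocation instead of two hand-written lines. A userspace sketch of the paste trick with placeholder names (the real macro additionally pastes into the separate SYS_ symbol space):

    #include <stdio.h>

    enum { APIAKEYLO_EL1, APIAKEYHI_EL1, NR_REGS };
    static unsigned long hw_regs[NR_REGS] = { 0x1111, 0x2222 };

    /* Stand-in for read_sysreg_s(); the kernel version takes a pasted
     * SYS_##key##KEY*_EL1 encoding rather than an array index. */
    #define read_reg(r) (hw_regs[r])

    #define save_key(regs, key) do {                             \
            regs[key##KEYLO_EL1] = read_reg(key##KEYLO_EL1);     \
            regs[key##KEYHI_EL1] = read_reg(key##KEYHI_EL1);     \
    } while (0)

    int main(void)
    {
            unsigned long ctxt[NR_REGS] = { 0 };

            save_key(ctxt, APIA);   /* one call saves both halves */
            printf("%lx %lx\n", ctxt[0], ctxt[1]);
            return 0;
    }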
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 70f03ce0e5c1..412c85d90f18 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -157,10 +157,9 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
+ static unsigned long long kvm_createvm_count;
+ static unsigned long long kvm_active_vms;
+
+-__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+- unsigned long start, unsigned long end, bool blockable)
++__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
++ unsigned long start, unsigned long end)
+ {
+- return 0;
+ }
+
+ bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
+@@ -378,6 +377,18 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
+ return container_of(mn, struct kvm, mmu_notifier);
+ }
+
++static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
++ struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++ struct kvm *kvm = mmu_notifier_to_kvm(mn);
++ int idx;
++
++ idx = srcu_read_lock(&kvm->srcu);
++ kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
++ srcu_read_unlock(&kvm->srcu, idx);
++}
++
+ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address,
+@@ -402,7 +413,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ {
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int need_tlb_flush = 0, idx;
+- int ret;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
+@@ -419,14 +429,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ kvm_flush_remote_tlbs(kvm);
+
+ spin_unlock(&kvm->mmu_lock);
+-
+- ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
+- range->end,
+- mmu_notifier_range_blockable(range));
+-
+ srcu_read_unlock(&kvm->srcu, idx);
+
+- return ret;
++ return 0;
+ }
+
+ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+@@ -532,6 +537,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
+ }
+
+ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
++ .invalidate_range = kvm_mmu_notifier_invalidate_range,
+ .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
+ .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
+ .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
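The kvm_main.c change moves the arch hook from invalidate_range_start() to a new .invalidate_range callback, and the weak default drops its return value accordingly: unlike range_start, which may refuse a non-blockable invalidation, .invalidate_range cannot fail or sleep, so the wrapper surrounds the arch call with nothing heavier than the SRCU read lock that keeps memslots alive. Kernel-style shape of such a callback (illustrative names):

    /* A .invalidate_range handler must not block: only an SRCU read-side
     * section protects the arch hook, matching the wrapper above. */
    static void example_invalidate_range(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long start,
                                         unsigned long end)
    {
            struct kvm *kvm = mmu_notifier_to_kvm(mn);
            int idx = srcu_read_lock(&kvm->srcu);

            kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
            srcu_read_unlock(&kvm->srcu, idx);
    }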