diff options
author | Mike Pagano <mpagano@gentoo.org> | 2021-03-17 14:43:15 -0400 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2021-03-17 14:43:15 -0400 |
commit | f9188213412c671d0da4b9ef1c06e00fe50bac15 (patch) | |
tree | e5bdba21b3274f44ce4bd8270254b8dfe70b2aa5 | |
parent | Update wireguard patch (diff) | |
download | linux-patches-f9188213412c671d0da4b9ef1c06e00fe50bac15.tar.gz linux-patches-f9188213412c671d0da4b9ef1c06e00fe50bac15.tar.bz2 linux-patches-f9188213412c671d0da4b9ef1c06e00fe50bac15.zip |
Linux patch 5.4.1065.4-109
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 | ||||
-rw-r--r-- | 1105_linux-5.4.106.patch | 5187 |
2 files changed, 5191 insertions, 0 deletions
diff --git a/0000_README b/0000_README index ddac1264..d0d3a1ac 100644 --- a/0000_README +++ b/0000_README @@ -463,6 +463,10 @@ Patch: 1104_linux-5.4.105.patch From: http://www.kernel.org Desc: Linux 5.4.105 +Patch: 1105_linux-5.4.106.patch +From: http://www.kernel.org +Desc: Linux 5.4.106 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1105_linux-5.4.106.patch b/1105_linux-5.4.106.patch new file mode 100644 index 00000000..9fcde759 --- /dev/null +++ b/1105_linux-5.4.106.patch @@ -0,0 +1,5187 @@ +diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt +index 7064efd3b5ea3..fd22224853e58 100644 +--- a/Documentation/virt/kvm/api.txt ++++ b/Documentation/virt/kvm/api.txt +@@ -172,6 +172,9 @@ is dependent on the CPU capability and the kernel configuration. The limit can + be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION + ioctl() at run-time. + ++Creation of the VM will fail if the requested IPA size (whether it is ++implicit or explicit) is unsupported on the host. ++ + Please note that configuring the IPA size does not affect the capability + exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. 
It only affects + size of the address translated by the stage2 level (guest physical to +diff --git a/Makefile b/Makefile +index e27d031f3241e..a333b378f1f71 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 105 ++SUBLEVEL = 106 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +@@ -1175,9 +1175,15 @@ define filechk_utsrelease.h + endef + + define filechk_version.h +- echo \#define LINUX_VERSION_CODE $(shell \ +- expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \ +- echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))' ++ if [ $(SUBLEVEL) -gt 255 ]; then \ ++ echo \#define LINUX_VERSION_CODE $(shell \ ++ expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \ ++ else \ ++ echo \#define LINUX_VERSION_CODE $(shell \ ++ expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \ ++ fi; \ ++ echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + \ ++ ((c) > 255 ? 
255 : (c)))' + endef + + $(version_h): FORCE +diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h +index f615830f9f57b..9d0b7e677faac 100644 +--- a/arch/arm/include/asm/kvm_asm.h ++++ b/arch/arm/include/asm/kvm_asm.h +@@ -56,7 +56,7 @@ extern char __kvm_hyp_init_end[]; + extern void __kvm_flush_vm_context(void); + extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); + extern void __kvm_tlb_flush_vmid(struct kvm *kvm); +-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); ++extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu); + + extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high); + +diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c +index 848f27bbad9db..80e67108d39d1 100644 +--- a/arch/arm/kvm/hyp/tlb.c ++++ b/arch/arm/kvm/hyp/tlb.c +@@ -45,7 +45,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) + __kvm_tlb_flush_vmid(kvm); + } + +-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) ++void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu) + { + struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); + +@@ -54,6 +54,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) + isb(); + + write_sysreg(0, TLBIALL); ++ write_sysreg(0, ICIALLU); + dsb(nsh); + isb(); + +diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h +index 64d79b2884344..c54e759896c1a 100644 +--- a/arch/arm64/include/asm/kvm_asm.h ++++ b/arch/arm64/include/asm/kvm_asm.h +@@ -60,7 +60,7 @@ extern char __kvm_hyp_vector[]; + extern void __kvm_flush_vm_context(void); + extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); + extern void __kvm_tlb_flush_vmid(struct kvm *kvm); +-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); ++extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu); + + extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high); + +diff --git 
a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h +index 6c295a231882a..67b6b90f37eed 100644 +--- a/arch/arm64/include/asm/memory.h ++++ b/arch/arm64/include/asm/memory.h +@@ -315,6 +315,11 @@ static inline void *phys_to_virt(phys_addr_t x) + #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) + + #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) ++#define page_to_virt(x) ({ \ ++ __typeof__(x) __page = x; \ ++ void *__addr = __va(page_to_phys(__page)); \ ++ (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\ ++}) + #define virt_to_page(x) pfn_to_page(virt_to_pfn(x)) + #else + #define page_to_virt(x) ({ \ +diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h +index 3827ff4040a3f..3a5d9f1c91b6d 100644 +--- a/arch/arm64/include/asm/mmu_context.h ++++ b/arch/arm64/include/asm/mmu_context.h +@@ -63,10 +63,7 @@ extern u64 idmap_ptrs_per_pgd; + + static inline bool __cpu_uses_extended_idmap(void) + { +- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52)) +- return false; +- +- return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)); ++ return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual)); + } + + /* +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S +index 438de2301cfe3..a2e0b37549433 100644 +--- a/arch/arm64/kernel/head.S ++++ b/arch/arm64/kernel/head.S +@@ -337,7 +337,7 @@ __create_page_tables: + */ + adrp x5, __idmap_text_end + clz x5, x5 +- cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? ++ cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough? + b.ge 1f // .. 
then skip VA range extension + + adr_l x6, idmap_t0sz +diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c +index eb0efc5557f30..7b7213fc17d95 100644 +--- a/arch/arm64/kvm/hyp/tlb.c ++++ b/arch/arm64/kvm/hyp/tlb.c +@@ -182,7 +182,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) + __tlb_switch_to_host(kvm, &cxt); + } + +-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) ++void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu) + { + struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); + struct tlb_inv_context cxt; +@@ -191,6 +191,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) + __tlb_switch_to_guest(kvm, &cxt); + + __tlbi(vmalle1); ++ asm volatile("ic iallu"); + dsb(nsh); + isb(); + +diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c +index 784d485218ca1..a3105ae464be1 100644 +--- a/arch/arm64/kvm/reset.c ++++ b/arch/arm64/kvm/reset.c +@@ -378,10 +378,10 @@ void kvm_set_ipa_limit(void) + pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n", + (va_max < pa_max) ? "Virtual" : "Physical"); + +- WARN(ipa_max < KVM_PHYS_SHIFT, +- "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max); + kvm_ipa_limit = ipa_max; +- kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit); ++ kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit, ++ ((kvm_ipa_limit < KVM_PHYS_SHIFT) ? 
++ " (Reduced IPA size, limited VM/VMM compatibility)" : "")); + } + + /* +@@ -408,6 +408,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) + return -EINVAL; + } else { + phys_shift = KVM_PHYS_SHIFT; ++ if (phys_shift > kvm_ipa_limit) { ++ pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n", ++ current->comm); ++ return -EINVAL; ++ } + } + + parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7; +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c +index 602bd19630ff8..cbcac03c0e0da 100644 +--- a/arch/arm64/mm/init.c ++++ b/arch/arm64/mm/init.c +@@ -245,6 +245,18 @@ int pfn_valid(unsigned long pfn) + + if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn)))) + return 0; ++ ++ /* ++ * ZONE_DEVICE memory does not have the memblock entries. ++ * memblock_is_map_memory() check for ZONE_DEVICE based ++ * addresses will always fail. Even the normal hotplugged ++ * memory will never have MEMBLOCK_NOMAP flag set in their ++ * memblock entries. Skip memblock search for all non early ++ * memory sections covering all of hotplug memory including ++ * both normal and ZONE_DEVICE based. 
++ */ ++ if (!early_section(__pfn_to_section(pfn))) ++ return pfn_section_valid(__pfn_to_section(pfn), pfn); + #endif + return memblock_is_map_memory(addr); + } +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c +index d10247fab0fdf..99bc0289ab2b6 100644 +--- a/arch/arm64/mm/mmu.c ++++ b/arch/arm64/mm/mmu.c +@@ -38,7 +38,7 @@ + #define NO_BLOCK_MAPPINGS BIT(0) + #define NO_CONT_MAPPINGS BIT(1) + +-u64 idmap_t0sz = TCR_T0SZ(VA_BITS); ++u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN); + u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; + + u64 __section(".mmuoff.data.write") vabits_actual; +diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h +index 898b542628815..be0f7257b13c8 100644 +--- a/arch/powerpc/include/asm/code-patching.h ++++ b/arch/powerpc/include/asm/code-patching.h +@@ -72,7 +72,7 @@ void __patch_exception(int exc, unsigned long addr); + #endif + + #define OP_RT_RA_MASK 0xffff0000UL +-#define LIS_R2 0x3c020000UL ++#define LIS_R2 0x3c400000UL + #define ADDIS_R2_R12 0x3c4c0000UL + #define ADDI_R2_R2 0x38420000UL + +diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h +index 7bcb64444a394..f71c361dc356f 100644 +--- a/arch/powerpc/include/asm/machdep.h ++++ b/arch/powerpc/include/asm/machdep.h +@@ -59,6 +59,9 @@ struct machdep_calls { + int (*pcibios_root_bridge_prepare)(struct pci_host_bridge + *bridge); + ++ /* finds all the pci_controllers present at boot */ ++ void (*discover_phbs)(void); ++ + /* To setup PHBs when using automatic OF platform driver for PCI */ + int (*pci_setup_phb)(struct pci_controller *host); + +diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h +index c41220f4aad9e..5a424f867c828 100644 +--- a/arch/powerpc/include/asm/ptrace.h ++++ b/arch/powerpc/include/asm/ptrace.h +@@ -62,6 +62,9 @@ struct pt_regs + }; + #endif + ++ ++#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) ++ + #ifdef __powerpc64__ + + /* +diff 
--git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c +index 5c0a1e17219b7..af399675248ed 100644 +--- a/arch/powerpc/kernel/asm-offsets.c ++++ b/arch/powerpc/kernel/asm-offsets.c +@@ -285,7 +285,7 @@ int main(void) + + /* Interrupt register frame */ + DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); +- DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); ++ DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS); + STACK_PT_REGS_OFFSET(GPR0, gpr[0]); + STACK_PT_REGS_OFFSET(GPR1, gpr[1]); + STACK_PT_REGS_OFFSET(GPR2, gpr[2]); +diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S +index 126ba54384300..edaab1142498c 100644 +--- a/arch/powerpc/kernel/head_32.S ++++ b/arch/powerpc/kernel/head_32.S +@@ -418,10 +418,11 @@ InstructionTLBMiss: + cmplw 0,r1,r3 + #endif + mfspr r2, SPRN_SPRG_PGDIR +- li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC ++ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER + #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) + bge- 112f + lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ ++ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC + addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ + #endif + 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ +@@ -480,9 +481,10 @@ DataLoadTLBMiss: + lis r1,PAGE_OFFSET@h /* check if kernel address */ + cmplw 0,r1,r3 + mfspr r2, SPRN_SPRG_PGDIR +- li r1, _PAGE_PRESENT | _PAGE_ACCESSED ++ li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER + bge- 112f + lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ ++ li r1, _PAGE_PRESENT | _PAGE_ACCESSED + addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ + 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r2) /* get pmd entry */ +@@ -556,9 +558,10 @@ DataStoreTLBMiss: + lis r1,PAGE_OFFSET@h /* check if kernel address */ + cmplw 0,r1,r3 + mfspr r2, SPRN_SPRG_PGDIR +- li 
r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED ++ li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER + bge- 112f + lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ ++ li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED + addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ + 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r2) /* get pmd entry */ +diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c +index 1c448cf255061..a2c258a8d7367 100644 +--- a/arch/powerpc/kernel/pci-common.c ++++ b/arch/powerpc/kernel/pci-common.c +@@ -1669,3 +1669,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev) + } + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl); ++ ++ ++static int __init discover_phbs(void) ++{ ++ if (ppc_md.discover_phbs) ++ ppc_md.discover_phbs(); ++ ++ return 0; ++} ++core_initcall(discover_phbs); +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c +index bd0c258a1d5dd..c94bba9142e7e 100644 +--- a/arch/powerpc/kernel/process.c ++++ b/arch/powerpc/kernel/process.c +@@ -2081,7 +2081,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) + * See if this is an exception frame. + * We look for the "regshere" marker in the current frame. 
+ */ +- if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) ++ if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS) + && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { + struct pt_regs *regs = (struct pt_regs *) + (sp + STACK_FRAME_OVERHEAD); +diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c +index 206032c9b5458..ecfa460f66d17 100644 +--- a/arch/powerpc/kernel/traps.c ++++ b/arch/powerpc/kernel/traps.c +@@ -513,8 +513,11 @@ out: + die("Unrecoverable nested System Reset", regs, SIGABRT); + #endif + /* Must die if the interrupt is not recoverable */ +- if (!(regs->msr & MSR_RI)) ++ if (!(regs->msr & MSR_RI)) { ++ /* For the reason explained in die_mce, nmi_exit before die */ ++ nmi_exit(); + die("Unrecoverable System Reset", regs, SIGABRT); ++ } + + if (saved_hsrrs) { + mtspr(SPRN_HSRR0, hsrr0); +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c +index 02fc75ddcbb36..6f013e4188349 100644 +--- a/arch/powerpc/perf/core-book3s.c ++++ b/arch/powerpc/perf/core-book3s.c +@@ -2077,7 +2077,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val, + left += period; + if (left <= 0) + left = period; +- record = siar_valid(regs); ++ ++ /* ++ * If address is not requested in the sample via ++ * PERF_SAMPLE_IP, just record that sample irrespective ++ * of SIAR valid check. ++ */ ++ if (event->attr.sample_type & PERF_SAMPLE_IP) ++ record = siar_valid(regs); ++ else ++ record = 1; ++ + event->hw.last_period = event->hw.sample_period; + } + if (left < 0x80000000LL) +@@ -2095,9 +2105,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, + * MMCR2. Check attr.exclude_kernel and address to drop the sample in + * these cases. 
+ */ +- if (event->attr.exclude_kernel && record) +- if (is_kernel_addr(mfspr(SPRN_SIAR))) +- record = 0; ++ if (event->attr.exclude_kernel && ++ (event->attr.sample_type & PERF_SAMPLE_IP) && ++ is_kernel_addr(mfspr(SPRN_SIAR))) ++ record = 0; + + /* + * Finally record data if requested. +diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c +index b3ac2455faadc..637300330507f 100644 +--- a/arch/powerpc/platforms/pseries/msi.c ++++ b/arch/powerpc/platforms/pseries/msi.c +@@ -4,6 +4,7 @@ + * Copyright 2006-2007 Michael Ellerman, IBM Corp. + */ + ++#include <linux/crash_dump.h> + #include <linux/device.h> + #include <linux/irq.h> + #include <linux/msi.h> +@@ -458,8 +459,28 @@ again: + return hwirq; + } + +- virq = irq_create_mapping_affinity(NULL, hwirq, +- entry->affinity); ++ /* ++ * Depending on the number of online CPUs in the original ++ * kernel, it is likely for CPU #0 to be offline in a kdump ++ * kernel. The associated IRQs in the affinity mappings ++ * provided by irq_create_affinity_masks() are thus not ++ * started by irq_startup(), as per-design for managed IRQs. ++ * This can be a problem with multi-queue block devices driven ++ * by blk-mq : such a non-started IRQ is very likely paired ++ * with the single queue enforced by blk-mq during kdump (see ++ * blk_mq_alloc_tag_set()). This causes the device to remain ++ * silent and likely hangs the guest at some point. ++ * ++ * We don't really care for fine-grained affinity when doing ++ * kdump actually : simply ignore the pre-computed affinity ++ * masks in this case and let the default mask with all CPUs ++ * be used when creating the IRQ mappings. 
++ */ ++ if (is_kdump_kernel()) ++ virq = irq_create_mapping(NULL, hwirq); ++ else ++ virq = irq_create_mapping_affinity(NULL, hwirq, ++ entry->affinity); + + if (!virq) { + pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq); +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c +index 659d99af91566..8c51462f13fd1 100644 +--- a/arch/s390/kernel/smp.c ++++ b/arch/s390/kernel/smp.c +@@ -765,7 +765,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail, + static int __smp_rescan_cpus(struct sclp_core_info *info, bool early) + { + struct sclp_core_entry *core; +- cpumask_t avail; ++ static cpumask_t avail; + bool configured; + u16 core_id; + int nr, i; +diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h +index f94532f25db14..274217e7ed702 100644 +--- a/arch/sparc/include/asm/mman.h ++++ b/arch/sparc/include/asm/mman.h +@@ -57,35 +57,39 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr) + { + if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI)) + return 0; +- if (prot & PROT_ADI) { +- if (!adi_capable()) +- return 0; ++ return 1; ++} + +- if (addr) { +- struct vm_area_struct *vma; ++#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags) ++/* arch_validate_flags() - Ensure combination of flags is valid for a ++ * VMA. 
++ */ ++static inline bool arch_validate_flags(unsigned long vm_flags) ++{ ++ /* If ADI is being enabled on this VMA, check for ADI ++ * capability on the platform and ensure VMA is suitable ++ * for ADI ++ */ ++ if (vm_flags & VM_SPARC_ADI) { ++ if (!adi_capable()) ++ return false; + +- vma = find_vma(current->mm, addr); +- if (vma) { +- /* ADI can not be enabled on PFN +- * mapped pages +- */ +- if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) +- return 0; ++ /* ADI can not be enabled on PFN mapped pages */ ++ if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) ++ return false; + +- /* Mergeable pages can become unmergeable +- * if ADI is enabled on them even if they +- * have identical data on them. This can be +- * because ADI enabled pages with identical +- * data may still not have identical ADI +- * tags on them. Disallow ADI on mergeable +- * pages. +- */ +- if (vma->vm_flags & VM_MERGEABLE) +- return 0; +- } +- } ++ /* Mergeable pages can become unmergeable ++ * if ADI is enabled on them even if they ++ * have identical data on them. This can be ++ * because ADI enabled pages with identical ++ * data may still not have identical ADI ++ * tags on them. Disallow ADI on mergeable ++ * pages. 
++ */ ++ if (vm_flags & VM_MERGEABLE) ++ return false; + } +- return 1; ++ return true; + } + #endif /* CONFIG_SPARC64 */ + +diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c +index 906eda1158b4d..40dd6cb4a4133 100644 +--- a/arch/sparc/mm/init_32.c ++++ b/arch/sparc/mm/init_32.c +@@ -197,6 +197,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) + size = memblock_phys_mem_size() - memblock_reserved_size(); + *pages_avail = (size >> PAGE_SHIFT) - high_pages; + ++ /* Only allow low memory to be allocated via memblock allocation */ ++ memblock_set_current_limit(max_low_pfn << PAGE_SHIFT); ++ + return max_pfn; + } + +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c +index f29f015a5e7f3..b934f9f68a168 100644 +--- a/arch/x86/kernel/unwind_orc.c ++++ b/arch/x86/kernel/unwind_orc.c +@@ -357,8 +357,8 @@ static bool deref_stack_regs(struct unwind_state *state, unsigned long addr, + if (!stack_access_ok(state, addr, sizeof(struct pt_regs))) + return false; + +- *ip = regs->ip; +- *sp = regs->sp; ++ *ip = READ_ONCE_NOCHECK(regs->ip); ++ *sp = READ_ONCE_NOCHECK(regs->sp); + return true; + } + +@@ -370,8 +370,8 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr + if (!stack_access_ok(state, addr, IRET_FRAME_SIZE)) + return false; + +- *ip = regs->ip; +- *sp = regs->sp; ++ *ip = READ_ONCE_NOCHECK(regs->ip); ++ *sp = READ_ONCE_NOCHECK(regs->sp); + return true; + } + +@@ -392,12 +392,12 @@ static bool get_reg(struct unwind_state *state, unsigned int reg_off, + return false; + + if (state->full_regs) { +- *val = ((unsigned long *)state->regs)[reg]; ++ *val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]); + return true; + } + + if (state->prev_regs) { +- *val = ((unsigned long *)state->prev_regs)[reg]; ++ *val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]); + return true; + } + +diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c +index 7d5236eafe845..4c3b9813b2843 
100644 +--- a/drivers/base/swnode.c ++++ b/drivers/base/swnode.c +@@ -812,6 +812,9 @@ int software_node_register(const struct software_node *node) + if (software_node_to_swnode(node)) + return -EEXIST; + ++ if (node->parent && !parent) ++ return -EINVAL; ++ + return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0)); + } + EXPORT_SYMBOL_GPL(software_node_register); +diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c +index 804d28faa97b0..a1824bb080446 100644 +--- a/drivers/block/rsxx/core.c ++++ b/drivers/block/rsxx/core.c +@@ -869,6 +869,7 @@ static int rsxx_pci_probe(struct pci_dev *dev, + card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event"); + if (!card->event_wq) { + dev_err(CARD_TO_DEV(card), "Failed card event setup.\n"); ++ st = -ENOMEM; + goto failed_event_handler; + } + +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c +index 22aa432a68bf9..719c6b7741afa 100644 +--- a/drivers/block/zram/zram_drv.c ++++ b/drivers/block/zram/zram_drv.c +@@ -627,7 +627,7 @@ static ssize_t writeback_store(struct device *dev, + struct bio_vec bio_vec; + struct page *page; + ssize_t ret = len; +- int mode; ++ int mode, err; + unsigned long blk_idx = 0; + + if (sysfs_streq(buf, "idle")) +@@ -719,12 +719,17 @@ static ssize_t writeback_store(struct device *dev, + * XXX: A single page IO would be inefficient for write + * but it would be not bad as starter. + */ +- ret = submit_bio_wait(&bio); +- if (ret) { ++ err = submit_bio_wait(&bio); ++ if (err) { + zram_slot_lock(zram, index); + zram_clear_flag(zram, index, ZRAM_UNDER_WB); + zram_clear_flag(zram, index, ZRAM_IDLE); + zram_slot_unlock(zram, index); ++ /* ++ * Return last IO error unless every IO were ++ * not suceeded. 
++ */ ++ ret = err; + continue; + } + +diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c +index f5918707672f1..d88f4230c2219 100644 +--- a/drivers/gpu/drm/drm_gem_shmem_helper.c ++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c +@@ -474,14 +474,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + loff_t num_pages = obj->size >> PAGE_SHIFT; ++ vm_fault_t ret; + struct page *page; ++ pgoff_t page_offset; + +- if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages)) +- return VM_FAULT_SIGBUS; ++ /* We don't use vmf->pgoff since that has the fake offset */ ++ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; + +- page = shmem->pages[vmf->pgoff]; ++ mutex_lock(&shmem->pages_lock); ++ ++ if (page_offset >= num_pages || ++ WARN_ON_ONCE(!shmem->pages) || ++ shmem->madv < 0) { ++ ret = VM_FAULT_SIGBUS; ++ } else { ++ page = shmem->pages[page_offset]; ++ ++ ret = vmf_insert_page(vma, vmf->address, page); ++ } + +- return vmf_insert_page(vma, vmf->address, page); ++ mutex_unlock(&shmem->pages_lock); ++ ++ return ret; + } + + static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) +@@ -549,9 +563,6 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma) + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + +- /* Remove the fake offset */ +- vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node); +- + return 0; + } + EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap); +diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c +index 22c7fd7196c82..2cf053fb8d54b 100644 +--- a/drivers/gpu/drm/drm_ioc32.c ++++ b/drivers/gpu/drm/drm_ioc32.c +@@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd, + if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) + return -EFAULT; + ++ memset(&v, 0, sizeof(v)); ++ + v = (struct drm_version) 
{ + .name_len = v32.name_len, + .name = compat_ptr(v32.name), +@@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, + + if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) + return -EFAULT; ++ ++ memset(&uq, 0, sizeof(uq)); ++ + uq = (struct drm_unique){ + .unique_len = uq32.unique_len, + .unique = compat_ptr(uq32.unique), +@@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, + if (copy_from_user(&c32, argp, sizeof(c32))) + return -EFAULT; + ++ memset(&client, 0, sizeof(client)); ++ + client.idx = c32.idx; + + err = drm_ioctl_kernel(file, drm_getclient, &client, 0); +@@ -850,6 +857,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + ++ memset(&req, 0, sizeof(req)); ++ + req.request.type = req32.request.type; + req.request.sequence = req32.request.sequence; + req.request.signal = req32.request.signal; +@@ -887,6 +896,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, + struct drm_mode_fb_cmd2 req64; + int err; + ++ memset(&req64, 0, sizeof(req64)); ++ + if (copy_from_user(&req64, argp, + offsetof(drm_mode_fb_cmd232_t, modifier))) + return -EFAULT; +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c +index a24f8dec5adc9..86d0961112773 100644 +--- a/drivers/gpu/drm/meson/meson_drv.c ++++ b/drivers/gpu/drm/meson/meson_drv.c +@@ -420,6 +420,16 @@ static int meson_probe_remote(struct platform_device *pdev, + return count; + } + ++static void meson_drv_shutdown(struct platform_device *pdev) ++{ ++ struct meson_drm *priv = dev_get_drvdata(&pdev->dev); ++ struct drm_device *drm = priv->drm; ++ ++ DRM_DEBUG_DRIVER("\n"); ++ drm_kms_helper_poll_fini(drm); ++ drm_atomic_helper_shutdown(drm); ++} ++ + static int meson_drv_probe(struct platform_device *pdev) + { + struct component_match *match = NULL; +@@ -469,6 +479,7 @@ MODULE_DEVICE_TABLE(of, 
dt_match); + + static struct platform_driver meson_drm_platform_driver = { + .probe = meson_drv_probe, ++ .shutdown = meson_drv_shutdown, + .driver = { + .name = "meson-drm", + .of_match_table = dt_match, +diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c +index 92d84280096e0..9abf3dc5ef990 100644 +--- a/drivers/gpu/drm/qxl/qxl_display.c ++++ b/drivers/gpu/drm/qxl/qxl_display.c +@@ -325,6 +325,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, + + head.id = i; + head.flags = 0; ++ head.surface_id = 0; + oldcount = qdev->monitors_config->count; + if (crtc->state->active) { + struct drm_display_mode *mode = &crtc->mode; +diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c +index 86001cfbdb6f1..b499ac37dc7b0 100644 +--- a/drivers/hid/hid-logitech-dj.c ++++ b/drivers/hid/hid-logitech-dj.c +@@ -995,7 +995,12 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev, + workitem.reports_supported |= STD_KEYBOARD; + break; + case 0x0d: +- device_type = "eQUAD Lightspeed 1_1"; ++ device_type = "eQUAD Lightspeed 1.1"; ++ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); ++ workitem.reports_supported |= STD_KEYBOARD; ++ break; ++ case 0x0f: ++ device_type = "eQUAD Lightspeed 1.2"; + logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); + workitem.reports_supported |= STD_KEYBOARD; + break; +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c +index 9c162a01a5849..d0c4b3019e41e 100644 +--- a/drivers/i2c/busses/i2c-rcar.c ++++ b/drivers/i2c/busses/i2c-rcar.c +@@ -89,7 +89,6 @@ + + #define RCAR_BUS_PHASE_START (MDBS | MIE | ESG) + #define RCAR_BUS_PHASE_DATA (MDBS | MIE) +-#define RCAR_BUS_MASK_DATA (~(ESG | FSB) & 0xFF) + #define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB) + + #define RCAR_IRQ_SEND (MNR | MAL | MST | MAT | MDE) +@@ -117,6 +116,7 @@ enum rcar_i2c_type { + }; + + struct rcar_i2c_priv { ++ u32 flags; + void __iomem *io; + struct 
i2c_adapter adap; + struct i2c_msg *msg; +@@ -127,7 +127,6 @@ struct rcar_i2c_priv { + + int pos; + u32 icccr; +- u32 flags; + u8 recovery_icmcr; /* protected by adapter lock */ + enum rcar_i2c_type devtype; + struct i2c_client *slave; +@@ -616,7 +615,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) + /* + * This driver has a lock-free design because there are IP cores (at least + * R-Car Gen2) which have an inherent race condition in their hardware design. +- * There, we need to clear RCAR_BUS_MASK_DATA bits as soon as possible after ++ * There, we need to switch to RCAR_BUS_PHASE_DATA as soon as possible after + * the interrupt was generated, otherwise an unwanted repeated message gets + * generated. It turned out that taking a spinlock at the beginning of the ISR + * was already causing repeated messages. Thus, this driver was converted to +@@ -625,13 +624,11 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) + static irqreturn_t rcar_i2c_irq(int irq, void *ptr) + { + struct rcar_i2c_priv *priv = ptr; +- u32 msr, val; ++ u32 msr; + + /* Clear START or STOP immediately, except for REPSTART after read */ +- if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) { +- val = rcar_i2c_read(priv, ICMCR); +- rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA); +- } ++ if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) ++ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA); + + msr = rcar_i2c_read(priv, ICMSR); + +diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c +index d38398526965d..a4b7422de534e 100644 +--- a/drivers/input/keyboard/applespi.c ++++ b/drivers/input/keyboard/applespi.c +@@ -48,6 +48,7 @@ + #include <linux/efi.h> + #include <linux/input.h> + #include <linux/input/mt.h> ++#include <linux/ktime.h> + #include <linux/leds.h> + #include <linux/module.h> + #include <linux/spinlock.h> +@@ -400,7 +401,7 @@ struct applespi_data { + unsigned int cmd_msg_cntr; + /* lock to protect the above parameters and flags below 
*/ + spinlock_t cmd_msg_lock; +- bool cmd_msg_queued; ++ ktime_t cmd_msg_queued; + enum applespi_evt_type cmd_evt_type; + + struct led_classdev backlight_info; +@@ -716,7 +717,7 @@ static void applespi_msg_complete(struct applespi_data *applespi, + wake_up_all(&applespi->drain_complete); + + if (is_write_msg) { +- applespi->cmd_msg_queued = false; ++ applespi->cmd_msg_queued = 0; + applespi_send_cmd_msg(applespi); + } + +@@ -758,8 +759,16 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi) + return 0; + + /* check whether send is in progress */ +- if (applespi->cmd_msg_queued) +- return 0; ++ if (applespi->cmd_msg_queued) { ++ if (ktime_ms_delta(ktime_get(), applespi->cmd_msg_queued) < 1000) ++ return 0; ++ ++ dev_warn(&applespi->spi->dev, "Command %d timed out\n", ++ applespi->cmd_evt_type); ++ ++ applespi->cmd_msg_queued = 0; ++ applespi->write_active = false; ++ } + + /* set up packet */ + memset(packet, 0, APPLESPI_PACKET_SIZE); +@@ -856,7 +865,7 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi) + return sts; + } + +- applespi->cmd_msg_queued = true; ++ applespi->cmd_msg_queued = ktime_get_coarse(); + applespi->write_active = true; + + return 0; +@@ -1908,7 +1917,7 @@ static int __maybe_unused applespi_resume(struct device *dev) + applespi->drain = false; + applespi->have_cl_led_on = false; + applespi->have_bl_level = 0; +- applespi->cmd_msg_queued = false; ++ applespi->cmd_msg_queued = 0; + applespi->read_active = false; + applespi->write_active = false; + +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c +index 31d7e2d4f3040..ad714ff375f85 100644 +--- a/drivers/iommu/amd_iommu_init.c ++++ b/drivers/iommu/amd_iommu_init.c +@@ -12,6 +12,7 @@ + #include <linux/acpi.h> + #include <linux/list.h> + #include <linux/bitmap.h> ++#include <linux/delay.h> + #include <linux/slab.h> + #include <linux/syscore_ops.h> + #include <linux/interrupt.h> +@@ -253,6 +254,8 @@ static enum iommu_init_state init_state = 
IOMMU_START_STATE; + static int amd_iommu_enable_interrupts(void); + static int __init iommu_go_to_state(enum iommu_init_state state); + static void init_device_table_dma(void); ++static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, ++ u8 fxn, u64 *value, bool is_write); + + static bool amd_iommu_pre_enabled = true; + +@@ -1672,13 +1675,11 @@ static int __init init_iommu_all(struct acpi_table_header *table) + return 0; + } + +-static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, +- u8 fxn, u64 *value, bool is_write); +- +-static void init_iommu_perf_ctr(struct amd_iommu *iommu) ++static void __init init_iommu_perf_ctr(struct amd_iommu *iommu) + { ++ int retry; + struct pci_dev *pdev = iommu->dev; +- u64 val = 0xabcd, val2 = 0, save_reg = 0; ++ u64 val = 0xabcd, val2 = 0, save_reg, save_src; + + if (!iommu_feature(iommu, FEATURE_PC)) + return; +@@ -1686,17 +1687,39 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) + amd_iommu_pc_present = true; + + /* save the value to restore, if writable */ +- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false)) ++ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) || ++ iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false)) + goto pc_false; + +- /* Check if the performance counters can be written to */ +- if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) || +- (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) || +- (val != val2)) ++ /* ++ * Disable power gating by programing the performance counter ++ * source to 20 (i.e. counts the reads and writes from/to IOMMU ++ * Reserved Register [MMIO Offset 1FF8h] that are ignored.), ++ * which never get incremented during this init phase. ++ * (Note: The event is also deprecated.) 
++ */ ++ val = 20; ++ if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true)) + goto pc_false; + ++ /* Check if the performance counters can be written to */ ++ val = 0xabcd; ++ for (retry = 5; retry; retry--) { ++ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) || ++ iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) || ++ val2) ++ break; ++ ++ /* Wait about 20 msec for power gating to disable and retry. */ ++ msleep(20); ++ } ++ + /* restore */ +- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true)) ++ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) || ++ iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true)) ++ goto pc_false; ++ ++ if (val != val2) + goto pc_false; + + pci_info(pdev, "IOMMU performance counters supported\n"); +diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c +index a4a45d68a6efc..1c00688c71c24 100644 +--- a/drivers/media/platform/vsp1/vsp1_drm.c ++++ b/drivers/media/platform/vsp1/vsp1_drm.c +@@ -245,7 +245,7 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1, + brx = &vsp1->bru->entity; + else if (pipe->brx && !drm_pipe->force_brx_release) + brx = pipe->brx; +- else if (!vsp1->bru->entity.pipe) ++ else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe) + brx = &vsp1->bru->entity; + else + brx = &vsp1->brs->entity; +@@ -462,9 +462,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1, + * make sure it is present in the pipeline's list of entities if it + * wasn't already. 
+ */ +- if (!use_uif) { ++ if (drm_pipe->uif && !use_uif) { + drm_pipe->uif->pipe = NULL; +- } else if (!drm_pipe->uif->pipe) { ++ } else if (drm_pipe->uif && !drm_pipe->uif->pipe) { + drm_pipe->uif->pipe = pipe; + list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities); + } +diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile +index 48d23433b3c06..caeb51def782c 100644 +--- a/drivers/media/rc/Makefile ++++ b/drivers/media/rc/Makefile +@@ -5,6 +5,7 @@ obj-y += keymaps/ + obj-$(CONFIG_RC_CORE) += rc-core.o + rc-core-y := rc-main.o rc-ir-raw.o + rc-core-$(CONFIG_LIRC) += lirc_dev.o ++rc-core-$(CONFIG_MEDIA_CEC_RC) += keymaps/rc-cec.o + rc-core-$(CONFIG_BPF_LIRC_MODE2) += bpf-lirc.o + obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o + obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o +diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile +index ea91a9afa6a02..d89dcc4481229 100644 +--- a/drivers/media/rc/keymaps/Makefile ++++ b/drivers/media/rc/keymaps/Makefile +@@ -20,7 +20,6 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ + rc-behold.o \ + rc-behold-columbus.o \ + rc-budget-ci-old.o \ +- rc-cec.o \ + rc-cinergy-1400.o \ + rc-cinergy.o \ + rc-d680-dmb.o \ +diff --git a/drivers/media/rc/keymaps/rc-cec.c b/drivers/media/rc/keymaps/rc-cec.c +index 3e3bd11092b45..068e22aeac8c3 100644 +--- a/drivers/media/rc/keymaps/rc-cec.c ++++ b/drivers/media/rc/keymaps/rc-cec.c +@@ -1,5 +1,15 @@ + // SPDX-License-Identifier: GPL-2.0-or-later + /* Keytable for the CEC remote control ++ * ++ * This keymap is unusual in that it can't be built as a module, ++ * instead it is registered directly in rc-main.c if CONFIG_MEDIA_CEC_RC ++ * is set. This is because it can be called from drm_dp_cec_set_edid() via ++ * cec_register_adapter() in an asynchronous context, and it is not ++ * allowed to use request_module() to load rc-cec.ko in that case. 
++ * ++ * Since this keymap is only used if CONFIG_MEDIA_CEC_RC is set, we ++ * just compile this keymap into the rc-core module and never as a ++ * separate module. + * + * Copyright (c) 2015 by Kamil Debski + */ +@@ -152,7 +162,7 @@ static struct rc_map_table cec[] = { + /* 0x77-0xff: Reserved */ + }; + +-static struct rc_map_list cec_map = { ++struct rc_map_list cec_map = { + .map = { + .scan = cec, + .size = ARRAY_SIZE(cec), +@@ -160,19 +170,3 @@ static struct rc_map_list cec_map = { + .name = RC_MAP_CEC, + } + }; +- +-static int __init init_rc_map_cec(void) +-{ +- return rc_map_register(&cec_map); +-} +- +-static void __exit exit_rc_map_cec(void) +-{ +- rc_map_unregister(&cec_map); +-} +- +-module_init(init_rc_map_cec); +-module_exit(exit_rc_map_cec); +- +-MODULE_LICENSE("GPL"); +-MODULE_AUTHOR("Kamil Debski"); +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c +index c4d7e06974d2c..ee80f38970bc4 100644 +--- a/drivers/media/rc/rc-main.c ++++ b/drivers/media/rc/rc-main.c +@@ -2033,6 +2033,9 @@ static int __init rc_core_init(void) + + led_trigger_register_simple("rc-feedback", &led_feedback); + rc_map_register(&empty_map); ++#ifdef CONFIG_MEDIA_CEC_RC ++ rc_map_register(&cec_map); ++#endif + + return 0; + } +@@ -2042,6 +2045,9 @@ static void __exit rc_core_exit(void) + lirc_dev_exit(); + class_unregister(&rc_class); + led_trigger_unregister_simple(led_feedback); ++#ifdef CONFIG_MEDIA_CEC_RC ++ rc_map_unregister(&cec_map); ++#endif + rc_map_unregister(&empty_map); + } + +diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c +index 6f108996142d7..bbfaec2e6ef61 100644 +--- a/drivers/media/usb/usbtv/usbtv-audio.c ++++ b/drivers/media/usb/usbtv/usbtv-audio.c +@@ -399,7 +399,7 @@ void usbtv_audio_free(struct usbtv *usbtv) + cancel_work_sync(&usbtv->snd_trigger); + + if (usbtv->snd && usbtv->udev) { +- snd_card_free(usbtv->snd); ++ snd_card_free_when_closed(usbtv->snd); + usbtv->snd = NULL; + } + } +diff --git 
a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c +index 3a5d2890fe2aa..beaf15807f789 100644 +--- a/drivers/misc/fastrpc.c ++++ b/drivers/misc/fastrpc.c +@@ -924,6 +924,11 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, + if (!fl->cctx->rpdev) + return -EPIPE; + ++ if (handle == FASTRPC_INIT_HANDLE && !kernel) { ++ dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle); ++ return -EPERM; ++ } ++ + ctx = fastrpc_context_alloc(fl, kernel, sc, args); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); +diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c +index 95ff7c5a1dfb6..0a5e5b841aeb1 100644 +--- a/drivers/misc/pvpanic.c ++++ b/drivers/misc/pvpanic.c +@@ -166,6 +166,7 @@ static const struct of_device_id pvpanic_mmio_match[] = { + { .compatible = "qemu,pvpanic-mmio", }, + {} + }; ++MODULE_DEVICE_TABLE(of, pvpanic_mmio_match); + + static struct platform_driver pvpanic_mmio_driver = { + .driver = { +diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c +index 74de3f2dda38e..28501f165d457 100644 +--- a/drivers/mmc/core/bus.c ++++ b/drivers/mmc/core/bus.c +@@ -373,11 +373,6 @@ void mmc_remove_card(struct mmc_card *card) + mmc_remove_card_debugfs(card); + #endif + +- if (host->cqe_enabled) { +- host->cqe_ops->cqe_disable(host); +- host->cqe_enabled = false; +- } +- + if (mmc_card_present(card)) { + if (mmc_host_is_spi(card->host)) { + pr_info("%s: SPI card removed\n", +@@ -390,6 +385,10 @@ void mmc_remove_card(struct mmc_card *card) + of_node_put(card->dev.of_node); + } + ++ if (host->cqe_enabled) { ++ host->cqe_ops->cqe_disable(host); ++ host->cqe_enabled = false; ++ } ++ + put_device(&card->dev); + } +- +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c +index de14b5845f525..9f29288f2c9a9 100644 +--- a/drivers/mmc/core/mmc.c ++++ b/drivers/mmc/core/mmc.c +@@ -423,10 +423,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) + + /* EXT_CSD value is in units of 10ms, 
but we store in ms */ + card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; +- /* Some eMMC set the value too low so set a minimum */ +- if (card->ext_csd.part_time && +- card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) +- card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; + + /* Sleep / awake timeout in 100ns units */ + if (sa_shift > 0 && sa_shift <= 0x17) +@@ -616,6 +612,17 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) + card->ext_csd.data_sector_size = 512; + } + ++ /* ++ * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined ++ * when accessing a specific field", so use it here if there is no ++ * PARTITION_SWITCH_TIME. ++ */ ++ if (!card->ext_csd.part_time) ++ card->ext_csd.part_time = card->ext_csd.generic_cmd6_time; ++ /* Some eMMC set the value too low so set a minimum */ ++ if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) ++ card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; ++ + /* eMMC v5 or later */ + if (card->ext_csd.rev >= 7) { + memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION], +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c +index 9d47a2bd2546b..1254a5650cfff 100644 +--- a/drivers/mmc/host/mtk-sd.c ++++ b/drivers/mmc/host/mtk-sd.c +@@ -1020,13 +1020,13 @@ static void msdc_track_cmd_data(struct msdc_host *host, + static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq) + { + unsigned long flags; +- bool ret; + +- ret = cancel_delayed_work(&host->req_timeout); +- if (!ret) { +- /* delay work already running */ +- return; +- } ++ /* ++ * No need check the return value of cancel_delayed_work, as only ONE ++ * path will go here! 
++ */ ++ cancel_delayed_work(&host->req_timeout); ++ + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + spin_unlock_irqrestore(&host->lock, flags); +@@ -1046,7 +1046,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events, + bool done = false; + bool sbc_error; + unsigned long flags; +- u32 *rsp = cmd->resp; ++ u32 *rsp; + + if (mrq->sbc && cmd == mrq->cmd && + (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR +@@ -1067,6 +1067,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events, + + if (done) + return true; ++ rsp = cmd->resp; + + sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask); + +@@ -1254,7 +1255,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, + static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data) + { +- struct mmc_command *stop = data->stop; ++ struct mmc_command *stop; + unsigned long flags; + bool done; + unsigned int check_data = events & +@@ -1270,6 +1271,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, + + if (done) + return true; ++ stop = data->stop; + + if (check_data || (stop && stop->error)) { + dev_dbg(host->dev, "DMA status: 0x%8X\n", +diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c +index 4031217d21c37..52054931c3507 100644 +--- a/drivers/mmc/host/mxs-mmc.c ++++ b/drivers/mmc/host/mxs-mmc.c +@@ -644,7 +644,7 @@ static int mxs_mmc_probe(struct platform_device *pdev) + + ret = mmc_of_parse(mmc); + if (ret) +- goto out_clk_disable; ++ goto out_free_dma; + + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c +index 1bd955e4c7d66..b6d00dfa8b8f6 100644 +--- a/drivers/net/can/flexcan.c ++++ b/drivers/net/can/flexcan.c +@@ -548,7 +548,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv) + u32 reg; + + reg = priv->read(®s->mcr); +- reg |= FLEXCAN_MCR_HALT; ++ reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT; + priv->write(reg, 
®s->mcr); + + while (timeout-- && !(priv->read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) +@@ -1057,10 +1057,13 @@ static int flexcan_chip_start(struct net_device *dev) + + flexcan_set_bittiming(dev); + ++ /* set freeze, halt */ ++ err = flexcan_chip_freeze(priv); ++ if (err) ++ goto out_chip_disable; ++ + /* MCR + * +- * enable freeze +- * halt now + * only supervisor access + * enable warning int + * enable individual RX masking +@@ -1069,9 +1072,8 @@ static int flexcan_chip_start(struct net_device *dev) + */ + reg_mcr = priv->read(®s->mcr); + reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); +- reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | +- FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C | +- FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); ++ reg_mcr |= FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | ++ FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); + + /* MCR + * +@@ -1432,10 +1434,14 @@ static int register_flexcandev(struct net_device *dev) + if (err) + goto out_chip_disable; + +- /* set freeze, halt and activate FIFO, restrict register access */ ++ /* set freeze, halt */ ++ err = flexcan_chip_freeze(priv); ++ if (err) ++ goto out_chip_disable; ++ ++ /* activate FIFO, restrict register access */ + reg = priv->read(®s->mcr); +- reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | +- FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; ++ reg |= FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; + priv->write(reg, ®s->mcr); + + /* Currently we only support newer versions of this core +diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c +index 1f8710b35c6d7..32cb479fe6ac8 100644 +--- a/drivers/net/can/m_can/tcan4x5x.c ++++ b/drivers/net/can/m_can/tcan4x5x.c +@@ -325,14 +325,14 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) + if (ret) + return ret; + ++ /* Zero out the MCAN buffers */ ++ m_can_init_ram(cdev); ++ + ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL); + if (ret) + return ret; 
+ +- /* Zero out the MCAN buffers */ +- m_can_init_ram(cdev); +- + return ret; + } + +diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c +index aa693c8e285ab..bde8ec75ac4e9 100644 +--- a/drivers/net/ethernet/atheros/alx/main.c ++++ b/drivers/net/ethernet/atheros/alx/main.c +@@ -1897,13 +1897,16 @@ static int alx_resume(struct device *dev) + + if (!netif_running(alx->dev)) + return 0; +- netif_device_attach(alx->dev); + + rtnl_lock(); + err = __alx_open(alx, true); + rtnl_unlock(); ++ if (err) ++ return err; + +- return err; ++ netif_device_attach(alx->dev); ++ ++ return 0; + } + + static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 4ae49d92c1eed..5a7831a97a132 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -7925,10 +7925,18 @@ static void bnxt_setup_inta(struct bnxt *bp) + bp->irq_tbl[0].handler = bnxt_inta; + } + ++static int bnxt_init_int_mode(struct bnxt *bp); ++ + static int bnxt_setup_int_mode(struct bnxt *bp) + { + int rc; + ++ if (!bp->irq_tbl) { ++ rc = bnxt_init_int_mode(bp); ++ if (rc || !bp->irq_tbl) ++ return rc ?: -ENODEV; ++ } ++ + if (bp->flags & BNXT_FLAG_USING_MSIX) + bnxt_setup_msix(bp); + else +@@ -8113,7 +8121,7 @@ static int bnxt_init_inta(struct bnxt *bp) + + static int bnxt_init_int_mode(struct bnxt *bp) + { +- int rc = 0; ++ int rc = -ENODEV; + + if (bp->flags & BNXT_FLAG_MSIX_CAP) + rc = bnxt_init_msix(bp); +@@ -8748,7 +8756,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) + { + struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_drv_if_change_input req = {0}; +- bool resc_reinit = false, fw_reset = false; ++ bool fw_reset = !bp->irq_tbl; ++ bool resc_reinit = false; + u32 flags = 0; + int rc; + +@@ -8776,6 +8785,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool 
up) + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { + netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); ++ set_bit(BNXT_STATE_ABORT_ERR, &bp->state); + return -ENODEV; + } + if (resc_reinit || fw_reset) { +diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c +index 70060c51854fd..0928bec79fe4b 100644 +--- a/drivers/net/ethernet/davicom/dm9000.c ++++ b/drivers/net/ethernet/davicom/dm9000.c +@@ -134,6 +134,8 @@ struct board_info { + u32 wake_state; + + int ip_summed; ++ ++ struct regulator *power_supply; + }; + + /* debug code */ +@@ -1454,7 +1456,7 @@ dm9000_probe(struct platform_device *pdev) + if (ret) { + dev_err(dev, "failed to request reset gpio %d: %d\n", + reset_gpios, ret); +- return -ENODEV; ++ goto out_regulator_disable; + } + + /* According to manual PWRST# Low Period Min 1ms */ +@@ -1466,8 +1468,10 @@ dm9000_probe(struct platform_device *pdev) + + if (!pdata) { + pdata = dm9000_parse_dt(&pdev->dev); +- if (IS_ERR(pdata)) +- return PTR_ERR(pdata); ++ if (IS_ERR(pdata)) { ++ ret = PTR_ERR(pdata); ++ goto out_regulator_disable; ++ } + } + + /* Init network device */ +@@ -1484,6 +1488,8 @@ dm9000_probe(struct platform_device *pdev) + + db->dev = &pdev->dev; + db->ndev = ndev; ++ if (!IS_ERR(power)) ++ db->power_supply = power; + + spin_lock_init(&db->lock); + mutex_init(&db->addr_lock); +@@ -1708,6 +1714,10 @@ out: + dm9000_release_board(pdev, db); + free_netdev(ndev); + ++out_regulator_disable: ++ if (!IS_ERR(power)) ++ regulator_disable(power); ++ + return ret; + } + +@@ -1765,10 +1775,13 @@ static int + dm9000_drv_remove(struct platform_device *pdev) + { + struct net_device *ndev = platform_get_drvdata(pdev); ++ struct board_info *dm = to_dm9000_board(ndev); + + unregister_netdev(ndev); +- dm9000_release_board(pdev, netdev_priv(ndev)); ++ dm9000_release_board(pdev, dm); + free_netdev(ndev); /* free device structure */ ++ if (dm->power_supply) ++ regulator_disable(dm->power_supply); + + 
dev_dbg(&pdev->dev, "released and freed device\n"); + return 0; +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c +index 4ef4d41b0d8d6..b77eaf31bd4ed 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c +@@ -942,7 +942,7 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) + enetc_free_tx_ring(priv->tx_ring[i]); + } + +-static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) ++int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) + { + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + +@@ -963,7 +963,7 @@ static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) + return 0; + } + +-static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) ++void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) + { + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + +@@ -971,7 +971,7 @@ static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) + cbdr->bd_base = NULL; + } + +-static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) ++void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) + { + /* set CBDR cache attributes */ + enetc_wr(hw, ENETC_SICAR2, +@@ -991,7 +991,7 @@ static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) + cbdr->cir = hw->reg + ENETC_SICBDRCIR; + } + +-static void enetc_clear_cbdr(struct enetc_hw *hw) ++void enetc_clear_cbdr(struct enetc_hw *hw) + { + enetc_wr(hw, ENETC_SICBDRMR, 0); + } +@@ -1016,13 +1016,12 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) + return 0; + } + +-static int enetc_configure_si(struct enetc_ndev_priv *priv) ++int enetc_configure_si(struct enetc_ndev_priv *priv) + { + struct enetc_si *si = priv->si; + struct enetc_hw *hw = &si->hw; + int err; + +- enetc_setup_cbdr(hw, &si->cbd_ring); + /* set SI cache attributes */ + enetc_wr(hw, 
ENETC_SICAR0, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); +@@ -1068,6 +1067,8 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) + if (err) + return err; + ++ enetc_setup_cbdr(&si->hw, &si->cbd_ring); ++ + priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), + GFP_KERNEL); + if (!priv->cls_rules) { +@@ -1075,14 +1076,8 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) + goto err_alloc_cls; + } + +- err = enetc_configure_si(priv); +- if (err) +- goto err_config_si; +- + return 0; + +-err_config_si: +- kfree(priv->cls_rules); + err_alloc_cls: + enetc_clear_cbdr(&si->hw); + enetc_free_cbdr(priv->dev, &si->cbd_ring); +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h +index 541b4e2073fe3..b8801a2b6a025 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc.h ++++ b/drivers/net/ethernet/freescale/enetc/enetc.h +@@ -221,6 +221,7 @@ void enetc_get_si_caps(struct enetc_si *si); + void enetc_init_si_rings_params(struct enetc_ndev_priv *priv); + int enetc_alloc_si_resources(struct enetc_ndev_priv *priv); + void enetc_free_si_resources(struct enetc_ndev_priv *priv); ++int enetc_configure_si(struct enetc_ndev_priv *priv); + + int enetc_open(struct net_device *ndev); + int enetc_close(struct net_device *ndev); +@@ -236,6 +237,10 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void enetc_set_ethtool_ops(struct net_device *ndev); + + /* control buffer descriptor ring (CBDR) */ ++int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr); ++void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr); ++void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr); ++void enetc_clear_cbdr(struct enetc_hw *hw); + int enetc_set_mac_flt_entry(struct enetc_si *si, int index, + char *mac_addr, int si_map); + int enetc_clear_mac_flt_entry(struct enetc_si *si, int index); +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c 
b/drivers/net/ethernet/freescale/enetc/enetc_pf.c +index 22f70638a4055..ac62464e0416a 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c +@@ -854,6 +854,26 @@ static int enetc_init_port_rss_memory(struct enetc_si *si) + return err; + } + ++static void enetc_init_unused_port(struct enetc_si *si) ++{ ++ struct device *dev = &si->pdev->dev; ++ struct enetc_hw *hw = &si->hw; ++ int err; ++ ++ si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; ++ err = enetc_alloc_cbdr(dev, &si->cbd_ring); ++ if (err) ++ return; ++ ++ enetc_setup_cbdr(hw, &si->cbd_ring); ++ ++ enetc_init_port_rfs_memory(si); ++ enetc_init_port_rss_memory(si); ++ ++ enetc_clear_cbdr(hw); ++ enetc_free_cbdr(dev, &si->cbd_ring); ++} ++ + static int enetc_pf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) + { +@@ -863,11 +883,6 @@ static int enetc_pf_probe(struct pci_dev *pdev, + struct enetc_pf *pf; + int err; + +- if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { +- dev_info(&pdev->dev, "device is disabled, skipping\n"); +- return -ENODEV; +- } +- + err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); + if (err) { + dev_err(&pdev->dev, "PCI probing failed\n"); +@@ -881,6 +896,13 @@ static int enetc_pf_probe(struct pci_dev *pdev, + goto err_map_pf_space; + } + ++ if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { ++ enetc_init_unused_port(si); ++ dev_info(&pdev->dev, "device is disabled, skipping\n"); ++ err = -ENODEV; ++ goto err_device_disabled; ++ } ++ + pf = enetc_si_priv(si); + pf->si = si; + pf->total_vfs = pci_sriov_get_totalvfs(pdev); +@@ -920,6 +942,12 @@ static int enetc_pf_probe(struct pci_dev *pdev, + goto err_init_port_rss; + } + ++ err = enetc_configure_si(priv); ++ if (err) { ++ dev_err(&pdev->dev, "Failed to configure SI\n"); ++ goto err_config_si; ++ } ++ + err = enetc_alloc_msix(priv); + if (err) { + dev_err(&pdev->dev, "MSIX alloc failed\n"); +@@ -945,6 +973,7 @@ 
err_reg_netdev: + enetc_mdio_remove(pf); + enetc_of_put_phy(priv); + enetc_free_msix(priv); ++err_config_si: + err_init_port_rss: + err_init_port_rfs: + err_alloc_msix: +@@ -953,6 +982,7 @@ err_alloc_si_res: + si->ndev = NULL; + free_netdev(ndev); + err_alloc_netdev: ++err_device_disabled: + err_map_pf_space: + enetc_pci_remove(pdev); + +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c +index ebd21bf4cfa1e..3a8c2049b417c 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c +@@ -189,6 +189,12 @@ static int enetc_vf_probe(struct pci_dev *pdev, + goto err_alloc_si_res; + } + ++ err = enetc_configure_si(priv); ++ if (err) { ++ dev_err(&pdev->dev, "Failed to configure SI\n"); ++ goto err_config_si; ++ } ++ + err = enetc_alloc_msix(priv); + if (err) { + dev_err(&pdev->dev, "MSIX alloc failed\n"); +@@ -208,6 +214,7 @@ static int enetc_vf_probe(struct pci_dev *pdev, + + err_reg_netdev: + enetc_free_msix(priv); ++err_config_si: + err_alloc_msix: + enetc_free_si_resources(priv); + err_alloc_si_res: +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +index 1426eb5ddf3df..e34e0854635c3 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +@@ -1018,16 +1018,16 @@ struct hclge_fd_tcam_config_3_cmd { + #define HCLGE_FD_AD_DROP_B 0 + #define HCLGE_FD_AD_DIRECT_QID_B 1 + #define HCLGE_FD_AD_QID_S 2 +-#define HCLGE_FD_AD_QID_M GENMASK(12, 2) ++#define HCLGE_FD_AD_QID_M GENMASK(11, 2) + #define HCLGE_FD_AD_USE_COUNTER_B 12 + #define HCLGE_FD_AD_COUNTER_NUM_S 13 + #define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13) + #define HCLGE_FD_AD_NXT_STEP_B 20 + #define HCLGE_FD_AD_NXT_KEY_S 21 +-#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21) ++#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21) + #define HCLGE_FD_AD_WR_RULE_ID_B 
0 + #define HCLGE_FD_AD_RULE_ID_S 1 +-#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1) ++#define HCLGE_FD_AD_RULE_ID_M GENMASK(12, 1) + + struct hclge_fd_ad_config_cmd { + u8 stage; +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +index 08040cafc06bc..93f3865b679bf 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +@@ -4908,9 +4908,9 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, + case BIT(INNER_SRC_MAC): + for (i = 0; i < ETH_ALEN; i++) { + calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], +- rule->tuples.src_mac[i]); ++ rule->tuples_mask.src_mac[i]); + calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], +- rule->tuples.src_mac[i]); ++ rule->tuples_mask.src_mac[i]); + } + + return true; +@@ -5939,8 +5939,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, + fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); + fs->m_ext.vlan_tci = + rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 
+- cpu_to_be16(VLAN_VID_MASK) : +- cpu_to_be16(rule->tuples_mask.vlan_tag1); ++ 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); + } + + if (fs->flow_type & FLOW_MAC_EXT) { +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 309cdc5ebc1ff..79b13750fa2d2 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -1753,10 +1753,9 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p) + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + +- if (adapter->state != VNIC_PROBED) { +- ether_addr_copy(adapter->mac_addr, addr->sa_data); ++ ether_addr_copy(adapter->mac_addr, addr->sa_data); ++ if (adapter->state != VNIC_PROBED) + rc = __ibmvnic_set_mac(netdev, addr->sa_data); +- } + + return rc; + } +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index 0604b5aaad86f..58211590229b1 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -15142,6 +15142,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (err) { + dev_info(&pdev->dev, + "setup of misc vector failed: %d\n", err); ++ i40e_cloud_filter_exit(pf); ++ i40e_fdir_teardown(pf); + goto err_vsis; + } + } +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +index 113f6087c7c9a..b14b164c9601f 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +@@ -575,6 +575,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) + return -EINVAL; + } + ++ if (xs->props.mode != XFRM_MODE_TRANSPORT) { ++ netdev_err(dev, "Unsupported mode for ipsec offload\n"); ++ return -EINVAL; ++ } ++ + if (ixgbe_ipsec_check_mgmt_ip(xs)) { + netdev_err(dev, "IPsec IP addr clash with mgmt filters\n"); + return -EINVAL; +diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c 
b/drivers/net/ethernet/intel/ixgbevf/ipsec.c +index 5170dd9d8705b..caaea2c920a6e 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c ++++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c +@@ -272,6 +272,11 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) + return -EINVAL; + } + ++ if (xs->props.mode != XFRM_MODE_TRANSPORT) { ++ netdev_err(dev, "Unsupported mode for ipsec offload\n"); ++ return -EINVAL; ++ } ++ + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + struct rx_sa rsa; + +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +index a1202e53710cd..5582fba2f5823 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +@@ -47,7 +47,7 @@ + #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff) + #define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff) + +-static int mlx4_en_moderation_update(struct mlx4_en_priv *priv) ++int mlx4_en_moderation_update(struct mlx4_en_priv *priv) + { + int i, t; + int err = 0; +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +index b5eb116249dda..cd165e52ed33c 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +@@ -3657,6 +3657,8 @@ int mlx4_en_reset_config(struct net_device *dev, + en_err(priv, "Failed starting port\n"); + } + ++ if (!err) ++ err = mlx4_en_moderation_update(priv); + out: + mutex_unlock(&mdev->state_lock); + kfree(tmp); +diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +index a2f69c6f0c79f..17a5bd4c68b2b 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h ++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +@@ -797,6 +797,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); + #define DEV_FEATURE_CHANGED(dev, new_features, feature) \ + ((dev->features & feature) ^ (new_features & feature)) + ++int 
mlx4_en_moderation_update(struct mlx4_en_priv *priv); + int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t new_features); +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c +index 18f86e441570c..a042f4607b0d0 100644 +--- a/drivers/net/ethernet/renesas/sh_eth.c ++++ b/drivers/net/ethernet/renesas/sh_eth.c +@@ -610,6 +610,8 @@ static struct sh_eth_cpu_data r7s72100_data = { + EESR_TDE, + .fdr_value = 0x0000070f, + ++ .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, ++ + .no_psr = 1, + .apr = 1, + .mpr = 1, +@@ -828,6 +830,8 @@ static struct sh_eth_cpu_data r7s9210_data = { + + .fdr_value = 0x0000070f, + ++ .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, ++ + .apr = 1, + .mpr = 1, + .tpauser = 1, +@@ -1131,6 +1135,9 @@ static struct sh_eth_cpu_data sh771x_data = { + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, ++ ++ .trscer_err_mask = DESC_I_RINT8, ++ + .tsu = 1, + .dual_port = 1, + }; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +index 68c157979b947..a41ac13cc4e55 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +@@ -116,6 +116,23 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr, + ioaddr + DMA_CHAN_INTR_ENA(chan)); + } + ++static void dwmac410_dma_init_channel(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, u32 chan) ++{ ++ u32 value; ++ ++ /* common channel control register config */ ++ value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); ++ if (dma_cfg->pblx8) ++ value = value | DMA_BUS_MODE_PBL; ++ ++ writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); ++ ++ /* Mask interrupts by writing to CSR7 */ ++ writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, ++ ioaddr + DMA_CHAN_INTR_ENA(chan)); ++} ++ + static void dwmac4_dma_init(void __iomem *ioaddr, + struct 
stmmac_dma_cfg *dma_cfg, int atds) + { +@@ -462,7 +479,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { + const struct stmmac_dma_ops dwmac410_dma_ops = { + .reset = dwmac4_dma_reset, + .init = dwmac4_dma_init, +- .init_chan = dwmac4_dma_init_channel, ++ .init_chan = dwmac410_dma_init_channel, + .init_rx_chan = dwmac4_dma_init_rx_chan, + .init_tx_chan = dwmac4_dma_init_tx_chan, + .axi = dwmac4_dma_axi, +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +index f2a29a90e0854..afdea015f4b45 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +@@ -60,10 +60,6 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan) + + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); +- +- value = readl(ioaddr + GMAC_CONFIG); +- value &= ~GMAC_CONFIG_TE; +- writel(value, ioaddr + GMAC_CONFIG); + } + + void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan) +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index ce5d3e9e5dff4..8e7c60e02fa09 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -4821,6 +4821,8 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv) + tx_q->cur_tx = 0; + tx_q->dirty_tx = 0; + tx_q->mss = 0; ++ ++ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); + } + } + +diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c +index a6bbe93f29ef6..917f37c176302 100644 +--- a/drivers/net/netdevsim/netdev.c ++++ b/drivers/net/netdevsim/netdev.c +@@ -292,6 +292,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) + + ns = netdev_priv(dev); + ns->netdev = dev; ++ u64_stats_init(&ns->syncp); + ns->nsim_dev = nsim_dev; + ns->nsim_dev_port = nsim_dev_port; + ns->nsim_bus_dev = nsim_dev->nsim_bus_dev; +diff 
--git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c +index b718b11607fcd..b0b8a3ce82b68 100644 +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -345,15 +345,16 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, + + phydev->autoneg = autoneg; + +- phydev->speed = speed; ++ if (autoneg == AUTONEG_DISABLE) { ++ phydev->speed = speed; ++ phydev->duplex = duplex; ++ } + + linkmode_copy(phydev->advertising, advertising); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->advertising, autoneg == AUTONEG_ENABLE); + +- phydev->duplex = duplex; +- + phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; + + /* Restart the PHY */ +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 05b85b94d9518..6508d70056b3a 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -441,13 +441,6 @@ static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, c + goto err; + } + +- /* we don't want to modify a running netdev */ +- if (netif_running(dev->net)) { +- netdev_err(dev->net, "Cannot change a running device\n"); +- ret = -EBUSY; +- goto err; +- } +- + ret = qmimux_register_device(dev->net, mux_id); + if (!ret) { + info->flags |= QMI_WWAN_FLAG_MUX; +@@ -477,13 +470,6 @@ static ssize_t del_mux_store(struct device *d, struct device_attribute *attr, c + if (!rtnl_trylock()) + return restart_syscall(); + +- /* we don't want to modify a running netdev */ +- if (netif_running(dev->net)) { +- netdev_err(dev->net, "Cannot change a running device\n"); +- ret = -EBUSY; +- goto err; +- } +- + del_dev = qmimux_find_dev(dev, mux_id); + if (!del_dev) { + netdev_err(dev->net, "mux_id not present\n"); +diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c +index 709e3de0f6af1..60f357d2f79fa 100644 +--- a/drivers/net/wan/lapbether.c ++++ b/drivers/net/wan/lapbether.c +@@ -283,7 +283,6 @@ static int lapbeth_open(struct net_device *dev) + return -ENODEV; + } + +- netif_start_queue(dev); + return 
0; + } + +@@ -291,8 +290,6 @@ static int lapbeth_close(struct net_device *dev) + { + int err; + +- netif_stop_queue(dev); +- + if ((err = lapb_unregister(dev)) != LAPB_OK) + pr_err("lapb_unregister error: %d\n", err); + +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h +index a412b352182c8..d50022d264642 100644 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h +@@ -177,7 +177,8 @@ struct ath_frame_info { + s8 txq; + u8 keyix; + u8 rtscts_rate; +- u8 retries : 7; ++ u8 retries : 6; ++ u8 dyn_smps : 1; + u8 baw_tracked : 1; + u8 tx_power; + enum ath9k_key_type keytype:2; +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c +index 31e7b108279c6..14e6871a14054 100644 +--- a/drivers/net/wireless/ath/ath9k/xmit.c ++++ b/drivers/net/wireless/ath/ath9k/xmit.c +@@ -1271,6 +1271,11 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, + is_40, is_sgi, is_sp); + if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) + info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; ++ if (rix >= 8 && fi->dyn_smps) { ++ info->rates[i].RateFlags |= ++ ATH9K_RATESERIES_RTS_CTS; ++ info->flags |= ATH9K_TXDESC_CTSENA; ++ } + + info->txpower[i] = ath_get_rate_txpower(sc, bf, rix, + is_40, false); +@@ -2111,6 +2116,7 @@ static void setup_frame_info(struct ieee80211_hw *hw, + fi->keyix = an->ps_key; + else + fi->keyix = ATH9K_TXKEYIX_INVALID; ++ fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC; + fi->keytype = keytype; + fi->framelen = framelen; + fi->tx_power = txpower; +diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c +index 781952b686ed2..d3efcbd48ee1e 100644 +--- a/drivers/net/wireless/mediatek/mt76/dma.c ++++ b/drivers/net/wireless/mediatek/mt76/dma.c +@@ -454,13 +454,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, + { + struct sk_buff *skb = q->rx_head; + 
struct skb_shared_info *shinfo = skb_shinfo(skb); ++ int nr_frags = shinfo->nr_frags; + +- if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) { ++ if (nr_frags < ARRAY_SIZE(shinfo->frags)) { + struct page *page = virt_to_head_page(data); + int offset = data - page_address(page) + q->buf_offset; + +- skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len, +- q->buf_size); ++ skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); + } else { + skb_free_frag(data); + } +@@ -469,7 +469,10 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, + return; + + q->rx_head = NULL; +- dev->drv->rx_skb(dev, q - dev->q_rx, skb); ++ if (nr_frags < ARRAY_SIZE(shinfo->frags)) ++ dev->drv->rx_skb(dev, q - dev->q_rx, skb); ++ else ++ dev_kfree_skb(skb); + } + + static int +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index 95d77a17375e1..e4e24e7bf4628 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -455,7 +455,6 @@ static void nvme_free_ns_head(struct kref *ref) + + nvme_mpath_remove_disk(head); + ida_simple_remove(&head->subsys->ns_ida, head->instance); +- list_del_init(&head->entry); + cleanup_srcu_struct(&head->srcu); + nvme_put_subsystem(head->subsys); + kfree(head); +@@ -3374,7 +3373,6 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys, + + list_for_each_entry(h, &subsys->nsheads, entry) { + if (nvme_ns_ids_valid(&new->ids) && +- !list_empty(&h->list) && + nvme_ns_ids_equal(&new->ids, &h->ids)) + return -EINVAL; + } +@@ -3469,6 +3467,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, + "IDs don't match for shared namespace %d\n", + nsid); + ret = -EINVAL; ++ nvme_put_ns_head(head); + goto out_unlock; + } + } +@@ -3629,6 +3628,8 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) + out_unlink_ns: + mutex_lock(&ctrl->subsys->lock); + list_del_rcu(&ns->siblings); ++ if (list_empty(&ns->head->list)) ++ list_del_init(&ns->head->entry); + 
mutex_unlock(&ctrl->subsys->lock); + nvme_put_ns_head(ns->head); + out_free_id: +@@ -3651,7 +3652,10 @@ static void nvme_ns_remove(struct nvme_ns *ns) + + mutex_lock(&ns->ctrl->subsys->lock); + list_del_rcu(&ns->siblings); ++ if (list_empty(&ns->head->list)) ++ list_del_init(&ns->head->entry); + mutex_unlock(&ns->ctrl->subsys->lock); ++ + synchronize_rcu(); /* guarantee not available in head->list */ + nvme_mpath_clear_current_path(ns); + synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */ +diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c +index f4c02da84e599..0bfa5065b4405 100644 +--- a/drivers/pci/controller/pci-xgene-msi.c ++++ b/drivers/pci/controller/pci-xgene-msi.c +@@ -384,13 +384,9 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu) + if (!msi_group->gic_irq) + continue; + +- irq_set_chained_handler(msi_group->gic_irq, +- xgene_msi_isr); +- err = irq_set_handler_data(msi_group->gic_irq, msi_group); +- if (err) { +- pr_err("failed to register GIC IRQ handler\n"); +- return -EINVAL; +- } ++ irq_set_chained_handler_and_data(msi_group->gic_irq, ++ xgene_msi_isr, msi_group); ++ + /* + * Statically allocate MSI GIC IRQs to each CPU core. 
+ * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated +diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c +index 626a7c352dfdf..728a59655825d 100644 +--- a/drivers/pci/controller/pcie-mediatek.c ++++ b/drivers/pci/controller/pcie-mediatek.c +@@ -1063,14 +1063,14 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) + err = of_pci_get_devfn(child); + if (err < 0) { + dev_err(dev, "failed to parse devfn: %d\n", err); +- return err; ++ goto error_put_node; + } + + slot = PCI_SLOT(err); + + err = mtk_pcie_parse_port(pcie, child, slot); + if (err) +- return err; ++ goto error_put_node; + } + + err = mtk_pcie_subsys_powerup(pcie); +@@ -1086,6 +1086,9 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) + mtk_pcie_subsys_powerdown(pcie); + + return 0; ++error_put_node: ++ of_node_put(child); ++ return err; + } + + static int mtk_pcie_probe(struct platform_device *pdev) +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 9add26438be50..3c3bc9f584983 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -3903,6 +3903,10 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, + ret = logic_pio_register_range(range); + if (ret) + kfree(range); ++ ++ /* Ignore duplicates due to deferred probing */ ++ if (ret == -EEXIST) ++ ret = 0; + #endif + + return ret; +diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c +index f64b82824db28..2db7113383fdc 100644 +--- a/drivers/platform/olpc/olpc-ec.c ++++ b/drivers/platform/olpc/olpc-ec.c +@@ -426,11 +426,8 @@ static int olpc_ec_probe(struct platform_device *pdev) + + /* get the EC revision */ + err = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ec->version, 1); +- if (err) { +- ec_priv = NULL; +- kfree(ec); +- return err; +- } ++ if (err) ++ goto error; + + config.dev = pdev->dev.parent; + config.driver_data = ec; +@@ -440,12 +437,16 @@ static int olpc_ec_probe(struct platform_device *pdev) + if (IS_ERR(ec->dcon_rdev)) { + dev_err(&pdev->dev, 
"failed to register DCON regulator\n"); + err = PTR_ERR(ec->dcon_rdev); +- kfree(ec); +- return err; ++ goto error; + } + + ec->dbgfs_dir = olpc_ec_setup_debugfs(); + ++ return 0; ++ ++error: ++ ec_priv = NULL; ++ kfree(ec); + return err; + } + +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c +index a0ebc2e603949..b577c8f7e3462 100644 +--- a/drivers/s390/block/dasd.c ++++ b/drivers/s390/block/dasd.c +@@ -3087,7 +3087,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, + + basedev = block->base; + spin_lock_irq(&dq->lock); +- if (basedev->state < DASD_STATE_READY) { ++ if (basedev->state < DASD_STATE_READY || ++ test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) { + DBF_DEV_EVENT(DBF_ERR, basedev, + "device not ready for request %p", req); + rc = BLK_STS_IOERR; +@@ -3522,8 +3523,6 @@ void dasd_generic_remove(struct ccw_device *cdev) + struct dasd_device *device; + struct dasd_block *block; + +- cdev->handler = NULL; +- + device = dasd_device_from_cdev(cdev); + if (IS_ERR(device)) { + dasd_remove_sysfs_files(cdev); +@@ -3542,6 +3541,7 @@ void dasd_generic_remove(struct ccw_device *cdev) + * no quite down yet. + */ + dasd_set_target_state(device, DASD_STATE_NEW); ++ cdev->handler = NULL; + /* dasd_delete_device destroys the device reference. */ + block = device->block; + dasd_delete_device(device); +diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c +index f0d71ab77c50e..15df0a5c03ecb 100644 +--- a/drivers/s390/cio/vfio_ccw_ops.c ++++ b/drivers/s390/cio/vfio_ccw_ops.c +@@ -506,7 +506,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, + if (ret) + return ret; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; + } + case VFIO_DEVICE_GET_REGION_INFO: + { +@@ -524,7 +524,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, + if (ret) + return ret; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; + } + case VFIO_DEVICE_GET_IRQ_INFO: + { +@@ -545,7 +545,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, + if (info.count == -1) + return -EINVAL; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; + } + case VFIO_DEVICE_SET_IRQS: + { +diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c +index 790b0b2b36272..1ec01148018f1 100644 +--- a/drivers/s390/crypto/vfio_ap_ops.c ++++ b/drivers/s390/crypto/vfio_ap_ops.c +@@ -1279,7 +1279,7 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg) + info.num_regions = 0; + info.num_irqs = 0; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; + } + + static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c +index a14057c67a12a..c5b7d18513b66 100644 +--- a/drivers/scsi/libiscsi.c ++++ b/drivers/scsi/libiscsi.c +@@ -1532,14 +1532,9 @@ check_mgmt: + } + rc = iscsi_prep_scsi_cmd_pdu(conn->task); + if (rc) { +- if (rc == -ENOMEM || rc == -EACCES) { +- spin_lock_bh(&conn->taskqueuelock); +- list_add_tail(&conn->task->running, +- &conn->cmdqueue); +- conn->task = NULL; +- spin_unlock_bh(&conn->taskqueuelock); +- goto done; +- } else ++ if (rc == -ENOMEM || rc == -EACCES) ++ fail_scsi_task(conn->task, DID_IMM_RETRY); ++ else + fail_scsi_task(conn->task, DID_ABORT); + spin_lock_bh(&conn->taskqueuelock); + continue; +diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c +index 8622cf9d3f640..9e7a6de3c43d1 100644 +--- a/drivers/spi/spi-stm32.c ++++ b/drivers/spi/spi-stm32.c +@@ -924,8 +924,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) + mask |= STM32H7_SPI_SR_RXP; + + if (!(sr & mask)) { +- dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", +- sr, ier); ++ dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", ++ sr, ier); + spin_unlock_irqrestore(&spi->lock, flags); + return IRQ_NONE; + } +@@ -952,15 +952,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) + } + + if (sr & STM32H7_SPI_SR_OVR) { +- dev_warn(spi->dev, "Overrun: received value discarded\n"); +- if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) +- stm32h7_spi_read_rxfifo(spi, false); +- /* +- * If overrun is detected while using DMA, it means that +- * something went wrong, so stop the current transfer +- */ +- if (spi->cur_usedma) +- end = true; ++ dev_err(spi->dev, "Overrun: RX data lost\n"); ++ end = true; + } + + if (sr & STM32H7_SPI_SR_EOT) { +diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c +index e035c9f757a1c..2176d3289eff5 100644 
+--- a/drivers/staging/comedi/drivers/addi_apci_1032.c ++++ b/drivers/staging/comedi/drivers/addi_apci_1032.c +@@ -260,6 +260,7 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) + struct apci1032_private *devpriv = dev->private; + struct comedi_subdevice *s = dev->read_subdev; + unsigned int ctrl; ++ unsigned short val; + + /* check interrupt is from this device */ + if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) & +@@ -275,7 +276,8 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) + outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG); + + s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff; +- comedi_buf_write_samples(s, &s->state, 1); ++ val = s->state; ++ comedi_buf_write_samples(s, &val, 1); + comedi_handle_events(dev, s); + + /* enable the interrupt */ +diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c +index 816dd25b9d0e4..8c3eff7cf465c 100644 +--- a/drivers/staging/comedi/drivers/addi_apci_1500.c ++++ b/drivers/staging/comedi/drivers/addi_apci_1500.c +@@ -208,7 +208,7 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) + struct comedi_device *dev = d; + struct apci1500_private *devpriv = dev->private; + struct comedi_subdevice *s = dev->read_subdev; +- unsigned int status = 0; ++ unsigned short status = 0; + unsigned int val; + + val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR); +@@ -238,14 +238,14 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) + * + * Mask Meaning + * ---------- ------------------------------------------ +- * 0x00000001 Event 1 has occurred +- * 0x00000010 Event 2 has occurred +- * 0x00000100 Counter/timer 1 has run down (not implemented) +- * 0x00001000 Counter/timer 2 has run down (not implemented) +- * 0x00010000 Counter 3 has run down (not implemented) +- * 0x00100000 Watchdog has run down (not implemented) +- * 0x01000000 Voltage error +- * 0x10000000 Short-circuit error ++ * 0b00000001 Event 1 has occurred ++ * 
0b00000010 Event 2 has occurred ++ * 0b00000100 Counter/timer 1 has run down (not implemented) ++ * 0b00001000 Counter/timer 2 has run down (not implemented) ++ * 0b00010000 Counter 3 has run down (not implemented) ++ * 0b00100000 Watchdog has run down (not implemented) ++ * 0b01000000 Voltage error ++ * 0b10000000 Short-circuit error + */ + comedi_buf_write_samples(s, &status, 1); + comedi_handle_events(dev, s); +diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c +index ddc0dc93d08b6..eca5fa8a9eb8e 100644 +--- a/drivers/staging/comedi/drivers/adv_pci1710.c ++++ b/drivers/staging/comedi/drivers/adv_pci1710.c +@@ -300,11 +300,11 @@ static int pci1710_ai_eoc(struct comedi_device *dev, + static int pci1710_ai_read_sample(struct comedi_device *dev, + struct comedi_subdevice *s, + unsigned int cur_chan, +- unsigned int *val) ++ unsigned short *val) + { + const struct boardtype *board = dev->board_ptr; + struct pci1710_private *devpriv = dev->private; +- unsigned int sample; ++ unsigned short sample; + unsigned int chan; + + sample = inw(dev->iobase + PCI171X_AD_DATA_REG); +@@ -345,7 +345,7 @@ static int pci1710_ai_insn_read(struct comedi_device *dev, + pci1710_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1); + + for (i = 0; i < insn->n; i++) { +- unsigned int val; ++ unsigned short val; + + /* start conversion */ + outw(0, dev->iobase + PCI171X_SOFTTRG_REG); +@@ -395,7 +395,7 @@ static void pci1710_handle_every_sample(struct comedi_device *dev, + { + struct comedi_cmd *cmd = &s->async->cmd; + unsigned int status; +- unsigned int val; ++ unsigned short val; + int ret; + + status = inw(dev->iobase + PCI171X_STATUS_REG); +@@ -455,7 +455,7 @@ static void pci1710_handle_fifo(struct comedi_device *dev, + } + + for (i = 0; i < devpriv->max_samples; i++) { +- unsigned int val; ++ unsigned short val; + int ret; + + ret = pci1710_ai_read_sample(dev, s, s->async->cur_chan, &val); +diff --git 
a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c +index f99211ec46deb..0034005bdf8f1 100644 +--- a/drivers/staging/comedi/drivers/das6402.c ++++ b/drivers/staging/comedi/drivers/das6402.c +@@ -186,7 +186,7 @@ static irqreturn_t das6402_interrupt(int irq, void *d) + if (status & DAS6402_STATUS_FFULL) { + async->events |= COMEDI_CB_OVERFLOW; + } else if (status & DAS6402_STATUS_FFNE) { +- unsigned int val; ++ unsigned short val; + + val = das6402_ai_read_sample(dev, s); + comedi_buf_write_samples(s, &val, 1); +diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c +index 8cf09ef3012fa..4bd8fd5218c8f 100644 +--- a/drivers/staging/comedi/drivers/das800.c ++++ b/drivers/staging/comedi/drivers/das800.c +@@ -427,7 +427,7 @@ static irqreturn_t das800_interrupt(int irq, void *d) + struct comedi_cmd *cmd; + unsigned long irq_flags; + unsigned int status; +- unsigned int val; ++ unsigned short val; + bool fifo_empty; + bool fifo_overflow; + int i; +diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c +index 75693cdde3138..c180d18ce517f 100644 +--- a/drivers/staging/comedi/drivers/dmm32at.c ++++ b/drivers/staging/comedi/drivers/dmm32at.c +@@ -404,7 +404,7 @@ static irqreturn_t dmm32at_isr(int irq, void *d) + { + struct comedi_device *dev = d; + unsigned char intstat; +- unsigned int val; ++ unsigned short val; + int i; + + if (!dev->attached) { +diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c +index ee53571a89698..ead8000b5929b 100644 +--- a/drivers/staging/comedi/drivers/me4000.c ++++ b/drivers/staging/comedi/drivers/me4000.c +@@ -924,7 +924,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id) + struct comedi_subdevice *s = dev->read_subdev; + int i; + int c = 0; +- unsigned int lval; ++ unsigned short lval; + + if (!dev->attached) + return IRQ_NONE; +diff --git a/drivers/staging/comedi/drivers/pcl711.c 
b/drivers/staging/comedi/drivers/pcl711.c +index a5937206bf1cd..e9abae4180625 100644 +--- a/drivers/staging/comedi/drivers/pcl711.c ++++ b/drivers/staging/comedi/drivers/pcl711.c +@@ -184,7 +184,7 @@ static irqreturn_t pcl711_interrupt(int irq, void *d) + struct comedi_device *dev = d; + struct comedi_subdevice *s = dev->read_subdev; + struct comedi_cmd *cmd = &s->async->cmd; +- unsigned int data; ++ unsigned short data; + + if (!dev->attached) { + dev_err(dev->class_dev, "spurious interrupt\n"); +diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c +index 0af5315d43575..fc8afffc1815f 100644 +--- a/drivers/staging/comedi/drivers/pcl818.c ++++ b/drivers/staging/comedi/drivers/pcl818.c +@@ -423,7 +423,7 @@ static int pcl818_ai_eoc(struct comedi_device *dev, + + static bool pcl818_ai_write_sample(struct comedi_device *dev, + struct comedi_subdevice *s, +- unsigned int chan, unsigned int val) ++ unsigned int chan, unsigned short val) + { + struct pcl818_private *devpriv = dev->private; + struct comedi_cmd *cmd = &s->async->cmd; +diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c +index 3cffc8be66563..e61bd8e1d246f 100644 +--- a/drivers/staging/ks7010/ks_wlan_net.c ++++ b/drivers/staging/ks7010/ks_wlan_net.c +@@ -1120,6 +1120,7 @@ static int ks_wlan_set_scan(struct net_device *dev, + { + struct ks_wlan_private *priv = netdev_priv(dev); + struct iw_scan_req *req = NULL; ++ int len; + + if (priv->sleep_mode == SLP_SLEEP) + return -EPERM; +@@ -1129,8 +1130,9 @@ static int ks_wlan_set_scan(struct net_device *dev, + if (wrqu->data.length == sizeof(struct iw_scan_req) && + wrqu->data.flags & IW_SCAN_THIS_ESSID) { + req = (struct iw_scan_req *)extra; +- priv->scan_ssid_len = req->essid_len; +- memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len); ++ len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); ++ priv->scan_ssid_len = len; ++ memcpy(priv->scan_ssid, req->essid, len); + } else { + 
priv->scan_ssid_len = 0; + } +diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c +index 51a5b71f8c256..1c0300ce63369 100644 +--- a/drivers/staging/rtl8188eu/core/rtw_ap.c ++++ b/drivers/staging/rtl8188eu/core/rtw_ap.c +@@ -784,6 +784,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) + /* SSID */ + p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_)); + if (p && ie_len > 0) { ++ ie_len = min_t(int, ie_len, sizeof(pbss_network->ssid.ssid)); + memset(&pbss_network->ssid, 0, sizeof(struct ndis_802_11_ssid)); + memcpy(pbss_network->ssid.ssid, (p + 2), ie_len); + pbss_network->ssid.ssid_length = ie_len; +@@ -802,6 +803,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) + /* get supported rates */ + p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_)); + if (p) { ++ ie_len = min_t(int, ie_len, NDIS_802_11_LENGTH_RATES_EX); + memcpy(supportRate, p + 2, ie_len); + supportRateNum = ie_len; + } +@@ -809,6 +811,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) + /* get ext_supported rates */ + p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_); + if (p) { ++ ie_len = min_t(int, ie_len, ++ NDIS_802_11_LENGTH_RATES_EX - supportRateNum); + memcpy(supportRate + supportRateNum, p + 2, ie_len); + supportRateNum += ie_len; + } +@@ -922,6 +926,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) + + pht_cap->mcs.rx_mask[0] = 0xff; + pht_cap->mcs.rx_mask[1] = 0x0; ++ ie_len = min_t(int, ie_len, sizeof(pmlmepriv->htpriv.ht_cap)); + memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len); + } + +diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +index 630e7d933b104..7b83f0920f3c8 100644 +--- 
a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c ++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +@@ -1160,9 +1160,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, + break; + } + sec_len = *(pos++); len -= 1; +- if (sec_len > 0 && sec_len <= len) { ++ if (sec_len > 0 && ++ sec_len <= len && ++ sec_len <= 32) { + ssid[ssid_index].ssid_length = sec_len; +- memcpy(ssid[ssid_index].ssid, pos, ssid[ssid_index].ssid_length); ++ memcpy(ssid[ssid_index].ssid, pos, sec_len); + ssid_index++; + } + pos += sec_len; +diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +index 16bcee13f64b5..407effde5e71a 100644 +--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c ++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +@@ -406,9 +406,10 @@ static int _rtl92e_wx_set_scan(struct net_device *dev, + struct iw_scan_req *req = (struct iw_scan_req *)b; + + if (req->essid_len) { +- ieee->current_network.ssid_len = req->essid_len; +- memcpy(ieee->current_network.ssid, req->essid, +- req->essid_len); ++ int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); ++ ++ ieee->current_network.ssid_len = len; ++ memcpy(ieee->current_network.ssid, req->essid, len); + } + } + +diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c +index 5822bb7984b91..8f10672ade37e 100644 +--- a/drivers/staging/rtl8192u/r8192U_wx.c ++++ b/drivers/staging/rtl8192u/r8192U_wx.c +@@ -333,8 +333,10 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a, + struct iw_scan_req *req = (struct iw_scan_req *)b; + + if (req->essid_len) { +- ieee->current_network.ssid_len = req->essid_len; +- memcpy(ieee->current_network.ssid, req->essid, req->essid_len); ++ int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); ++ ++ ieee->current_network.ssid_len = len; ++ memcpy(ieee->current_network.ssid, req->essid, len); + } + } + +diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c 
b/drivers/staging/rtl8712/rtl871x_cmd.c +index 26b618008fcfe..d989229de88bd 100644 +--- a/drivers/staging/rtl8712/rtl871x_cmd.c ++++ b/drivers/staging/rtl8712/rtl871x_cmd.c +@@ -197,8 +197,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter, + psurveyPara->ss_ssidlen = 0; + memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1); + if ((pssid != NULL) && (pssid->SsidLength)) { +- memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength); +- psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength); ++ int len = min_t(int, pssid->SsidLength, IW_ESSID_MAX_SIZE); ++ ++ memcpy(psurveyPara->ss_ssid, pssid->Ssid, len); ++ psurveyPara->ss_ssidlen = cpu_to_le32(len); + } + set_fwstate(pmlmepriv, _FW_UNDER_SURVEY); + r8712_enqueue_cmd(pcmdpriv, ph2c); +diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +index 944336e0d2e2f..cff918d8bcb54 100644 +--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c ++++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +@@ -928,7 +928,7 @@ static int r871x_wx_set_priv(struct net_device *dev, + struct iw_point *dwrq = (struct iw_point *)awrq; + + len = dwrq->length; +- ext = memdup_user(dwrq->pointer, len); ++ ext = strndup_user(dwrq->pointer, len); + if (IS_ERR(ext)) + return PTR_ERR(ext); + +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c +index 5e931690e6979..51e690ab4d295 100644 +--- a/drivers/target/target_core_pr.c ++++ b/drivers/target/target_core_pr.c +@@ -3731,6 +3731,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) + spin_unlock(&dev->t10_pr.registration_lock); + + put_unaligned_be32(add_len, &buf[4]); ++ target_set_cmd_data_length(cmd, 8 + add_len); + + transport_kunmap_data_sg(cmd); + +@@ -3749,7 +3750,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) + struct t10_pr_registration *pr_reg; + unsigned char *buf; + u64 pr_res_key; +- u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. 
*/ ++ u32 add_len = 0; + + if (cmd->data_length < 8) { + pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" +@@ -3767,8 +3768,9 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) + pr_reg = dev->dev_pr_res_holder; + if (pr_reg) { + /* +- * Set the hardcoded Additional Length ++ * Set the Additional Length to 16 when a reservation is held + */ ++ add_len = 16; + put_unaligned_be32(add_len, &buf[4]); + + if (cmd->data_length < 22) +@@ -3804,6 +3806,8 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) + (pr_reg->pr_res_type & 0x0f); + } + ++ target_set_cmd_data_length(cmd, 8 + add_len); ++ + err: + spin_unlock(&dev->dev_reservation_lock); + transport_kunmap_data_sg(cmd); +@@ -3822,7 +3826,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) + struct se_device *dev = cmd->se_dev; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + unsigned char *buf; +- u16 add_len = 8; /* Hardcoded to 8. */ ++ u16 len = 8; /* Hardcoded to 8. */ + + if (cmd->data_length < 6) { + pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" +@@ -3834,7 +3838,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + +- put_unaligned_be16(add_len, &buf[0]); ++ put_unaligned_be16(len, &buf[0]); + buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. 
*/ + buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ + buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */ +@@ -3863,6 +3867,8 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) + buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ + buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ + ++ target_set_cmd_data_length(cmd, len); ++ + transport_kunmap_data_sg(cmd); + + return 0; +@@ -4023,6 +4029,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) + * Set ADDITIONAL_LENGTH + */ + put_unaligned_be32(add_len, &buf[4]); ++ target_set_cmd_data_length(cmd, 8 + add_len); + + transport_kunmap_data_sg(cmd); + +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index b1f4be055f838..a16835c0bb1dd 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -873,11 +873,9 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) + } + EXPORT_SYMBOL(target_complete_cmd); + +-void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) ++void target_set_cmd_data_length(struct se_cmd *cmd, int length) + { +- if ((scsi_status == SAM_STAT_GOOD || +- cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && +- length < cmd->data_length) { ++ if (length < cmd->data_length) { + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + cmd->residual_count += cmd->data_length - length; + } else { +@@ -887,6 +885,15 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len + + cmd->data_length = length; + } ++} ++EXPORT_SYMBOL(target_set_cmd_data_length); ++ ++void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) ++{ ++ if (scsi_status == SAM_STAT_GOOD || ++ cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) { ++ target_set_cmd_data_length(cmd, length); ++ } + + target_complete_cmd(cmd, scsi_status); + } +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 681374a3b3684..00bfc81f24702 
100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1941,6 +1941,11 @@ static const struct usb_device_id acm_ids[] = { + .driver_info = SEND_ZERO_PACKET, + }, + ++ /* Exclude Goodix Fingerprint Reader */ ++ { USB_DEVICE(0x27c6, 0x5395), ++ .driver_info = IGNORE_DEVICE, ++ }, ++ + /* control interfaces without any protocol set */ + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, + USB_CDC_PROTO_NONE) }, +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c +index c9f6e97582885..f27b4aecff3d4 100644 +--- a/drivers/usb/class/usblp.c ++++ b/drivers/usb/class/usblp.c +@@ -494,16 +494,24 @@ static int usblp_release(struct inode *inode, struct file *file) + /* No kernel lock - fine */ + static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait) + { +- __poll_t ret; ++ struct usblp *usblp = file->private_data; ++ __poll_t ret = 0; + unsigned long flags; + +- struct usblp *usblp = file->private_data; + /* Should we check file->f_mode & FMODE_WRITE before poll_wait()? */ + poll_wait(file, &usblp->rwait, wait); + poll_wait(file, &usblp->wwait, wait); ++ ++ mutex_lock(&usblp->mut); ++ if (!usblp->present) ++ ret |= EPOLLHUP; ++ mutex_unlock(&usblp->mut); ++ + spin_lock_irqsave(&usblp->lock, flags); +- ret = ((usblp->bidir && usblp->rcomplete) ? EPOLLIN | EPOLLRDNORM : 0) | +- ((usblp->no_paper || usblp->wcomplete) ? 
EPOLLOUT | EPOLLWRNORM : 0); ++ if (usblp->bidir && usblp->rcomplete) ++ ret |= EPOLLIN | EPOLLRDNORM; ++ if (usblp->no_paper || usblp->wcomplete) ++ ret |= EPOLLOUT | EPOLLWRNORM; + spin_unlock_irqrestore(&usblp->lock, flags); + return ret; + } +diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c +index 261af9e38dddf..7874b97e33227 100644 +--- a/drivers/usb/dwc3/dwc3-qcom.c ++++ b/drivers/usb/dwc3/dwc3-qcom.c +@@ -251,8 +251,10 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) + for (i = qcom->num_clocks - 1; i >= 0; i--) + clk_disable_unprepare(qcom->clks[i]); + ++ if (device_may_wakeup(qcom->dev)) ++ dwc3_qcom_enable_interrupts(qcom); ++ + qcom->is_suspended = true; +- dwc3_qcom_enable_interrupts(qcom); + + return 0; + } +@@ -265,7 +267,8 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom) + if (!qcom->is_suspended) + return 0; + +- dwc3_qcom_disable_interrupts(qcom); ++ if (device_may_wakeup(qcom->dev)) ++ dwc3_qcom_disable_interrupts(qcom); + + for (i = 0; i < qcom->num_clocks; i++) { + ret = clk_prepare_enable(qcom->clks[i]); +@@ -528,16 +531,19 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev) + ret = of_platform_populate(np, NULL, NULL, dev); + if (ret) { + dev_err(dev, "failed to register dwc3 core - %d\n", ret); +- return ret; ++ goto node_put; + } + + qcom->dwc3 = of_find_device_by_node(dwc3_np); + if (!qcom->dwc3) { ++ ret = -ENODEV; + dev_err(dev, "failed to get dwc3 platform device\n"); +- return -ENODEV; + } + +- return 0; ++node_put: ++ of_node_put(dwc3_np); ++ ++ return ret; + } + + static const struct dwc3_acpi_pdata sdm845_acpi_pdata = { +diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c +index 00d346965f7a5..560382e0a8f38 100644 +--- a/drivers/usb/gadget/function/f_uac1.c ++++ b/drivers/usb/gadget/function/f_uac1.c +@@ -499,6 +499,7 @@ static void f_audio_disable(struct usb_function *f) + uac1->as_out_alt = 0; + uac1->as_in_alt = 0; + ++ 
u_audio_stop_playback(&uac1->g_audio); + u_audio_stop_capture(&uac1->g_audio); + } + +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c +index 5d960b6603b6f..6f03e944e0e31 100644 +--- a/drivers/usb/gadget/function/f_uac2.c ++++ b/drivers/usb/gadget/function/f_uac2.c +@@ -478,7 +478,7 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, + } + + max_size_bw = num_channels(chmask) * ssize * +- DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))); ++ ((srate / (factor / (1 << (ep_desc->bInterval - 1)))) + 1); + ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw, + max_size_ep)); + +diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h +index d8b92485b727e..5b1d4771d44f4 100644 +--- a/drivers/usb/gadget/function/u_ether_configfs.h ++++ b/drivers/usb/gadget/function/u_ether_configfs.h +@@ -169,12 +169,11 @@ out: \ + size_t len) \ + { \ + struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ +- int ret; \ ++ int ret = -EINVAL; \ + u8 val; \ + \ + mutex_lock(&opts->lock); \ +- ret = sscanf(page, "%02hhx", &val); \ +- if (ret > 0) { \ ++ if (sscanf(page, "%02hhx", &val) > 0) { \ + opts->_n_ = val; \ + ret = len; \ + } \ +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index d08b0079eecb1..71ef473df585f 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -62,6 +62,7 @@ + #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 + #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 + #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 ++#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242 + + static const char hcd_name[] = "xhci_hcd"; + +@@ -258,11 +259,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) + xhci->quirks |= XHCI_BROKEN_STREAMS; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && +- pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) 
++ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) { + xhci->quirks |= XHCI_TRUST_TX_LENGTH; ++ xhci->quirks |= XHCI_NO_64BIT_SUPPORT; ++ } + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI || +- pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI)) ++ pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI || ++ pdev->device == PCI_DEVICE_ID_ASMEDIA_3242_XHCI)) + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && +@@ -277,6 +281,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + pdev->device == 0x9026) + xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT; + ++ if (pdev->vendor == PCI_VENDOR_ID_AMD && ++ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2 || ++ pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4)) ++ xhci->quirks |= XHCI_NO_SOFT_RETRY; ++ + if (xhci->quirks & XHCI_RESET_ON_RESUME) + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "QUIRK: Resetting on resume"); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 900ea91fb3c6b..f6b5010deb735 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -2299,7 +2299,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, + remaining = 0; + break; + case COMP_USB_TRANSACTION_ERROR: +- if ((ep_ring->err_count++ > MAX_SOFT_RETRY) || ++ if (xhci->quirks & XHCI_NO_SOFT_RETRY || ++ (ep_ring->err_count++ > MAX_SOFT_RETRY) || + le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) + break; + *status = 0; +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 91330517444e7..b5080bc1689e4 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -883,44 +883,42 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) + xhci_set_cmd_ring_deq(xhci); + } + +-static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) ++/* ++ * Disable port wake bits if do_wakeup is not set. 
++ * ++ * Also clear a possible internal port wake state left hanging for ports that ++ * detected termination but never successfully enumerated (trained to 0U). ++ * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done ++ * at enumeration clears this wake, force one here as well for unconnected ports ++ */ ++ ++static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci, ++ struct xhci_hub *rhub, ++ bool do_wakeup) + { +- struct xhci_port **ports; +- int port_index; + unsigned long flags; + u32 t1, t2, portsc; ++ int i; + + spin_lock_irqsave(&xhci->lock, flags); + +- /* disable usb3 ports Wake bits */ +- port_index = xhci->usb3_rhub.num_ports; +- ports = xhci->usb3_rhub.ports; +- while (port_index--) { +- t1 = readl(ports[port_index]->addr); +- portsc = t1; +- t1 = xhci_port_state_to_neutral(t1); +- t2 = t1 & ~PORT_WAKE_BITS; +- if (t1 != t2) { +- writel(t2, ports[port_index]->addr); +- xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n", +- xhci->usb3_rhub.hcd->self.busnum, +- port_index + 1, portsc, t2); +- } +- } ++ for (i = 0; i < rhub->num_ports; i++) { ++ portsc = readl(rhub->ports[i]->addr); ++ t1 = xhci_port_state_to_neutral(portsc); ++ t2 = t1; ++ ++ /* clear wake bits if do_wake is not set */ ++ if (!do_wakeup) ++ t2 &= ~PORT_WAKE_BITS; ++ ++ /* Don't touch csc bit if connected or connect change is set */ ++ if (!(portsc & (PORT_CSC | PORT_CONNECT))) ++ t2 |= PORT_CSC; + +- /* disable usb2 ports Wake bits */ +- port_index = xhci->usb2_rhub.num_ports; +- ports = xhci->usb2_rhub.ports; +- while (port_index--) { +- t1 = readl(ports[port_index]->addr); +- portsc = t1; +- t1 = xhci_port_state_to_neutral(t1); +- t2 = t1 & ~PORT_WAKE_BITS; + if (t1 != t2) { +- writel(t2, ports[port_index]->addr); +- xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n", +- xhci->usb2_rhub.hcd->self.busnum, +- port_index + 1, portsc, t2); ++ writel(t2, rhub->ports[i]->addr); ++ xhci_dbg(xhci, "config port %d-%d 
wake bits, portsc: 0x%x, write: 0x%x\n", ++ rhub->hcd->self.busnum, i + 1, portsc, t2); + } + } + spin_unlock_irqrestore(&xhci->lock, flags); +@@ -983,8 +981,8 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) + return -EINVAL; + + /* Clear root port wake on bits if wakeup not allowed. */ +- if (!do_wakeup) +- xhci_disable_port_wake_on_bits(xhci); ++ xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup); ++ xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup); + + if (!HCD_HW_ACCESSIBLE(hcd)) + return 0; +@@ -1088,6 +1086,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + struct usb_hcd *secondary_hcd; + int retval = 0; + bool comp_timer_running = false; ++ bool pending_portevent = false; + + if (!hcd->state) + return 0; +@@ -1226,13 +1225,22 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + + done: + if (retval == 0) { +- /* Resume root hubs only when have pending events. */ +- if (xhci_pending_portevent(xhci)) { ++ /* ++ * Resume roothubs only if there are pending events. ++ * USB 3 devices resend U3 LFPS wake after a 100ms delay if ++ * the first wake signalling failed, give it that chance. ++ */ ++ pending_portevent = xhci_pending_portevent(xhci); ++ if (!pending_portevent) { ++ msleep(120); ++ pending_portevent = xhci_pending_portevent(xhci); ++ } ++ ++ if (pending_portevent) { + usb_hcd_resume_root_hub(xhci->shared_hcd); + usb_hcd_resume_root_hub(hcd); + } + } +- + /* + * If system is subject to the Quirk, Compliance Mode Timer needs to + * be re-initialized Always after a system resume. 
Ports are subject +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 1ad1d6e9e9979..8798ed0317864 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1875,6 +1875,7 @@ struct xhci_hcd { + #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) + #define XHCI_SKIP_PHY_INIT BIT_ULL(37) + #define XHCI_DISABLE_SPARSE BIT_ULL(38) ++#define XHCI_NO_SOFT_RETRY BIT_ULL(40) + + unsigned int num_active_eps; + unsigned int limit_active_eps; +diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c +index 9e5afdde1adbf..40576e7176d8b 100644 +--- a/drivers/usb/renesas_usbhs/pipe.c ++++ b/drivers/usb/renesas_usbhs/pipe.c +@@ -746,6 +746,8 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv, + + void usbhs_pipe_free(struct usbhs_pipe *pipe) + { ++ usbhsp_pipe_select(pipe); ++ usbhsp_pipe_cfg_set(pipe, 0xFFFF, 0); + usbhsp_put_pipe(pipe); + } + +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c +index fdaefbe924908..a82ba9cc0c724 100644 +--- a/drivers/usb/serial/ch341.c ++++ b/drivers/usb/serial/ch341.c +@@ -85,6 +85,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1a86, 0x7522) }, + { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x4348, 0x5523) }, ++ { USB_DEVICE(0x9986, 0x7523) }, + { }, + }; + MODULE_DEVICE_TABLE(usb, id_table); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 361a2e3ccad8d..caf27a0d51f03 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -146,6 +146,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ ++ { USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */ + { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ + { USB_DEVICE(0x10C4, 
0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ +@@ -202,6 +203,8 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ + { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ + { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ ++ { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */ ++ { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */ + { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c +index 4cca0b836f430..3b4d1ff9033dd 100644 +--- a/drivers/usb/serial/io_edgeport.c ++++ b/drivers/usb/serial/io_edgeport.c +@@ -3003,26 +3003,32 @@ static int edge_startup(struct usb_serial *serial) + response = -ENODEV; + } + +- usb_free_urb(edge_serial->interrupt_read_urb); +- kfree(edge_serial->interrupt_in_buffer); +- +- usb_free_urb(edge_serial->read_urb); +- kfree(edge_serial->bulk_in_buffer); +- +- kfree(edge_serial); +- +- return response; ++ goto error; + } + + /* start interrupt read for this edgeport this interrupt will + * continue as long as the edgeport is connected */ + response = usb_submit_urb(edge_serial->interrupt_read_urb, + GFP_KERNEL); +- if (response) ++ if (response) { + dev_err(ddev, "%s - Error %d submitting control urb\n", + __func__, response); ++ ++ goto error; ++ } + } + return response; ++ ++error: ++ usb_free_urb(edge_serial->interrupt_read_urb); ++ kfree(edge_serial->interrupt_in_buffer); ++ ++ usb_free_urb(edge_serial->read_urb); ++ kfree(edge_serial->bulk_in_buffer); ++ ++ kfree(edge_serial); ++ ++ return response; + } + + +diff --git a/drivers/usb/usbip/stub_dev.c 
b/drivers/usb/usbip/stub_dev.c +index 2305d425e6c9a..8f1de1fbbeedf 100644 +--- a/drivers/usb/usbip/stub_dev.c ++++ b/drivers/usb/usbip/stub_dev.c +@@ -46,6 +46,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a + int sockfd = 0; + struct socket *socket; + int rv; ++ struct task_struct *tcp_rx = NULL; ++ struct task_struct *tcp_tx = NULL; + + if (!sdev) { + dev_err(dev, "sdev is null\n"); +@@ -69,23 +71,47 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a + } + + socket = sockfd_lookup(sockfd, &err); +- if (!socket) ++ if (!socket) { ++ dev_err(dev, "failed to lookup sock"); + goto err; ++ } + +- sdev->ud.tcp_socket = socket; +- sdev->ud.sockfd = sockfd; ++ if (socket->type != SOCK_STREAM) { ++ dev_err(dev, "Expecting SOCK_STREAM - found %d", ++ socket->type); ++ goto sock_err; ++ } + ++ /* unlock and create threads and get tasks */ + spin_unlock_irq(&sdev->ud.lock); ++ tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); ++ if (IS_ERR(tcp_rx)) { ++ sockfd_put(socket); ++ return -EINVAL; ++ } ++ tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); ++ if (IS_ERR(tcp_tx)) { ++ kthread_stop(tcp_rx); ++ sockfd_put(socket); ++ return -EINVAL; ++ } + +- sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud, +- "stub_rx"); +- sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud, +- "stub_tx"); ++ /* get task structs now */ ++ get_task_struct(tcp_rx); ++ get_task_struct(tcp_tx); + ++ /* lock and update sdev->ud state */ + spin_lock_irq(&sdev->ud.lock); ++ sdev->ud.tcp_socket = socket; ++ sdev->ud.sockfd = sockfd; ++ sdev->ud.tcp_rx = tcp_rx; ++ sdev->ud.tcp_tx = tcp_tx; + sdev->ud.status = SDEV_ST_USED; + spin_unlock_irq(&sdev->ud.lock); + ++ wake_up_process(sdev->ud.tcp_rx); ++ wake_up_process(sdev->ud.tcp_tx); ++ + } else { + dev_info(dev, "stub down\n"); + +@@ -100,6 +126,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a + + return count; + 
++sock_err: ++ sockfd_put(socket); + err: + spin_unlock_irq(&sdev->ud.lock); + return -EINVAL; +diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c +index be37aec250c2b..e64ea314930be 100644 +--- a/drivers/usb/usbip/vhci_sysfs.c ++++ b/drivers/usb/usbip/vhci_sysfs.c +@@ -312,6 +312,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, + struct vhci *vhci; + int err; + unsigned long flags; ++ struct task_struct *tcp_rx = NULL; ++ struct task_struct *tcp_tx = NULL; + + /* + * @rhport: port number of vhci_hcd +@@ -349,12 +351,35 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, + + /* Extract socket from fd. */ + socket = sockfd_lookup(sockfd, &err); +- if (!socket) ++ if (!socket) { ++ dev_err(dev, "failed to lookup sock"); + return -EINVAL; ++ } ++ if (socket->type != SOCK_STREAM) { ++ dev_err(dev, "Expecting SOCK_STREAM - found %d", ++ socket->type); ++ sockfd_put(socket); ++ return -EINVAL; ++ } ++ ++ /* create threads before locking */ ++ tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); ++ if (IS_ERR(tcp_rx)) { ++ sockfd_put(socket); ++ return -EINVAL; ++ } ++ tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); ++ if (IS_ERR(tcp_tx)) { ++ kthread_stop(tcp_rx); ++ sockfd_put(socket); ++ return -EINVAL; ++ } + +- /* now need lock until setting vdev status as used */ ++ /* get task structs now */ ++ get_task_struct(tcp_rx); ++ get_task_struct(tcp_tx); + +- /* begin a lock */ ++ /* now begin lock until setting vdev status set */ + spin_lock_irqsave(&vhci->lock, flags); + spin_lock(&vdev->ud.lock); + +@@ -364,6 +389,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, + spin_unlock_irqrestore(&vhci->lock, flags); + + sockfd_put(socket); ++ kthread_stop_put(tcp_rx); ++ kthread_stop_put(tcp_tx); + + dev_err(dev, "port %d already used\n", rhport); + /* +@@ -382,14 +409,16 @@ static ssize_t attach_store(struct device *dev, struct 
device_attribute *attr, + vdev->speed = speed; + vdev->ud.sockfd = sockfd; + vdev->ud.tcp_socket = socket; ++ vdev->ud.tcp_rx = tcp_rx; ++ vdev->ud.tcp_tx = tcp_tx; + vdev->ud.status = VDEV_ST_NOTASSIGNED; + + spin_unlock(&vdev->ud.lock); + spin_unlock_irqrestore(&vhci->lock, flags); + /* end the lock */ + +- vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); +- vdev->ud.tcp_tx = kthread_get_run(vhci_tx_loop, &vdev->ud, "vhci_tx"); ++ wake_up_process(vdev->ud.tcp_rx); ++ wake_up_process(vdev->ud.tcp_tx); + + rh_port_connect(vdev, speed); + +diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c +index 100f680c572ae..a3ec39fc61778 100644 +--- a/drivers/usb/usbip/vudc_sysfs.c ++++ b/drivers/usb/usbip/vudc_sysfs.c +@@ -90,8 +90,9 @@ unlock: + } + static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor)); + +-static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr, +- const char *in, size_t count) ++static ssize_t usbip_sockfd_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *in, size_t count) + { + struct vudc *udc = (struct vudc *) dev_get_drvdata(dev); + int rv; +@@ -100,6 +101,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a + struct socket *socket; + unsigned long flags; + int ret; ++ struct task_struct *tcp_rx = NULL; ++ struct task_struct *tcp_tx = NULL; + + rv = kstrtoint(in, 0, &sockfd); + if (rv != 0) +@@ -138,24 +141,54 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a + goto unlock_ud; + } + +- udc->ud.tcp_socket = socket; ++ if (socket->type != SOCK_STREAM) { ++ dev_err(dev, "Expecting SOCK_STREAM - found %d", ++ socket->type); ++ ret = -EINVAL; ++ goto sock_err; ++ } + ++ /* unlock and create threads and get tasks */ + spin_unlock_irq(&udc->ud.lock); + spin_unlock_irqrestore(&udc->lock, flags); + +- udc->ud.tcp_rx = kthread_get_run(&v_rx_loop, +- &udc->ud, "vudc_rx"); +- 
udc->ud.tcp_tx = kthread_get_run(&v_tx_loop, +- &udc->ud, "vudc_tx"); ++ tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx"); ++ if (IS_ERR(tcp_rx)) { ++ sockfd_put(socket); ++ return -EINVAL; ++ } ++ tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx"); ++ if (IS_ERR(tcp_tx)) { ++ kthread_stop(tcp_rx); ++ sockfd_put(socket); ++ return -EINVAL; ++ } ++ ++ /* get task structs now */ ++ get_task_struct(tcp_rx); ++ get_task_struct(tcp_tx); + ++ /* lock and update udc->ud state */ + spin_lock_irqsave(&udc->lock, flags); + spin_lock_irq(&udc->ud.lock); ++ ++ udc->ud.tcp_socket = socket; ++ udc->ud.tcp_rx = tcp_rx; ++ udc->ud.tcp_rx = tcp_tx; + udc->ud.status = SDEV_ST_USED; ++ + spin_unlock_irq(&udc->ud.lock); + + ktime_get_ts64(&udc->start_time); + v_start_timer(udc); + udc->connected = 1; ++ ++ spin_unlock_irqrestore(&udc->lock, flags); ++ ++ wake_up_process(udc->ud.tcp_rx); ++ wake_up_process(udc->ud.tcp_tx); ++ return count; ++ + } else { + if (!udc->connected) { + dev_err(dev, "Device not connected"); +@@ -177,6 +210,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a + + return count; + ++sock_err: ++ sockfd_put(socket); + unlock_ud: + spin_unlock_irq(&udc->ud.lock); + unlock: +diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c +index f026624898e7a..77cc80bcb479c 100644 +--- a/drivers/xen/events/events_2l.c ++++ b/drivers/xen/events/events_2l.c +@@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(void) + return EVTCHN_2L_NR_CHANNELS; + } + ++static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu) ++{ ++ clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); ++} ++ + static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu) + { + clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu))); +@@ -71,12 +76,6 @@ static bool evtchn_2l_is_pending(unsigned port) + return sync_test_bit(port, BM(&s->evtchn_pending[0])); + } + +-static bool 
evtchn_2l_test_and_set_mask(unsigned port) +-{ +- struct shared_info *s = HYPERVISOR_shared_info; +- return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0])); +-} +- + static void evtchn_2l_mask(unsigned port) + { + struct shared_info *s = HYPERVISOR_shared_info; +@@ -354,18 +353,27 @@ static void evtchn_2l_resume(void) + EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); + } + ++static int evtchn_2l_percpu_deinit(unsigned int cpu) ++{ ++ memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) * ++ EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); ++ ++ return 0; ++} ++ + static const struct evtchn_ops evtchn_ops_2l = { + .max_channels = evtchn_2l_max_channels, + .nr_channels = evtchn_2l_max_channels, ++ .remove = evtchn_2l_remove, + .bind_to_cpu = evtchn_2l_bind_to_cpu, + .clear_pending = evtchn_2l_clear_pending, + .set_pending = evtchn_2l_set_pending, + .is_pending = evtchn_2l_is_pending, +- .test_and_set_mask = evtchn_2l_test_and_set_mask, + .mask = evtchn_2l_mask, + .unmask = evtchn_2l_unmask, + .handle_events = evtchn_2l_handle_events, + .resume = evtchn_2l_resume, ++ .percpu_deinit = evtchn_2l_percpu_deinit, + }; + + void __init xen_evtchn_2l_init(void) +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c +index 7b94a6c316643..e492f5fe5be62 100644 +--- a/drivers/xen/events/events_base.c ++++ b/drivers/xen/events/events_base.c +@@ -99,6 +99,7 @@ static DEFINE_RWLOCK(evtchn_rwlock); + * evtchn_rwlock + * IRQ-desc lock + * percpu eoi_list_lock ++ * irq_info->lock + */ + + static LIST_HEAD(xen_irq_list_head); +@@ -220,6 +221,8 @@ static int xen_irq_info_common_setup(struct irq_info *info, + info->irq = irq; + info->evtchn = evtchn; + info->cpu = cpu; ++ info->mask_reason = EVT_MASK_REASON_EXPLICIT; ++ spin_lock_init(&info->lock); + + ret = set_evtchn_to_irq(evtchn, irq); + if (ret < 0) +@@ -286,6 +289,7 @@ static int xen_irq_info_pirq_setup(unsigned irq, + static void xen_irq_info_cleanup(struct irq_info *info) + { + 
set_evtchn_to_irq(info->evtchn, -1); ++ xen_evtchn_port_remove(info->evtchn, info->cpu); + info->evtchn = 0; + } + +@@ -366,6 +370,34 @@ unsigned int cpu_from_evtchn(unsigned int evtchn) + return ret; + } + ++static void do_mask(struct irq_info *info, u8 reason) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&info->lock, flags); ++ ++ if (!info->mask_reason) ++ mask_evtchn(info->evtchn); ++ ++ info->mask_reason |= reason; ++ ++ spin_unlock_irqrestore(&info->lock, flags); ++} ++ ++static void do_unmask(struct irq_info *info, u8 reason) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&info->lock, flags); ++ ++ info->mask_reason &= ~reason; ++ ++ if (!info->mask_reason) ++ unmask_evtchn(info->evtchn); ++ ++ spin_unlock_irqrestore(&info->lock, flags); ++} ++ + #ifdef CONFIG_X86 + static bool pirq_check_eoi_map(unsigned irq) + { +@@ -493,7 +525,7 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious) + } + + info->eoi_time = 0; +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_EOI_PENDING); + } + + static void xen_irq_lateeoi_worker(struct work_struct *work) +@@ -662,6 +694,12 @@ static void xen_evtchn_close(unsigned int port) + BUG(); + } + ++static void event_handler_exit(struct irq_info *info) ++{ ++ smp_store_release(&info->is_active, 0); ++ clear_evtchn(info->evtchn); ++} ++ + static void pirq_query_unmask(int irq) + { + struct physdev_irq_status_query irq_status; +@@ -680,7 +718,8 @@ static void pirq_query_unmask(int irq) + + static void eoi_pirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(data->irq); ++ struct irq_info *info = info_for_irq(data->irq); ++ int evtchn = info ? 
info->evtchn : 0; + struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; + int rc = 0; + +@@ -689,16 +728,15 @@ static void eoi_pirq(struct irq_data *data) + + if (unlikely(irqd_is_setaffinity_pending(data)) && + likely(!irqd_irq_disabled(data))) { +- int masked = test_and_set_mask(evtchn); ++ do_mask(info, EVT_MASK_REASON_TEMPORARY); + +- clear_evtchn(evtchn); ++ event_handler_exit(info); + + irq_move_masked_irq(data); + +- if (!masked) +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_TEMPORARY); + } else +- clear_evtchn(evtchn); ++ event_handler_exit(info); + + if (pirq_needs_eoi(data->irq)) { + rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); +@@ -749,7 +787,8 @@ static unsigned int __startup_pirq(unsigned int irq) + goto err; + + out: +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_EXPLICIT); ++ + eoi_pirq(irq_get_irq_data(irq)); + + return 0; +@@ -776,7 +815,7 @@ static void shutdown_pirq(struct irq_data *data) + if (!VALID_EVTCHN(evtchn)) + return; + +- mask_evtchn(evtchn); ++ do_mask(info, EVT_MASK_REASON_EXPLICIT); + xen_evtchn_close(evtchn); + xen_irq_info_cleanup(info); + } +@@ -1533,6 +1572,8 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) + } + + info = info_for_irq(irq); ++ if (xchg_acquire(&info->is_active, 1)) ++ return; + + if (ctrl->defer_eoi) { + info->eoi_cpu = smp_processor_id(); +@@ -1635,10 +1676,10 @@ void rebind_evtchn_irq(int evtchn, int irq) + } + + /* Rebind an evtchn so that it gets delivered to a specific cpu */ +-static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu) ++static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu) + { + struct evtchn_bind_vcpu bind_vcpu; +- int masked; ++ evtchn_port_t evtchn = info ? 
info->evtchn : 0; + + if (!VALID_EVTCHN(evtchn)) + return -1; +@@ -1654,7 +1695,7 @@ static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu) + * Mask the event while changing the VCPU binding to prevent + * it being delivered on an unexpected VCPU. + */ +- masked = test_and_set_mask(evtchn); ++ do_mask(info, EVT_MASK_REASON_TEMPORARY); + + /* + * If this fails, it usually just indicates that we're dealing with a +@@ -1664,8 +1705,7 @@ static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu) + if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) + bind_evtchn_to_cpu(evtchn, tcpu); + +- if (!masked) +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_TEMPORARY); + + return 0; + } +@@ -1674,7 +1714,7 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, + bool force) + { + unsigned tcpu = cpumask_first_and(dest, cpu_online_mask); +- int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu); ++ int ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu); + + if (!ret) + irq_data_update_effective_affinity(data, cpumask_of(tcpu)); +@@ -1693,39 +1733,41 @@ EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn); + + static void enable_dynirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(data->irq); ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_EXPLICIT); + } + + static void disable_dynirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(data->irq); ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? 
info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) +- mask_evtchn(evtchn); ++ do_mask(info, EVT_MASK_REASON_EXPLICIT); + } + + static void ack_dynirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(data->irq); ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (!VALID_EVTCHN(evtchn)) + return; + + if (unlikely(irqd_is_setaffinity_pending(data)) && + likely(!irqd_irq_disabled(data))) { +- int masked = test_and_set_mask(evtchn); ++ do_mask(info, EVT_MASK_REASON_TEMPORARY); + +- clear_evtchn(evtchn); ++ event_handler_exit(info); + + irq_move_masked_irq(data); + +- if (!masked) +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_TEMPORARY); + } else +- clear_evtchn(evtchn); ++ event_handler_exit(info); + } + + static void mask_ack_dynirq(struct irq_data *data) +@@ -1734,18 +1776,39 @@ static void mask_ack_dynirq(struct irq_data *data) + ack_dynirq(data); + } + ++static void lateeoi_ack_dynirq(struct irq_data *data) ++{ ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; ++ ++ if (VALID_EVTCHN(evtchn)) { ++ do_mask(info, EVT_MASK_REASON_EOI_PENDING); ++ event_handler_exit(info); ++ } ++} ++ ++static void lateeoi_mask_ack_dynirq(struct irq_data *data) ++{ ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; ++ ++ if (VALID_EVTCHN(evtchn)) { ++ do_mask(info, EVT_MASK_REASON_EXPLICIT); ++ event_handler_exit(info); ++ } ++} ++ + static int retrigger_dynirq(struct irq_data *data) + { +- unsigned int evtchn = evtchn_from_irq(data->irq); +- int masked; ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? 
info->evtchn : 0; + + if (!VALID_EVTCHN(evtchn)) + return 0; + +- masked = test_and_set_mask(evtchn); ++ do_mask(info, EVT_MASK_REASON_TEMPORARY); + set_evtchn(evtchn); +- if (!masked) +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_TEMPORARY); + + return 1; + } +@@ -1840,10 +1903,11 @@ static void restore_cpu_ipis(unsigned int cpu) + /* Clear an irq's pending state, in preparation for polling on it */ + void xen_clear_irq_pending(int irq) + { +- int evtchn = evtchn_from_irq(irq); ++ struct irq_info *info = info_for_irq(irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) +- clear_evtchn(evtchn); ++ event_handler_exit(info); + } + EXPORT_SYMBOL(xen_clear_irq_pending); + void xen_set_irq_pending(int irq) +@@ -1951,8 +2015,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = { + .irq_mask = disable_dynirq, + .irq_unmask = enable_dynirq, + +- .irq_ack = mask_ack_dynirq, +- .irq_mask_ack = mask_ack_dynirq, ++ .irq_ack = lateeoi_ack_dynirq, ++ .irq_mask_ack = lateeoi_mask_ack_dynirq, + + .irq_set_affinity = set_affinity_irq, + .irq_retrigger = retrigger_dynirq, +diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c +index 33462521bfd0f..360a7f8cdf754 100644 +--- a/drivers/xen/events/events_fifo.c ++++ b/drivers/xen/events/events_fifo.c +@@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(unsigned port) + return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); + } + +-static bool evtchn_fifo_test_and_set_mask(unsigned port) +-{ +- event_word_t *word = event_word_from_port(port); +- return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); +-} +- + static void evtchn_fifo_mask(unsigned port) + { + event_word_t *word = event_word_from_port(port); +@@ -420,7 +414,6 @@ static const struct evtchn_ops evtchn_ops_fifo = { + .clear_pending = evtchn_fifo_clear_pending, + .set_pending = evtchn_fifo_set_pending, + .is_pending = evtchn_fifo_is_pending, +- .test_and_set_mask = 
evtchn_fifo_test_and_set_mask, + .mask = evtchn_fifo_mask, + .unmask = evtchn_fifo_unmask, + .handle_events = evtchn_fifo_handle_events, +diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h +index a35c8c7ac6066..d3a89b4646b8b 100644 +--- a/drivers/xen/events/events_internal.h ++++ b/drivers/xen/events/events_internal.h +@@ -33,13 +33,19 @@ struct irq_info { + struct list_head eoi_list; + short refcnt; + short spurious_cnt; +- enum xen_irq_type type; /* type */ ++ short type; /* type */ ++ u8 mask_reason; /* Why is event channel masked */ ++#define EVT_MASK_REASON_EXPLICIT 0x01 ++#define EVT_MASK_REASON_TEMPORARY 0x02 ++#define EVT_MASK_REASON_EOI_PENDING 0x04 ++ u8 is_active; /* Is event just being handled? */ + unsigned irq; + unsigned int evtchn; /* event channel */ + unsigned short cpu; /* cpu bound */ + unsigned short eoi_cpu; /* EOI must happen on this cpu */ + unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */ + u64 eoi_time; /* Time in jiffies when to EOI. 
*/ ++ spinlock_t lock; + + union { + unsigned short virq; +@@ -65,12 +71,12 @@ struct evtchn_ops { + unsigned (*nr_channels)(void); + + int (*setup)(struct irq_info *info); ++ void (*remove)(evtchn_port_t port, unsigned int cpu); + void (*bind_to_cpu)(struct irq_info *info, unsigned cpu); + + void (*clear_pending)(unsigned port); + void (*set_pending)(unsigned port); + bool (*is_pending)(unsigned port); +- bool (*test_and_set_mask)(unsigned port); + void (*mask)(unsigned port); + void (*unmask)(unsigned port); + +@@ -107,6 +113,13 @@ static inline int xen_evtchn_port_setup(struct irq_info *info) + return 0; + } + ++static inline void xen_evtchn_port_remove(evtchn_port_t evtchn, ++ unsigned int cpu) ++{ ++ if (evtchn_ops->remove) ++ evtchn_ops->remove(evtchn, cpu); ++} ++ + static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info, + unsigned cpu) + { +@@ -128,11 +141,6 @@ static inline bool test_evtchn(unsigned port) + return evtchn_ops->is_pending(port); + } + +-static inline bool test_and_set_mask(unsigned port) +-{ +- return evtchn_ops->test_and_set_mask(port); +-} +- + static inline void mask_evtchn(unsigned port) + { + return evtchn_ops->mask(port); +diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c +index cdb45829354d9..056a68292e152 100644 +--- a/fs/binfmt_misc.c ++++ b/fs/binfmt_misc.c +@@ -696,12 +696,24 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, + struct super_block *sb = file_inode(file)->i_sb; + struct dentry *root = sb->s_root, *dentry; + int err = 0; ++ struct file *f = NULL; + + e = create_entry(buffer, count); + + if (IS_ERR(e)) + return PTR_ERR(e); + ++ if (e->flags & MISC_FMT_OPEN_FILE) { ++ f = open_exec(e->interpreter); ++ if (IS_ERR(f)) { ++ pr_notice("register: failed to install interpreter file %s\n", ++ e->interpreter); ++ kfree(e); ++ return PTR_ERR(f); ++ } ++ e->interp_file = f; ++ } ++ + inode_lock(d_inode(root)); + dentry = lookup_one_len(e->name, root, strlen(e->name)); + err = 
PTR_ERR(dentry); +@@ -725,21 +737,6 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, + goto out2; + } + +- if (e->flags & MISC_FMT_OPEN_FILE) { +- struct file *f; +- +- f = open_exec(e->interpreter); +- if (IS_ERR(f)) { +- err = PTR_ERR(f); +- pr_notice("register: failed to install interpreter file %s\n", e->interpreter); +- simple_release_fs(&bm_mnt, &entry_count); +- iput(inode); +- inode = NULL; +- goto out2; +- } +- e->interp_file = f; +- } +- + e->dentry = dget(dentry); + inode->i_private = e; + inode->i_fop = &bm_entry_operations; +@@ -756,6 +753,8 @@ out: + inode_unlock(d_inode(root)); + + if (err) { ++ if (f) ++ filp_close(f, NULL); + kfree(e); + return err; + } +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index 115f063497ffa..41b3c5fc958c7 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -278,7 +278,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf) + rc = server->ops->queryfs(xid, tcon, buf); + + free_xid(xid); +- return 0; ++ return rc; + } + + static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len) +diff --git a/fs/configfs/file.c b/fs/configfs/file.c +index fb65b706cc0db..84b4d58fc65f7 100644 +--- a/fs/configfs/file.c ++++ b/fs/configfs/file.c +@@ -378,7 +378,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type + + attr = to_attr(dentry); + if (!attr) +- goto out_put_item; ++ goto out_free_buffer; + + if (type & CONFIGFS_ITEM_BIN_ATTR) { + buffer->bin_attr = to_bin_attr(dentry); +@@ -391,7 +391,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type + /* Grab the module reference for this attribute if we have one */ + error = -ENODEV; + if (!try_module_get(buffer->owner)) +- goto out_put_item; ++ goto out_free_buffer; + + error = -EACCES; + if (!buffer->item->ci_type) +@@ -435,8 +435,6 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type + + out_put_module: + 
module_put(buffer->owner); +-out_put_item: +- config_item_put(buffer->item); + out_free_buffer: + up_read(&frag->frag_sem); + kfree(buffer); +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index 188b17a3b19eb..e7c0790308fe0 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -1073,6 +1073,15 @@ out_force: + goto out; + } + ++static void nfs_mark_dir_for_revalidate(struct inode *inode) ++{ ++ struct nfs_inode *nfsi = NFS_I(inode); ++ ++ spin_lock(&inode->i_lock); ++ nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE; ++ spin_unlock(&inode->i_lock); ++} ++ + /* + * We judge how long we want to trust negative + * dentries by looking at the parent inode mtime. +@@ -1107,19 +1116,14 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry, + __func__, dentry); + return 1; + case 0: +- nfs_mark_for_revalidate(dir); +- if (inode && S_ISDIR(inode->i_mode)) { +- /* Purge readdir caches. */ +- nfs_zap_caches(inode); +- /* +- * We can't d_drop the root of a disconnected tree: +- * its d_hash is on the s_anon list and d_drop() would hide +- * it from shrink_dcache_for_unmount(), leading to busy +- * inodes on unmount and further oopses. +- */ +- if (IS_ROOT(dentry)) +- return 1; +- } ++ /* ++ * We can't d_drop the root of a disconnected tree: ++ * its d_hash is on the s_anon list and d_drop() would hide ++ * it from shrink_dcache_for_unmount(), leading to busy ++ * inodes on unmount and further oopses. ++ */ ++ if (inode && IS_ROOT(dentry)) ++ return 1; + dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n", + __func__, dentry); + return 0; +@@ -1188,6 +1192,13 @@ out: + nfs_free_fattr(fattr); + nfs_free_fhandle(fhandle); + nfs4_label_free(label); ++ ++ /* ++ * If the lookup failed despite the dentry change attribute being ++ * a match, then we should revalidate the directory cache. 
++ */ ++ if (!ret && nfs_verify_change_attribute(dir, dentry->d_time)) ++ nfs_mark_dir_for_revalidate(dir); + return nfs_lookup_revalidate_done(dir, dentry, inode, ret); + } + +@@ -1230,7 +1241,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, + error = nfs_lookup_verify_inode(inode, flags); + if (error) { + if (error == -ESTALE) +- nfs_zap_caches(dir); ++ nfs_mark_dir_for_revalidate(dir); + goto out_bad; + } + nfs_advise_use_readdirplus(dir); +@@ -1725,7 +1736,6 @@ out: + dput(parent); + return d; + out_error: +- nfs_mark_for_revalidate(dir); + d = ERR_PTR(error); + goto out; + } +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 30e44b33040a4..b2119159dead2 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -5830,7 +5830,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf, + return ret; + if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) + return -ENOENT; +- return 0; ++ return label.len; + } + + static int nfs4_get_security_label(struct inode *inode, void *buf, +diff --git a/fs/pnode.h b/fs/pnode.h +index 26f74e092bd98..988f1aa9b02ae 100644 +--- a/fs/pnode.h ++++ b/fs/pnode.h +@@ -12,7 +12,7 @@ + + #define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED) + #define IS_MNT_SLAVE(m) ((m)->mnt_master) +-#define IS_MNT_NEW(m) (!(m)->mnt_ns) ++#define IS_MNT_NEW(m) (!(m)->mnt_ns || is_anon_ns((m)->mnt_ns)) + #define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED) + #define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE) + #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED) +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index 97a192eb9949c..507f8f9103270 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -547,11 +547,14 @@ static int udf_do_extend_file(struct inode *inode, + + udf_write_aext(inode, last_pos, &last_ext->extLocation, + last_ext->extLength, 1); ++ + /* +- * We've rewritten the last extent but there may be empty +- * indirect extent after it - enter it. 
++ * We've rewritten the last extent. If we are going to add ++ * more extents, we may need to enter possible following ++ * empty indirect extent. + */ +- udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0); ++ if (new_block_bytes || prealloc_len) ++ udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0); + } + + /* Managed to do everything necessary? */ +diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h +index 0783b0c6d9e2f..1ef071e5a55ef 100644 +--- a/include/linux/can/skb.h ++++ b/include/linux/can/skb.h +@@ -49,8 +49,12 @@ static inline void can_skb_reserve(struct sk_buff *skb) + + static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) + { +- if (sk) { +- sock_hold(sk); ++ /* If the socket has already been closed by user space, the ++ * refcount may already be 0 (and the socket will be freed ++ * after the last TX skb has been freed). So only increase ++ * socket refcount if the refcount is > 0. ++ */ ++ if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { + skb->destructor = sock_efree; + skb->sk = sk; + } +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h +index a132d875d3518..3a1d899019af0 100644 +--- a/include/linux/sched/mm.h ++++ b/include/linux/sched/mm.h +@@ -167,7 +167,8 @@ static inline bool in_vfork(struct task_struct *tsk) + * another oom-unkillable task does this it should blame itself. 
+ */ + rcu_read_lock(); +- ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; ++ ret = tsk->vfork_done && ++ rcu_dereference(tsk->real_parent)->mm == tsk->mm; + rcu_read_unlock(); + + return ret; +diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h +index f9a0c6189852e..69998fc5ffe9d 100644 +--- a/include/linux/stop_machine.h ++++ b/include/linux/stop_machine.h +@@ -139,7 +139,7 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, + const struct cpumask *cpus); + #else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ + +-static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, ++static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, + const struct cpumask *cpus) + { + unsigned long flags; +@@ -150,14 +150,15 @@ static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, + return ret; + } + +-static inline int stop_machine(cpu_stop_fn_t fn, void *data, +- const struct cpumask *cpus) ++static __always_inline int ++stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) + { + return stop_machine_cpuslocked(fn, data, cpus); + } + +-static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, +- const struct cpumask *cpus) ++static __always_inline int ++stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, ++ const struct cpumask *cpus) + { + return stop_machine(fn, data, cpus); + } +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h +index e8a924eeea3d0..6b5fcfa1e5553 100644 +--- a/include/linux/virtio_net.h ++++ b/include/linux/virtio_net.h +@@ -79,8 +79,13 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + if (gso_type && skb->network_header) { + struct flow_keys_basic keys; + +- if (!skb->protocol) ++ if (!skb->protocol) { ++ __be16 protocol = dev_parse_header_protocol(skb); ++ + virtio_net_hdr_set_proto(skb, hdr); ++ if (protocol && protocol != skb->protocol) ++ return -EINVAL; ++ } + retry: + if 
(!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, + NULL, 0, 0, 0, +diff --git a/include/media/rc-map.h b/include/media/rc-map.h +index c2ef3906e1cd1..a358c87a65de2 100644 +--- a/include/media/rc-map.h ++++ b/include/media/rc-map.h +@@ -126,6 +126,13 @@ struct rc_map_list { + struct rc_map map; + }; + ++#ifdef CONFIG_MEDIA_CEC_RC ++/* ++ * rc_map_list from rc-cec.c ++ */ ++extern struct rc_map_list cec_map; ++#endif ++ + /* Routines from rc-map.c */ + + /** +diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h +index 51b6f50eabeeb..0deeff9b4496a 100644 +--- a/include/target/target_core_backend.h ++++ b/include/target/target_core_backend.h +@@ -69,6 +69,7 @@ int transport_backend_register(const struct target_backend_ops *); + void target_backend_unregister(const struct target_backend_ops *); + + void target_complete_cmd(struct se_cmd *, u8); ++void target_set_cmd_data_length(struct se_cmd *, int); + void target_complete_cmd_with_length(struct se_cmd *, u8, int); + + void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *); +diff --git a/include/uapi/linux/netfilter/nfnetlink_cthelper.h b/include/uapi/linux/netfilter/nfnetlink_cthelper.h +index a13137afc4299..70af02092d16e 100644 +--- a/include/uapi/linux/netfilter/nfnetlink_cthelper.h ++++ b/include/uapi/linux/netfilter/nfnetlink_cthelper.h +@@ -5,7 +5,7 @@ + #define NFCT_HELPER_STATUS_DISABLED 0 + #define NFCT_HELPER_STATUS_ENABLED 1 + +-enum nfnl_acct_msg_types { ++enum nfnl_cthelper_msg_types { + NFNL_MSG_CTHELPER_NEW, + NFNL_MSG_CTHELPER_GET, + NFNL_MSG_CTHELPER_DEL, +diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c +index be0ca3306be8c..46c142b695988 100644 +--- a/kernel/sched/membarrier.c ++++ b/kernel/sched/membarrier.c +@@ -265,9 +265,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm) + } + rcu_read_unlock(); + +- preempt_disable(); +- smp_call_function_many(tmpmask, ipi_sync_rq_state, mm, 1); +- preempt_enable(); ++ 
on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true); + + free_cpumask_var(tmpmask); + cpus_read_unlock(); +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 70665934d53e2..eae6a078619f9 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -1563,7 +1563,7 @@ static struct ctl_table vm_table[] = { + .data = &block_dump, + .maxlen = sizeof(block_dump), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + }, + { +@@ -1571,7 +1571,7 @@ static struct ctl_table vm_table[] = { + .data = &sysctl_vfs_cache_pressure, + .maxlen = sizeof(sysctl_vfs_cache_pressure), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + }, + #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ +@@ -1581,7 +1581,7 @@ static struct ctl_table vm_table[] = { + .data = &sysctl_legacy_va_layout, + .maxlen = sizeof(sysctl_legacy_va_layout), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + }, + #endif +@@ -1591,7 +1591,7 @@ static struct ctl_table vm_table[] = { + .data = &node_reclaim_mode, + .maxlen = sizeof(node_reclaim_mode), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + }, + { +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 7f31932216a12..299a4c5b6cf8d 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -547,8 +547,11 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base, + } + + /* +- * Recomputes cpu_base::*next_timer and returns the earliest expires_next but +- * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram. ++ * Recomputes cpu_base::*next_timer and returns the earliest expires_next ++ * but does not set cpu_base::*expires_next, that is done by ++ * hrtimer[_force]_reprogram and hrtimer_interrupt only. 
When updating ++ * cpu_base::*expires_next right away, reprogramming logic would no longer ++ * work. + * + * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases, + * those timers will get run whenever the softirq gets handled, at the end of +@@ -589,6 +592,37 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_ + return expires_next; + } + ++static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base) ++{ ++ ktime_t expires_next, soft = KTIME_MAX; ++ ++ /* ++ * If the soft interrupt has already been activated, ignore the ++ * soft bases. They will be handled in the already raised soft ++ * interrupt. ++ */ ++ if (!cpu_base->softirq_activated) { ++ soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT); ++ /* ++ * Update the soft expiry time. clock_settime() might have ++ * affected it. ++ */ ++ cpu_base->softirq_expires_next = soft; ++ } ++ ++ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD); ++ /* ++ * If a softirq timer is expiring first, update cpu_base->next_timer ++ * and program the hardware with the soft expiry time. ++ */ ++ if (expires_next > soft) { ++ cpu_base->next_timer = cpu_base->softirq_next_timer; ++ expires_next = soft; ++ } ++ ++ return expires_next; ++} ++ + static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) + { + ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; +@@ -629,23 +663,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) + { + ktime_t expires_next; + +- /* +- * Find the current next expiration time. +- */ +- expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); +- +- if (cpu_base->next_timer && cpu_base->next_timer->is_soft) { +- /* +- * When the softirq is activated, hrtimer has to be +- * programmed with the first hard hrtimer because soft +- * timer interrupt could occur too late. 
+- */ +- if (cpu_base->softirq_activated) +- expires_next = __hrtimer_get_next_event(cpu_base, +- HRTIMER_ACTIVE_HARD); +- else +- cpu_base->softirq_expires_next = expires_next; +- } ++ expires_next = hrtimer_update_next_event(cpu_base); + + if (skip_equal && expires_next == cpu_base->expires_next) + return; +@@ -1640,8 +1658,8 @@ retry: + + __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); + +- /* Reevaluate the clock bases for the next expiry */ +- expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); ++ /* Reevaluate the clock bases for the [soft] next expiry */ ++ expires_next = hrtimer_update_next_event(cpu_base); + /* + * Store the new expiry value so the migration code can verify + * against it. +diff --git a/lib/logic_pio.c b/lib/logic_pio.c +index 905027574e5d8..774bb02fff10a 100644 +--- a/lib/logic_pio.c ++++ b/lib/logic_pio.c +@@ -27,6 +27,8 @@ static DEFINE_MUTEX(io_range_mutex); + * @new_range: pointer to the IO range to be registered. + * + * Returns 0 on success, the error code in case of failure. ++ * If the range already exists, -EEXIST will be returned, which should be ++ * considered a success. + * + * Register a new IO range node in the IO range list. 
+ */ +@@ -49,6 +51,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range) + list_for_each_entry(range, &io_range_list, list) { + if (range->fwnode == new_range->fwnode) { + /* range already there */ ++ ret = -EEXIST; + goto end_register; + } + if (range->flags == LOGIC_PIO_CPU_MMIO && +diff --git a/mm/slub.c b/mm/slub.c +index e622e8f4c2ac4..52ded855b4ed0 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1887,7 +1887,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, + + t = acquire_slab(s, n, page, object == NULL, &objects); + if (!t) +- continue; /* cmpxchg raced */ ++ break; + + available += objects; + if (!object) { +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c +index a23094b050f8b..e290a0c9e9282 100644 +--- a/net/ipv4/cipso_ipv4.c ++++ b/net/ipv4/cipso_ipv4.c +@@ -519,16 +519,10 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) + ret_val = -ENOENT; + goto doi_remove_return; + } +- if (!refcount_dec_and_test(&doi_def->refcount)) { +- spin_unlock(&cipso_v4_doi_list_lock); +- ret_val = -EBUSY; +- goto doi_remove_return; +- } + list_del_rcu(&doi_def->list); + spin_unlock(&cipso_v4_doi_list_lock); + +- cipso_v4_cache_invalidate(); +- call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ++ cipso_v4_doi_putdef(doi_def); + ret_val = 0; + + doi_remove_return: +@@ -585,9 +579,6 @@ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) + + if (!refcount_dec_and_test(&doi_def->refcount)) + return; +- spin_lock(&cipso_v4_doi_list_lock); +- list_del_rcu(&doi_def->list); +- spin_unlock(&cipso_v4_doi_list_lock); + + cipso_v4_cache_invalidate(); + call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c +index c2b7d43d92b0e..f5f4369c131c9 100644 +--- a/net/ipv4/nexthop.c ++++ b/net/ipv4/nexthop.c +@@ -1065,7 +1065,7 @@ out: + + /* rtnl */ + /* remove all nexthops tied to a device being deleted */ +-static void nexthop_flush_dev(struct net_device *dev) ++static 
void nexthop_flush_dev(struct net_device *dev, unsigned long event) + { + unsigned int hash = nh_dev_hashfn(dev->ifindex); + struct net *net = dev_net(dev); +@@ -1077,6 +1077,10 @@ static void nexthop_flush_dev(struct net_device *dev) + if (nhi->fib_nhc.nhc_dev != dev) + continue; + ++ if (nhi->reject_nh && ++ (event == NETDEV_DOWN || event == NETDEV_CHANGE)) ++ continue; ++ + remove_nexthop(net, nhi->nh_parent, NULL); + } + } +@@ -1794,11 +1798,11 @@ static int nh_netdev_event(struct notifier_block *this, + switch (event) { + case NETDEV_DOWN: + case NETDEV_UNREGISTER: +- nexthop_flush_dev(dev); ++ nexthop_flush_dev(dev, event); + break; + case NETDEV_CHANGE: + if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP))) +- nexthop_flush_dev(dev); ++ nexthop_flush_dev(dev, event); + break; + case NETDEV_CHANGEMTU: + info_ext = ptr; +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 6ddec8a23942b..5c8d0fb498256 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -2957,16 +2957,23 @@ static int do_tcp_setsockopt(struct sock *sk, int level, + break; + + case TCP_QUEUE_SEQ: +- if (sk->sk_state != TCP_CLOSE) ++ if (sk->sk_state != TCP_CLOSE) { + err = -EPERM; +- else if (tp->repair_queue == TCP_SEND_QUEUE) +- WRITE_ONCE(tp->write_seq, val); +- else if (tp->repair_queue == TCP_RECV_QUEUE) { +- WRITE_ONCE(tp->rcv_nxt, val); +- WRITE_ONCE(tp->copied_seq, val); +- } +- else ++ } else if (tp->repair_queue == TCP_SEND_QUEUE) { ++ if (!tcp_rtx_queue_empty(sk)) ++ err = -EPERM; ++ else ++ WRITE_ONCE(tp->write_seq, val); ++ } else if (tp->repair_queue == TCP_RECV_QUEUE) { ++ if (tp->rcv_nxt != tp->copied_seq) { ++ err = -EPERM; ++ } else { ++ WRITE_ONCE(tp->rcv_nxt, val); ++ WRITE_ONCE(tp->copied_seq, val); ++ } ++ } else { + err = -EINVAL; ++ } + break; + + case TCP_REPAIR_OPTIONS: +diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c +index d7c64e953e9a5..c463eebdc8fe2 100644 +--- a/net/ipv4/udp_offload.c ++++ b/net/ipv4/udp_offload.c +@@ -426,7 +426,7 @@ struct 
sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, + } + + if (NAPI_GRO_CB(skb)->encap_mark || +- (skb->ip_summed != CHECKSUM_PARTIAL && ++ (uh->check && skb->ip_summed != CHECKSUM_PARTIAL && + NAPI_GRO_CB(skb)->csum_cnt == 0 && + !NAPI_GRO_CB(skb)->csum_valid) || + !udp_sk(sk)->gro_receive) +diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c +index 8d3f66c310dbd..7426e33686d13 100644 +--- a/net/ipv6/calipso.c ++++ b/net/ipv6/calipso.c +@@ -83,6 +83,9 @@ struct calipso_map_cache_entry { + + static struct calipso_map_cache_bkt *calipso_cache; + ++static void calipso_cache_invalidate(void); ++static void calipso_doi_putdef(struct calipso_doi *doi_def); ++ + /* Label Mapping Cache Functions + */ + +@@ -444,15 +447,10 @@ static int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info) + ret_val = -ENOENT; + goto doi_remove_return; + } +- if (!refcount_dec_and_test(&doi_def->refcount)) { +- spin_unlock(&calipso_doi_list_lock); +- ret_val = -EBUSY; +- goto doi_remove_return; +- } + list_del_rcu(&doi_def->list); + spin_unlock(&calipso_doi_list_lock); + +- call_rcu(&doi_def->rcu, calipso_doi_free_rcu); ++ calipso_doi_putdef(doi_def); + ret_val = 0; + + doi_remove_return: +@@ -508,10 +506,8 @@ static void calipso_doi_putdef(struct calipso_doi *doi_def) + + if (!refcount_dec_and_test(&doi_def->refcount)) + return; +- spin_lock(&calipso_doi_list_lock); +- list_del_rcu(&doi_def->list); +- spin_unlock(&calipso_doi_list_lock); + ++ calipso_cache_invalidate(); + call_rcu(&doi_def->rcu, calipso_doi_free_rcu); + } + +diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c +index b1690149b6fa0..1482259de9b5d 100644 +--- a/net/mpls/mpls_gso.c ++++ b/net/mpls/mpls_gso.c +@@ -14,6 +14,7 @@ + #include <linux/netdev_features.h> + #include <linux/netdevice.h> + #include <linux/skbuff.h> ++#include <net/mpls.h> + + static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, + netdev_features_t features) +@@ -27,6 +28,8 @@ static struct sk_buff 
*mpls_gso_segment(struct sk_buff *skb, + + skb_reset_network_header(skb); + mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb); ++ if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN)) ++ goto out; + if (unlikely(!pskb_may_pull(skb, mpls_hlen))) + goto out; + +diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c +index e87b6bd6b3cdb..4731d21fc3ad8 100644 +--- a/net/netfilter/nf_nat_proto.c ++++ b/net/netfilter/nf_nat_proto.c +@@ -646,8 +646,8 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb, + } + + static unsigned int +-nf_nat_ipv4_in(void *priv, struct sk_buff *skb, +- const struct nf_hook_state *state) ++nf_nat_ipv4_pre_routing(void *priv, struct sk_buff *skb, ++ const struct nf_hook_state *state) + { + unsigned int ret; + __be32 daddr = ip_hdr(skb)->daddr; +@@ -659,6 +659,23 @@ nf_nat_ipv4_in(void *priv, struct sk_buff *skb, + return ret; + } + ++static unsigned int ++nf_nat_ipv4_local_in(void *priv, struct sk_buff *skb, ++ const struct nf_hook_state *state) ++{ ++ __be32 saddr = ip_hdr(skb)->saddr; ++ struct sock *sk = skb->sk; ++ unsigned int ret; ++ ++ ret = nf_nat_ipv4_fn(priv, skb, state); ++ ++ if (ret == NF_ACCEPT && sk && saddr != ip_hdr(skb)->saddr && ++ !inet_sk_transparent(sk)) ++ skb_orphan(skb); /* TCP edemux obtained wrong socket */ ++ ++ return ret; ++} ++ + static unsigned int + nf_nat_ipv4_out(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +@@ -736,7 +753,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb, + static const struct nf_hook_ops nf_nat_ipv4_ops[] = { + /* Before packet filtering, change destination */ + { +- .hook = nf_nat_ipv4_in, ++ .hook = nf_nat_ipv4_pre_routing, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_PRE_ROUTING, + .priority = NF_IP_PRI_NAT_DST, +@@ -757,7 +774,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = { + }, + /* After packet filtering, change source */ + { +- .hook = nf_nat_ipv4_fn, ++ .hook = nf_nat_ipv4_local_in, + .pf = NFPROTO_IPV4, + 
.hooknum = NF_INET_LOCAL_IN, + .priority = NF_IP_PRI_NAT_SRC, +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c +index d1ef2d7930739..8b60fc04c67c2 100644 +--- a/net/netfilter/x_tables.c ++++ b/net/netfilter/x_tables.c +@@ -330,6 +330,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) + const struct xt_match *m; + int have_rev = 0; + ++ mutex_lock(&xt[af].mutex); + list_for_each_entry(m, &xt[af].match, list) { + if (strcmp(m->name, name) == 0) { + if (m->revision > *bestp) +@@ -338,6 +339,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) + have_rev = 1; + } + } ++ mutex_unlock(&xt[af].mutex); + + if (af != NFPROTO_UNSPEC && !have_rev) + return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); +@@ -350,6 +352,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) + const struct xt_target *t; + int have_rev = 0; + ++ mutex_lock(&xt[af].mutex); + list_for_each_entry(t, &xt[af].target, list) { + if (strcmp(t->name, name) == 0) { + if (t->revision > *bestp) +@@ -358,6 +361,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) + have_rev = 1; + } + } ++ mutex_unlock(&xt[af].mutex); + + if (af != NFPROTO_UNSPEC && !have_rev) + return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); +@@ -371,12 +375,10 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target, + { + int have_rev, best = -1; + +- mutex_lock(&xt[af].mutex); + if (target == 1) + have_rev = target_revfn(af, name, revision, &best); + else + have_rev = match_revfn(af, name, revision, &best); +- mutex_unlock(&xt[af].mutex); + + /* Nothing at all? Return 0 to try loading module. 
*/ + if (best == -1) { +diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c +index 0f16080b87cb9..4cb43a2c07d14 100644 +--- a/net/netlabel/netlabel_cipso_v4.c ++++ b/net/netlabel/netlabel_cipso_v4.c +@@ -575,6 +575,7 @@ list_start: + + break; + } ++ cipso_v4_doi_putdef(doi_def); + rcu_read_unlock(); + + genlmsg_end(ans_skb, data); +@@ -583,12 +584,14 @@ list_start: + list_retry: + /* XXX - this limit is a guesstimate */ + if (nlsze_mult < 4) { ++ cipso_v4_doi_putdef(doi_def); + rcu_read_unlock(); + kfree_skb(ans_skb); + nlsze_mult *= 2; + goto list_start; + } + list_failure_lock: ++ cipso_v4_doi_putdef(doi_def); + rcu_read_unlock(); + list_failure: + kfree_skb(ans_skb); +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c +index ef602976bb2c8..6e023e93d3186 100644 +--- a/net/qrtr/qrtr.c ++++ b/net/qrtr/qrtr.c +@@ -791,8 +791,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + plen = (len + 3) & ~3; + skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE, + msg->msg_flags & MSG_DONTWAIT, &rc); +- if (!skb) ++ if (!skb) { ++ rc = -ENOMEM; + goto out_node; ++ } + + skb_reserve(skb, QRTR_HDR_MAX_SIZE); + +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index b65a405f607b2..3b1b5ee521379 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -2157,7 +2157,7 @@ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb, + + static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, + struct tcmsg *tcm, struct netlink_callback *cb, +- int *t_p, int s_t) ++ int *t_p, int s_t, bool recur) + { + struct Qdisc *q; + int b; +@@ -2168,7 +2168,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, + if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0) + return -1; + +- if (!qdisc_dev(root)) ++ if (!qdisc_dev(root) || !recur) + return 0; + + if (tcm->tcm_parent) { +@@ -2203,13 +2203,13 @@ static int tc_dump_tclass(struct sk_buff *skb, struct 
netlink_callback *cb) + s_t = cb->args[0]; + t = 0; + +- if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0) ++ if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0) + goto done; + + dev_queue = dev_ingress_queue(dev); + if (dev_queue && + tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, +- &t, s_t) < 0) ++ &t, s_t, false) < 0) + goto done; + + done: +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c +index 7afbf15bcbd9a..4beb6d2957c33 100644 +--- a/net/sunrpc/sched.c ++++ b/net/sunrpc/sched.c +@@ -990,8 +990,11 @@ void rpc_execute(struct rpc_task *task) + + rpc_set_active(task); + rpc_make_runnable(rpciod_workqueue, task); +- if (!is_async) ++ if (!is_async) { ++ unsigned int pflags = memalloc_nofs_save(); + __rpc_execute(task); ++ memalloc_nofs_restore(pflags); ++ } + } + + static void rpc_async_schedule(struct work_struct *work) +diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c +index 79d1005ff2ee3..3b604c1eb3c3b 100644 +--- a/samples/bpf/xdpsock_user.c ++++ b/samples/bpf/xdpsock_user.c +@@ -783,5 +783,7 @@ int main(int argc, char **argv) + else + l2fwd_all(); + ++ munmap(bufs, NUM_FRAMES * opt_xsk_frame_size); ++ + return 0; + } +diff --git a/security/commoncap.c b/security/commoncap.c +index ed89a6dd4f83d..28a6939bcc4e5 100644 +--- a/security/commoncap.c ++++ b/security/commoncap.c +@@ -500,8 +500,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size) + __u32 magic, nsmagic; + struct inode *inode = d_backing_inode(dentry); + struct user_namespace *task_ns = current_user_ns(), +- *fs_ns = inode->i_sb->s_user_ns, +- *ancestor; ++ *fs_ns = inode->i_sb->s_user_ns; + kuid_t rootid; + size_t newsize; + +@@ -524,15 +523,6 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size) + if (nsrootid == -1) + return -EINVAL; + +- /* +- * Do not allow allow adding a v3 filesystem capability xattr +- * if the rootid field is ambiguous. 
+- */ +- for (ancestor = task_ns->parent; ancestor; ancestor = ancestor->parent) { +- if (from_kuid(ancestor, rootid) == 0) +- return -EINVAL; +- } +- + newsize = sizeof(struct vfs_ns_cap_data); + nscap = kmalloc(newsize, GFP_ATOMIC); + if (!nscap) +diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c +index 6a85645663759..17a25e453f60c 100644 +--- a/sound/pci/hda/hda_bind.c ++++ b/sound/pci/hda/hda_bind.c +@@ -47,6 +47,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev) + if (codec->bus->shutdown) + return; + ++ /* ignore unsol events during system suspend/resume */ ++ if (codec->core.dev.power.power_state.event != PM_EVENT_ON) ++ return; ++ + if (codec->patch_ops.unsol_event) + codec->patch_ops.unsol_event(codec, ev); + } +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c +index 5e6081750bd9b..6a159c6c2f546 100644 +--- a/sound/pci/hda/hda_controller.c ++++ b/sound/pci/hda/hda_controller.c +@@ -613,13 +613,6 @@ static int azx_pcm_open(struct snd_pcm_substream *substream) + 20, + 178000000); + +- /* by some reason, the playback stream stalls on PulseAudio with +- * tsched=1 when a capture stream triggers. Until we figure out the +- * real cause, disable tsched mode by telling the PCM info flag. +- */ +- if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) +- runtime->hw.info |= SNDRV_PCM_INFO_BATCH; +- + if (chip->align_buffer_size) + /* constrain buffer sizes to be multiple of 128 + bytes. This is more efficient in terms of memory +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 7f9f6bbca5489..febd16c9efd7a 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -1025,6 +1025,8 @@ static int azx_prepare(struct device *dev) + chip = card->private_data; + chip->pm_prepared = 1; + ++ flush_work(&azx_bus(chip)->unsol_work); ++ + /* HDA controller always requires different WAKEEN for runtime suspend + * and system suspend, so don't use direct-complete here. 
+ */ +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c +index d7b2aae6d4289..9412bdda85c82 100644 +--- a/sound/pci/hda/patch_ca0132.c ++++ b/sound/pci/hda/patch_ca0132.c +@@ -1185,6 +1185,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = { + SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D), + SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D), + SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5), ++ SND_PCI_QUIRK(0x1102, 0x0191, "Sound Blaster AE-5 Plus", QUIRK_AE5), + SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7), + {} + }; +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index df4771b9eff24..ce38b5d4670da 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -2382,6 +2382,18 @@ static void generic_hdmi_free(struct hda_codec *codec) + } + + #ifdef CONFIG_PM ++static int generic_hdmi_suspend(struct hda_codec *codec) ++{ ++ struct hdmi_spec *spec = codec->spec; ++ int pin_idx; ++ ++ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { ++ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); ++ cancel_delayed_work_sync(&per_pin->work); ++ } ++ return 0; ++} ++ + static int generic_hdmi_resume(struct hda_codec *codec) + { + struct hdmi_spec *spec = codec->spec; +@@ -2405,6 +2417,7 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = { + .build_controls = generic_hdmi_build_controls, + .unsol_event = hdmi_unsol_event, + #ifdef CONFIG_PM ++ .suspend = generic_hdmi_suspend, + .resume = generic_hdmi_resume, + #endif + }; +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index a7f31766d14df..9fb03c646a88f 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -1452,6 +1452,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) + case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */ + case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ + case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */ ++ case 
USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */ + return true; + } + +@@ -1604,6 +1605,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, + && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) + msleep(20); + ++ /* ++ * Plantronics headsets (C320, C320-M, etc) need a delay to avoid ++ * random microhpone failures. ++ */ ++ if (USB_ID_VENDOR(chip->usb_id) == 0x047f && ++ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) ++ msleep(20); ++ + /* Zoom R16/24, many Logitech(at least H650e/H570e/BCC950), + * Jabra 550a, Kingston HyperX needs a tiny delay here, + * otherwise requests like get/set frequency return +diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c +index 8593d3c200c61..0116b0c06e976 100644 +--- a/tools/perf/util/trace-event-read.c ++++ b/tools/perf/util/trace-event-read.c +@@ -361,6 +361,7 @@ static int read_saved_cmdline(struct tep_handle *pevent) + pr_debug("error reading saved cmdlines\n"); + goto out; + } ++ buf[ret] = '\0'; + + parse_saved_cmdline(pevent, buf, size); + ret = 0; +diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +index 0f98724120deb..b4e9a1d8c6cdb 100644 +--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c ++++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +@@ -446,10 +446,8 @@ int _geneve_get_tunnel(struct __sk_buff *skb) + } + + ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt)); +- if (ret < 0) { +- ERROR(ret); +- return TC_ACT_SHOT; +- } ++ if (ret < 0) ++ gopt.opt_class = 0; + + bpf_trace_printk(fmt, sizeof(fmt), + key.tunnel_id, key.remote_ipv4, gopt.opt_class); +diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c +index f3c33e128709b..bcc87906c4c19 100644 +--- a/tools/testing/selftests/bpf/verifier/array_access.c ++++ b/tools/testing/selftests/bpf/verifier/array_access.c +@@ -250,12 +250,13 @@ + 
BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_csum_diff), ++ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_map_array_ro = { 3 }, + .result = ACCEPT, +- .retval = -29, ++ .retval = 65507, + }, + { + "invalid write map access into a read-only array 1", +diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh +index 197e769c2ed16..f8cda822c1cec 100755 +--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh ++++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh +@@ -86,11 +86,20 @@ test_ip6gretap() + + test_gretap_stp() + { ++ # Sometimes after mirror installation, the neighbor's state is not valid. ++ # The reason is that there is no SW datapath activity related to the ++ # neighbor for the remote GRE address. Therefore whether the corresponding ++ # neighbor will be valid is a matter of luck, and the test is thus racy. ++ # Set the neighbor's state to permanent, so it would be always valid. ++ ip neigh replace 192.0.2.130 lladdr $(mac_get $h3) \ ++ nud permanent dev br2 + full_test_span_gre_stp gt4 $swp3.555 "mirror to gretap" + } + + test_ip6gretap_stp() + { ++ ip neigh replace 2001:db8:2::2 lladdr $(mac_get $h3) \ ++ nud permanent dev br2 + full_test_span_gre_stp gt6 $swp3.555 "mirror to ip6gretap" + } + +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c +index 986fbc3cf667b..2e7d2b3f29079 100644 +--- a/virt/kvm/arm/arm.c ++++ b/virt/kvm/arm/arm.c +@@ -373,11 +373,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + cpu_data = this_cpu_ptr(&kvm_host_data); + + /* ++ * We guarantee that both TLBs and I-cache are private to each ++ * vcpu. If detecting that a vcpu from the same VM has ++ * previously run on the same physical CPU, call into the ++ * hypervisor code to nuke the relevant contexts. 
++ * ++ * We might get preempted before the vCPU actually runs, but + * We might get preempted before the vCPU actually runs, but + * over-invalidation doesn't affect correctness. + */ + if (*last_ran != vcpu->vcpu_id) { +- kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu); ++ kvm_call_hyp(__kvm_flush_cpu_context, vcpu); + *last_ran = vcpu->vcpu_id; + } + +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c +index 03a586ab6d27b..c6ba672f07ccf 100644 +--- a/virt/kvm/arm/mmu.c ++++ b/virt/kvm/arm/mmu.c +@@ -2307,8 +2307,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, + * Prevent userspace from creating a memory region outside of the IPA + * space addressable by the KVM guest IPA space. + */ +- if (memslot->base_gfn + memslot->npages >= +- (kvm_phys_size(kvm) >> PAGE_SHIFT)) ++ if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) + return -EFAULT; + + down_read(&current->mm->mmap_sem);