-rw-r--r--  0000_README             |    4
-rw-r--r--  1040_linux-5.4.41.patch | 2828
2 files changed, 2832 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README index ed1776e1..e6a90581 100644 --- a/0000_README +++ b/0000_README @@ -203,6 +203,10 @@ Patch: 1039_linux-5.4.40.patch From: http://www.kernel.org Desc: Linux 5.4.40 +Patch: 1040_linux-5.4.41.patch +From: http://www.kernel.org +Desc: Linux 5.4.41 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1040_linux-5.4.41.patch b/1040_linux-5.4.41.patch new file mode 100644 index 00000000..50cddb87 --- /dev/null +++ b/1040_linux-5.4.41.patch @@ -0,0 +1,2828 @@ +diff --git a/Makefile b/Makefile +index 6d4fca82529a..a8c772b299aa 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 40 ++SUBLEVEL = 41 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c +index ae5aefc44a4d..ffa8d73fe722 100644 +--- a/arch/arm/crypto/nhpoly1305-neon-glue.c ++++ b/arch/arm/crypto/nhpoly1305-neon-glue.c +@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc, + return crypto_nhpoly1305_update(desc, src, srclen); + + do { +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K); + + kernel_neon_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon); +diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c +index 895d3727c1fb..c5405e6a6db7 100644 +--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c ++++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c +@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc, + return crypto_nhpoly1305_update(desc, src, srclen); + + do { +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K); + + kernel_neon_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon); +diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c +index dfd626447482..5271ab366bee 100644 +--- a/arch/arm64/kvm/guest.c ++++ b/arch/arm64/kvm/guest.c +@@ -202,6 +202,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) + } + + memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id)); ++ ++ if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) { ++ int i; ++ ++ for (i = 0; i < 16; i++) ++ *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i); ++ } + out: + return err; + } +diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c +index bbeb6a5a6ba6..0be3355e3499 100644 +--- a/arch/arm64/mm/hugetlbpage.c ++++ b/arch/arm64/mm/hugetlbpage.c +@@ -230,6 +230,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, + ptep = (pte_t *)pudp; + } else if (sz == (CONT_PTE_SIZE)) { + pmdp = pmd_alloc(mm, pudp, addr); ++ if (!pmdp) ++ return NULL; + + WARN_ON(addr & (sz - 1)); + /* +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c +index f5d813c1304d..319812923012 100644 +--- a/arch/riscv/mm/init.c ++++ b/arch/riscv/mm/init.c +@@ -116,7 +116,8 @@ void __init setup_bootmem(void) + memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); + + set_max_mapnr(PFN_DOWN(mem_size)); +- max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); ++ max_pfn = PFN_DOWN(memblock_end_of_DRAM()); ++ max_low_pfn = max_pfn; + + #ifdef CONFIG_BLK_DEV_INITRD + setup_initrd(); +diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c +index ed52ffa8d5d4..560310e29e27 100644 +--- a/arch/s390/kvm/priv.c ++++ 
b/arch/s390/kvm/priv.c +@@ -626,10 +626,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu) + * available for the guest are AQIC and TAPQ with the t bit set + * since we do not set IC.3 (FIII) we currently will only intercept + * the AQIC function code. ++ * Note: running nested under z/VM can result in intercepts for other ++ * function codes, e.g. PQAP(QCI). We do not support this and bail out. + */ + reg0 = vcpu->run->s.regs.gprs[0]; + fc = (reg0 >> 24) & 0xff; +- if (WARN_ON_ONCE(fc != 0x03)) ++ if (fc != 0x03) + return -EOPNOTSUPP; + + /* PQAP instruction is allowed for guest kernel only */ +diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c +index f7567cbd35b6..80fcb85736e1 100644 +--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c ++++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c +@@ -29,7 +29,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc, + return crypto_nhpoly1305_update(desc, src, srclen); + + do { +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K); + + kernel_fpu_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2); +diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c +index a661ede3b5cf..cc6b7c1a2705 100644 +--- a/arch/x86/crypto/nhpoly1305-sse2-glue.c ++++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c +@@ -29,7 +29,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc, + return crypto_nhpoly1305_update(desc, src, srclen); + + do { +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K); + + kernel_fpu_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2); +diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h +index 515c0ceeb4a3..b3f121478738 100644 +--- a/arch/x86/entry/calling.h ++++ b/arch/x86/entry/calling.h +@@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with + #define SIZEOF_PTREGS 21*8 + + .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0 +- /* +- * Push registers and sanitize registers of values that a +- * speculation attack might otherwise want to exploit. The +- * lower registers are likely clobbered well before they +- * could be put to use in a speculative execution gadget. 
+- * Interleave XOR with PUSH for better uop scheduling: +- */ + .if \save_ret + pushq %rsi /* pt_regs->si */ + movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ +@@ -114,34 +107,43 @@ For 32-bit we have the following conventions - kernel is built with + pushq %rsi /* pt_regs->si */ + .endif + pushq \rdx /* pt_regs->dx */ +- xorl %edx, %edx /* nospec dx */ + pushq %rcx /* pt_regs->cx */ +- xorl %ecx, %ecx /* nospec cx */ + pushq \rax /* pt_regs->ax */ + pushq %r8 /* pt_regs->r8 */ +- xorl %r8d, %r8d /* nospec r8 */ + pushq %r9 /* pt_regs->r9 */ +- xorl %r9d, %r9d /* nospec r9 */ + pushq %r10 /* pt_regs->r10 */ +- xorl %r10d, %r10d /* nospec r10 */ + pushq %r11 /* pt_regs->r11 */ +- xorl %r11d, %r11d /* nospec r11*/ + pushq %rbx /* pt_regs->rbx */ +- xorl %ebx, %ebx /* nospec rbx*/ + pushq %rbp /* pt_regs->rbp */ +- xorl %ebp, %ebp /* nospec rbp*/ + pushq %r12 /* pt_regs->r12 */ +- xorl %r12d, %r12d /* nospec r12*/ + pushq %r13 /* pt_regs->r13 */ +- xorl %r13d, %r13d /* nospec r13*/ + pushq %r14 /* pt_regs->r14 */ +- xorl %r14d, %r14d /* nospec r14*/ + pushq %r15 /* pt_regs->r15 */ +- xorl %r15d, %r15d /* nospec r15*/ + UNWIND_HINT_REGS ++ + .if \save_ret + pushq %rsi /* return address on top of stack */ + .endif ++ ++ /* ++ * Sanitize registers of values that a speculation attack might ++ * otherwise want to exploit. The lower registers are likely clobbered ++ * well before they could be put to use in a speculative execution ++ * gadget. ++ */ ++ xorl %edx, %edx /* nospec dx */ ++ xorl %ecx, %ecx /* nospec cx */ ++ xorl %r8d, %r8d /* nospec r8 */ ++ xorl %r9d, %r9d /* nospec r9 */ ++ xorl %r10d, %r10d /* nospec r10 */ ++ xorl %r11d, %r11d /* nospec r11 */ ++ xorl %ebx, %ebx /* nospec rbx */ ++ xorl %ebp, %ebp /* nospec rbp */ ++ xorl %r12d, %r12d /* nospec r12 */ ++ xorl %r13d, %r13d /* nospec r13 */ ++ xorl %r14d, %r14d /* nospec r14 */ ++ xorl %r15d, %r15d /* nospec r15 */ ++ + .endm + + .macro POP_REGS pop_rdi=1 skip_r11rcx=0 +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S +index b7c3ea4cb19d..2ba3d53ac5b1 100644 +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -249,7 +249,6 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) + */ + syscall_return_via_sysret: + /* rcx and r11 are already restored (see code above) */ +- UNWIND_HINT_EMPTY + POP_REGS pop_rdi=0 skip_r11rcx=1 + + /* +@@ -258,6 +257,7 @@ syscall_return_via_sysret: + */ + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp ++ UNWIND_HINT_EMPTY + + pushq RSP-RDI(%rdi) /* RSP */ + pushq (%rdi) /* RDI */ +@@ -512,7 +512,7 @@ END(spurious_entries_start) + * +----------------------------------------------------+ + */ + ENTRY(interrupt_entry) +- UNWIND_HINT_FUNC ++ UNWIND_HINT_IRET_REGS offset=16 + ASM_CLAC + cld + +@@ -544,9 +544,9 @@ ENTRY(interrupt_entry) + pushq 5*8(%rdi) /* regs->eflags */ + pushq 4*8(%rdi) /* regs->cs */ + pushq 3*8(%rdi) /* regs->ip */ ++ UNWIND_HINT_IRET_REGS + pushq 2*8(%rdi) /* regs->orig_ax */ + pushq 8(%rdi) /* return address */ +- UNWIND_HINT_FUNC + + movq (%rdi), %rdi + jmp 2f +@@ -637,6 +637,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) + */ + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp ++ UNWIND_HINT_EMPTY + + /* Copy the IRET frame to the trampoline stack. 
*/ + pushq 6*8(%rdi) /* SS */ +@@ -1739,7 +1740,7 @@ ENTRY(rewind_stack_do_exit) + + movq PER_CPU_VAR(cpu_current_top_of_stack), %rax + leaq -PTREGS_SIZE(%rax), %rsp +- UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE ++ UNWIND_HINT_REGS + + call do_exit + END(rewind_stack_do_exit) +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 380cee9bc175..f5341edbfa16 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -1608,8 +1608,8 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, + static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq) + { + /* We can only post Fixed and LowPrio IRQs */ +- return (irq->delivery_mode == dest_Fixed || +- irq->delivery_mode == dest_LowestPrio); ++ return (irq->delivery_mode == APIC_DM_FIXED || ++ irq->delivery_mode == APIC_DM_LOWEST); + } + + static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) +diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h +index 499578f7e6d7..70fc159ebe69 100644 +--- a/arch/x86/include/asm/unwind.h ++++ b/arch/x86/include/asm/unwind.h +@@ -19,7 +19,7 @@ struct unwind_state { + #if defined(CONFIG_UNWINDER_ORC) + bool signal, full_regs; + unsigned long sp, bp, ip; +- struct pt_regs *regs; ++ struct pt_regs *regs, *prev_regs; + #elif defined(CONFIG_UNWINDER_FRAME_POINTER) + bool got_irq; + unsigned long *bp, *orig_sp, ip; +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c +index 332ae6530fa8..fb37221a1532 100644 +--- a/arch/x86/kernel/unwind_orc.c ++++ b/arch/x86/kernel/unwind_orc.c +@@ -142,9 +142,6 @@ static struct orc_entry *orc_find(unsigned long ip) + { + static struct orc_entry *orc; + +- if (!orc_init) +- return NULL; +- + if (ip == 0) + return &null_orc_entry; + +@@ -378,9 +375,38 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr + return true; + } + ++/* ++ * If state->regs is non-NULL, and points to a full pt_regs, just get the reg ++ * value from state->regs. ++ * ++ * Otherwise, if state->regs just points to IRET regs, and the previous frame ++ * had full regs, it's safe to get the value from the previous regs. This can ++ * happen when early/late IRQ entry code gets interrupted by an NMI. 
++ */ ++static bool get_reg(struct unwind_state *state, unsigned int reg_off, ++ unsigned long *val) ++{ ++ unsigned int reg = reg_off/8; ++ ++ if (!state->regs) ++ return false; ++ ++ if (state->full_regs) { ++ *val = ((unsigned long *)state->regs)[reg]; ++ return true; ++ } ++ ++ if (state->prev_regs) { ++ *val = ((unsigned long *)state->prev_regs)[reg]; ++ return true; ++ } ++ ++ return false; ++} ++ + bool unwind_next_frame(struct unwind_state *state) + { +- unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp; ++ unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp; + enum stack_type prev_type = state->stack_info.type; + struct orc_entry *orc; + bool indirect = false; +@@ -442,39 +468,35 @@ bool unwind_next_frame(struct unwind_state *state) + break; + + case ORC_REG_R10: +- if (!state->regs || !state->full_regs) { ++ if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) { + orc_warn("missing regs for base reg R10 at ip %pB\n", + (void *)state->ip); + goto err; + } +- sp = state->regs->r10; + break; + + case ORC_REG_R13: +- if (!state->regs || !state->full_regs) { ++ if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) { + orc_warn("missing regs for base reg R13 at ip %pB\n", + (void *)state->ip); + goto err; + } +- sp = state->regs->r13; + break; + + case ORC_REG_DI: +- if (!state->regs || !state->full_regs) { ++ if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) { + orc_warn("missing regs for base reg DI at ip %pB\n", + (void *)state->ip); + goto err; + } +- sp = state->regs->di; + break; + + case ORC_REG_DX: +- if (!state->regs || !state->full_regs) { ++ if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) { + orc_warn("missing regs for base reg DX at ip %pB\n", + (void *)state->ip); + goto err; + } +- sp = state->regs->dx; + break; + + default: +@@ -501,6 +523,7 @@ bool unwind_next_frame(struct unwind_state *state) + + state->sp = sp; + state->regs = NULL; ++ state->prev_regs = NULL; + state->signal = false; + break; + +@@ -512,6 +535,7 @@ bool unwind_next_frame(struct unwind_state *state) + } + + state->regs = (struct pt_regs *)sp; ++ state->prev_regs = NULL; + state->full_regs = true; + state->signal = true; + break; +@@ -523,6 +547,8 @@ bool unwind_next_frame(struct unwind_state *state) + goto err; + } + ++ if (state->full_regs) ++ state->prev_regs = state->regs; + state->regs = (void *)sp - IRET_FRAME_OFFSET; + state->full_regs = false; + state->signal = true; +@@ -531,14 +557,14 @@ bool unwind_next_frame(struct unwind_state *state) + default: + orc_warn("unknown .orc_unwind entry type %d for ip %pB\n", + orc->type, (void *)orig_ip); +- break; ++ goto err; + } + + /* Find BP: */ + switch (orc->bp_reg) { + case ORC_REG_UNDEFINED: +- if (state->regs && state->full_regs) +- state->bp = state->regs->bp; ++ if (get_reg(state, offsetof(struct pt_regs, bp), &tmp)) ++ state->bp = tmp; + break; + + case ORC_REG_PREV_SP: +@@ -582,6 +608,9 @@ EXPORT_SYMBOL_GPL(unwind_next_frame); + void __unwind_start(struct unwind_state *state, struct task_struct *task, + struct pt_regs *regs, unsigned long *first_frame) + { ++ if (!orc_init) ++ goto done; ++ + memset(state, 0, sizeof(*state)); + state->task = task; + +@@ -648,7 +677,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, + /* Otherwise, skip ahead to the user-specified starting frame: */ + while (!unwind_done(state) && + (!on_stack(&state->stack_info, first_frame, sizeof(long)) || +- state->sp <= (unsigned long)first_frame)) ++ state->sp < (unsigned long)first_frame)) + 
unwind_next_frame(state); + + return; +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 51ff6b342279..fda2126f9a97 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -1861,7 +1861,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, + return NULL; + + /* Pin the user virtual address. */ +- npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages); ++ npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); + if (npinned != npages) { + pr_err("SEV: Failure locking %lu pages.\n", npages); + goto err; +diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S +index c7012f6c971c..ca4252f81bf8 100644 +--- a/arch/x86/kvm/vmx/vmenter.S ++++ b/arch/x86/kvm/vmx/vmenter.S +@@ -86,6 +86,9 @@ ENTRY(vmx_vmexit) + /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ + FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE + ++ /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */ ++ or $1, %_ASM_AX ++ + pop %_ASM_AX + .Lvmexit_skip_rsb: + #endif +diff --git a/block/blk-iocost.c b/block/blk-iocost.c +index 2dc5dc54e257..d083f7704082 100644 +--- a/block/blk-iocost.c ++++ b/block/blk-iocost.c +@@ -469,7 +469,7 @@ struct ioc_gq { + */ + atomic64_t vtime; + atomic64_t done_vtime; +- atomic64_t abs_vdebt; ++ u64 abs_vdebt; + u64 last_vtime; + + /* +@@ -1145,7 +1145,7 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) + struct iocg_wake_ctx ctx = { .iocg = iocg }; + u64 margin_ns = (u64)(ioc->period_us * + WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC; +- u64 abs_vdebt, vdebt, vshortage, expires, oexpires; ++ u64 vdebt, vshortage, expires, oexpires; + s64 vbudget; + u32 hw_inuse; + +@@ -1155,18 +1155,15 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) + vbudget = now->vnow - atomic64_read(&iocg->vtime); + + /* pay off debt */ +- abs_vdebt = atomic64_read(&iocg->abs_vdebt); +- vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse); ++ vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); + if (vdebt && vbudget > 0) { + u64 delta = min_t(u64, vbudget, vdebt); + u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse), +- abs_vdebt); ++ iocg->abs_vdebt); + + atomic64_add(delta, &iocg->vtime); + atomic64_add(delta, &iocg->done_vtime); +- atomic64_sub(abs_delta, &iocg->abs_vdebt); +- if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0)) +- atomic64_set(&iocg->abs_vdebt, 0); ++ iocg->abs_vdebt -= abs_delta; + } + + /* +@@ -1222,12 +1219,18 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) + u64 expires, oexpires; + u32 hw_inuse; + ++ lockdep_assert_held(&iocg->waitq.lock); ++ + /* debt-adjust vtime */ + current_hweight(iocg, NULL, &hw_inuse); +- vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse); ++ vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); + +- /* clear or maintain depending on the overage */ +- if (time_before_eq64(vtime, now->vnow)) { ++ /* ++ * Clear or maintain depending on the overage. Non-zero vdebt is what ++ * guarantees that @iocg is online and future iocg_kick_delay() will ++ * clear use_delay. Don't leave it on when there's no vdebt. 
++ */ ++ if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) { + blkcg_clear_delay(blkg); + return false; + } +@@ -1261,9 +1264,12 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) + { + struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer); + struct ioc_now now; ++ unsigned long flags; + ++ spin_lock_irqsave(&iocg->waitq.lock, flags); + ioc_now(iocg->ioc, &now); + iocg_kick_delay(iocg, &now, 0); ++ spin_unlock_irqrestore(&iocg->waitq.lock, flags); + + return HRTIMER_NORESTART; + } +@@ -1371,14 +1377,13 @@ static void ioc_timer_fn(struct timer_list *timer) + * should have woken up in the last period and expire idle iocgs. + */ + list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { +- if (!waitqueue_active(&iocg->waitq) && +- !atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg)) ++ if (!waitqueue_active(&iocg->waitq) && iocg->abs_vdebt && ++ !iocg_is_idle(iocg)) + continue; + + spin_lock(&iocg->waitq.lock); + +- if (waitqueue_active(&iocg->waitq) || +- atomic64_read(&iocg->abs_vdebt)) { ++ if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) { + /* might be oversleeping vtime / hweight changes, kick */ + iocg_kick_waitq(iocg, &now); + iocg_kick_delay(iocg, &now, 0); +@@ -1721,28 +1726,49 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) + * tests are racy but the races aren't systemic - we only miss once + * in a while which is fine. + */ +- if (!waitqueue_active(&iocg->waitq) && +- !atomic64_read(&iocg->abs_vdebt) && ++ if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && + time_before_eq64(vtime + cost, now.vnow)) { + iocg_commit_bio(iocg, bio, cost); + return; + } + + /* +- * We're over budget. If @bio has to be issued regardless, +- * remember the abs_cost instead of advancing vtime. +- * iocg_kick_waitq() will pay off the debt before waking more IOs. ++ * We activated above but w/o any synchronization. Deactivation is ++ * synchronized with waitq.lock and we won't get deactivated as long ++ * as we're waiting or has debt, so we're good if we're activated ++ * here. In the unlikely case that we aren't, just issue the IO. ++ */ ++ spin_lock_irq(&iocg->waitq.lock); ++ ++ if (unlikely(list_empty(&iocg->active_list))) { ++ spin_unlock_irq(&iocg->waitq.lock); ++ iocg_commit_bio(iocg, bio, cost); ++ return; ++ } ++ ++ /* ++ * We're over budget. If @bio has to be issued regardless, remember ++ * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay ++ * off the debt before waking more IOs. ++ * + * This way, the debt is continuously paid off each period with the +- * actual budget available to the cgroup. If we just wound vtime, +- * we would incorrectly use the current hw_inuse for the entire +- * amount which, for example, can lead to the cgroup staying +- * blocked for a long time even with substantially raised hw_inuse. ++ * actual budget available to the cgroup. If we just wound vtime, we ++ * would incorrectly use the current hw_inuse for the entire amount ++ * which, for example, can lead to the cgroup staying blocked for a ++ * long time even with substantially raised hw_inuse. ++ * ++ * An iocg with vdebt should stay online so that the timer can keep ++ * deducting its vdebt and [de]activate use_delay mechanism ++ * accordingly. We don't want to race against the timer trying to ++ * clear them and leave @iocg inactive w/ dangling use_delay heavily ++ * penalizing the cgroup and its descendants. 
+ */ + if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) { +- atomic64_add(abs_cost, &iocg->abs_vdebt); ++ iocg->abs_vdebt += abs_cost; + if (iocg_kick_delay(iocg, &now, cost)) + blkcg_schedule_throttle(rqos->q, + (bio->bi_opf & REQ_SWAP) == REQ_SWAP); ++ spin_unlock_irq(&iocg->waitq.lock); + return; + } + +@@ -1759,20 +1785,6 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) + * All waiters are on iocg->waitq and the wait states are + * synchronized using waitq.lock. + */ +- spin_lock_irq(&iocg->waitq.lock); +- +- /* +- * We activated above but w/o any synchronization. Deactivation is +- * synchronized with waitq.lock and we won't get deactivated as +- * long as we're waiting, so we're good if we're activated here. +- * In the unlikely case that we are deactivated, just issue the IO. +- */ +- if (unlikely(list_empty(&iocg->active_list))) { +- spin_unlock_irq(&iocg->waitq.lock); +- iocg_commit_bio(iocg, bio, cost); +- return; +- } +- + init_waitqueue_func_entry(&wait.wait, iocg_wake_fn); + wait.wait.private = current; + wait.bio = bio; +@@ -1804,6 +1816,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, + struct ioc_now now; + u32 hw_inuse; + u64 abs_cost, cost; ++ unsigned long flags; + + /* bypass if disabled or for root cgroup */ + if (!ioc->enabled || !iocg->level) +@@ -1823,15 +1836,28 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, + iocg->cursor = bio_end; + + /* +- * Charge if there's enough vtime budget and the existing request +- * has cost assigned. Otherwise, account it as debt. See debt +- * handling in ioc_rqos_throttle() for details. ++ * Charge if there's enough vtime budget and the existing request has ++ * cost assigned. + */ + if (rq->bio && rq->bio->bi_iocost_cost && +- time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) ++ time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) { + iocg_commit_bio(iocg, bio, cost); +- else +- atomic64_add(abs_cost, &iocg->abs_vdebt); ++ return; ++ } ++ ++ /* ++ * Otherwise, account it as debt if @iocg is online, which it should ++ * be for the vast majority of cases. See debt handling in ++ * ioc_rqos_throttle() for details. 
++ */ ++ spin_lock_irqsave(&iocg->waitq.lock, flags); ++ if (likely(!list_empty(&iocg->active_list))) { ++ iocg->abs_vdebt += abs_cost; ++ iocg_kick_delay(iocg, &now, cost); ++ } else { ++ iocg_commit_bio(iocg, bio, cost); ++ } ++ spin_unlock_irqrestore(&iocg->waitq.lock, flags); + } + + static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio) +@@ -2001,7 +2027,6 @@ static void ioc_pd_init(struct blkg_policy_data *pd) + iocg->ioc = ioc; + atomic64_set(&iocg->vtime, now.vnow); + atomic64_set(&iocg->done_vtime, now.vnow); +- atomic64_set(&iocg->abs_vdebt, 0); + atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); + INIT_LIST_HEAD(&iocg->active_list); + iocg->hweight_active = HWEIGHT_WHOLE; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index 630e8342d162..5e1dce424154 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -3070,15 +3070,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) + } + } + +- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); +- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); +- +- amdgpu_amdkfd_suspend(adev); +- + amdgpu_ras_suspend(adev); + + r = amdgpu_device_ip_suspend_phase1(adev); + ++ amdgpu_amdkfd_suspend(adev); ++ + /* evict vram memory */ + amdgpu_bo_evict_vram(adev); + +diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c +index f156f245fdec..9e95f6fd5406 100644 +--- a/drivers/gpu/drm/ingenic/ingenic-drm.c ++++ b/drivers/gpu/drm/ingenic/ingenic-drm.c +@@ -824,6 +824,7 @@ static const struct of_device_id ingenic_drm_of_match[] = { + { .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info }, + { /* sentinel */ }, + }; ++MODULE_DEVICE_TABLE(of, ingenic_drm_of_match); + + static struct platform_driver ingenic_drm_driver = { + .driver = { +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c +index c7bc9db5b192..17a638f15082 100644 +--- a/drivers/hid/usbhid/hid-core.c ++++ b/drivers/hid/usbhid/hid-core.c +@@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid) + struct usbhid_device *usbhid = hid->driver_data; + int res; + ++ mutex_lock(&usbhid->mutex); ++ + set_bit(HID_OPENED, &usbhid->iofl); + +- if (hid->quirks & HID_QUIRK_ALWAYS_POLL) +- return 0; ++ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { ++ res = 0; ++ goto Done; ++ } + + res = usb_autopm_get_interface(usbhid->intf); + /* the device must be awake to reliably request remote wakeup */ + if (res < 0) { + clear_bit(HID_OPENED, &usbhid->iofl); +- return -EIO; ++ res = -EIO; ++ goto Done; + } + + usbhid->intf->needs_remote_wakeup = 1; +@@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid) + msleep(50); + + clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); ++ ++ Done: ++ mutex_unlock(&usbhid->mutex); + return res; + } + +@@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid) + { + struct usbhid_device *usbhid = hid->driver_data; + ++ mutex_lock(&usbhid->mutex); ++ + /* + * Make sure we don't restart data acquisition due to + * a resumption we no longer care about by avoiding racing +@@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid) + clear_bit(HID_IN_POLLING, &usbhid->iofl); + spin_unlock_irq(&usbhid->lock); + +- if (hid->quirks & HID_QUIRK_ALWAYS_POLL) +- return; ++ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) { ++ hid_cancel_delayed_stuff(usbhid); ++ usb_kill_urb(usbhid->urbin); ++ 
usbhid->intf->needs_remote_wakeup = 0; ++ } + +- hid_cancel_delayed_stuff(usbhid); +- usb_kill_urb(usbhid->urbin); +- usbhid->intf->needs_remote_wakeup = 0; ++ mutex_unlock(&usbhid->mutex); + } + + /* +@@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid) + unsigned int n, insize = 0; + int ret; + ++ mutex_lock(&usbhid->mutex); ++ + clear_bit(HID_DISCONNECTED, &usbhid->iofl); + + usbhid->bufsize = HID_MIN_BUFFER_SIZE; +@@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid) + usbhid_set_leds(hid); + device_set_wakeup_enable(&dev->dev, 1); + } ++ ++ mutex_unlock(&usbhid->mutex); + return 0; + + fail: +@@ -1187,6 +1202,7 @@ fail: + usbhid->urbout = NULL; + usbhid->urbctrl = NULL; + hid_free_buffers(dev, hid); ++ mutex_unlock(&usbhid->mutex); + return ret; + } + +@@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid) + usbhid->intf->needs_remote_wakeup = 0; + } + ++ mutex_lock(&usbhid->mutex); ++ + clear_bit(HID_STARTED, &usbhid->iofl); + spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ + set_bit(HID_DISCONNECTED, &usbhid->iofl); +@@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid) + usbhid->urbout = NULL; + + hid_free_buffers(hid_to_usb_dev(hid), hid); ++ ++ mutex_unlock(&usbhid->mutex); + } + + static int usbhid_power(struct hid_device *hid, int lvl) +@@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * + INIT_WORK(&usbhid->reset_work, hid_reset); + timer_setup(&usbhid->io_retry, hid_retry_timeout, 0); + spin_lock_init(&usbhid->lock); ++ mutex_init(&usbhid->mutex); + + ret = hid_add_device(hid); + if (ret) { +diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h +index 8620408bd7af..75fe85d3d27a 100644 +--- a/drivers/hid/usbhid/usbhid.h ++++ b/drivers/hid/usbhid/usbhid.h +@@ -80,6 +80,7 @@ struct usbhid_device { + dma_addr_t outbuf_dma; /* Output buffer dma */ + unsigned long last_out; /* record of last output for timeouts */ + ++ struct mutex mutex; /* start/stop/open/close */ + spinlock_t lock; /* fifo spinlock */ + unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ + struct timer_list io_retry; /* Retry timer */ +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c +index 5ded94b7bf68..cd71e7133944 100644 +--- a/drivers/hid/wacom_sys.c ++++ b/drivers/hid/wacom_sys.c +@@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev, + data[0] = field->report->id; + ret = wacom_get_report(hdev, HID_FEATURE_REPORT, + data, n, WAC_CMD_RETRIES); +- if (ret == n) { ++ if (ret == n && features->type == HID_GENERIC) { + ret = hid_report_raw_event(hdev, + HID_FEATURE_REPORT, data, n, 0); ++ } else if (ret == 2 && features->type != HID_GENERIC) { ++ features->touch_max = data[1]; + } else { + features->touch_max = 16; + hid_warn(hdev, "wacom_feature_mapping: " +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index d99a9d407671..1c96809b51c9 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) + { + struct input_dev *pad_input = wacom->pad_input; + unsigned char *data = wacom->data; ++ int nbuttons = wacom->features.numbered_buttons; + +- int buttons = data[282] | ((data[281] & 0x40) << 2); ++ int expresskeys = data[282]; ++ int center = (data[281] & 0x40) >> 6; + int ring = data[285] & 0x7F; + bool ringstatus = data[285] & 0x80; +- bool prox = buttons || ringstatus; ++ bool prox = expresskeys || 
center || ringstatus; + + /* Fix touchring data: userspace expects 0 at left and increasing clockwise */ + ring = 71 - ring; +@@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) + if (ring > 71) + ring -= 72; + +- wacom_report_numbered_buttons(pad_input, 9, buttons); ++ wacom_report_numbered_buttons(pad_input, nbuttons, ++ expresskeys | (center << (nbuttons - 1))); + + input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0); + +@@ -2637,9 +2640,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, + case HID_DG_TIPSWITCH: + hid_data->last_slot_field = equivalent_usage; + break; ++ case HID_DG_CONTACTCOUNT: ++ hid_data->cc_report = report->id; ++ hid_data->cc_index = i; ++ hid_data->cc_value_index = j; ++ break; + } + } + } ++ ++ if (hid_data->cc_report != 0 && ++ hid_data->cc_index >= 0) { ++ struct hid_field *field = report->field[hid_data->cc_index]; ++ int value = field->value[hid_data->cc_value_index]; ++ if (value) ++ hid_data->num_expected = value; ++ } ++ else { ++ hid_data->num_expected = wacom_wac->features.touch_max; ++ } + } + + static void wacom_wac_finger_report(struct hid_device *hdev, +@@ -2649,7 +2668,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev, + struct wacom_wac *wacom_wac = &wacom->wacom_wac; + struct input_dev *input = wacom_wac->touch_input; + unsigned touch_max = wacom_wac->features.touch_max; +- struct hid_data *hid_data = &wacom_wac->hid_data; + + /* If more packets of data are expected, give us a chance to + * process them rather than immediately syncing a partial +@@ -2663,7 +2681,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev, + + input_sync(input); + wacom_wac->hid_data.num_received = 0; +- hid_data->num_expected = 0; + + /* keep touch state for pen event */ + wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac); +@@ -2738,73 +2755,12 @@ static void wacom_report_events(struct hid_device *hdev, + } + } + +-static void wacom_set_num_expected(struct hid_device *hdev, +- struct hid_report *report, +- int collection_index, +- struct hid_field *field, +- int field_index) +-{ +- struct wacom *wacom = hid_get_drvdata(hdev); +- struct wacom_wac *wacom_wac = &wacom->wacom_wac; +- struct hid_data *hid_data = &wacom_wac->hid_data; +- unsigned int original_collection_level = +- hdev->collection[collection_index].level; +- bool end_collection = false; +- int i; +- +- if (hid_data->num_expected) +- return; +- +- // find the contact count value for this segment +- for (i = field_index; i < report->maxfield && !end_collection; i++) { +- struct hid_field *field = report->field[i]; +- unsigned int field_level = +- hdev->collection[field->usage[0].collection_index].level; +- unsigned int j; +- +- if (field_level != original_collection_level) +- continue; +- +- for (j = 0; j < field->maxusage; j++) { +- struct hid_usage *usage = &field->usage[j]; +- +- if (usage->collection_index != collection_index) { +- end_collection = true; +- break; +- } +- if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) { +- hid_data->cc_report = report->id; +- hid_data->cc_index = i; +- hid_data->cc_value_index = j; +- +- if (hid_data->cc_report != 0 && +- hid_data->cc_index >= 0) { +- +- struct hid_field *field = +- report->field[hid_data->cc_index]; +- int value = +- field->value[hid_data->cc_value_index]; +- +- if (value) +- hid_data->num_expected = value; +- } +- } +- } +- } +- +- if (hid_data->cc_report == 0 || hid_data->cc_index < 0) +- hid_data->num_expected = 
wacom_wac->features.touch_max; +-} +- + static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report, + int collection_index, struct hid_field *field, + int field_index) + { + struct wacom *wacom = hid_get_drvdata(hdev); + +- if (WACOM_FINGER_FIELD(field)) +- wacom_set_num_expected(hdev, report, collection_index, field, +- field_index); + wacom_report_events(hdev, report, collection_index, field_index); + + /* +diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c +index 6c340a4f4fd2..60e659a24f90 100644 +--- a/drivers/iommu/virtio-iommu.c ++++ b/drivers/iommu/virtio-iommu.c +@@ -454,7 +454,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev, + if (!region) + return -ENOMEM; + +- list_add(&vdev->resv_regions, ®ion->list); ++ list_add(®ion->list, &vdev->resv_regions); + return 0; + } + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 6862594b49ab..edf8452a2574 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -6649,7 +6649,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, + int rc; + + if (!mem_size) +- return 0; ++ return -EINVAL; + + ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { +@@ -9755,6 +9755,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, + netdev_features_t features) + { + struct bnxt *bp = netdev_priv(dev); ++ netdev_features_t vlan_features; + + if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) + features &= ~NETIF_F_NTUPLE; +@@ -9771,12 +9772,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, + /* Both CTAG and STAG VLAN accelaration on the RX side have to be + * turned on or off together. 
+ */ +- if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != +- (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { ++ vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | ++ NETIF_F_HW_VLAN_STAG_RX); ++ if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | ++ NETIF_F_HW_VLAN_STAG_RX)) { + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) + features &= ~(NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX); +- else ++ else if (vlan_features) + features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX; + } +@@ -12066,12 +12069,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) + } + } + +- if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) +- dev_close(netdev); ++ if (result != PCI_ERS_RESULT_RECOVERED) { ++ if (netif_running(netdev)) ++ dev_close(netdev); ++ pci_disable_device(pdev); ++ } + + rtnl_unlock(); + +- return PCI_ERS_RESULT_RECOVERED; ++ return result; + } + + /** +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h +index cda7ba31095a..a61a5873ab0a 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h +@@ -1058,7 +1058,6 @@ struct bnxt_vf_info { + #define BNXT_VF_LINK_FORCED 0x4 + #define BNXT_VF_LINK_UP 0x8 + #define BNXT_VF_TRUST 0x10 +- u32 func_flags; /* func cfg flags */ + u32 min_tx_rate; + u32 max_tx_rate; + void *hwrm_cmd_req_addr; +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +index 689c47ab2155..ba94edec9fb8 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +@@ -39,7 +39,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) + #define NVM_OFF_DIS_GRE_VER_CHECK 171 + #define NVM_OFF_ENABLE_SRIOV 401 + +-#define BNXT_MSIX_VEC_MAX 1280 ++#define BNXT_MSIX_VEC_MAX 512 + #define BNXT_MSIX_VEC_MIN_MAX 128 + + enum bnxt_nvm_dir_type { +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +index f6f3454d6059..1046b22220a3 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) + if (old_setting == setting) + return 0; + +- func_flags = vf->func_flags; + if (setting) +- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; ++ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; + else +- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; ++ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; + /*TODO: if the driver supports VLAN filter on guest VLAN, + * the spoof check should also include vlan anti-spoofing + */ +@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) + req.flags = cpu_to_le32(func_flags); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { +- vf->func_flags = func_flags; + if (setting) + vf->flags |= BNXT_VF_SPOOFCHK; + else +@@ -230,7 +228,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) + memcpy(vf->mac_addr, mac, ETH_ALEN); + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); +- req.flags = cpu_to_le32(vf->func_flags); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, mac, ETH_ALEN); + return hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); +@@ -268,7 +265,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); +- req.flags = cpu_to_le32(vf->func_flags); + req.dflt_vlan = cpu_to_le16(vlan_tag); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +@@ -307,7 +303,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); +- req.flags = cpu_to_le32(vf->func_flags); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); + req.max_bw = cpu_to_le32(max_tx_rate); + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); +@@ -479,7 +474,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) + vf = &bp->pf.vf[vf_id]; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); +- req.flags = cpu_to_le32(vf->func_flags); + + if (is_valid_ether_addr(vf->mac_addr)) { + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c +index 234c13ebbc41..1ec19d9fab00 100644 +--- a/drivers/net/ethernet/cadence/macb_main.c ++++ b/drivers/net/ethernet/cadence/macb_main.c +@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) + int status; + + status = pm_runtime_get_sync(&bp->pdev->dev); +- if (status < 0) ++ if (status < 0) { ++ pm_runtime_put_noidle(&bp->pdev->dev); + goto mdio_pm_exit; ++ } + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) +@@ -367,8 +369,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + int status; + + status = pm_runtime_get_sync(&bp->pdev->dev); +- if (status < 0) ++ if (status < 0) { ++ pm_runtime_put_noidle(&bp->pdev->dev); + goto mdio_pm_exit; ++ } + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) +@@ -3691,8 +3695,10 @@ static int at91ether_open(struct net_device *dev) + int ret; + + ret = pm_runtime_get_sync(&lp->pdev->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(&lp->pdev->dev); + return ret; ++ } + + /* Clear internal statistics */ + ctl = macb_readl(lp, NCR); +@@ -4048,15 +4054,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, + + static int fu540_c000_init(struct platform_device *pdev) + { +- struct resource *res; +- +- res = platform_get_resource(pdev, IORESOURCE_MEM, 1); +- if (!res) +- return -ENODEV; +- +- mgmt->reg = ioremap(res->start, resource_size(res)); +- if (!mgmt->reg) +- return -ENOMEM; ++ mgmt->reg = devm_platform_ioremap_resource(pdev, 1); ++ if (IS_ERR(mgmt->reg)) ++ return PTR_ERR(mgmt->reg); + + return macb_init(pdev); + } +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +index 35478cba2aa5..4344a59c823f 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +@@ -1422,6 +1422,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, + struct mvpp2_ethtool_fs *efs; + int ret; + ++ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) ++ return -EINVAL; ++ + efs = port->rfs_rules[info->fs.location]; + if (!efs) + return -EINVAL; +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +index 
ef44c6979a31..373b8c832850 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +@@ -4319,6 +4319,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; ++ if (rss_context >= MVPP22_N_RSS_TABLES) ++ return -EINVAL; + + if (hfunc) + *hfunc = ETH_RSS_HASH_CRC32; +diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c +index d44ac666e730..87c2e8de6102 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/main.c ++++ b/drivers/net/ethernet/mellanox/mlx4/main.c +@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev) + + if (!err || err == -ENOSPC) { + priv->def_counter[port] = idx; ++ err = 0; + } else if (err == -ENOENT) { + err = 0; + continue; +@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage) + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *idx = get_param_l(&out_param); +- ++ if (WARN_ON(err == -ENOSPC)) ++ err = -EINVAL; + return err; + } + return __mlx4_counter_alloc(dev, idx); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index ea934cd02448..08048a2d7259 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work) + } + + cmd->ent_arr[ent->idx] = ent; +- set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); + lay = get_inst(cmd, ent->idx); + ent->lay = lay; + memset(lay, 0, sizeof(*lay)); +@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work) + + if (ent->callback) + schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); ++ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); + + /* Skip sending command to fw if internal error */ + if (pci_channel_offline(dev->pdev) || +@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work) + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); + + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); ++ /* no doorbell, no need to keep the entry */ ++ free_ent(cmd, ent->idx); ++ if (ent->callback) ++ free_cmd(ent); + return; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +index 095ec7b1399d..7c77378accf0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +@@ -689,6 +689,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq, + pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn); + } + ++static void dr_cq_complete(struct mlx5_core_cq *mcq, ++ struct mlx5_eqe *eqe) ++{ ++ pr_err("CQ completion CQ: #%u\n", mcq->cqn); ++} ++ + static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, + struct mlx5_uars_page *uar, + size_t ncqe) +@@ -750,6 +756,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas); + + cq->mcq.event = dr_cq_event; ++ cq->mcq.comp = dr_cq_complete; + + err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); + kvfree(in); +@@ -761,7 +768,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, + cq->mcq.set_ci_db = cq->wq_ctrl.db.db; + cq->mcq.arm_db = cq->wq_ctrl.db.db + 1; + *cq->mcq.set_ci_db = 0; +- *cq->mcq.arm_db = 0; ++ ++ /* set no-zero value, in order to avoid 
the HW to run db-recovery on ++ * CQ that used in polling mode. ++ */ ++ *cq->mcq.arm_db = cpu_to_be32(2 << 28); ++ + cq->mcq.vector = 0; + cq->mcq.irqn = irqn; + cq->mcq.uar = uar; +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +index e993159e8e4c..295b27112d36 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +@@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) + { ++ struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2; + struct mlxsw_sp_acl_tcam_vregion *vregion; +- struct mlxsw_sp_acl_tcam_vchunk *vchunk; ++ struct list_head *pos; + int err; + + if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) +@@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp, + } + + mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion); +- list_add_tail(&vchunk->list, &vregion->vchunk_list); ++ ++ /* Position the vchunk inside the list according to priority */ ++ list_for_each(pos, &vregion->vchunk_list) { ++ vchunk2 = list_entry(pos, typeof(*vchunk2), list); ++ if (vchunk2->priority > priority) ++ break; ++ } ++ list_add_tail(&vchunk->list, pos); + mutex_unlock(&vregion->lock); + + return vchunk; +diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c +index 9183b3e85d21..354efffac0f9 100644 +--- a/drivers/net/ethernet/netronome/nfp/abm/main.c ++++ b/drivers/net/ethernet/netronome/nfp/abm/main.c +@@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, + if (!nfp_nsp_has_hwinfo_lookup(nsp)) { + nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n"); + eth_hw_addr_random(nn->dp.netdev); ++ nfp_nsp_close(nsp); + return; + } + +diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c +index 12466a72cefc..aab0cf57c654 100644 +--- a/drivers/net/ethernet/toshiba/tc35815.c ++++ b/drivers/net/ethernet/toshiba/tc35815.c +@@ -644,7 +644,7 @@ static int tc_mii_probe(struct net_device *dev) + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); + } +- linkmode_and(phydev->supported, phydev->supported, mask); ++ linkmode_andnot(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); + + lp->link = 0; +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c +index a0abc729f0ca..4c86a73db475 100644 +--- a/drivers/net/macsec.c ++++ b/drivers/net/macsec.c +@@ -1309,7 +1309,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) + struct crypto_aead *tfm; + int ret; + +- tfm = crypto_alloc_aead("gcm(aes)", 0, 0); ++ /* Pick a sync gcm(aes) cipher to ensure order is preserved. 
*/ ++ tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); + + if (IS_ERR(tfm)) + return tfm; +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c +index 8f241b57fcf6..1c75b2627ca8 100644 +--- a/drivers/net/phy/dp83640.c ++++ b/drivers/net/phy/dp83640.c +@@ -1119,7 +1119,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus) + goto out; + } + dp83640_clock_init(clock, bus); +- list_add_tail(&phyter_clocks, &clock->list); ++ list_add_tail(&clock->list, &phyter_clocks); + out: + mutex_unlock(&phyter_clocks_lock); + +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 6c738a271257..4bb8552a00d3 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1359,6 +1359,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ ++ {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index 31b7dcd791c2..f0e0af3aa714 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -1071,8 +1071,17 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, + + status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, + NVME_IDENTIFY_DATA_SIZE); +- if (status) ++ if (status) { ++ dev_warn(ctrl->device, ++ "Identify Descriptors failed (%d)\n", status); ++ /* ++ * Don't treat an error as fatal, as we potentially already ++ * have a NGUID or EUI-64. 
++ */ ++ if (status > 0 && !(status & NVME_SC_DNR)) ++ status = 0; + goto free_data; ++ } + + for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { + struct nvme_ns_id_desc *cur = data + pos; +@@ -1730,26 +1739,15 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) + static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, + struct nvme_id_ns *id, struct nvme_ns_ids *ids) + { +- int ret = 0; +- + memset(ids, 0, sizeof(*ids)); + + if (ctrl->vs >= NVME_VS(1, 1, 0)) + memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); + if (ctrl->vs >= NVME_VS(1, 2, 0)) + memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); +- if (ctrl->vs >= NVME_VS(1, 3, 0)) { +- /* Don't treat error as fatal we potentially +- * already have a NGUID or EUI-64 +- */ +- ret = nvme_identify_ns_descs(ctrl, nsid, ids); +- if (ret) +- dev_warn(ctrl->device, +- "Identify Descriptors failed (%d)\n", ret); +- if (ret > 0) +- ret = 0; +- } +- return ret; ++ if (ctrl->vs >= NVME_VS(1, 3, 0)) ++ return nvme_identify_ns_descs(ctrl, nsid, ids); ++ return 0; + } + + static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) +diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c +index 13179f063a61..6f9c0d18d9ce 100644 +--- a/drivers/staging/gasket/gasket_core.c ++++ b/drivers/staging/gasket/gasket_core.c +@@ -926,6 +926,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma, + gasket_get_bar_index(gasket_dev, + (vma->vm_pgoff << PAGE_SHIFT) + + driver_desc->legacy_mmap_address_offset); ++ ++ if (bar_index < 0) ++ return DO_MAP_REGION_INVALID; ++ + phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset; + while (mapped_bytes < map_length) { + /* +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c +index fe098cf14e6a..3cb9aacfe0b2 100644 +--- a/drivers/tty/serial/xilinx_uartps.c ++++ b/drivers/tty/serial/xilinx_uartps.c +@@ -1445,6 +1445,7 @@ static int cdns_uart_probe(struct platform_device *pdev) + cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; + #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE + cdns_uart_uart_driver.cons = &cdns_uart_console; ++ cdns_uart_console.index = id; + #endif + + rc = uart_register_driver(&cdns_uart_uart_driver); +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 8b3ecef50394..fd0361d72738 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -365,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) + return uniscr; + } + ++static void vc_uniscr_free(struct uni_screen *uniscr) ++{ ++ vfree(uniscr); ++} ++ + static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr) + { +- vfree(vc->vc_uni_screen); ++ vc_uniscr_free(vc->vc_uni_screen); + vc->vc_uni_screen = new_uniscr; + } + +@@ -1230,7 +1235,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, + err = resize_screen(vc, new_cols, new_rows, user); + if (err) { + kfree(newscreen); +- kfree(new_uniscr); ++ vc_uniscr_free(new_uniscr); + return err; + } + +diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c +index af648ba6544d..46105457e1ca 100644 +--- a/drivers/usb/chipidea/ci_hdrc_msm.c ++++ b/drivers/usb/chipidea/ci_hdrc_msm.c +@@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) + hw_write_id_reg(ci, HS_PHY_GENCONFIG_2, + HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0); + +- if (!IS_ERR(ci->platdata->vbus_extcon.edev)) { ++ if 
(!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) { + hw_write_id_reg(ci, HS_PHY_GENCONFIG_2, + HS_PHY_SESS_VLD_CTRL_EN, + HS_PHY_SESS_VLD_CTRL_EN); +diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c +index 633550ec3025..f29c3a936a08 100644 +--- a/drivers/usb/serial/garmin_gps.c ++++ b/drivers/usb/serial/garmin_gps.c +@@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p, + send it directly to the tty port */ + if (garmin_data_p->flags & FLAGS_QUEUING) { + pkt_add(garmin_data_p, data, data_length); +- } else if (bulk_data || +- getLayerId(data) == GARMIN_LAYERID_APPL) { ++ } else if (bulk_data || (data_length >= sizeof(u32) && ++ getLayerId(data) == GARMIN_LAYERID_APPL)) { + + spin_lock_irqsave(&garmin_data_p->lock, flags); + garmin_data_p->flags |= APP_RESP_SEEN; +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index 613f91add03d..ce0401d3137f 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ ++ {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ + {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ + {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ + {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h +index 1b23741036ee..37157ed9a881 100644 +--- a/drivers/usb/storage/unusual_uas.h ++++ b/drivers/usb/storage/unusual_uas.h +@@ -28,6 +28,13 @@ + * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org> + */ + ++/* Reported-by: Julian Groß <julian.g@posteo.de> */ ++UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, ++ "LaCie", ++ "2Big Quadra USB3", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_NO_REPORT_OPCODES), ++ + /* + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI + * commands in UAS mode. Observed with the 1.28 firmware; are there others? 
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c +index 8c1f04c3a684..b79fe6549df6 100644 +--- a/fs/ceph/mds_client.c ++++ b/fs/ceph/mds_client.c +@@ -3072,8 +3072,7 @@ static void handle_session(struct ceph_mds_session *session, + void *end = p + msg->front.iov_len; + struct ceph_mds_session_head *h; + u32 op; +- u64 seq; +- unsigned long features = 0; ++ u64 seq, features = 0; + int wake = 0; + bool blacklisted = false; + +@@ -3092,9 +3091,8 @@ static void handle_session(struct ceph_mds_session *session, + goto bad; + /* version >= 3, feature bits */ + ceph_decode_32_safe(&p, end, len, bad); +- ceph_decode_need(&p, end, len, bad); +- memcpy(&features, p, min_t(size_t, len, sizeof(features))); +- p += len; ++ ceph_decode_64_safe(&p, end, features, bad); ++ p += len - sizeof(features); + } + + mutex_lock(&mdsc->mutex); +diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c +index de56dee60540..19507e2fdb57 100644 +--- a/fs/ceph/quota.c ++++ b/fs/ceph/quota.c +@@ -159,8 +159,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc, + } + + if (IS_ERR(in)) { +- pr_warn("Can't lookup inode %llx (err: %ld)\n", +- realm->ino, PTR_ERR(in)); ++ dout("Can't lookup inode %llx (err: %ld)\n", ++ realm->ino, PTR_ERR(in)); + qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */ + } else { + qri->timeout = 0; +diff --git a/fs/coredump.c b/fs/coredump.c +index d25bad2ed061..5c0375e7440f 100644 +--- a/fs/coredump.c ++++ b/fs/coredump.c +@@ -788,6 +788,14 @@ void do_coredump(const kernel_siginfo_t *siginfo) + if (displaced) + put_files_struct(displaced); + if (!dump_interrupted()) { ++ /* ++ * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would ++ * have this set to NULL. ++ */ ++ if (!cprm.file) { ++ pr_info("Core dump to |%s disabled\n", cn.corename); ++ goto close_fail; ++ } + file_start_write(cprm.file); + core_dumped = binfmt->core_dump(&cprm); + file_end_write(cprm.file); +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 33cff49769cc..6307c1d883e0 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -1176,6 +1176,10 @@ static inline bool chain_epi_lockless(struct epitem *epi) + { + struct eventpoll *ep = epi->ep; + ++ /* Fast preliminary check */ ++ if (epi->next != EP_UNACTIVE_PTR) ++ return false; ++ + /* Check that the same epi has not been just chained from another CPU */ + if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR) + return false; +@@ -1242,16 +1246,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v + * chained in ep->ovflist and requeued later on. + */ + if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) { +- if (epi->next == EP_UNACTIVE_PTR && +- chain_epi_lockless(epi)) ++ if (chain_epi_lockless(epi)) ++ ep_pm_stay_awake_rcu(epi); ++ } else if (!ep_is_linked(epi)) { ++ /* In the usual case, add event to ready list. 
*/ ++ if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) + ep_pm_stay_awake_rcu(epi); +- goto out_unlock; +- } +- +- /* If this file is already in the ready list we exit soon */ +- if (!ep_is_linked(epi) && +- list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) { +- ep_pm_stay_awake_rcu(epi); + } + + /* +@@ -1827,7 +1827,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, + { + int res = 0, eavail, timed_out = 0; + u64 slack = 0; +- bool waiter = false; + wait_queue_entry_t wait; + ktime_t expires, *to = NULL; + +@@ -1872,21 +1871,23 @@ fetch_events: + */ + ep_reset_busy_poll_napi_id(ep); + +- /* +- * We don't have any available event to return to the caller. We need +- * to sleep here, and we will be woken by ep_poll_callback() when events +- * become available. +- */ +- if (!waiter) { +- waiter = true; +- init_waitqueue_entry(&wait, current); +- ++ do { ++ /* ++ * Internally init_wait() uses autoremove_wake_function(), ++ * thus wait entry is removed from the wait queue on each ++ * wakeup. Why it is important? In case of several waiters ++ * each new wakeup will hit the next waiter, giving it the ++ * chance to harvest new event. Otherwise wakeup can be ++ * lost. This is also good performance-wise, because on ++ * normal wakeup path no need to call __remove_wait_queue() ++ * explicitly, thus ep->lock is not taken, which halts the ++ * event delivery. ++ */ ++ init_wait(&wait); + write_lock_irq(&ep->lock); + __add_wait_queue_exclusive(&ep->wq, &wait); + write_unlock_irq(&ep->lock); +- } + +- for (;;) { + /* + * We don't want to sleep if the ep_poll_callback() sends us + * a wakeup in between. That's why we set the task state +@@ -1916,10 +1917,20 @@ fetch_events: + timed_out = 1; + break; + } +- } ++ ++ /* We were woken up, thus go and try to harvest some events */ ++ eavail = 1; ++ ++ } while (0); + + __set_current_state(TASK_RUNNING); + ++ if (!list_empty_careful(&wait.entry)) { ++ write_lock_irq(&ep->lock); ++ __remove_wait_queue(&ep->wq, &wait); ++ write_unlock_irq(&ep->lock); ++ } ++ + send_events: + /* + * Try to transfer events to user space. In case we get 0 events and +@@ -1930,12 +1941,6 @@ send_events: + !(res = ep_send_events(ep, events, maxevents)) && !timed_out) + goto fetch_events; + +- if (waiter) { +- write_lock_irq(&ep->lock); +- __remove_wait_queue(&ep->wq, &wait); +- write_unlock_irq(&ep->lock); +- } +- + return res; + } + +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c +index 5778d1347b35..f5d30573f4a9 100644 +--- a/fs/notify/fanotify/fanotify.c ++++ b/fs/notify/fanotify/fanotify.c +@@ -26,7 +26,7 @@ static bool should_merge(struct fsnotify_event *old_fsn, + old = FANOTIFY_E(old_fsn); + new = FANOTIFY_E(new_fsn); + +- if (old_fsn->inode != new_fsn->inode || old->pid != new->pid || ++ if (old_fsn->objectid != new_fsn->objectid || old->pid != new->pid || + old->fh_type != new->fh_type || old->fh_len != new->fh_len) + return false; + +@@ -314,7 +314,12 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, + if (!event) + goto out; + init: __maybe_unused +- fsnotify_init_event(&event->fse, inode); ++ /* ++ * Use the victim inode instead of the watching inode as the id for ++ * event queue, so event reported on parent is merged with event ++ * reported on child when both directory and child watches exist. 
++ */ ++ fsnotify_init_event(&event->fse, (unsigned long)id); + event->mask = mask; + if (FAN_GROUP_FLAG(group, FAN_REPORT_TID)) + event->pid = get_pid(task_pid(current)); +diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c +index d510223d302c..589dee962993 100644 +--- a/fs/notify/inotify/inotify_fsnotify.c ++++ b/fs/notify/inotify/inotify_fsnotify.c +@@ -39,7 +39,7 @@ static bool event_compare(struct fsnotify_event *old_fsn, + if (old->mask & FS_IN_IGNORED) + return false; + if ((old->mask == new->mask) && +- (old_fsn->inode == new_fsn->inode) && ++ (old_fsn->objectid == new_fsn->objectid) && + (old->name_len == new->name_len) && + (!old->name_len || !strcmp(old->name, new->name))) + return true; +@@ -118,7 +118,7 @@ int inotify_handle_event(struct fsnotify_group *group, + mask &= ~IN_ISDIR; + + fsn_event = &event->fse; +- fsnotify_init_event(fsn_event, inode); ++ fsnotify_init_event(fsn_event, (unsigned long)inode); + event->mask = mask; + event->wd = i_mark->wd; + event->sync_cookie = cookie; +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c +index 107537a543fd..81ffc8629fc4 100644 +--- a/fs/notify/inotify/inotify_user.c ++++ b/fs/notify/inotify/inotify_user.c +@@ -635,7 +635,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events) + return ERR_PTR(-ENOMEM); + } + group->overflow_event = &oevent->fse; +- fsnotify_init_event(group->overflow_event, NULL); ++ fsnotify_init_event(group->overflow_event, 0); + oevent->mask = FS_Q_OVERFLOW; + oevent->wd = -1; + oevent->sync_cookie = 0; +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h +index 4fc87dee005a..2849bdbb3acb 100644 +--- a/include/linux/backing-dev-defs.h ++++ b/include/linux/backing-dev-defs.h +@@ -220,6 +220,7 @@ struct backing_dev_info { + wait_queue_head_t wb_waitq; + + struct device *dev; ++ char dev_name[64]; + struct device *owner; + + struct timer_list laptop_mode_wb_timer; +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h +index f88197c1ffc2..c9ad5c3b7b4b 100644 +--- a/include/linux/backing-dev.h ++++ b/include/linux/backing-dev.h +@@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi) + (1 << WB_async_congested)); + } + +-extern const char *bdi_unknown_name; +- +-static inline const char *bdi_dev_name(struct backing_dev_info *bdi) +-{ +- if (!bdi || !bdi->dev) +- return bdi_unknown_name; +- return dev_name(bdi->dev); +-} ++const char *bdi_dev_name(struct backing_dev_info *bdi); + + #endif /* _LINUX_BACKING_DEV_H */ +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h +index 1915bdba2fad..64cfb5446f4d 100644 +--- a/include/linux/fsnotify_backend.h ++++ b/include/linux/fsnotify_backend.h +@@ -133,8 +133,7 @@ struct fsnotify_ops { + */ + struct fsnotify_event { + struct list_head list; +- /* inode may ONLY be dereferenced during handle_event(). 
*/ +- struct inode *inode; /* either the inode the event happened to or its parent */ ++ unsigned long objectid; /* identifier for queue merges */ + }; + + /* +@@ -500,10 +499,10 @@ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); + extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); + + static inline void fsnotify_init_event(struct fsnotify_event *event, +- struct inode *inode) ++ unsigned long objectid) + { + INIT_LIST_HEAD(&event->list); +- event->inode = inode; ++ event->objectid = objectid; + } + + #else +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h +index 0d1fe9297ac6..6f6ade63b04c 100644 +--- a/include/linux/virtio_net.h ++++ b/include/linux/virtio_net.h +@@ -3,6 +3,8 @@ + #define _LINUX_VIRTIO_NET_H + + #include <linux/if_vlan.h> ++#include <uapi/linux/tcp.h> ++#include <uapi/linux/udp.h> + #include <uapi/linux/virtio_net.h> + + static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, +@@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + bool little_endian) + { + unsigned int gso_type = 0; ++ unsigned int thlen = 0; ++ unsigned int ip_proto; + + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { + case VIRTIO_NET_HDR_GSO_TCPV4: + gso_type = SKB_GSO_TCPV4; ++ ip_proto = IPPROTO_TCP; ++ thlen = sizeof(struct tcphdr); + break; + case VIRTIO_NET_HDR_GSO_TCPV6: + gso_type = SKB_GSO_TCPV6; ++ ip_proto = IPPROTO_TCP; ++ thlen = sizeof(struct tcphdr); + break; + case VIRTIO_NET_HDR_GSO_UDP: + gso_type = SKB_GSO_UDP; ++ ip_proto = IPPROTO_UDP; ++ thlen = sizeof(struct udphdr); + break; + default: + return -EINVAL; +@@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + + if (!skb_partial_csum_set(skb, start, off)) + return -EINVAL; ++ ++ if (skb_transport_offset(skb) + thlen > skb_headlen(skb)) ++ return -EINVAL; + } else { + /* gso packets without NEEDS_CSUM do not set transport_offset. + * probe and drop if does not match one of the above types. 
+ */ + if (gso_type && skb->network_header) { ++ struct flow_keys_basic keys; ++ + if (!skb->protocol) + virtio_net_hdr_set_proto(skb, hdr); + retry: +- skb_probe_transport_header(skb); +- if (!skb_transport_header_was_set(skb)) { ++ if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, ++ NULL, 0, 0, 0, ++ 0)) { + /* UFO does not specify ipv4 or 6: try both */ + if (gso_type & SKB_GSO_UDP && + skb->protocol == htons(ETH_P_IP)) { +@@ -75,6 +91,12 @@ retry: + } + return -EINVAL; + } ++ ++ if (keys.control.thoff + thlen > skb_headlen(skb) || ++ keys.basic.ip_proto != ip_proto) ++ return -EINVAL; ++ ++ skb_set_transport_header(skb, keys.control.thoff); + } + } + +diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h +index c8e2bebd8d93..0f0d1efe06dd 100644 +--- a/include/net/inet_ecn.h ++++ b/include/net/inet_ecn.h +@@ -99,6 +99,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph) + return 1; + } + ++static inline int IP_ECN_set_ect1(struct iphdr *iph) ++{ ++ u32 check = (__force u32)iph->check; ++ ++ if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0) ++ return 0; ++ ++ check += (__force u16)htons(0x100); ++ ++ iph->check = (__force __sum16)(check + (check>=0xFFFF)); ++ iph->tos ^= INET_ECN_MASK; ++ return 1; ++} ++ + static inline void IP_ECN_clear(struct iphdr *iph) + { + iph->tos &= ~INET_ECN_MASK; +@@ -134,6 +148,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) + return 1; + } + ++static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph) ++{ ++ __be32 from, to; ++ ++ if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0) ++ return 0; ++ ++ from = *(__be32 *)iph; ++ to = from ^ htonl(INET_ECN_MASK << 20); ++ *(__be32 *)iph = to; ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), ++ (__force __wsum)to); ++ return 1; ++} ++ + static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) + { + dscp &= ~INET_ECN_MASK; +@@ -159,6 +189,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) + return 0; + } + ++static inline int INET_ECN_set_ect1(struct sk_buff *skb) ++{ ++ switch (skb->protocol) { ++ case cpu_to_be16(ETH_P_IP): ++ if (skb_network_header(skb) + sizeof(struct iphdr) <= ++ skb_tail_pointer(skb)) ++ return IP_ECN_set_ect1(ip_hdr(skb)); ++ break; ++ ++ case cpu_to_be16(ETH_P_IPV6): ++ if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= ++ skb_tail_pointer(skb)) ++ return IP6_ECN_set_ect1(skb, ipv6_hdr(skb)); ++ break; ++ } ++ ++ return 0; ++} ++ + /* + * RFC 6040 4.2 + * To decapsulate the inner header at the tunnel egress, a compliant +@@ -208,8 +257,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb, + int rc; + + rc = __INET_ECN_decapsulate(outer, inner, &set_ce); +- if (!rc && set_ce) +- INET_ECN_set_ce(skb); ++ if (!rc) { ++ if (set_ce) ++ INET_ECN_set_ce(skb); ++ else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1) ++ INET_ECN_set_ect1(skb); ++ } + + return rc; + } +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h +index 4b5656c71abc..bd0f1595bdc7 100644 +--- a/include/net/ip6_fib.h ++++ b/include/net/ip6_fib.h +@@ -177,6 +177,7 @@ struct fib6_info { + struct rt6_info { + struct dst_entry dst; + struct fib6_info __rcu *from; ++ int sernum; + + struct rt6key rt6i_dst; + struct rt6key rt6i_src; +@@ -260,6 +261,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt) + struct fib6_info *from; + u32 cookie = 0; + ++ if (rt->sernum) ++ return rt->sernum; ++ + rcu_read_lock(); + + from = 
rcu_dereference(rt->from); +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h +index c7e15a213ef2..0fca98a3d2d3 100644 +--- a/include/net/net_namespace.h ++++ b/include/net/net_namespace.h +@@ -428,6 +428,13 @@ static inline int rt_genid_ipv4(struct net *net) + return atomic_read(&net->ipv4.rt_genid); + } + ++#if IS_ENABLED(CONFIG_IPV6) ++static inline int rt_genid_ipv6(const struct net *net) ++{ ++ return atomic_read(&net->ipv6.fib6_sernum); ++} ++#endif ++ + static inline void rt_genid_bump_ipv4(struct net *net) + { + atomic_inc(&net->ipv4.rt_genid); +diff --git a/ipc/mqueue.c b/ipc/mqueue.c +index 3d920ff15c80..2ea0c08188e6 100644 +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -82,6 +82,7 @@ struct mqueue_inode_info { + + struct sigevent notify; + struct pid *notify_owner; ++ u32 notify_self_exec_id; + struct user_namespace *notify_user_ns; + struct user_struct *user; /* user who created, for accounting */ + struct sock *notify_sock; +@@ -709,28 +710,44 @@ static void __do_notify(struct mqueue_inode_info *info) + * synchronously. */ + if (info->notify_owner && + info->attr.mq_curmsgs == 1) { +- struct kernel_siginfo sig_i; + switch (info->notify.sigev_notify) { + case SIGEV_NONE: + break; +- case SIGEV_SIGNAL: +- /* sends signal */ ++ case SIGEV_SIGNAL: { ++ struct kernel_siginfo sig_i; ++ struct task_struct *task; ++ ++ /* do_mq_notify() accepts sigev_signo == 0, why?? */ ++ if (!info->notify.sigev_signo) ++ break; + + clear_siginfo(&sig_i); + sig_i.si_signo = info->notify.sigev_signo; + sig_i.si_errno = 0; + sig_i.si_code = SI_MESGQ; + sig_i.si_value = info->notify.sigev_value; +- /* map current pid/uid into info->owner's namespaces */ + rcu_read_lock(); ++ /* map current pid/uid into info->owner's namespaces */ + sig_i.si_pid = task_tgid_nr_ns(current, + ns_of_pid(info->notify_owner)); +- sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); ++ sig_i.si_uid = from_kuid_munged(info->notify_user_ns, ++ current_uid()); ++ /* ++ * We can't use kill_pid_info(), this signal should ++ * bypass check_kill_permission(). It is from kernel ++ * but si_fromuser() can't know this. ++ * We do check the self_exec_id, to avoid sending ++ * signals to programs that don't expect them. 
++ */ ++ task = pid_task(info->notify_owner, PIDTYPE_TGID); ++ if (task && task->self_exec_id == ++ info->notify_self_exec_id) { ++ do_send_sig_info(info->notify.sigev_signo, ++ &sig_i, task, PIDTYPE_TGID); ++ } + rcu_read_unlock(); +- +- kill_pid_info(info->notify.sigev_signo, +- &sig_i, info->notify_owner); + break; ++ } + case SIGEV_THREAD: + set_cookie(info->notify_cookie, NOTIFY_WOKENUP); + netlink_sendskb(info->notify_sock, info->notify_cookie); +@@ -1315,6 +1332,7 @@ retry: + info->notify.sigev_signo = notification->sigev_signo; + info->notify.sigev_value = notification->sigev_value; + info->notify.sigev_notify = SIGEV_SIGNAL; ++ info->notify_self_exec_id = current->self_exec_id; + break; + } + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index c6ccaf6c62f7..721947b9962d 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -8318,6 +8318,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) + */ + allocate_snapshot = false; + #endif ++ ++ /* ++ * Because of some magic with the way alloc_percpu() works on ++ * x86_64, we need to synchronize the pgd of all the tables, ++ * otherwise the trace events that happen in x86_64 page fault ++ * handlers can't cope with accessing the chance that a ++ * alloc_percpu()'d memory might be touched in the page fault trace ++ * event. Oh, and we need to audit all other alloc_percpu() and vmalloc() ++ * calls in tracing, because something might get triggered within a ++ * page fault trace event! ++ */ ++ vmalloc_sync_mappings(); ++ + return 0; + } + +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index 2f0f7fcee73e..fba4b48451f6 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -454,7 +454,7 @@ static bool __within_notrace_func(unsigned long addr) + + static bool within_notrace_func(struct trace_kprobe *tk) + { +- unsigned long addr = addr = trace_kprobe_address(tk); ++ unsigned long addr = trace_kprobe_address(tk); + char symname[KSYM_NAME_LEN], *p; + + if (!__within_notrace_func(addr)) +diff --git a/kernel/umh.c b/kernel/umh.c +index 7f255b5a8845..11bf5eea474c 100644 +--- a/kernel/umh.c ++++ b/kernel/umh.c +@@ -544,6 +544,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob); + * Runs a user-space application. The application is started + * asynchronously if wait is not set, and runs as a child of system workqueues. + * (ie. it runs with full root capabilities and optimized affinity). ++ * ++ * Note: successful return value does not guarantee the helper was called at ++ * all. You can't rely on sub_info->{init,cleanup} being called even for ++ * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers ++ * into a successful no-op. + */ + int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) + { +diff --git a/mm/backing-dev.c b/mm/backing-dev.c +index 62f05f605fb5..3f2480e4c5af 100644 +--- a/mm/backing-dev.c ++++ b/mm/backing-dev.c +@@ -21,7 +21,7 @@ struct backing_dev_info noop_backing_dev_info = { + EXPORT_SYMBOL_GPL(noop_backing_dev_info); + + static struct class *bdi_class; +-const char *bdi_unknown_name = "(unknown)"; ++static const char *bdi_unknown_name = "(unknown)"; + + /* + * bdi_lock protects bdi_tree and updates to bdi_list. 
bdi_list has RCU +@@ -938,7 +938,8 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) + if (bdi->dev) /* The driver needs to use separate queues per device */ + return 0; + +- dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args); ++ vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args); ++ dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); + if (IS_ERR(dev)) + return PTR_ERR(dev); + +@@ -1043,6 +1044,14 @@ void bdi_put(struct backing_dev_info *bdi) + } + EXPORT_SYMBOL(bdi_put); + ++const char *bdi_dev_name(struct backing_dev_info *bdi) ++{ ++ if (!bdi || !bdi->dev) ++ return bdi_unknown_name; ++ return bdi->dev_name; ++} ++EXPORT_SYMBOL_GPL(bdi_dev_name); ++ + static wait_queue_head_t congestion_wqh[2] = { + __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), + __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 8159000781be..0d6f3ea86738 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -5101,19 +5101,22 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + unsigned int size; + int node; + int __maybe_unused i; ++ long error = -ENOMEM; + + size = sizeof(struct mem_cgroup); + size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); + + memcg = kzalloc(size, GFP_KERNEL); + if (!memcg) +- return NULL; ++ return ERR_PTR(error); + + memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, + 1, MEM_CGROUP_ID_MAX, + GFP_KERNEL); +- if (memcg->id.id < 0) ++ if (memcg->id.id < 0) { ++ error = memcg->id.id; + goto fail; ++ } + + memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu); + if (!memcg->vmstats_local) +@@ -5158,7 +5161,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + fail: + mem_cgroup_id_remove(memcg); + __mem_cgroup_free(memcg); +- return NULL; ++ return ERR_PTR(error); + } + + static struct cgroup_subsys_state * __ref +@@ -5169,8 +5172,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + long error = -ENOMEM; + + memcg = mem_cgroup_alloc(); +- if (!memcg) +- return ERR_PTR(error); ++ if (IS_ERR(memcg)) ++ return ERR_CAST(memcg); + + memcg->high = PAGE_COUNTER_MAX; + memcg->soft_limit = PAGE_COUNTER_MAX; +@@ -5220,7 +5223,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + fail: + mem_cgroup_id_remove(memcg); + mem_cgroup_free(memcg); +- return ERR_PTR(-ENOMEM); ++ return ERR_PTR(error); + } + + static int mem_cgroup_css_online(struct cgroup_subsys_state *css) +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index d387ca74cb5a..98d5c940facd 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -1555,6 +1555,7 @@ void set_zone_contiguous(struct zone *zone) + if (!__pageblock_pfn_to_page(block_start_pfn, + block_end_pfn, zone)) + return; ++ cond_resched(); + } + + /* We confirm that there is no hole */ +@@ -2350,6 +2351,14 @@ static inline void boost_watermark(struct zone *zone) + + if (!watermark_boost_factor) + return; ++ /* ++ * Don't bother in zones that are unlikely to produce results. ++ * On small machines, including kdump capture kernels running ++ * in a small area, boosting the watermark can cause an out of ++ * memory situation immediately. 
++ */ ++ if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) ++ return; + + max_boost = mult_frac(zone->_watermark[WMARK_HIGH], + watermark_boost_factor, 10000); +diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c +index 8033f24f506c..a9e7540c5691 100644 +--- a/net/batman-adv/bat_v_ogm.c ++++ b/net/batman-adv/bat_v_ogm.c +@@ -897,7 +897,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, + + orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig); + if (!orig_node) +- return; ++ goto out; + + neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming, + ethhdr->h_source); +diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c +index 580609389f0f..70e3b161c663 100644 +--- a/net/batman-adv/network-coding.c ++++ b/net/batman-adv/network-coding.c +@@ -1009,15 +1009,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv, + */ + static u8 batadv_nc_random_weight_tq(u8 tq) + { +- u8 rand_val, rand_tq; +- +- get_random_bytes(&rand_val, sizeof(rand_val)); +- + /* randomize the estimated packet loss (max TQ - estimated TQ) */ +- rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq); +- +- /* normalize the randomized packet loss */ +- rand_tq /= BATADV_TQ_MAX_VALUE; ++ u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq); + + /* convert to (randomized) estimated tq again */ + return BATADV_TQ_MAX_VALUE - rand_tq; +diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c +index e5bbc28ed12c..079a13493880 100644 +--- a/net/batman-adv/sysfs.c ++++ b/net/batman-adv/sysfs.c +@@ -1150,7 +1150,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, + ret = batadv_parse_throughput(net_dev, buff, "throughput_override", + &tp_override); + if (!ret) +- return count; ++ goto out; + + old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override); + if (old_tp_override == tp_override) +@@ -1190,6 +1190,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj, + + tp_override = atomic_read(&hard_iface->bat_v.throughput_override); + ++ batadv_hardif_put(hard_iface); + return sprintf(buff, "%u.%u MBit\n", tp_override / 10, + tp_override % 10); + } +diff --git a/net/core/devlink.c b/net/core/devlink.c +index 4c25f1aa2d37..5667cae57072 100644 +--- a/net/core/devlink.c ++++ b/net/core/devlink.c +@@ -3907,6 +3907,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb, + end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]); + end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]); + dump = false; ++ ++ if (start_offset == end_offset) { ++ err = 0; ++ goto nla_put_failure; ++ } + } + + err = devlink_nl_region_read_snapshot_fill(skb, devlink, +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 920784a9b7ff..7b40d12f0c22 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -1954,6 +1954,9 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, + NEIGH_UPDATE_F_OVERRIDE_ISROUTER); + } + ++ if (protocol) ++ neigh->protocol = protocol; ++ + if (ndm->ndm_flags & NTF_EXT_LEARNED) + flags |= NEIGH_UPDATE_F_EXT_LEARNED; + +@@ -1967,9 +1970,6 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, + err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags, + NETLINK_CB(skb).portid, extack); + +- if (protocol) +- neigh->protocol = protocol; +- + neigh_release(neigh); + + out: +diff --git a/net/dsa/master.c b/net/dsa/master.c +index 3255dfc97f86..be0b4ed3b7d8 100644 +--- 
a/net/dsa/master.c ++++ b/net/dsa/master.c +@@ -259,7 +259,8 @@ static void dsa_master_ndo_teardown(struct net_device *dev) + { + struct dsa_port *cpu_dp = dev->dsa_ptr; + +- dev->netdev_ops = cpu_dp->orig_ndo_ops; ++ if (cpu_dp->orig_ndo_ops) ++ dev->netdev_ops = cpu_dp->orig_ndo_ops; + cpu_dp->orig_ndo_ops = NULL; + } + +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 894c7370c1bd..c81d8e9e5169 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -1388,9 +1388,18 @@ static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res) + } + ip6_rt_copy_init(pcpu_rt, res); + pcpu_rt->rt6i_flags |= RTF_PCPU; ++ ++ if (f6i->nh) ++ pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev)); ++ + return pcpu_rt; + } + ++static bool rt6_is_valid(const struct rt6_info *rt6) ++{ ++ return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev)); ++} ++ + /* It should be called with rcu_read_lock() acquired */ + static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res) + { +@@ -1398,6 +1407,19 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res) + + pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu); + ++ if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) { ++ struct rt6_info *prev, **p; ++ ++ p = this_cpu_ptr(res->nh->rt6i_pcpu); ++ prev = xchg(p, NULL); ++ if (prev) { ++ dst_dev_put(&prev->dst); ++ dst_release(&prev->dst); ++ } ++ ++ pcpu_rt = NULL; ++ } ++ + return pcpu_rt; + } + +@@ -2599,6 +2621,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) + + rt = container_of(dst, struct rt6_info, dst); + ++ if (rt->sernum) ++ return rt6_is_valid(rt) ? dst : NULL; ++ + rcu_read_lock(); + + /* All IPV6 dsts are created with ->obsolete set to the value +diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c +index 3d816a1e5442..59151dc07fdc 100644 +--- a/net/netfilter/nf_nat_proto.c ++++ b/net/netfilter/nf_nat_proto.c +@@ -68,15 +68,13 @@ static bool udp_manip_pkt(struct sk_buff *skb, + enum nf_nat_manip_type maniptype) + { + struct udphdr *hdr; +- bool do_csum; + + if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) + return false; + + hdr = (struct udphdr *)(skb->data + hdroff); +- do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL; ++ __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check); + +- __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum); + return true; + } + +diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c +index 9f5dea0064ea..916a3c7f9eaf 100644 +--- a/net/netfilter/nfnetlink_osf.c ++++ b/net/netfilter/nfnetlink_osf.c +@@ -165,12 +165,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb, + static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx, + const struct sk_buff *skb, + const struct iphdr *ip, +- unsigned char *opts) ++ unsigned char *opts, ++ struct tcphdr *_tcph) + { + const struct tcphdr *tcp; +- struct tcphdr _tcph; + +- tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); ++ tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph); + if (!tcp) + return NULL; + +@@ -205,10 +205,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family, + int fmatch = FMATCH_WRONG; + struct nf_osf_hdr_ctx ctx; + const struct tcphdr *tcp; ++ struct tcphdr _tcph; + + memset(&ctx, 0, sizeof(ctx)); + +- tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); ++ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph); + if (!tcp) + return false; + +@@ -265,10 +266,11 @@ bool nf_osf_find(const 
struct sk_buff *skb, + const struct nf_osf_finger *kf; + struct nf_osf_hdr_ctx ctx; + const struct tcphdr *tcp; ++ struct tcphdr _tcph; + + memset(&ctx, 0, sizeof(ctx)); + +- tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); ++ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph); + if (!tcp) + return false; + +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c +index dba70377bbd9..4021f726b58f 100644 +--- a/net/sched/sch_choke.c ++++ b/net/sched/sch_choke.c +@@ -323,7 +323,8 @@ static void choke_reset(struct Qdisc *sch) + + sch->q.qlen = 0; + sch->qstats.backlog = 0; +- memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); ++ if (q->tab) ++ memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); + q->head = q->tail = 0; + red_restart(&q->vars); + } +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c +index c261c0a18868..76d72c3f52ed 100644 +--- a/net/sched/sch_fq_codel.c ++++ b/net/sched/sch_fq_codel.c +@@ -417,7 +417,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, + q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); + + if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) +- q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); ++ q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); + + if (tb[TCA_FQ_CODEL_MEMORY_LIMIT]) + q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])); +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c +index c787d4d46017..5a6def5e4e6d 100644 +--- a/net/sched/sch_sfq.c ++++ b/net/sched/sch_sfq.c +@@ -637,6 +637,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) + if (ctl->divisor && + (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) + return -EINVAL; ++ ++ /* slot->allot is a short, make sure quantum is not too big. */ ++ if (ctl->quantum) { ++ unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum); ++ ++ if (scaled <= 0 || scaled > SHRT_MAX) ++ return -EINVAL; ++ } ++ + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, + ctl_v1->Wlog)) + return -EINVAL; +diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c +index 0fb10abf7579..7a5e4c454715 100644 +--- a/net/sched/sch_skbprio.c ++++ b/net/sched/sch_skbprio.c +@@ -169,6 +169,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt, + { + struct tc_skbprio_qopt *ctl = nla_data(opt); + ++ if (opt->nla_len != nla_attr_size(sizeof(*ctl))) ++ return -EINVAL; ++ + sch->limit = ctl->limit; + return 0; + } +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index c6d83a64eac3..5625a9500f21 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -1865,7 +1865,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_a( + */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, +- SCTP_ST_CHUNK(0), NULL, ++ SCTP_ST_CHUNK(0), repl, + commands); + } else { + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, +@@ -5470,7 +5470,7 @@ enum sctp_disposition sctp_sf_do_9_2_start_shutdown( + * in the Cumulative TSN Ack field the last sequential TSN it + * has received from the peer. 
+ */ +- reply = sctp_make_shutdown(asoc, NULL); ++ reply = sctp_make_shutdown(asoc, arg); + if (!reply) + goto nomem; + +@@ -6068,7 +6068,7 @@ enum sctp_disposition sctp_sf_autoclose_timer_expire( + disposition = SCTP_DISPOSITION_CONSUME; + if (sctp_outq_is_empty(&asoc->outqueue)) { + disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, +- arg, commands); ++ NULL, commands); + } + + return disposition; +diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c +index 3a12fc18239b..73dbed0c4b6b 100644 +--- a/net/tipc/topsrv.c ++++ b/net/tipc/topsrv.c +@@ -402,10 +402,11 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) + read_lock_bh(&sk->sk_callback_lock); + ret = tipc_conn_rcv_sub(srv, con, &s); + read_unlock_bh(&sk->sk_callback_lock); ++ if (!ret) ++ return 0; + } +- if (ret < 0) +- tipc_conn_close(con); + ++ tipc_conn_close(con); + return ret; + } + +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c +index 41e9c2932b34..5513a08a4308 100644 +--- a/net/tls/tls_sw.c ++++ b/net/tls/tls_sw.c +@@ -797,6 +797,8 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, + *copied -= sk_msg_free(sk, msg); + tls_free_open_rec(sk); + } ++ if (psock) ++ sk_psock_put(sk, psock); + return err; + } + more_data: +@@ -2076,8 +2078,9 @@ static void tls_data_ready(struct sock *sk) + strp_data_ready(&ctx->strp); + + psock = sk_psock_get(sk); +- if (psock && !list_empty(&psock->ingress_msg)) { +- ctx->saved_data_ready(sk); ++ if (psock) { ++ if (!list_empty(&psock->ingress_msg)) ++ ctx->saved_data_ready(sk); + sk_psock_put(sk, psock); + } + } +diff --git a/scripts/decodecode b/scripts/decodecode +index ba8b8d5834e6..fbdb325cdf4f 100755 +--- a/scripts/decodecode ++++ b/scripts/decodecode +@@ -126,7 +126,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \ + faultline=`cat $T.dis | head -1 | cut -d":" -f2-` + faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'` + +-cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" ++cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" + echo + cat $T.aa + cleanup +diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py +index f79b23582a1d..7e344a78a627 100644 +--- a/tools/cgroup/iocost_monitor.py ++++ b/tools/cgroup/iocost_monitor.py +@@ -159,7 +159,12 @@ class IocgStat: + else: + self.inflight_pct = 0 + +- self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000 ++ # vdebt used to be an atomic64_t and is now u64, support both ++ try: ++ self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000 ++ except: ++ self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000 ++ + self.use_delay = blkg.use_delay.counter.value_() + self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000 + +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index d6a971326f87..fcc6cd404f56 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -1402,7 +1402,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s + struct cfi_reg *cfa = &state->cfa; + struct stack_op *op = &insn->stack_op; + +- if (cfa->base != CFI_SP) ++ if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) + return 0; + + /* push */ +diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c +index d31f267961e7..25c0e47d57cb 100644 +--- a/virt/kvm/arm/hyp/aarch32.c ++++ b/virt/kvm/arm/hyp/aarch32.c +@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) + */ + void 
__hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) + { ++ u32 pc = *vcpu_pc(vcpu); + bool is_thumb; + + is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); + if (is_thumb && !is_wide_instr) +- *vcpu_pc(vcpu) += 2; ++ pc += 2; + else +- *vcpu_pc(vcpu) += 4; ++ pc += 4; ++ ++ *vcpu_pc(vcpu) = pc; ++ + kvm_adjust_itstate(vcpu); + } +diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c +index 0d090482720d..7eacf00e5abe 100644 +--- a/virt/kvm/arm/vgic/vgic-mmio.c ++++ b/virt/kvm/arm/vgic/vgic-mmio.c +@@ -389,7 +389,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, + static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) + { + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || +- intid > VGIC_NR_PRIVATE_IRQS) ++ intid >= VGIC_NR_PRIVATE_IRQS) + kvm_arm_halt_guest(vcpu->kvm); + } + +@@ -397,7 +397,7 @@ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) + static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid) + { + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || +- intid > VGIC_NR_PRIVATE_IRQS) ++ intid >= VGIC_NR_PRIVATE_IRQS) + kvm_arm_resume_guest(vcpu->kvm); + } + |