diff options
author | Mike Pagano <mpagano@gentoo.org> | 2020-04-24 08:02:16 -0400 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2020-04-24 08:02:16 -0400 |
commit | 3787c4cd245ebd74bb1b515a36f9f711c5df3728 (patch) | |
tree | ef754a9cdf63df5a1e7cdce38ec85e15873a0263 | |
parent | Update distro Kconfig to support needed options for elogind (diff) | |
download | linux-patches-3787c4cd245ebd74bb1b515a36f9f711c5df3728.tar.gz linux-patches-3787c4cd245ebd74bb1b515a36f9f711c5df3728.tar.bz2 linux-patches-3787c4cd245ebd74bb1b515a36f9f711c5df3728.zip |
Linux patch 4.14.177 (tag: 4.14-187)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 | ||||
-rw-r--r-- | 1176_linux-4.14.177.patch | 7135 |
2 files changed, 7139 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 1de9582d..f56b415f 100644 --- a/0000_README +++ b/0000_README @@ -747,6 +747,10 @@ Patch: 1175_linux-4.14.176.patch From: https://www.kernel.org Desc: Linux 4.14.176 +Patch: 1176_linux-4.14.177.patch +From: https://www.kernel.org +Desc: Linux 4.14.177 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1176_linux-4.14.177.patch b/1176_linux-4.14.177.patch new file mode 100644 index 00000000..9f308a1b --- /dev/null +++ b/1176_linux-4.14.177.patch @@ -0,0 +1,7135 @@ +diff --git a/Documentation/sound/hd-audio/index.rst b/Documentation/sound/hd-audio/index.rst +index f8a72ffffe66..6e12de9fc34e 100644 +--- a/Documentation/sound/hd-audio/index.rst ++++ b/Documentation/sound/hd-audio/index.rst +@@ -8,3 +8,4 @@ HD-Audio + models + controls + dp-mst ++ realtek-pc-beep +diff --git a/Documentation/sound/hd-audio/realtek-pc-beep.rst b/Documentation/sound/hd-audio/realtek-pc-beep.rst +new file mode 100644 +index 000000000000..be47c6f76a6e +--- /dev/null ++++ b/Documentation/sound/hd-audio/realtek-pc-beep.rst +@@ -0,0 +1,129 @@ ++=============================== ++Realtek PC Beep Hidden Register ++=============================== ++ ++This file documents the "PC Beep Hidden Register", which is present in certain ++Realtek HDA codecs and controls a muxer and pair of passthrough mixers that can ++route audio between pins but aren't themselves exposed as HDA widgets. As far ++as I can tell, these hidden routes are designed to allow flexible PC Beep output ++for codecs that don't have mixer widgets in their output paths. Why it's easier ++to hide a mixer behind an undocumented vendor register than to just expose it ++as a widget, I have no idea. ++ ++Register Description ++==================== ++ ++The register is accessed via processing coefficient 0x36 on NID 20h. 
Bits not ++identified below have no discernible effect on my machine, a Dell XPS 13 9350:: ++ ++ MSB LSB ++ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ++ | |h|S|L| | B |R| | Known bits ++ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+ ++ |0|0|1|1| 0x7 |0|0x0|1| 0x7 | Reset value ++ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ++ ++1Ah input select (B): 2 bits ++ When zero, expose the PC Beep line (from the internal beep generator, when ++ enabled with the Set Beep Generation verb on NID 01h, or else from the ++ external PCBEEP pin) on the 1Ah pin node. When nonzero, expose the headphone ++ jack (or possibly Line In on some machines) input instead. If PC Beep is ++ selected, the 1Ah boost control has no effect. ++ ++Amplify 1Ah loopback, left (L): 1 bit ++ Amplify the left channel of 1Ah before mixing it into outputs as specified ++ by h and S bits. Does not affect the level of 1Ah exposed to other widgets. ++ ++Amplify 1Ah loopback, right (R): 1 bit ++ Amplify the right channel of 1Ah before mixing it into outputs as specified ++ by h and S bits. Does not affect the level of 1Ah exposed to other widgets. ++ ++Loopback 1Ah to 21h [active low] (h): 1 bit ++ When zero, mix 1Ah (possibly with amplification, depending on L and R bits) ++ into 21h (headphone jack on my machine). Mixed signal respects the mute ++ setting on 21h. ++ ++Loopback 1Ah to 14h (S): 1 bit ++ When one, mix 1Ah (possibly with amplification, depending on L and R bits) ++ into 14h (internal speaker on my machine). Mixed signal **ignores** the mute ++ setting on 14h and is present whenever 14h is configured as an output. 
++ ++Path diagrams ++============= ++ ++1Ah input selection (DIV is the PC Beep divider set on NID 01h):: ++ ++ <Beep generator> <PCBEEP pin> <Headphone jack> ++ | | | ++ +--DIV--+--!DIV--+ {1Ah boost control} ++ | | ++ +--(b == 0)--+--(b != 0)--+ ++ | ++ >1Ah (Beep/Headphone Mic/Line In)< ++ ++Loopback of 1Ah to 21h/14h:: ++ ++ <1Ah (Beep/Headphone Mic/Line In)> ++ | ++ {amplify if L/R} ++ | ++ +-----!h-----+-----S-----+ ++ | | ++ {21h mute control} | ++ | | ++ >21h (Headphone)< >14h (Internal Speaker)< ++ ++Background ++========== ++ ++All Realtek HDA codecs have a vendor-defined widget with node ID 20h which ++provides access to a bank of registers that control various codec functions. ++Registers are read and written via the standard HDA processing coefficient ++verbs (Set/Get Coefficient Index, Set/Get Processing Coefficient). The node is ++named "Realtek Vendor Registers" in public datasheets' verb listings and, ++apart from that, is entirely undocumented. ++ ++This particular register, exposed at coefficient 0x36 and named in commits from ++Realtek, is of note: unlike most registers, which seem to control detailed ++amplifier parameters not in scope of the HDA specification, it controls audio ++routing which could just as easily have been defined using standard HDA mixer ++and selector widgets. ++ ++Specifically, it selects between two sources for the input pin widget with Node ++ID (NID) 1Ah: the widget's signal can come either from an audio jack (on my ++laptop, a Dell XPS 13 9350, it's the headphone jack, but comments in Realtek ++commits indicate that it might be a Line In on some machines) or from the PC ++Beep line (which is itself multiplexed between the codec's internal beep ++generator and external PCBEEP pin, depending on if the beep generator is ++enabled via verbs on NID 01h). Additionally, it can mix (with optional ++amplification) that signal onto the 21h and/or 14h output pins. 
++ ++The register's reset value is 0x3717, corresponding to PC Beep on 1Ah that is ++then amplified and mixed into both the headphones and the speakers. Not only ++does this violate the HDA specification, which says that "[a vendor defined ++beep input pin] connection may be maintained *only* while the Link reset ++(**RST#**) is asserted", it means that we cannot ignore the register if we care ++about the input that 1Ah would otherwise expose or if the PCBEEP trace is ++poorly shielded and picks up chassis noise (both of which are the case on my ++machine). ++ ++Unfortunately, there are lots of ways to get this register configuration wrong. ++Linux, it seems, has gone through most of them. For one, the register resets ++after S3 suspend: judging by existing code, this isn't the case for all vendor ++registers, and it's led to some fixes that improve behavior on cold boot but ++don't last after suspend. Other fixes have successfully switched the 1Ah input ++away from PC Beep but have failed to disable both loopback paths. On my ++machine, this means that the headphone input is amplified and looped back to ++the headphone output, which uses the exact same pins! As you might expect, this ++causes terrible headphone noise, the character of which is controlled by the ++1Ah boost control. (If you've seen instructions online to fix XPS 13 headphone ++noise by changing "Headphone Mic Boost" in ALSA, now you know why.) ++ ++The information here has been obtained through black-box reverse engineering of ++the ALC256 codec's behavior and is not guaranteed to be correct. It likely ++also applies for the ALC255, ALC257, ALC235, and ALC236, since those codecs ++seem to be close relatives of the ALC256. (They all share one initialization ++function.) Additionally, other codecs like the ALC225 and ALC285 also have this ++register, judging by existing fixups in ``patch_realtek.c``, but specific ++data (e.g. 
node IDs, bit positions, pin mappings) for those codecs may differ ++from what I've described here. +diff --git a/Makefile b/Makefile +index 9db2e7f90769..d81fb98737f7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 176 ++SUBLEVEL = 177 + EXTRAVERSION = + NAME = Petit Gorille + +diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c +index b18fb70c5dcf..e13aca6e6d4b 100644 +--- a/arch/arm/net/bpf_jit_32.c ++++ b/arch/arm/net/bpf_jit_32.c +@@ -796,7 +796,11 @@ static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk, + } + + /* Do LSR operation */ +- if (val < 32) { ++ if (val == 0) { ++ /* An immediate value of 0 encodes a shift amount of 32 ++ * for LSR. To shift by 0, don't do anything. ++ */ ++ } else if (val < 32) { + emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx); +@@ -829,7 +833,11 @@ static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk, + } + + /* Do ARSH operation */ +- if (val < 32) { ++ if (val == 0) { ++ /* An immediate value of 0 encodes a shift amount of 32 ++ * for ASR. To shift by 0, don't do anything. 
++ */ ++ } else if (val < 32) { + emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx); +diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c +index a4dc115d7659..092046704cbc 100644 +--- a/arch/arm64/kernel/armv8_deprecated.c ++++ b/arch/arm64/kernel/armv8_deprecated.c +@@ -607,7 +607,7 @@ static struct undef_hook setend_hooks[] = { + }, + { + /* Thumb mode */ +- .instr_mask = 0x0000fff7, ++ .instr_mask = 0xfffffff7, + .instr_val = 0x0000b650, + .pstate_mask = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK), + .pstate_val = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR), +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c +index 05fdae70e9f6..53df84b2a07f 100644 +--- a/arch/arm64/kernel/perf_event.c ++++ b/arch/arm64/kernel/perf_event.c +@@ -262,12 +262,6 @@ static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR, +- +- [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD, +- [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR, +- +- [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD, +- [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR, + }; + + static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] +diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c +index ee5ce03c9315..2ff327651ebe 100644 +--- a/arch/arm64/kernel/process.c ++++ b/arch/arm64/kernel/process.c +@@ -186,11 +186,9 @@ void __show_regs(struct pt_regs *regs) + } + + show_regs_print_info(KERN_DEFAULT); +- print_symbol("PC is at %s\n", instruction_pointer(regs)); +- print_symbol("LR is at %s\n", lr); +- printk("pc : [<%016llx>] lr 
: [<%016llx>] pstate: %08llx\n", +- regs->pc, lr, regs->pstate); +- printk("sp : %016llx\n", sp); ++ print_symbol("pc : %s\n", regs->pc); ++ print_symbol("lr : %s\n", lr); ++ printk("sp : %016llx pstate : %08llx\n", sp, regs->pstate); + + i = top_reg; + +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c +index 5ae9c86c30d1..b30d23431fe1 100644 +--- a/arch/arm64/kernel/traps.c ++++ b/arch/arm64/kernel/traps.c +@@ -59,55 +59,9 @@ static const char *handler[]= { + + int show_unhandled_signals = 0; + +-/* +- * Dump out the contents of some kernel memory nicely... +- */ +-static void dump_mem(const char *lvl, const char *str, unsigned long bottom, +- unsigned long top) +-{ +- unsigned long first; +- mm_segment_t fs; +- int i; +- +- /* +- * We need to switch to kernel mode so that we can use __get_user +- * to safely read from kernel space. +- */ +- fs = get_fs(); +- set_fs(KERNEL_DS); +- +- printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top); +- +- for (first = bottom & ~31; first < top; first += 32) { +- unsigned long p; +- char str[sizeof(" 12345678") * 8 + 1]; +- +- memset(str, ' ', sizeof(str)); +- str[sizeof(str) - 1] = '\0'; +- +- for (p = first, i = 0; i < (32 / 8) +- && p < top; i++, p += 8) { +- if (p >= bottom && p < top) { +- unsigned long val; +- +- if (__get_user(val, (unsigned long *)p) == 0) +- sprintf(str + i * 17, " %016lx", val); +- else +- sprintf(str + i * 17, " ????????????????"); +- } +- } +- printk("%s%04lx:%s\n", lvl, first & 0xffff, str); +- } +- +- set_fs(fs); +-} +- + static void dump_backtrace_entry(unsigned long where) + { +- /* +- * Note that 'where' can have a physical address, but it's not handled. 
+- */ +- print_ip_sym(where); ++ printk(" %pS\n", (void *)where); + } + + static void __dump_instr(const char *lvl, struct pt_regs *regs) +@@ -177,10 +131,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) + #endif + + printk("Call trace:\n"); +- while (1) { +- unsigned long stack; +- int ret; +- ++ do { + /* skip until specified stack frame */ + if (!skip) { + dump_backtrace_entry(frame.pc); +@@ -195,17 +146,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) + */ + dump_backtrace_entry(regs->pc); + } +- ret = unwind_frame(tsk, &frame); +- if (ret < 0) +- break; +- if (in_entry_text(frame.pc)) { +- stack = frame.fp - offsetof(struct pt_regs, stackframe); +- +- if (on_accessible_stack(tsk, stack)) +- dump_mem("", "Exception stack", stack, +- stack + sizeof(struct pt_regs)); +- } +- } ++ } while (!unwind_frame(tsk, &frame)); + + put_task_stack(tsk); + } +diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c +index b3aec101a65d..a27b3d70393f 100644 +--- a/arch/mips/cavium-octeon/octeon-irq.c ++++ b/arch/mips/cavium-octeon/octeon-irq.c +@@ -2199,6 +2199,9 @@ static int octeon_irq_cib_map(struct irq_domain *d, + } + + cd = kzalloc(sizeof(*cd), GFP_KERNEL); ++ if (!cd) ++ return -ENOMEM; ++ + cd->host_data = host_data; + cd->bit = hw; + +diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h +index 197ced1eaaa0..4a16115b47eb 100644 +--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h ++++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h +@@ -108,6 +108,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, + extern int hash__has_transparent_hugepage(void); + #endif + ++static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) ++{ ++ BUG(); ++ return pmd; ++} ++ + #endif /* !__ASSEMBLY__ */ + + #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */ +diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h 
b/arch/powerpc/include/asm/book3s/64/hash-64k.h +index 8d40cf03cb67..2194866225f8 100644 +--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h ++++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h +@@ -181,7 +181,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, + */ + static inline int hash__pmd_trans_huge(pmd_t pmd) + { +- return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) == ++ return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) == + (_PAGE_PTE | H_PAGE_THP_HUGE)); + } + +@@ -209,6 +209,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp); + extern int hash__has_transparent_hugepage(void); + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ ++ ++static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) ++{ ++ return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)); ++} ++ + #endif /* __ASSEMBLY__ */ + + #endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */ +diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h +index 4dd13b503dbb..bcb79a96a6c8 100644 +--- a/arch/powerpc/include/asm/book3s/64/pgtable.h ++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h +@@ -1179,7 +1179,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm); + + static inline pmd_t pmd_mkdevmap(pmd_t pmd) + { +- return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); ++ if (radix_enabled()) ++ return radix__pmd_mkdevmap(pmd); ++ return hash__pmd_mkdevmap(pmd); + } + + static inline int pmd_devmap(pmd_t pmd) +diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h +index 19c44e1495ae..7a1fc49aaf99 100644 +--- a/arch/powerpc/include/asm/book3s/64/radix.h ++++ b/arch/powerpc/include/asm/book3s/64/radix.h +@@ -289,6 +289,11 @@ extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm, + extern int radix__has_transparent_hugepage(void); + #endif + ++static inline pmd_t 
radix__pmd_mkdevmap(pmd_t pmd) ++{ ++ return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); ++} ++ + extern int __meminit radix__vmemmap_create_mapping(unsigned long start, + unsigned long page_size, + unsigned long phys); +diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h +index 279d03a1eec6..6941fe202bc8 100644 +--- a/arch/powerpc/include/asm/setjmp.h ++++ b/arch/powerpc/include/asm/setjmp.h +@@ -12,7 +12,9 @@ + + #define JMP_BUF_LEN 23 + +-extern long setjmp(long *); +-extern void longjmp(long *, long); ++typedef long jmp_buf[JMP_BUF_LEN]; ++ ++extern int setjmp(jmp_buf env) __attribute__((returns_twice)); ++extern void longjmp(jmp_buf env, int val) __attribute__((noreturn)); + + #endif /* _ASM_POWERPC_SETJMP_H */ +diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile +index 5607ce67d178..681f966b7211 100644 +--- a/arch/powerpc/kernel/Makefile ++++ b/arch/powerpc/kernel/Makefile +@@ -5,9 +5,6 @@ + + CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' + +-# Avoid clang warnings around longjmp/setjmp declarations +-CFLAGS_crash.o += -ffreestanding +- + subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror + + ifeq ($(CONFIG_PPC64),y) +diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S +index 74fc20431082..01b823bdb49c 100644 +--- a/arch/powerpc/kernel/idle_book3s.S ++++ b/arch/powerpc/kernel/idle_book3s.S +@@ -163,8 +163,11 @@ core_idle_lock_held: + bne- core_idle_lock_held + blr + +-/* Reuse an unused pt_regs slot for IAMR */ ++/* Reuse some unused pt_regs slots for AMR/IAMR/UAMOR/UAMOR */ ++#define PNV_POWERSAVE_AMR _TRAP + #define PNV_POWERSAVE_IAMR _DAR ++#define PNV_POWERSAVE_UAMOR _DSISR ++#define PNV_POWERSAVE_AMOR RESULT + + /* + * Pass requested state in r3: +@@ -198,8 +201,16 @@ pnv_powersave_common: + SAVE_NVGPRS(r1) + + BEGIN_FTR_SECTION ++ mfspr r4, SPRN_AMR + mfspr r5, SPRN_IAMR ++ mfspr r6, SPRN_UAMOR ++ std r4, PNV_POWERSAVE_AMR(r1) + std r5, PNV_POWERSAVE_IAMR(r1) ++ 
std r6, PNV_POWERSAVE_UAMOR(r1) ++BEGIN_FTR_SECTION_NESTED(42) ++ mfspr r7, SPRN_AMOR ++ std r7, PNV_POWERSAVE_AMOR(r1) ++END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + + mfcr r5 +@@ -951,12 +962,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) + REST_GPR(2, r1) + + BEGIN_FTR_SECTION +- /* IAMR was saved in pnv_powersave_common() */ ++ /* These regs were saved in pnv_powersave_common() */ ++ ld r4, PNV_POWERSAVE_AMR(r1) + ld r5, PNV_POWERSAVE_IAMR(r1) ++ ld r6, PNV_POWERSAVE_UAMOR(r1) ++ mtspr SPRN_AMR, r4 + mtspr SPRN_IAMR, r5 ++ mtspr SPRN_UAMOR, r6 ++BEGIN_FTR_SECTION_NESTED(42) ++ ld r7, PNV_POWERSAVE_AMOR(r1) ++ mtspr SPRN_AMOR, r7 ++END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) + /* +- * We don't need an isync here because the upcoming mtmsrd is +- * execution synchronizing. ++ * We don't need an isync here after restoring IAMR because the upcoming ++ * mtmsrd is execution synchronizing. + */ + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + +diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c +index 10b46b35c059..07d3f3b40246 100644 +--- a/arch/powerpc/kernel/kprobes.c ++++ b/arch/powerpc/kernel/kprobes.c +@@ -279,6 +279,9 @@ int kprobe_handler(struct pt_regs *regs) + if (user_mode(regs)) + return 0; + ++ if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)) ++ return 0; ++ + /* + * We don't want to be preempted for the entire + * duration of kprobe processing +diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c +index b75bf6e74209..3e8edb1387cc 100644 +--- a/arch/powerpc/kernel/signal_64.c ++++ b/arch/powerpc/kernel/signal_64.c +@@ -469,8 +469,10 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, + err |= __get_user(tsk->thread.ckpt_regs.ccr, + &sc->gp_regs[PT_CCR]); + ++ /* Don't allow userspace to set the trap value */ ++ regs->trap = 0; ++ + /* These regs are not checkpointed; they can go in 'regs'. 
*/ +- err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); + err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); + err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); + err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); +diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S +index 048b8e9f4492..63964af9a162 100644 +--- a/arch/powerpc/mm/tlb_nohash_low.S ++++ b/arch/powerpc/mm/tlb_nohash_low.S +@@ -400,7 +400,7 @@ _GLOBAL(set_context) + * extern void loadcam_entry(unsigned int index) + * + * Load TLBCAM[index] entry in to the L2 CAM MMU +- * Must preserve r7, r8, r9, and r10 ++ * Must preserve r7, r8, r9, r10 and r11 + */ + _GLOBAL(loadcam_entry) + mflr r5 +@@ -436,6 +436,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) + */ + _GLOBAL(loadcam_multi) + mflr r8 ++ /* Don't switch to AS=1 if already there */ ++ mfmsr r11 ++ andi. r11,r11,MSR_IS ++ bne 10f + + /* + * Set up temporary TLB entry that is the same as what we're +@@ -461,6 +465,7 @@ _GLOBAL(loadcam_multi) + mtmsr r6 + isync + ++10: + mr r9,r3 + add r10,r3,r4 + 2: bl loadcam_entry +@@ -469,6 +474,10 @@ _GLOBAL(loadcam_multi) + mr r3,r9 + blt 2b + ++ /* Don't return to AS=0 if we were in AS=1 at function start */ ++ andi. r11,r11,MSR_IS ++ bne 3f ++ + /* Return to AS=0 and clear the temporary entry */ + mfmsr r6 + rlwinm. 
r6,r6,0,~(MSR_IS|MSR_DS) +@@ -484,6 +493,7 @@ _GLOBAL(loadcam_multi) + tlbwe + isync + ++3: + mtlr r8 + blr + #endif +diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c +index b7f937563827..d1fee2d35b49 100644 +--- a/arch/powerpc/platforms/maple/setup.c ++++ b/arch/powerpc/platforms/maple/setup.c +@@ -299,23 +299,6 @@ static int __init maple_probe(void) + return 1; + } + +-define_machine(maple) { +- .name = "Maple", +- .probe = maple_probe, +- .setup_arch = maple_setup_arch, +- .init_IRQ = maple_init_IRQ, +- .pci_irq_fixup = maple_pci_irq_fixup, +- .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, +- .restart = maple_restart, +- .halt = maple_halt, +- .get_boot_time = maple_get_boot_time, +- .set_rtc_time = maple_set_rtc_time, +- .get_rtc_time = maple_get_rtc_time, +- .calibrate_decr = generic_calibrate_decr, +- .progress = maple_progress, +- .power_save = power4_idle, +-}; +- + #ifdef CONFIG_EDAC + /* + * Register a platform device for CPC925 memory controller on +@@ -372,3 +355,20 @@ static int __init maple_cpc925_edac_setup(void) + } + machine_device_initcall(maple, maple_cpc925_edac_setup); + #endif ++ ++define_machine(maple) { ++ .name = "Maple", ++ .probe = maple_probe, ++ .setup_arch = maple_setup_arch, ++ .init_IRQ = maple_init_IRQ, ++ .pci_irq_fixup = maple_pci_irq_fixup, ++ .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, ++ .restart = maple_restart, ++ .halt = maple_halt, ++ .get_boot_time = maple_get_boot_time, ++ .set_rtc_time = maple_set_rtc_time, ++ .get_rtc_time = maple_get_rtc_time, ++ .calibrate_decr = generic_calibrate_decr, ++ .progress = maple_progress, ++ .power_save = power4_idle, ++}; +diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c +index c0ae3847b8db..215b14a373cb 100644 +--- a/arch/powerpc/platforms/pseries/lpar.c ++++ b/arch/powerpc/platforms/pseries/lpar.c +@@ -1060,7 +1060,7 @@ static int __init vpa_debugfs_init(void) + { + char name[16]; + 
long i; +- static struct dentry *vpa_dir; ++ struct dentry *vpa_dir; + + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + return 0; +diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c +index a820370883d9..b7ae5a027714 100644 +--- a/arch/powerpc/sysdev/xive/common.c ++++ b/arch/powerpc/sysdev/xive/common.c +@@ -72,13 +72,6 @@ static u32 xive_ipi_irq; + /* Xive state for each CPU */ + static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu); + +-/* +- * A "disabled" interrupt should never fire, to catch problems +- * we set its logical number to this +- */ +-#define XIVE_BAD_IRQ 0x7fffffff +-#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1) +- + /* An invalid CPU target */ + #define XIVE_INVALID_TARGET (-1) + +@@ -1073,7 +1066,7 @@ static int xive_setup_cpu_ipi(unsigned int cpu) + xc = per_cpu(xive_cpu, cpu); + + /* Check if we are already setup */ +- if (xc->hw_ipi != 0) ++ if (xc->hw_ipi != XIVE_BAD_IRQ) + return 0; + + /* Grab an IPI from the backend, this will populate xc->hw_ipi */ +@@ -1110,7 +1103,7 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) + /* Disable the IPI and free the IRQ data */ + + /* Already cleaned up ? 
*/ +- if (xc->hw_ipi == 0) ++ if (xc->hw_ipi == XIVE_BAD_IRQ) + return; + + /* Mask the IPI */ +@@ -1266,6 +1259,7 @@ static int xive_prepare_cpu(unsigned int cpu) + if (np) + xc->chip_id = of_get_ibm_chip_id(np); + of_node_put(np); ++ xc->hw_ipi = XIVE_BAD_IRQ; + + per_cpu(xive_cpu, cpu) = xc; + } +diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c +index aac61374afeb..30cdcbfa1c04 100644 +--- a/arch/powerpc/sysdev/xive/native.c ++++ b/arch/powerpc/sysdev/xive/native.c +@@ -310,7 +310,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc) + s64 rc; + + /* Free the IPI */ +- if (!xc->hw_ipi) ++ if (xc->hw_ipi == XIVE_BAD_IRQ) + return; + for (;;) { + rc = opal_xive_free_irq(xc->hw_ipi); +@@ -318,7 +318,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc) + msleep(1); + continue; + } +- xc->hw_ipi = 0; ++ xc->hw_ipi = XIVE_BAD_IRQ; + break; + } + } +diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c +index 7fc41bf30fd5..10235098a726 100644 +--- a/arch/powerpc/sysdev/xive/spapr.c ++++ b/arch/powerpc/sysdev/xive/spapr.c +@@ -443,11 +443,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc) + + static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) + { +- if (!xc->hw_ipi) ++ if (xc->hw_ipi == XIVE_BAD_IRQ) + return; + + xive_irq_bitmap_free(xc->hw_ipi); +- xc->hw_ipi = 0; ++ xc->hw_ipi = XIVE_BAD_IRQ; + } + #endif /* CONFIG_SMP */ + +diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h +index f34abed0c05f..48808dbb25dc 100644 +--- a/arch/powerpc/sysdev/xive/xive-internal.h ++++ b/arch/powerpc/sysdev/xive/xive-internal.h +@@ -9,6 +9,13 @@ + #ifndef __XIVE_INTERNAL_H + #define __XIVE_INTERNAL_H + ++/* ++ * A "disabled" interrupt should never fire, to catch problems ++ * we set its logical number to this ++ */ ++#define XIVE_BAD_IRQ 0x7fffffff ++#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1) ++ + 
/* Each CPU carry one of these with various per-CPU state */ + struct xive_cpu { + #ifdef CONFIG_SMP +diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile +index a60c44b4a3e5..93974b0a5a99 100644 +--- a/arch/powerpc/xmon/Makefile ++++ b/arch/powerpc/xmon/Makefile +@@ -1,9 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 + # Makefile for xmon + +-# Avoid clang warnings around longjmp/setjmp declarations +-subdir-ccflags-y := -ffreestanding +- + subdir-ccflags-$(CONFIG_PPC_WERROR) += -Werror + + GCOV_PROFILE := n +diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c +index 53a5316cc4b7..35c842aa8705 100644 +--- a/arch/s390/kernel/diag.c ++++ b/arch/s390/kernel/diag.c +@@ -79,7 +79,7 @@ static int show_diag_stat(struct seq_file *m, void *v) + + static void *show_diag_stat_start(struct seq_file *m, loff_t *pos) + { +- return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; ++ return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL; + } + + static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos) +diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c +index 6fe2e1875058..675d4be0c2b7 100644 +--- a/arch/s390/kernel/processor.c ++++ b/arch/s390/kernel/processor.c +@@ -157,8 +157,9 @@ static void show_cpu_mhz(struct seq_file *m, unsigned long n) + static int show_cpuinfo(struct seq_file *m, void *v) + { + unsigned long n = (unsigned long) v - 1; ++ unsigned long first = cpumask_first(cpu_online_mask); + +- if (!n) ++ if (n == first) + show_cpu_summary(m, v); + if (!machine_has_cpu_mhz) + return 0; +@@ -171,6 +172,8 @@ static inline void *c_update(loff_t *pos) + { + if (*pos) + *pos = cpumask_next(*pos - 1, cpu_online_mask); ++ else ++ *pos = cpumask_first(cpu_online_mask); + return *pos < nr_cpu_ids ? 
(void *)*pos + 1 : NULL; + } + +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c +index 061906f98dc5..0120383219c0 100644 +--- a/arch/s390/kvm/vsie.c ++++ b/arch/s390/kvm/vsie.c +@@ -1027,6 +1027,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) + scb_s->iprcc = PGM_ADDRESSING; + scb_s->pgmilc = 4; + scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4); ++ rc = 1; + } + return rc; + } +diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c +index ec9292917d3f..a29d2e88b00e 100644 +--- a/arch/s390/mm/gmap.c ++++ b/arch/s390/mm/gmap.c +@@ -762,14 +762,18 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start, + static inline unsigned long *gmap_table_walk(struct gmap *gmap, + unsigned long gaddr, int level) + { ++ const int asce_type = gmap->asce & _ASCE_TYPE_MASK; + unsigned long *table; + + if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4)) + return NULL; + if (gmap_is_shadow(gmap) && gmap->removed) + return NULL; +- if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11))) ++ ++ if (asce_type != _ASCE_TYPE_REGION1 && ++ gaddr & (-1UL << (31 + (asce_type >> 2) * 11))) + return NULL; ++ + table = gmap->table; + switch (gmap->asce & _ASCE_TYPE_MASK) { + case _ASCE_TYPE_REGION1: +@@ -1683,6 +1687,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, + goto out_free; + } else if (*table & _REGION_ENTRY_ORIGIN) { + rc = -EAGAIN; /* Race with shadow */ ++ goto out_free; + } + crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY); + /* mark as invalid as long as the parent table is not protected */ +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index 37380c0d5999..01d628ea3402 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -106,7 +106,7 @@ ENTRY(startup_32) + notl %eax + andl %eax, %ebx + cmpl $LOAD_PHYSICAL_ADDR, %ebx +- jge 1f ++ jae 1f + #endif + movl $LOAD_PHYSICAL_ADDR, %ebx + 1: +diff --git 
a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S +index 39fdede523f2..a25127916e67 100644 +--- a/arch/x86/boot/compressed/head_64.S ++++ b/arch/x86/boot/compressed/head_64.S +@@ -105,7 +105,7 @@ ENTRY(startup_32) + notl %eax + andl %eax, %ebx + cmpl $LOAD_PHYSICAL_ADDR, %ebx +- jge 1f ++ jae 1f + #endif + movl $LOAD_PHYSICAL_ADDR, %ebx + 1: +@@ -280,7 +280,7 @@ ENTRY(startup_64) + notq %rax + andq %rax, %rbp + cmpq $LOAD_PHYSICAL_ADDR, %rbp +- jge 1f ++ jae 1f + #endif + movq $LOAD_PHYSICAL_ADDR, %rbp + 1: +diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S +index 49adabd94f88..c19974a49378 100644 +--- a/arch/x86/entry/entry_32.S ++++ b/arch/x86/entry/entry_32.S +@@ -1057,6 +1057,7 @@ ENTRY(int3) + END(int3) + + ENTRY(general_protection) ++ ASM_CLAC + pushl $do_general_protection + jmp common_exception + END(general_protection) +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index b4bef819d5d5..157cfaf1064c 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -205,7 +205,7 @@ + #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ + #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ + #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ +- ++#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ + #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ + #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ + #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 2cdf654ed132..9529fe69e1d9 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -1032,7 +1032,7 @@ struct kvm_x86_ops { + bool (*mpx_supported)(void); + bool 
(*xsaves_supported)(void); + +- int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); ++ int (*check_nested_events)(struct kvm_vcpu *vcpu); + + void (*sched_in)(struct kvm_vcpu *kvm, int cpu); + +diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h +index 209492849566..5c524d4f71cd 100644 +--- a/arch/x86/include/asm/microcode_amd.h ++++ b/arch/x86/include/asm/microcode_amd.h +@@ -41,7 +41,7 @@ struct microcode_amd { + unsigned int mpb[0]; + }; + +-#define PATCH_MAX_SIZE PAGE_SIZE ++#define PATCH_MAX_SIZE (3 * PAGE_SIZE) + + #ifdef CONFIG_MICROCODE_AMD + extern void __init load_ucode_amd_bsp(unsigned int family); +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h +index 6a4b1a54ff47..98a337e3835d 100644 +--- a/arch/x86/include/asm/pgtable.h ++++ b/arch/x86/include/asm/pgtable.h +@@ -588,12 +588,15 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) + return __pmd(val); + } + +-/* mprotect needs to preserve PAT bits when updating vm_page_prot */ ++/* ++ * mprotect needs to preserve PAT and encryption bits when updating ++ * vm_page_prot ++ */ + #define pgprot_modify pgprot_modify + static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) + { + pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK; +- pgprotval_t addbits = pgprot_val(newprot); ++ pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK; + return __pgprot(preservebits | addbits); + } + +diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h +index 85f8279c885a..e6c870c24065 100644 +--- a/arch/x86/include/asm/pgtable_types.h ++++ b/arch/x86/include/asm/pgtable_types.h +@@ -124,7 +124,7 @@ + */ + #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ + _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ +- _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) ++ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC) + #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) + + /* 
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c +index 6dda3595acf8..40d7072be709 100644 +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -1738,7 +1738,7 @@ int __acpi_acquire_global_lock(unsigned int *lock) + new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); + val = cmpxchg(lock, old, new); + } while (unlikely (val != old)); +- return (new < 3) ? -1 : 0; ++ return ((new & 0x3) < 3) ? -1 : 0; + } + + int __acpi_release_global_lock(unsigned int *lock) +diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c +index dde437f5d14f..596e7640d895 100644 +--- a/arch/x86/kernel/acpi/cstate.c ++++ b/arch/x86/kernel/acpi/cstate.c +@@ -133,7 +133,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, + + /* Make sure we are running on right CPU */ + +- retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx); ++ retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx, ++ false); + if (retval == 0) { + /* Use the hint in CST */ + percpu_entry->states[cx->index].eax = cx->address; +diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c +index 3f731d7f04bf..07742b69d914 100644 +--- a/arch/x86/kernel/cpu/intel_rdt.c ++++ b/arch/x86/kernel/cpu/intel_rdt.c +@@ -135,6 +135,40 @@ struct rdt_resource rdt_resources_all[] = { + .format_str = "%d=%0*x", + .fflags = RFTYPE_RES_CACHE, + }, ++ [RDT_RESOURCE_L2DATA] = ++ { ++ .rid = RDT_RESOURCE_L2DATA, ++ .name = "L2DATA", ++ .domains = domain_init(RDT_RESOURCE_L2DATA), ++ .msr_base = IA32_L2_CBM_BASE, ++ .msr_update = cat_wrmsr, ++ .cache_level = 2, ++ .cache = { ++ .min_cbm_bits = 1, ++ .cbm_idx_mult = 2, ++ .cbm_idx_offset = 0, ++ }, ++ .parse_ctrlval = parse_cbm, ++ .format_str = "%d=%0*x", ++ .fflags = RFTYPE_RES_CACHE, ++ }, ++ [RDT_RESOURCE_L2CODE] = ++ { ++ .rid = RDT_RESOURCE_L2CODE, ++ .name = "L2CODE", ++ .domains = domain_init(RDT_RESOURCE_L2CODE), ++ .msr_base = IA32_L2_CBM_BASE, ++ .msr_update = cat_wrmsr, ++ 
.cache_level = 2, ++ .cache = { ++ .min_cbm_bits = 1, ++ .cbm_idx_mult = 2, ++ .cbm_idx_offset = 1, ++ }, ++ .parse_ctrlval = parse_cbm, ++ .format_str = "%d=%0*x", ++ .fflags = RFTYPE_RES_CACHE, ++ }, + [RDT_RESOURCE_MBA] = + { + .rid = RDT_RESOURCE_MBA, +@@ -259,15 +293,15 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r) + r->alloc_enabled = true; + } + +-static void rdt_get_cdp_l3_config(int type) ++static void rdt_get_cdp_config(int level, int type) + { +- struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3]; ++ struct rdt_resource *r_l = &rdt_resources_all[level]; + struct rdt_resource *r = &rdt_resources_all[type]; + +- r->num_closid = r_l3->num_closid / 2; +- r->cache.cbm_len = r_l3->cache.cbm_len; +- r->default_ctrl = r_l3->default_ctrl; +- r->cache.shareable_bits = r_l3->cache.shareable_bits; ++ r->num_closid = r_l->num_closid / 2; ++ r->cache.cbm_len = r_l->cache.cbm_len; ++ r->default_ctrl = r_l->default_ctrl; ++ r->cache.shareable_bits = r_l->cache.shareable_bits; + r->data_width = (r->cache.cbm_len + 3) / 4; + r->alloc_capable = true; + /* +@@ -277,6 +311,18 @@ static void rdt_get_cdp_l3_config(int type) + r->alloc_enabled = false; + } + ++static void rdt_get_cdp_l3_config(void) ++{ ++ rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA); ++ rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE); ++} ++ ++static void rdt_get_cdp_l2_config(void) ++{ ++ rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA); ++ rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE); ++} ++ + static int get_cache_id(int cpu, int level) + { + struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); +@@ -486,6 +532,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) + d->id = id; + cpumask_set_cpu(cpu, &d->cpu_mask); + ++ rdt_domain_reconfigure_cdp(r); ++ + if (r->alloc_capable && domain_setup_ctrlval(r, d)) { + kfree(d); + return; +@@ -729,15 +777,15 @@ static __init bool get_rdt_alloc_resources(void) + + if 
(rdt_cpu_has(X86_FEATURE_CAT_L3)) { + rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]); +- if (rdt_cpu_has(X86_FEATURE_CDP_L3)) { +- rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA); +- rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE); +- } ++ if (rdt_cpu_has(X86_FEATURE_CDP_L3)) ++ rdt_get_cdp_l3_config(); + ret = true; + } + if (rdt_cpu_has(X86_FEATURE_CAT_L2)) { + /* CPUID 0x10.2 fields are same format at 0x10.1 */ + rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]); ++ if (rdt_cpu_has(X86_FEATURE_CDP_L2)) ++ rdt_get_cdp_l2_config(); + ret = true; + } + +diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h +index a43a72d8e88e..b43a786ec15f 100644 +--- a/arch/x86/kernel/cpu/intel_rdt.h ++++ b/arch/x86/kernel/cpu/intel_rdt.h +@@ -7,12 +7,15 @@ + #include <linux/jump_label.h> + + #define IA32_L3_QOS_CFG 0xc81 ++#define IA32_L2_QOS_CFG 0xc82 + #define IA32_L3_CBM_BASE 0xc90 + #define IA32_L2_CBM_BASE 0xd10 + #define IA32_MBA_THRTL_BASE 0xd50 + + #define L3_QOS_CDP_ENABLE 0x01ULL + ++#define L2_QOS_CDP_ENABLE 0x01ULL ++ + /* + * Event IDs are used to program IA32_QM_EVTSEL before reading event + * counter from IA32_QM_CTR +@@ -354,6 +357,8 @@ enum { + RDT_RESOURCE_L3DATA, + RDT_RESOURCE_L3CODE, + RDT_RESOURCE_L2, ++ RDT_RESOURCE_L2DATA, ++ RDT_RESOURCE_L2CODE, + RDT_RESOURCE_MBA, + + /* Must be the last */ +@@ -437,5 +442,6 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); + void cqm_handle_limbo(struct work_struct *work); + bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); + void __check_limbo(struct rdt_domain *d, bool force_free); ++void rdt_domain_reconfigure_cdp(struct rdt_resource *r); + + #endif /* _ASM_X86_INTEL_RDT_H */ +diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +index 0ec30b2384c0..60c63b23e3ba 100644 +--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c ++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +@@ -922,6 +922,7 
@@ out_destroy: + kernfs_remove(kn); + return ret; + } ++ + static void l3_qos_cfg_update(void *arg) + { + bool *enable = arg; +@@ -929,8 +930,17 @@ static void l3_qos_cfg_update(void *arg) + wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); + } + +-static int set_l3_qos_cfg(struct rdt_resource *r, bool enable) ++static void l2_qos_cfg_update(void *arg) ++{ ++ bool *enable = arg; ++ ++ wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); ++} ++ ++static int set_cache_qos_cfg(int level, bool enable) + { ++ void (*update)(void *arg); ++ struct rdt_resource *r_l; + cpumask_var_t cpu_mask; + struct rdt_domain *d; + int cpu; +@@ -938,16 +948,24 @@ static int set_l3_qos_cfg(struct rdt_resource *r, bool enable) + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; + +- list_for_each_entry(d, &r->domains, list) { ++ if (level == RDT_RESOURCE_L3) ++ update = l3_qos_cfg_update; ++ else if (level == RDT_RESOURCE_L2) ++ update = l2_qos_cfg_update; ++ else ++ return -EINVAL; ++ ++ r_l = &rdt_resources_all[level]; ++ list_for_each_entry(d, &r_l->domains, list) { + /* Pick one CPU from each domain instance to update MSR */ + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); + } + cpu = get_cpu(); + /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */ + if (cpumask_test_cpu(cpu, cpu_mask)) +- l3_qos_cfg_update(&enable); ++ update(&enable); + /* Update QOS_CFG MSR on all other cpus in cpu_mask. 
*/ +- smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1); ++ smp_call_function_many(cpu_mask, update, &enable, 1); + put_cpu(); + + free_cpumask_var(cpu_mask); +@@ -955,52 +973,99 @@ static int set_l3_qos_cfg(struct rdt_resource *r, bool enable) + return 0; + } + +-static int cdp_enable(void) ++static int cdp_enable(int level, int data_type, int code_type) + { +- struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA]; +- struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE]; +- struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3]; ++ struct rdt_resource *r_ldata = &rdt_resources_all[data_type]; ++ struct rdt_resource *r_lcode = &rdt_resources_all[code_type]; ++ struct rdt_resource *r_l = &rdt_resources_all[level]; + int ret; + +- if (!r_l3->alloc_capable || !r_l3data->alloc_capable || +- !r_l3code->alloc_capable) ++ if (!r_l->alloc_capable || !r_ldata->alloc_capable || ++ !r_lcode->alloc_capable) + return -EINVAL; + +- ret = set_l3_qos_cfg(r_l3, true); ++ ret = set_cache_qos_cfg(level, true); + if (!ret) { +- r_l3->alloc_enabled = false; +- r_l3data->alloc_enabled = true; +- r_l3code->alloc_enabled = true; ++ r_l->alloc_enabled = false; ++ r_ldata->alloc_enabled = true; ++ r_lcode->alloc_enabled = true; + } + return ret; + } + +-static void cdp_disable(void) ++static int cdpl3_enable(void) + { +- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3]; ++ return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, ++ RDT_RESOURCE_L3CODE); ++} ++ ++static int cdpl2_enable(void) ++{ ++ return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, ++ RDT_RESOURCE_L2CODE); ++} ++ ++static void cdp_disable(int level, int data_type, int code_type) ++{ ++ struct rdt_resource *r = &rdt_resources_all[level]; + + r->alloc_enabled = r->alloc_capable; + +- if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) { +- rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled = false; +- 
rdt_resources_all[RDT_RESOURCE_L3CODE].alloc_enabled = false; +- set_l3_qos_cfg(r, false); ++ if (rdt_resources_all[data_type].alloc_enabled) { ++ rdt_resources_all[data_type].alloc_enabled = false; ++ rdt_resources_all[code_type].alloc_enabled = false; ++ set_cache_qos_cfg(level, false); + } + } + ++static void cdpl3_disable(void) ++{ ++ cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE); ++} ++ ++static void cdpl2_disable(void) ++{ ++ cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE); ++} ++ ++static void cdp_disable_all(void) ++{ ++ if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) ++ cdpl3_disable(); ++ if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) ++ cdpl2_disable(); ++} ++ + static int parse_rdtgroupfs_options(char *data) + { + char *token, *o = data; + int ret = 0; + + while ((token = strsep(&o, ",")) != NULL) { +- if (!*token) +- return -EINVAL; ++ if (!*token) { ++ ret = -EINVAL; ++ goto out; ++ } + +- if (!strcmp(token, "cdp")) +- ret = cdp_enable(); ++ if (!strcmp(token, "cdp")) { ++ ret = cdpl3_enable(); ++ if (ret) ++ goto out; ++ } else if (!strcmp(token, "cdpl2")) { ++ ret = cdpl2_enable(); ++ if (ret) ++ goto out; ++ } else { ++ ret = -EINVAL; ++ goto out; ++ } + } + ++ return 0; ++ ++out: ++ pr_err("Invalid mount option \"%s\"\n", token); ++ + return ret; + } + +@@ -1155,7 +1220,7 @@ out_mongrp: + out_info: + kernfs_remove(kn_info); + out_cdp: +- cdp_disable(); ++ cdp_disable_all(); + out: + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +@@ -1322,7 +1387,7 @@ static void rdt_kill_sb(struct super_block *sb) + /*Put everything back to default values. 
*/ + for_each_alloc_enabled_rdt_resource(r) + reset_all_ctrls(r); +- cdp_disable(); ++ cdp_disable_all(); + rmdir_all_sub(); + static_branch_disable_cpuslocked(&rdt_alloc_enable_key); + static_branch_disable_cpuslocked(&rdt_mon_enable_key); +@@ -1692,6 +1757,19 @@ out_unlock: + return ret; + } + ++/* Restore the qos cfg state when a domain comes online */ ++void rdt_domain_reconfigure_cdp(struct rdt_resource *r) ++{ ++ if (!r->alloc_capable) ++ return; ++ ++ if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA]) ++ l2_qos_cfg_update(&r->alloc_enabled); ++ ++ if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA]) ++ l3_qos_cfg_update(&r->alloc_enabled); ++} ++ + /* + * We allow creating mon groups only with in a directory called "mon_groups" + * which is present in every ctrl_mon group. Check if this is a valid +@@ -1840,7 +1918,8 @@ static int rdtgroup_rmdir(struct kernfs_node *kn) + * If the rdtgroup is a mon group and parent directory + * is a valid "mon_groups" directory, remove the mon group. 
+ */ +- if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) ++ if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && ++ rdtgrp != &rdtgroup_default) + ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask); + else if (rdtgrp->type == RDTMON_GROUP && + is_mon_groups(parent_kn, kn->name)) +diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c +index ed7ce5184a77..0b9c7150cb23 100644 +--- a/arch/x86/kernel/cpu/scattered.c ++++ b/arch/x86/kernel/cpu/scattered.c +@@ -28,6 +28,7 @@ static const struct cpuid_bit cpuid_bits[] = { + { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, + { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 }, + { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 }, ++ { X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 }, + { X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 }, + { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 }, + { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index 1152afad524f..6ec1cfd0addd 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -494,7 +494,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, + entry->edx |= F(SPEC_CTRL); + if (boot_cpu_has(X86_FEATURE_STIBP)) + entry->edx |= F(INTEL_STIBP); +- if (boot_cpu_has(X86_FEATURE_SSBD)) ++ if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || ++ boot_cpu_has(X86_FEATURE_AMD_SSBD)) + entry->edx |= F(SPEC_CTRL_SSBD); + /* + * We emulate ARCH_CAPABILITIES in software even +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index f85680b86524..f8e3f3c48283 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1674,43 +1674,15 @@ static void vmcs_load(struct vmcs *vmcs) + } + + #ifdef CONFIG_KEXEC_CORE +-/* +- * This bitmap is used to indicate whether the vmclear +- * operation is enabled on all cpus. All disabled by +- * default. 
+- */ +-static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; +- +-static inline void crash_enable_local_vmclear(int cpu) +-{ +- cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); +-} +- +-static inline void crash_disable_local_vmclear(int cpu) +-{ +- cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); +-} +- +-static inline int crash_local_vmclear_enabled(int cpu) +-{ +- return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); +-} +- + static void crash_vmclear_local_loaded_vmcss(void) + { + int cpu = raw_smp_processor_id(); + struct loaded_vmcs *v; + +- if (!crash_local_vmclear_enabled(cpu)) +- return; +- + list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), + loaded_vmcss_on_cpu_link) + vmcs_clear(v->vmcs); + } +-#else +-static inline void crash_enable_local_vmclear(int cpu) { } +-static inline void crash_disable_local_vmclear(int cpu) { } + #endif /* CONFIG_KEXEC_CORE */ + + static void __loaded_vmcs_clear(void *arg) +@@ -1722,19 +1694,24 @@ static void __loaded_vmcs_clear(void *arg) + return; /* vcpu migration can race with cpu offline */ + if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) + per_cpu(current_vmcs, cpu) = NULL; +- crash_disable_local_vmclear(cpu); ++ ++ vmcs_clear(loaded_vmcs->vmcs); ++ if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) ++ vmcs_clear(loaded_vmcs->shadow_vmcs); ++ + list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); + + /* +- * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link +- * is before setting loaded_vmcs->vcpu to -1 which is done in +- * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist +- * then adds the vmcs into percpu list before it is deleted. ++ * Ensure all writes to loaded_vmcs, including deleting it from its ++ * current percpu list, complete before setting loaded_vmcs->vcpu to ++ * -1, otherwise a different cpu can see vcpu == -1 first and add ++ * loaded_vmcs to its percpu list before it's deleted from this cpu's ++ * list. 
Pairs with the smp_rmb() in vmx_vcpu_load_vmcs(). + */ + smp_wmb(); + +- loaded_vmcs_init(loaded_vmcs); +- crash_enable_local_vmclear(cpu); ++ loaded_vmcs->cpu = -1; ++ loaded_vmcs->launched = 0; + } + + static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) +@@ -2497,18 +2474,17 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + if (!already_loaded) { + loaded_vmcs_clear(vmx->loaded_vmcs); + local_irq_disable(); +- crash_disable_local_vmclear(cpu); + + /* +- * Read loaded_vmcs->cpu should be before fetching +- * loaded_vmcs->loaded_vmcss_on_cpu_link. +- * See the comments in __loaded_vmcs_clear(). ++ * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to ++ * this cpu's percpu list, otherwise it may not yet be deleted ++ * from its previous cpu's percpu list. Pairs with the ++ * smb_wmb() in __loaded_vmcs_clear(). + */ + smp_rmb(); + + list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, + &per_cpu(loaded_vmcss_on_cpu, cpu)); +- crash_enable_local_vmclear(cpu); + local_irq_enable(); + } + +@@ -3800,21 +3776,6 @@ static int hardware_enable(void) + if (cr4_read_shadow() & X86_CR4_VMXE) + return -EBUSY; + +- INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); +- INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); +- spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); +- +- /* +- * Now we can enable the vmclear operation in kdump +- * since the loaded_vmcss_on_cpu list on this cpu +- * has been initialized. +- * +- * Though the cpu is not in VMX operation now, there +- * is no problem to enable the vmclear operation +- * for the loaded_vmcss_on_cpu list is empty! 
+- */ +- crash_enable_local_vmclear(cpu); +- + rdmsrl(MSR_IA32_FEATURE_CONTROL, old); + + test_bits = FEATURE_CONTROL_LOCKED; +@@ -6198,8 +6159,13 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) + + static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) + { +- return (!to_vmx(vcpu)->nested.nested_run_pending && +- vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && ++ if (to_vmx(vcpu)->nested.nested_run_pending) ++ return false; ++ ++ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) ++ return true; ++ ++ return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && + !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & + (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); + } +@@ -11659,7 +11625,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, + } + } + +-static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) ++static int vmx_check_nested_events(struct kvm_vcpu *vcpu) + { + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long exit_qual; +@@ -11697,8 +11663,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) + return 0; + } + +- if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && +- nested_exit_on_intr(vcpu)) { ++ if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) { + if (block_nested_events) + return -EBUSY; + nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); +@@ -12254,17 +12219,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + + if (likely(!vmx->fail)) { +- /* +- * TODO: SDM says that with acknowledge interrupt on +- * exit, bit 31 of the VM-exit interrupt information +- * (valid interrupt) is always set to 1 on +- * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't +- * need kvm_cpu_has_interrupt(). See the commit +- * message for details. 
+- */ +- if (nested_exit_intr_ack_set(vcpu) && +- exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && +- kvm_cpu_has_interrupt(vcpu)) { ++ if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && ++ nested_exit_intr_ack_set(vcpu)) { + int irq = kvm_cpu_get_interrupt(vcpu); + WARN_ON(irq < 0); + vmcs12->vm_exit_intr_info = irq | +@@ -12940,7 +12896,7 @@ module_exit(vmx_exit) + + static int __init vmx_init(void) + { +- int r; ++ int r, cpu; + + r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), + __alignof__(struct vcpu_vmx), THIS_MODULE); +@@ -12962,6 +12918,12 @@ static int __init vmx_init(void) + } + } + ++ for_each_possible_cpu(cpu) { ++ INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); ++ INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); ++ spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); ++ } ++ + #ifdef CONFIG_KEXEC_CORE + rcu_assign_pointer(crash_vmclear_loaded_vmcss, + crash_vmclear_local_loaded_vmcss); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index d6851636edab..5f44827e4962 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -6638,7 +6638,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) + kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); + } + +-static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) ++static int inject_pending_event(struct kvm_vcpu *vcpu) + { + int r; + +@@ -6665,7 +6665,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) + } + + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { +- r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); ++ r = kvm_x86_ops->check_nested_events(vcpu); + if (r != 0) + return r; + } +@@ -6706,7 +6706,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) + * KVM_REQ_EVENT only on certain events and not unconditionally? 
+ */ + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { +- r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); ++ r = kvm_x86_ops->check_nested_events(vcpu); + if (r != 0) + return r; + } +@@ -7152,7 +7152,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + goto out; + } + +- if (inject_pending_event(vcpu, req_int_win) != 0) ++ if (inject_pending_event(vcpu) != 0) + req_immediate_exit = true; + else { + /* Enable NMI/IRQ window open exits if needed. +@@ -7360,7 +7360,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) + static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) + { + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) +- kvm_x86_ops->check_nested_events(vcpu, false); ++ kvm_x86_ops->check_nested_events(vcpu); + + return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && + !vcpu->arch.apf.halted); +@@ -8584,6 +8584,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + { + int i; + ++ /* ++ * Clear out the previous array pointers for the KVM_MR_MOVE case. The ++ * old arrays will be freed by __kvm_set_memory_region() if installing ++ * the new memslot is successful. 
++ */ ++ memset(&slot->arch, 0, sizeof(slot->arch)); ++ + for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { + struct kvm_lpage_info *linfo; + unsigned long ugfn; +@@ -8657,6 +8664,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change) + { ++ if (change == KVM_MR_MOVE) ++ return kvm_arch_create_memslot(kvm, memslot, ++ mem->memory_size >> PAGE_SHIFT); ++ + return 0; + } + +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c +index 93863c6173e6..959bee9fa911 100644 +--- a/block/bfq-iosched.c ++++ b/block/bfq-iosched.c +@@ -4541,20 +4541,28 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio) + spin_unlock_irq(&bfqd->lock); + } + +-static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) ++static void ++bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq) + { +- struct bfq_data *bfqd = bfqq->bfqd; + enum bfqq_expiration reason; + unsigned long flags; + + spin_lock_irqsave(&bfqd->lock, flags); +- bfq_clear_bfqq_wait_request(bfqq); + ++ /* ++ * Considering that bfqq may be in race, we should firstly check ++ * whether bfqq is in service before doing something on it. If ++ * the bfqq in race is not in service, it has already been expired ++ * through __bfq_bfqq_expire func and its wait_request flags has ++ * been cleared in __bfq_bfqd_reset_in_service func. ++ */ + if (bfqq != bfqd->in_service_queue) { + spin_unlock_irqrestore(&bfqd->lock, flags); + return; + } + ++ bfq_clear_bfqq_wait_request(bfqq); ++ + if (bfq_bfqq_budget_timeout(bfqq)) + /* + * Also here the queue can be safely expired +@@ -4599,7 +4607,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) + * early. 
+ */ + if (bfqq) +- bfq_idle_slice_timer_body(bfqq); ++ bfq_idle_slice_timer_body(bfqd, bfqq); + + return HRTIMER_NORESTART; + } +diff --git a/block/blk-ioc.c b/block/blk-ioc.c +index f23311e4b201..e56a480b6f92 100644 +--- a/block/blk-ioc.c ++++ b/block/blk-ioc.c +@@ -87,6 +87,7 @@ static void ioc_destroy_icq(struct io_cq *icq) + * making it impossible to determine icq_cache. Record it in @icq. + */ + icq->__rcu_icq_cache = et->icq_cache; ++ icq->flags |= ICQ_DESTROYED; + call_rcu(&icq->__rcu_head, icq_free_icq_rcu); + } + +@@ -230,15 +231,21 @@ static void __ioc_clear_queue(struct list_head *icq_list) + { + unsigned long flags; + ++ rcu_read_lock(); + while (!list_empty(icq_list)) { + struct io_cq *icq = list_entry(icq_list->next, + struct io_cq, q_node); + struct io_context *ioc = icq->ioc; + + spin_lock_irqsave(&ioc->lock, flags); ++ if (icq->flags & ICQ_DESTROYED) { ++ spin_unlock_irqrestore(&ioc->lock, flags); ++ continue; ++ } + ioc_destroy_icq(icq); + spin_unlock_irqrestore(&ioc->lock, flags); + } ++ rcu_read_unlock(); + } + + /** +diff --git a/block/blk-settings.c b/block/blk-settings.c +index 6c2faaa38cc1..e0a744921ed3 100644 +--- a/block/blk-settings.c ++++ b/block/blk-settings.c +@@ -717,6 +717,9 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", + top, bottom); + } ++ ++ t->backing_dev_info->io_pages = ++ t->limits.max_sectors >> (PAGE_SHIFT - 9); + } + EXPORT_SYMBOL(disk_stack_limits); + +diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c +index 7f9aff4b8d62..9fdc13a2f2d5 100644 +--- a/drivers/acpi/processor_throttling.c ++++ b/drivers/acpi/processor_throttling.c +@@ -909,13 +909,6 @@ static long __acpi_processor_get_throttling(void *data) + return pr->throttling.acpi_processor_get_throttling(pr); + } + +-static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct) +-{ +- if (direct || (is_percpu_thread() && cpu == 
smp_processor_id())) +- return fn(arg); +- return work_on_cpu(cpu, fn, arg); +-} +- + static int acpi_processor_get_throttling(struct acpi_processor *pr) + { + if (!pr) +diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c +index 85aa76116a30..7924d0635718 100644 +--- a/drivers/ata/libata-pmp.c ++++ b/drivers/ata/libata-pmp.c +@@ -764,6 +764,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, + + if (dev->flags & ATA_DFLAG_DETACH) { + detach = 1; ++ rc = -ENODEV; + goto fail; + } + +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index eb0c4ee20525..2f81d6534270 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -4571,22 +4571,19 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) + */ + shost->max_host_blocked = 1; + +- rc = scsi_add_host_with_dma(ap->scsi_host, +- &ap->tdev, ap->host->dev); ++ rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); + if (rc) +- goto err_add; ++ goto err_alloc; + } + + return 0; + +- err_add: +- scsi_host_put(host->ports[i]->scsi_host); + err_alloc: + while (--i >= 0) { + struct Scsi_Host *shost = host->ports[i]->scsi_host; + ++ /* scsi_host_put() is in ata_devres_release() */ + scsi_remove_host(shost); +- scsi_host_put(shost); + } + return rc; + } +diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c +index 41be9ff7d70a..3da53cc6cf2b 100644 +--- a/drivers/base/arch_topology.c ++++ b/drivers/base/arch_topology.c +@@ -96,7 +96,7 @@ subsys_initcall(register_cpu_capacity_sysctl); + static u32 capacity_scale; + static u32 *raw_capacity; + +-static int __init free_raw_capacity(void) ++static int free_raw_capacity(void) + { + kfree(raw_capacity); + raw_capacity = NULL; +diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c +index f01d4a8a783a..b12e373aa956 100644 +--- a/drivers/block/null_blk.c ++++ b/drivers/block/null_blk.c +@@ -622,6 +622,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) 
+ if (tag != -1U) { + cmd = &nq->cmds[tag]; + cmd->tag = tag; ++ cmd->error = BLK_STS_OK; + cmd->nq = nq; + if (nq->dev->irqmode == NULL_IRQ_TIMER) { + hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, +@@ -1399,6 +1400,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, + cmd->timer.function = null_cmd_timer_expired; + } + cmd->rq = bd->rq; ++ cmd->error = BLK_STS_OK; + cmd->nq = nq; + + blk_mq_start_request(bd->rq); +@@ -1593,7 +1595,12 @@ static void null_nvm_unregister(struct nullb *nullb) {} + + static void null_del_dev(struct nullb *nullb) + { +- struct nullb_device *dev = nullb->dev; ++ struct nullb_device *dev; ++ ++ if (!nullb) ++ return; ++ ++ dev = nullb->dev; + + ida_simple_remove(&nullb_indexes, nullb->index); + +@@ -1919,6 +1926,7 @@ out_cleanup_queues: + cleanup_queues(nullb); + out_free_nullb: + kfree(nullb); ++ dev->nullb = NULL; + out: + return rv; + } +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c +index f2b1994d58a0..557cf52f674b 100644 +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -3847,6 +3847,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev) + cancel_work_sync(&rbd_dev->unlock_work); + } + ++/* ++ * header_rwsem must not be held to avoid a deadlock with ++ * rbd_dev_refresh() when flushing notifies. ++ */ + static void rbd_unregister_watch(struct rbd_device *rbd_dev) + { + WARN_ON(waitqueue_active(&rbd_dev->lock_waitq)); +@@ -6044,9 +6048,10 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) + + static void rbd_dev_image_release(struct rbd_device *rbd_dev) + { +- rbd_dev_unprobe(rbd_dev); + if (rbd_dev->opts) + rbd_unregister_watch(rbd_dev); ++ ++ rbd_dev_unprobe(rbd_dev); + rbd_dev->image_format = 0; + kfree(rbd_dev->spec->image_id); + rbd_dev->spec->image_id = NULL; +@@ -6057,6 +6062,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) + * device. 
If this image is the one being mapped (i.e., not a + * parent), initiate a watch on its header object before using that + * object to get detailed information about the rbd image. ++ * ++ * On success, returns with header_rwsem held for write if called ++ * with @depth == 0. + */ + static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + { +@@ -6087,9 +6095,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + } + } + ++ if (!depth) ++ down_write(&rbd_dev->header_rwsem); ++ + ret = rbd_dev_header_info(rbd_dev); + if (ret) +- goto err_out_watch; ++ goto err_out_probe; + + /* + * If this image is the one being mapped, we have pool name and +@@ -6133,10 +6144,11 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + return 0; + + err_out_probe: +- rbd_dev_unprobe(rbd_dev); +-err_out_watch: ++ if (!depth) ++ up_write(&rbd_dev->header_rwsem); + if (!depth) + rbd_unregister_watch(rbd_dev); ++ rbd_dev_unprobe(rbd_dev); + err_out_format: + rbd_dev->image_format = 0; + kfree(rbd_dev->spec->image_id); +@@ -6194,12 +6206,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, + goto err_out_rbd_dev; + } + +- down_write(&rbd_dev->header_rwsem); + rc = rbd_dev_image_probe(rbd_dev, 0); +- if (rc < 0) { +- up_write(&rbd_dev->header_rwsem); ++ if (rc < 0) + goto err_out_rbd_dev; +- } + + /* If we are mapping a snapshot it must be marked read-only */ + +diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c +index 1b76d9585902..2ca2cc56bcef 100644 +--- a/drivers/bus/sunxi-rsb.c ++++ b/drivers/bus/sunxi-rsb.c +@@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, + if (ret) + goto unlock; + +- *buf = readl(rsb->regs + RSB_DATA); ++ *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0); + + unlock: + mutex_unlock(&rsb->lock); +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index c82d9fd2f05a..f72a272eeb9b 100644 +--- 
a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -2647,7 +2647,9 @@ get_guid(ipmi_smi_t intf) + if (rv) + /* Send failed, no GUID available. */ + intf->bmc->guid_set = 0; +- wait_event(intf->waitq, intf->bmc->guid_set != 2); ++ else ++ wait_event(intf->waitq, intf->bmc->guid_set != 2); ++ + intf->null_user_handler = NULL; + } + +diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c +index 791770a563fc..6fac6383d024 100644 +--- a/drivers/clk/at91/clk-usb.c ++++ b/drivers/clk/at91/clk-usb.c +@@ -78,6 +78,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, + tmp_parent_rate = req->rate * div; + tmp_parent_rate = clk_hw_round_rate(parent, + tmp_parent_rate); ++ if (!tmp_parent_rate) ++ continue; ++ + tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div); + if (tmp_rate < req->rate) + tmp_diff = req->rate - tmp_rate; +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c +index 8341a128dab1..44b6f23cc851 100644 +--- a/drivers/clk/clk.c ++++ b/drivers/clk/clk.c +@@ -2172,18 +2172,16 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) + + core->dentry = d; + +- d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry, +- (u32 *)&core->rate); ++ d = debugfs_create_ulong("clk_rate", 0444, core->dentry, &core->rate); + if (!d) + goto err_out; + +- d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry, +- (u32 *)&core->accuracy); ++ d = debugfs_create_ulong("clk_accuracy", 0444, core->dentry, ++ &core->accuracy); + if (!d) + goto err_out; + +- d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry, +- (u32 *)&core->phase); ++ d = debugfs_create_u32("clk_phase", 0444, core->dentry, &core->phase); + if (!d) + goto err_out; + +@@ -2192,23 +2190,23 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) + if (!d) + goto err_out; + +- d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry, +- (u32 *)&core->prepare_count); ++ d = 
debugfs_create_u32("clk_prepare_count", 0444, core->dentry, ++ &core->prepare_count); + if (!d) + goto err_out; + +- d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry, +- (u32 *)&core->enable_count); ++ d = debugfs_create_u32("clk_enable_count", 0444, core->dentry, ++ &core->enable_count); + if (!d) + goto err_out; + +- d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry, +- (u32 *)&core->notifier_count); ++ d = debugfs_create_u32("clk_notifier_count", 0444, core->dentry, ++ &core->notifier_count); + if (!d) + goto err_out; + + if (core->num_parents > 1) { +- d = debugfs_create_file("clk_possible_parents", S_IRUGO, ++ d = debugfs_create_file("clk_possible_parents", 0444, + core->dentry, core, &possible_parents_fops); + if (!d) + goto err_out; +@@ -2304,22 +2302,22 @@ static int __init clk_debug_init(void) + if (!rootdir) + return -ENOMEM; + +- d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists, ++ d = debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, + &clk_summary_fops); + if (!d) + return -ENOMEM; + +- d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists, ++ d = debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, + &clk_dump_fops); + if (!d) + return -ENOMEM; + +- d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir, ++ d = debugfs_create_file("clk_orphan_summary", 0444, rootdir, + &orphan_list, &clk_summary_fops); + if (!d) + return -ENOMEM; + +- d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir, ++ d = debugfs_create_file("clk_orphan_dump", 0444, rootdir, + &orphan_list, &clk_dump_fops); + if (!d) + return -ENOMEM; +diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c +index a35579a3f884..476dab494c44 100644 +--- a/drivers/clk/tegra/clk-tegra-pmc.c ++++ b/drivers/clk/tegra/clk-tegra-pmc.c +@@ -60,16 +60,16 @@ struct pmc_clk_init_data { + + static DEFINE_SPINLOCK(clk_out_lock); + +-static const char *clk_out1_parents[] = { "clk_m", 
"clk_m_div2", +- "clk_m_div4", "extern1", ++static const char *clk_out1_parents[] = { "osc", "osc_div2", ++ "osc_div4", "extern1", + }; + +-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2", +- "clk_m_div4", "extern2", ++static const char *clk_out2_parents[] = { "osc", "osc_div2", ++ "osc_div4", "extern2", + }; + +-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2", +- "clk_m_div4", "extern3", ++static const char *clk_out3_parents[] = { "osc", "osc_div2", ++ "osc_div4", "extern3", + }; + + static struct pmc_clk_init_data pmc_clks[] = { +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c +index 33854bf127f9..25c9a6cdd861 100644 +--- a/drivers/cpufreq/powernv-cpufreq.c ++++ b/drivers/cpufreq/powernv-cpufreq.c +@@ -1041,6 +1041,12 @@ free_and_return: + + static inline void clean_chip_info(void) + { ++ int i; ++ ++ /* flush any pending work items */ ++ if (chips) ++ for (i = 0; i < nr_chips; i++) ++ cancel_work_sync(&chips[i].throttle); + kfree(chips); + } + +diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c +index b23c7b72525c..a3d507fb9ea5 100644 +--- a/drivers/crypto/caam/caamalg_desc.c ++++ b/drivers/crypto/caam/caamalg_desc.c +@@ -1280,7 +1280,13 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap); + */ + void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata) + { +- __be64 sector_size = cpu_to_be64(512); ++ /* ++ * Set sector size to a big value, practically disabling ++ * sector size segmentation in xts implementation. We cannot ++ * take full advantage of this HW feature with existing ++ * crypto API / dm-crypt SW architecture. 
++ */ ++ __be64 sector_size = cpu_to_be64(BIT(15)); + u32 *key_jump_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); +@@ -1332,7 +1338,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap); + */ + void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata) + { +- __be64 sector_size = cpu_to_be64(512); ++ /* ++ * Set sector size to a big value, practically disabling ++ * sector size segmentation in xts implementation. We cannot ++ * take full advantage of this HW feature with existing ++ * crypto API / dm-crypt SW architecture. ++ */ ++ __be64 sector_size = cpu_to_be64(BIT(15)); + u32 *key_jump_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); +diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c +index e1e1e8110790..5a4b8aee22a8 100644 +--- a/drivers/crypto/mxs-dcp.c ++++ b/drivers/crypto/mxs-dcp.c +@@ -25,6 +25,7 @@ + #include <crypto/sha.h> + #include <crypto/internal/hash.h> + #include <crypto/internal/skcipher.h> ++#include <crypto/scatterwalk.h> + + #define DCP_MAX_CHANS 4 + #define DCP_BUF_SZ PAGE_SIZE +@@ -621,49 +622,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) + struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); + struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct hash_alg_common *halg = crypto_hash_alg_common(tfm); +- const int nents = sg_nents(req->src); + + uint8_t *in_buf = sdcp->coh->sha_in_buf; + uint8_t *out_buf = sdcp->coh->sha_out_buf; + +- uint8_t *src_buf; +- + struct scatterlist *src; + +- unsigned int i, len, clen; ++ unsigned int i, len, clen, oft = 0; + int ret; + + int fin = rctx->fini; + if (fin) + rctx->fini = 0; + +- for_each_sg(req->src, src, nents, i) { +- src_buf = sg_virt(src); +- len = sg_dma_len(src); +- +- do { +- if (actx->fill + len > DCP_BUF_SZ) +- clen = DCP_BUF_SZ - actx->fill; +- else +- clen = len; +- +- memcpy(in_buf + actx->fill, src_buf, clen); +- len -= clen; +- src_buf += clen; +- actx->fill += clen; ++ src = req->src; ++ len = 
req->nbytes; + +- /* +- * If we filled the buffer and still have some +- * more data, submit the buffer. +- */ +- if (len && actx->fill == DCP_BUF_SZ) { +- ret = mxs_dcp_run_sha(req); +- if (ret) +- return ret; +- actx->fill = 0; +- rctx->init = 0; +- } +- } while (len); ++ while (len) { ++ if (actx->fill + len > DCP_BUF_SZ) ++ clen = DCP_BUF_SZ - actx->fill; ++ else ++ clen = len; ++ ++ scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen, ++ 0); ++ ++ len -= clen; ++ oft += clen; ++ actx->fill += clen; ++ ++ /* ++ * If we filled the buffer and still have some ++ * more data, submit the buffer. ++ */ ++ if (len && actx->fill == DCP_BUF_SZ) { ++ ret = mxs_dcp_run_sha(req); ++ if (ret) ++ return ret; ++ actx->fill = 0; ++ rctx->init = 0; ++ } + } + + if (fin) { +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c +index f50072b51aef..b39b7e6d4e4d 100644 +--- a/drivers/firmware/efi/efi.c ++++ b/drivers/firmware/efi/efi.c +@@ -550,7 +550,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, + } + } + +- if (efi_enabled(EFI_MEMMAP)) ++ if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP)) + efi_memattr_init(); + + /* Parse the EFI Properties table if it exists */ +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index f0777a7a4305..d5b42cc86d71 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -1245,31 +1245,14 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) + struct gpio_desc *desc = &gdev->descs[i]; + + desc->gdev = gdev; +- /* +- * REVISIT: most hardware initializes GPIOs as inputs +- * (often with pullups enabled) so power usage is +- * minimized. Linux code should set the gpio direction +- * first thing; but until it does, and in case +- * chip->get_direction is not set, we may expose the +- * wrong direction in sysfs. +- */ +- +- if (chip->get_direction) { +- /* +- * If we have .get_direction, set up the initial +- * direction flag from the hardware. 
+- */ +- int dir = chip->get_direction(chip, i); + +- if (!dir) +- set_bit(FLAG_IS_OUT, &desc->flags); +- } else if (!chip->direction_input) { +- /* +- * If the chip lacks the .direction_input callback +- * we logically assume all lines are outputs. +- */ +- set_bit(FLAG_IS_OUT, &desc->flags); +- } ++ /* REVISIT: most hardware initializes GPIOs as inputs (often ++ * with pullups enabled) so power usage is minimized. Linux ++ * code should set the gpio direction first thing; but until ++ * it does, and in case chip->get_direction is not set, we may ++ * expose the wrong direction in sysfs. ++ */ ++ desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0; + } + + #ifdef CONFIG_PINCTRL +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +index 61fff25b4ce7..ecd4eba221c0 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +@@ -550,9 +550,9 @@ kfd_gtt_out: + return 0; + + kfd_gtt_no_free_chunk: +- pr_debug("Allocation failed with mem_obj = %p\n", mem_obj); ++ pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj); + mutex_unlock(&kfd->gtt_sa_lock); +- kfree(mem_obj); ++ kfree(*mem_obj); + return -ENOMEM; + } + +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c +index f0d819fc16cd..42eaeae86bb8 100644 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c +@@ -1043,10 +1043,12 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) + lct = drm_dp_calculate_rad(port, rad); + + port->mstb = drm_dp_add_mst_branch_device(lct, rad); +- port->mstb->mgr = port->mgr; +- port->mstb->port_parent = port; ++ if (port->mstb) { ++ port->mstb->mgr = port->mgr; ++ port->mstb->port_parent = port; + +- send_link = true; ++ send_link = true; ++ } + break; + } + return send_link; +@@ -2038,6 +2040,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms + int ret = 0; + 
struct drm_dp_mst_branch *mstb = NULL; + ++ mutex_lock(&mgr->payload_lock); + mutex_lock(&mgr->lock); + if (mst_state == mgr->mst_state) + goto out_unlock; +@@ -2096,7 +2099,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms + /* this can fail if the device is gone */ + drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); + ret = 0; +- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); ++ memset(mgr->payloads, 0, ++ mgr->max_payloads * sizeof(mgr->payloads[0])); ++ memset(mgr->proposed_vcpis, 0, ++ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0])); + mgr->payload_mask = 0; + set_bit(0, &mgr->payload_mask); + mgr->vcpi_mask = 0; +@@ -2104,6 +2110,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms + + out_unlock: + mutex_unlock(&mgr->lock); ++ mutex_unlock(&mgr->payload_lock); + if (mstb) + drm_dp_put_mst_branch_device(mstb); + return ret; +diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c +index 1235c9877d6f..2078d7706a67 100644 +--- a/drivers/gpu/drm/drm_pci.c ++++ b/drivers/gpu/drm/drm_pci.c +@@ -46,8 +46,6 @@ + drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) + { + drm_dma_handle_t *dmah; +- unsigned long addr; +- size_t sz; + + /* pci_alloc_consistent only guarantees alignment to the smallest + * PAGE_SIZE order which is greater than or equal to the requested size. +@@ -61,22 +59,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali + return NULL; + + dmah->size = size; +- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP); ++ dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL); + + if (dmah->vaddr == NULL) { + kfree(dmah); + return NULL; + } + +- memset(dmah->vaddr, 0, size); +- +- /* XXX - Is virt_to_page() legal for consistent mem? 
*/ +- /* Reserve */ +- for (addr = (unsigned long)dmah->vaddr, sz = size; +- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { +- SetPageReserved(virt_to_page((void *)addr)); +- } +- + return dmah; + } + +@@ -89,19 +78,9 @@ EXPORT_SYMBOL(drm_pci_alloc); + */ + void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) + { +- unsigned long addr; +- size_t sz; +- +- if (dmah->vaddr) { +- /* XXX - Is virt_to_page() legal for consistent mem? */ +- /* Unreserve */ +- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; +- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { +- ClearPageReserved(virt_to_page((void *)addr)); +- } ++ if (dmah->vaddr) + dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, + dmah->busaddr); +- } + } + + /** +diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c +index 9e62f893958a..81158ae8bfe3 100644 +--- a/drivers/i2c/busses/i2c-st.c ++++ b/drivers/i2c/busses/i2c-st.c +@@ -437,6 +437,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev) + /** + * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode + * @i2c_dev: Controller's private data ++ * @max: Maximum amount of data to fill into the Tx FIFO + * + * This functions fills the Tx FIFO with fixed pattern when + * in read mode to trigger clock. 
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 136f6e7bf797..0d0f977a2f39 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -534,6 +534,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), + }, + }, ++ { ++ /* ++ * Acer Aspire 5738z ++ * Touchpad stops working in mux mode when dis- + re-enabled ++ * with the touchpad enable/disable toggle hotkey ++ */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h +index 3054c0971759..74c8638aac2b 100644 +--- a/drivers/iommu/amd_iommu_types.h ++++ b/drivers/iommu/amd_iommu_types.h +@@ -348,7 +348,7 @@ + + #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) + #define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL) +-#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL) ++#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL) + + #define DTE_GCR3_INDEX_A 0 + #define DTE_GCR3_INDEX_B 1 +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c +index f5573bb9f450..837459762eb3 100644 +--- a/drivers/iommu/intel-svm.c ++++ b/drivers/iommu/intel-svm.c +@@ -613,14 +613,15 @@ static irqreturn_t prq_event_thread(int irq, void *d) + * any faults on kernel addresses. */ + if (!svm->mm) + goto bad_req; +- /* If the mm is already defunct, don't handle faults. */ +- if (!mmget_not_zero(svm->mm)) +- goto bad_req; + + /* If address is not canonical, return invalid response */ + if (!is_canonical_address(address)) + goto bad_req; + ++ /* If the mm is already defunct, don't handle faults. 
*/ ++ if (!mmget_not_zero(svm->mm)) ++ goto bad_req; ++ + down_read(&svm->mm->mmap_sem); + vma = find_extend_vma(svm->mm, address); + if (!vma || address < vma->vm_start) +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index 799df1e598db..84b23d902d5b 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -2591,12 +2591,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d, + return 0; + } + ++static int its_vpe_retrigger(struct irq_data *d) ++{ ++ return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); ++} ++ + static struct irq_chip its_vpe_irq_chip = { + .name = "GICv4-vpe", + .irq_mask = its_vpe_mask_irq, + .irq_unmask = its_vpe_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, ++ .irq_retrigger = its_vpe_retrigger, + .irq_set_irqchip_state = its_vpe_set_irqchip_state, + .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, + }; +diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c +index f7fdbf5d183b..c98358be0bc8 100644 +--- a/drivers/irqchip/irq-mbigen.c ++++ b/drivers/irqchip/irq-mbigen.c +@@ -231,10 +231,16 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain, + return 0; + } + ++static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ platform_msi_domain_free(domain, virq, nr_irqs); ++} ++ + static const struct irq_domain_ops mbigen_domain_ops = { + .translate = mbigen_domain_translate, + .alloc = mbigen_irq_domain_alloc, +- .free = irq_domain_free_irqs_common, ++ .free = mbigen_irq_domain_free, + }; + + static int mbigen_of_create_domain(struct platform_device *pdev, +diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c +index 928858dada75..f1386733d3bc 100644 +--- a/drivers/irqchip/irq-versatile-fpga.c ++++ b/drivers/irqchip/irq-versatile-fpga.c +@@ -6,6 +6,7 @@ + #include <linux/irq.h> + #include 
<linux/io.h> + #include <linux/irqchip.h> ++#include <linux/irqchip/chained_irq.h> + #include <linux/irqchip/versatile-fpga.h> + #include <linux/irqdomain.h> + #include <linux/module.h> +@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d) + + static void fpga_irq_handle(struct irq_desc *desc) + { ++ struct irq_chip *chip = irq_desc_get_chip(desc); + struct fpga_irq_data *f = irq_desc_get_handler_data(desc); +- u32 status = readl(f->base + IRQ_STATUS); ++ u32 status; ++ ++ chained_irq_enter(chip, desc); + ++ status = readl(f->base + IRQ_STATUS); + if (status == 0) { + do_bad_IRQ(desc); +- return; ++ goto out; + } + + do { +@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc) + status &= ~(1 << irq); + generic_handle_irq(irq_find_mapping(f->domain, irq)); + } while (status); ++ ++out: ++ chained_irq_exit(chip, desc); + } + + /* +@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node, + if (of_property_read_u32(node, "valid-mask", &valid_mask)) + valid_mask = 0; + ++ writel(clear_mask, base + IRQ_ENABLE_CLEAR); ++ writel(clear_mask, base + FIQ_ENABLE_CLEAR); ++ + /* Some chips are cascaded from a parent IRQ */ + parent_irq = irq_of_parse_and_map(node, 0); + if (!parent_irq) { +@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node, + + fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); + +- writel(clear_mask, base + IRQ_ENABLE_CLEAR); +- writel(clear_mask, base + FIQ_ENABLE_CLEAR); +- + /* + * On Versatile AB/PB, some secondary interrupts have a direct + * pass-thru to the primary controller for IRQs 20 and 22-31 which need +diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c +index b1b68e01b889..53cd31199f21 100644 +--- a/drivers/md/dm-flakey.c ++++ b/drivers/md/dm-flakey.c +@@ -70,6 +70,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, + arg_name = dm_shift_arg(as); + argc--; + ++ if (!arg_name) { ++ ti->error = "Insufficient feature arguments"; ++ 
return -EINVAL; ++ } ++ + /* + * drop_writes + */ +diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c +index e13f90832b6b..285148100cde 100644 +--- a/drivers/md/dm-verity-fec.c ++++ b/drivers/md/dm-verity-fec.c +@@ -552,6 +552,7 @@ void verity_fec_dtr(struct dm_verity *v) + mempool_destroy(f->rs_pool); + mempool_destroy(f->prealloc_pool); + mempool_destroy(f->extra_pool); ++ mempool_destroy(f->output_pool); + kmem_cache_destroy(f->cache); + + if (f->data_bufio) +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c +index e3b67b145027..4d658a0c6025 100644 +--- a/drivers/md/dm-zoned-metadata.c ++++ b/drivers/md/dm-zoned-metadata.c +@@ -1105,7 +1105,6 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone, + + if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) { + set_bit(DMZ_RND, &zone->flags); +- zmd->nr_rnd_zones++; + } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ || + blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) { + set_bit(DMZ_SEQ, &zone->flags); +diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c +index 42e383a48ffe..b6dcae1ecc1b 100644 +--- a/drivers/media/platform/ti-vpe/cal.c ++++ b/drivers/media/platform/ti-vpe/cal.c +@@ -544,16 +544,16 @@ static void enable_irqs(struct cal_ctx *ctx) + + static void disable_irqs(struct cal_ctx *ctx) + { ++ u32 val; ++ + /* Disable IRQ_WDMA_END 0/1 */ +- reg_write_field(ctx->dev, +- CAL_HL_IRQENABLE_CLR(2), +- CAL_HL_IRQ_CLEAR, +- CAL_HL_IRQ_MASK(ctx->csi2_port)); ++ val = 0; ++ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port)); ++ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val); + /* Disable IRQ_WDMA_START 0/1 */ +- reg_write_field(ctx->dev, +- CAL_HL_IRQENABLE_CLR(3), +- CAL_HL_IRQ_CLEAR, +- CAL_HL_IRQ_MASK(ctx->csi2_port)); ++ val = 0; ++ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port)); ++ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val); + /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ 
handling */ + reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0); + } +diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c +index 95d0f2df0ad4..672831d5ee32 100644 +--- a/drivers/mfd/dln2.c ++++ b/drivers/mfd/dln2.c +@@ -93,6 +93,11 @@ struct dln2_mod_rx_slots { + spinlock_t lock; + }; + ++enum dln2_endpoint { ++ DLN2_EP_OUT = 0, ++ DLN2_EP_IN = 1, ++}; ++ + struct dln2_dev { + struct usb_device *usb_dev; + struct usb_interface *interface; +@@ -740,10 +745,10 @@ static int dln2_probe(struct usb_interface *interface, + hostif->desc.bNumEndpoints < 2) + return -ENODEV; + +- epin = &hostif->endpoint[0].desc; +- epout = &hostif->endpoint[1].desc; ++ epout = &hostif->endpoint[DLN2_EP_OUT].desc; + if (!usb_endpoint_is_bulk_out(epout)) + return -ENODEV; ++ epin = &hostif->endpoint[DLN2_EP_IN].desc; + if (!usb_endpoint_is_bulk_in(epin)) + return -ENODEV; + +diff --git a/drivers/mfd/rts5227.c b/drivers/mfd/rts5227.c +index ff296a4bf3d2..dc6a9432a4b6 100644 +--- a/drivers/mfd/rts5227.c ++++ b/drivers/mfd/rts5227.c +@@ -369,6 +369,7 @@ static const struct pcr_ops rts522a_pcr_ops = { + void rts522a_init_params(struct rtsx_pcr *pcr) + { + rts5227_init_params(pcr); ++ pcr->ops = &rts522a_pcr_ops; + + pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3; + } +diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c +index 9597e9523cac..fff13176f9b8 100644 +--- a/drivers/misc/echo/echo.c ++++ b/drivers/misc/echo/echo.c +@@ -454,7 +454,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx) + */ + ec->factor = 0; + ec->shift = 0; +- if ((ec->nonupdate_dwell == 0)) { ++ if (!ec->nonupdate_dwell) { + int p, logp, shift; + + /* Determine: +diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c +index 8b66e52ca3cc..9734e6903fe6 100644 +--- a/drivers/mtd/devices/phram.c ++++ b/drivers/mtd/devices/phram.c +@@ -247,22 +247,25 @@ static int phram_setup(const char *val) + + ret = parse_num64(&start, token[1]); + if (ret) { +- kfree(name); + parse_err("illegal start 
address\n"); ++ goto error; + } + + ret = parse_num64(&len, token[2]); + if (ret) { +- kfree(name); + parse_err("illegal device length\n"); ++ goto error; + } + + ret = register_device(name, start, len); +- if (!ret) +- pr_info("%s device: %#llx at %#llx\n", name, len, start); +- else +- kfree(name); ++ if (ret) ++ goto error; ++ ++ pr_info("%s device: %#llx at %#llx\n", name, len, start); ++ return 0; + ++error: ++ kfree(name); + return ret; + } + +diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c +index 018c75faadb3..e1c283ccbbde 100644 +--- a/drivers/mtd/lpddr/lpddr_cmds.c ++++ b/drivers/mtd/lpddr/lpddr_cmds.c +@@ -81,7 +81,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) + shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips, + GFP_KERNEL); + if (!shared) { +- kfree(lpddr); + kfree(mtd); + return NULL; + } +diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c +index 7f8d269dd75a..814618c0b632 100644 +--- a/drivers/net/dsa/bcm_sf2_cfp.c ++++ b/drivers/net/dsa/bcm_sf2_cfp.c +@@ -130,17 +130,14 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, + (fs->m_ext.vlan_etype || fs->m_ext.data[1])) + return -EINVAL; + +- if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES) ++ if (fs->location != RX_CLS_LOC_ANY && ++ fs->location > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + if (fs->location != RX_CLS_LOC_ANY && + test_bit(fs->location, priv->cfp.used)) + return -EBUSY; + +- if (fs->location != RX_CLS_LOC_ANY && +- fs->location > bcm_sf2_cfp_rule_size(priv)) +- return -EINVAL; +- + ip_frag = be32_to_cpu(fs->m_ext.data[0]); + + /* We do not support discarding packets, check that the +@@ -333,7 +330,7 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, + int ret; + u32 reg; + +- if (loc >= CFP_NUM_RULES) ++ if (loc > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + /* Refuse deletion of unused rules, and the default reserved rule */ +diff --git 
a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +index c65d2cdcc7cf..8556962e6824 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +@@ -515,7 +515,7 @@ static void xgbe_isr_task(unsigned long data) + xgbe_disable_rx_tx_ints(pdata); + + /* Turn on polling */ +- __napi_schedule_irqoff(&pdata->napi); ++ __napi_schedule(&pdata->napi); + } + } else { + /* Don't clear Rx/Tx status if doing per channel DMA +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +index 9f9d6cae39d5..758f2b836328 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +@@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta) + FW_PTP_CMD_PORTID_V(0)); + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.ts.sc = FW_PTP_SC_ADJ_FTIME; ++ c.u.ts.sign = (delta < 0) ? 1 : 0; ++ if (delta < 0) ++ delta = -delta; + c.u.ts.tm = cpu_to_be64(delta); + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +index 7d95f0866fb0..e1de97effcd2 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +@@ -398,7 +398,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, + + spin_unlock_bh(&cmdq->cmdq_lock); + +- if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) { ++ if (!wait_for_completion_timeout(&done, ++ msecs_to_jiffies(CMDQ_TIMEOUT))) { + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmdq->errcode[curr_prod_idx] == &errcode) +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +index 46aba02b8672..5763e333a9af 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c ++++ 
b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +@@ -373,50 +373,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev) + return -EFAULT; + } + +-static int wait_for_io_stopped(struct hinic_hwdev *hwdev) +-{ +- struct hinic_cmd_io_status cmd_io_status; +- struct hinic_hwif *hwif = hwdev->hwif; +- struct pci_dev *pdev = hwif->pdev; +- struct hinic_pfhwdev *pfhwdev; +- unsigned long end; +- u16 out_size; +- int err; +- +- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { +- dev_err(&pdev->dev, "Unsupported PCI Function type\n"); +- return -EINVAL; +- } +- +- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); +- +- cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif); +- +- end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT); +- do { +- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, +- HINIC_COMM_CMD_IO_STATUS_GET, +- &cmd_io_status, sizeof(cmd_io_status), +- &cmd_io_status, &out_size, +- HINIC_MGMT_MSG_SYNC); +- if ((err) || (out_size != sizeof(cmd_io_status))) { +- dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n", +- err); +- return err; +- } +- +- if (cmd_io_status.status == IO_STOPPED) { +- dev_info(&pdev->dev, "IO stopped\n"); +- return 0; +- } +- +- msleep(20); +- } while (time_before(jiffies, end)); +- +- dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n"); +- return -ETIMEDOUT; +-} +- + /** + * clear_io_resource - set the IO resources as not active in the NIC + * @hwdev: the NIC HW device +@@ -436,11 +392,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev) + return -EINVAL; + } + +- err = wait_for_io_stopped(hwdev); +- if (err) { +- dev_err(&pdev->dev, "IO has not stopped yet\n"); +- return err; +- } ++ /* sleep 100ms to wait for firmware stopping I/O */ ++ msleep(100); + + cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +index 278dc13f3dae..9fcf2e5e0003 100644 +--- 
a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +@@ -52,7 +52,7 @@ + + #define MSG_NOT_RESP 0xFFFF + +-#define MGMT_MSG_TIMEOUT 1000 ++#define MGMT_MSG_TIMEOUT 5000 + + #define mgmt_to_pfhwdev(pf_mgmt) \ + container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) +@@ -276,7 +276,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, + goto unlock_sync_msg; + } + +- if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) { ++ if (!wait_for_completion_timeout(recv_done, ++ msecs_to_jiffies(MGMT_MSG_TIMEOUT))) { + dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); + err = -ETIMEDOUT; + goto unlock_sync_msg; +diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h +index cfa970417f81..fe4a4315d20d 100644 +--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h ++++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h +@@ -2065,7 +2065,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); + if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \ + (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\ + if ((mask & VXGE_DEBUG_MASK) == mask) \ +- printk(fmt "\n", __VA_ARGS__); \ ++ printk(fmt "\n", ##__VA_ARGS__); \ + } while (0) + #else + #define vxge_debug_ll(level, mask, fmt, ...) +diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h +index 3a79d93b8445..5b535aa10d23 100644 +--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h ++++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h +@@ -454,49 +454,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); + + #if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK) + #define vxge_debug_ll_config(level, fmt, ...) 
\ +- vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_ll_config(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) + #define vxge_debug_init(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_init(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK) + #define vxge_debug_tx(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_tx(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK) + #define vxge_debug_rx(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_rx(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK) + #define vxge_debug_mem(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_mem(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK) + #define vxge_debug_entryexit(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_entryexit(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK) + #define vxge_debug_intr(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_intr(level, fmt, ...) 
+ #endif +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +index 07f9067affc6..cda5b0a9e948 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +@@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d + + ahw->reset.seq_error = 0; + ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); +- if (p_dev->ahw->reset.buff == NULL) ++ if (ahw->reset.buff == NULL) + return -ENOMEM; + + p_buff = p_dev->ahw->reset.buff; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +index fc1fa0f9f338..57694eada995 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +@@ -155,6 +155,8 @@ static int sun7i_gmac_probe(struct platform_device *pdev) + plat_dat->init = sun7i_gmac_init; + plat_dat->exit = sun7i_gmac_exit; + plat_dat->fix_mac_speed = sun7i_fix_speed; ++ plat_dat->tx_fifo_size = 4096; ++ plat_dat->rx_fifo_size = 16384; + + ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv); + if (ret) +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c +index 72ad84fde5c1..8e084670c3c2 100644 +--- a/drivers/net/wireless/ath/ath9k/main.c ++++ b/drivers/net/wireless/ath/ath9k/main.c +@@ -1456,6 +1456,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) + ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef); + } + ++ if (changed & IEEE80211_CONF_CHANGE_POWER) ++ ath9k_set_txpower(sc, NULL); ++ + mutex_unlock(&sc->mutex); + ath9k_ps_restore(sc); + +diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c +index c374ed311520..58784e77e215 100644 +--- a/drivers/net/wireless/ath/wil6210/cfg80211.c ++++ b/drivers/net/wireless/ath/wil6210/cfg80211.c +@@ -1735,9 
+1735,12 @@ static int wil_cfg80211_suspend(struct wiphy *wiphy, + + wil_dbg_pm(wil, "suspending\n"); + +- wil_p2p_stop_discovery(wil); +- ++ mutex_lock(&wil->mutex); ++ mutex_lock(&wil->p2p_wdev_mutex); ++ wil_p2p_stop_radio_operations(wil); + wil_abort_scan(wil, true); ++ mutex_unlock(&wil->p2p_wdev_mutex); ++ mutex_unlock(&wil->mutex); + + out: + return rc; +diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c +index 6db00c167d2e..3a98f75c5d7e 100644 +--- a/drivers/net/wireless/ath/wil6210/debugfs.c ++++ b/drivers/net/wireless/ath/wil6210/debugfs.c +@@ -1093,7 +1093,7 @@ static const struct file_operations fops_ssid = { + }; + + /*---------temp------------*/ +-static void print_temp(struct seq_file *s, const char *prefix, u32 t) ++static void print_temp(struct seq_file *s, const char *prefix, s32 t) + { + switch (t) { + case 0: +@@ -1101,7 +1101,8 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t) + seq_printf(s, "%s N/A\n", prefix); + break; + default: +- seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000); ++ seq_printf(s, "%s %s%d.%03d\n", prefix, (t < 0 ? 
"-" : ""), ++ abs(t / 1000), abs(t % 1000)); + break; + } + } +@@ -1109,7 +1110,7 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t) + static int wil_temp_debugfs_show(struct seq_file *s, void *data) + { + struct wil6210_priv *wil = s->private; +- u32 t_m, t_r; ++ s32 t_m, t_r; + int rc = wmi_get_temperature(wil, &t_m, &t_r); + + if (rc) { +diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c +index e01acac88825..7d090150187c 100644 +--- a/drivers/net/wireless/ath/wil6210/fw_inc.c ++++ b/drivers/net/wireless/ath/wil6210/fw_inc.c +@@ -26,14 +26,17 @@ + prefix_type, rowsize, \ + groupsize, buf, len, ascii) + +-#define FW_ADDR_CHECK(ioaddr, val, msg) do { \ +- ioaddr = wmi_buffer(wil, val); \ +- if (!ioaddr) { \ +- wil_err_fw(wil, "bad " msg ": 0x%08x\n", \ +- le32_to_cpu(val)); \ +- return -EINVAL; \ +- } \ +- } while (0) ++static bool wil_fw_addr_check(struct wil6210_priv *wil, ++ void __iomem **ioaddr, __le32 val, ++ u32 size, const char *msg) ++{ ++ *ioaddr = wmi_buffer_block(wil, val, size); ++ if (!(*ioaddr)) { ++ wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val)); ++ return false; ++ } ++ return true; ++} + + /** + * wil_fw_verify - verify firmware file validity +@@ -165,7 +168,8 @@ static int fw_handle_data(struct wil6210_priv *wil, const void *data, + return -EINVAL; + } + +- FW_ADDR_CHECK(dst, d->addr, "address"); ++ if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address")) ++ return -EINVAL; + wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr), + s); + wil_memcpy_toio_32(dst, d->data, s); +@@ -197,7 +201,8 @@ static int fw_handle_fill(struct wil6210_priv *wil, const void *data, + return -EINVAL; + } + +- FW_ADDR_CHECK(dst, d->addr, "address"); ++ if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address")) ++ return -EINVAL; + + v = le32_to_cpu(d->value); + wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n", +@@ -253,7 +258,8 @@ static int 
fw_handle_direct_write(struct wil6210_priv *wil, const void *data, + u32 v = le32_to_cpu(block[i].value); + u32 x, y; + +- FW_ADDR_CHECK(dst, block[i].addr, "address"); ++ if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address")) ++ return -EINVAL; + + x = readl(dst); + y = (x & m) | (v & ~m); +@@ -319,10 +325,15 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data, + wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n", + n, gw_cmd); + +- FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr"); +- FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr"); +- FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr"); +- FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address"); ++ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0, ++ "gateway_addr_addr") || ++ !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0, ++ "gateway_value_addr") || ++ !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0, ++ "gateway_cmd_addr") || ++ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0, ++ "gateway_ctrl_address")) ++ return -EINVAL; + + wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x" + " cmd 0x%08x ctl 0x%08x\n", +@@ -378,12 +389,19 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data, + wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n", + n, gw_cmd); + +- FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr"); ++ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0, ++ "gateway_addr_addr")) ++ return -EINVAL; + for (k = 0; k < ARRAY_SIZE(block->value); k++) +- FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k], +- "gateway_value_addr"); +- FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr"); +- FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address"); ++ if (!wil_fw_addr_check(wil, &gwa_val[k], ++ d->gateway_value_addr[k], ++ 0, "gateway_value_addr")) ++ return 
-EINVAL; ++ if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0, ++ "gateway_cmd_addr") || ++ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0, ++ "gateway_ctrl_address")) ++ return -EINVAL; + + wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n", + le32_to_cpu(d->gateway_addr_addr), +diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c +index 59def4f3fcf3..5cf341702dc1 100644 +--- a/drivers/net/wireless/ath/wil6210/interrupt.c ++++ b/drivers/net/wireless/ath/wil6210/interrupt.c +@@ -358,6 +358,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil) + wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx); + } + ++static bool wil_validate_mbox_regs(struct wil6210_priv *wil) ++{ ++ size_t min_size = sizeof(struct wil6210_mbox_hdr) + ++ sizeof(struct wmi_cmd_hdr); ++ ++ if (wil->mbox_ctl.rx.entry_size < min_size) { ++ wil_err(wil, "rx mbox entry too small (%d)\n", ++ wil->mbox_ctl.rx.entry_size); ++ return false; ++ } ++ if (wil->mbox_ctl.tx.entry_size < min_size) { ++ wil_err(wil, "tx mbox entry too small (%d)\n", ++ wil->mbox_ctl.tx.entry_size); ++ return false; ++ } ++ ++ return true; ++} ++ + static irqreturn_t wil6210_irq_misc(int irq, void *cookie) + { + struct wil6210_priv *wil = cookie; +@@ -393,7 +412,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) + if (isr & ISR_MISC_FW_READY) { + wil_dbg_irq(wil, "IRQ: FW ready\n"); + wil_cache_mbox_regs(wil); +- set_bit(wil_status_mbox_ready, wil->status); ++ if (wil_validate_mbox_regs(wil)) ++ set_bit(wil_status_mbox_ready, wil->status); + /** + * Actual FW ready indicated by the + * WMI_FW_READY_EVENTID +diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c +index bac829aa950d..a3dc42841526 100644 +--- a/drivers/net/wireless/ath/wil6210/main.c ++++ b/drivers/net/wireless/ath/wil6210/main.c +@@ -871,7 +871,7 @@ static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err) + 
+ static int wil_wait_for_fw_ready(struct wil6210_priv *wil) + { +- ulong to = msecs_to_jiffies(1000); ++ ulong to = msecs_to_jiffies(2000); + ulong left = wait_for_completion_timeout(&wil->wmi_ready, to); + + if (0 == left) { +diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c +index 6a3ab4bf916d..b2c3cf6db881 100644 +--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c ++++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c +@@ -393,6 +393,9 @@ static int wil6210_suspend(struct device *dev, bool is_runtime) + int rc = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct wil6210_priv *wil = pci_get_drvdata(pdev); ++ struct net_device *ndev = wil_to_ndev(wil); ++ bool keep_radio_on = ndev->flags & IFF_UP && ++ wil->keep_radio_on_during_sleep; + + wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); + +@@ -400,14 +403,14 @@ static int wil6210_suspend(struct device *dev, bool is_runtime) + if (rc) + goto out; + +- rc = wil_suspend(wil, is_runtime); ++ rc = wil_suspend(wil, is_runtime, keep_radio_on); + if (!rc) { + wil->suspend_stats.successful_suspends++; + +- /* If platform device supports keep_radio_on_during_sleep +- * it will control PCIe master ++ /* In case radio stays on, platform device will control ++ * PCIe master + */ +- if (!wil->keep_radio_on_during_sleep) ++ if (!keep_radio_on) + /* disable bus mastering */ + pci_clear_master(pdev); + } +@@ -420,20 +423,23 @@ static int wil6210_resume(struct device *dev, bool is_runtime) + int rc = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct wil6210_priv *wil = pci_get_drvdata(pdev); ++ struct net_device *ndev = wil_to_ndev(wil); ++ bool keep_radio_on = ndev->flags & IFF_UP && ++ wil->keep_radio_on_during_sleep; + + wil_dbg_pm(wil, "resume: %s\n", is_runtime ? 
"runtime" : "system"); + +- /* If platform device supports keep_radio_on_during_sleep it will +- * control PCIe master ++ /* In case radio stays on, platform device will control ++ * PCIe master + */ +- if (!wil->keep_radio_on_during_sleep) ++ if (!keep_radio_on) + /* allow master */ + pci_set_master(pdev); +- rc = wil_resume(wil, is_runtime); ++ rc = wil_resume(wil, is_runtime, keep_radio_on); + if (rc) { + wil_err(wil, "device failed to resume (%d)\n", rc); + wil->suspend_stats.failed_resumes++; +- if (!wil->keep_radio_on_during_sleep) ++ if (!keep_radio_on) + pci_clear_master(pdev); + } else { + wil->suspend_stats.successful_resumes++; +diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c +index 8f5d1b447aaa..8378742ecd49 100644 +--- a/drivers/net/wireless/ath/wil6210/pm.c ++++ b/drivers/net/wireless/ath/wil6210/pm.c +@@ -279,12 +279,9 @@ static int wil_resume_radio_off(struct wil6210_priv *wil) + return rc; + } + +-int wil_suspend(struct wil6210_priv *wil, bool is_runtime) ++int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on) + { + int rc = 0; +- struct net_device *ndev = wil_to_ndev(wil); +- bool keep_radio_on = ndev->flags & IFF_UP && +- wil->keep_radio_on_during_sleep; + + wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); + +@@ -307,12 +304,9 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) + return rc; + } + +-int wil_resume(struct wil6210_priv *wil, bool is_runtime) ++int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on) + { + int rc = 0; +- struct net_device *ndev = wil_to_ndev(wil); +- bool keep_radio_on = ndev->flags & IFF_UP && +- wil->keep_radio_on_during_sleep; + unsigned long long suspend_time_usec = 0; + + wil_dbg_pm(wil, "resume: %s\n", is_runtime ? 
"runtime" : "system"); +diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c +index 16750056b8b5..b483c4266097 100644 +--- a/drivers/net/wireless/ath/wil6210/txrx.c ++++ b/drivers/net/wireless/ath/wil6210/txrx.c +@@ -636,8 +636,8 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) + v->swtail = next_tail) { + rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); + if (unlikely(rc)) { +- wil_err(wil, "Error %d in wil_rx_refill[%d]\n", +- rc, v->swtail); ++ wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n", ++ rc, v->swtail); + break; + } + } +diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h +index 315ec8b59662..0bfd51adcc81 100644 +--- a/drivers/net/wireless/ath/wil6210/wil6210.h ++++ b/drivers/net/wireless/ath/wil6210/wil6210.h +@@ -865,6 +865,7 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r); + int wil_find_cid(struct wil6210_priv *wil, const u8 *mac); + void wil_set_ethtoolops(struct net_device *ndev); + ++void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size); + void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr); + void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr); + int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, +@@ -1000,8 +1001,8 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name, + bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name); + + int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime); +-int wil_suspend(struct wil6210_priv *wil, bool is_runtime); +-int wil_resume(struct wil6210_priv *wil, bool is_runtime); ++int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on); ++int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on); + bool wil_is_wmi_idle(struct wil6210_priv *wil); + int wmi_resume(struct wil6210_priv *wil); + int wmi_suspend(struct wil6210_priv *wil); +diff --git 
a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c +index 798516f42f2f..22bfa10ea827 100644 +--- a/drivers/net/wireless/ath/wil6210/wmi.c ++++ b/drivers/net/wireless/ath/wil6210/wmi.c +@@ -140,13 +140,15 @@ static u32 wmi_addr_remap(u32 x) + /** + * Check address validity for WMI buffer; remap if needed + * @ptr - internal (linker) fw/ucode address ++ * @size - if non zero, validate the block does not ++ * exceed the device memory (bar) + * + * Valid buffer should be DWORD aligned + * + * return address for accessing buffer from the host; + * if buffer is not valid, return NULL. + */ +-void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) ++void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size) + { + u32 off; + u32 ptr = le32_to_cpu(ptr_); +@@ -161,10 +163,17 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) + off = HOSTADDR(ptr); + if (off > wil->bar_size - 4) + return NULL; ++ if (size && ((off + size > wil->bar_size) || (off + size < off))) ++ return NULL; + + return wil->csr + off; + } + ++void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) ++{ ++ return wmi_buffer_block(wil, ptr_, 0); ++} ++ + /** + * Check address validity + */ +@@ -222,7 +231,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) + uint retry; + int rc = 0; + +- if (sizeof(cmd) + len > r->entry_size) { ++ if (len > r->entry_size - sizeof(cmd)) { + wil_err(wil, "WMI size too large: %d bytes, max is %d\n", + (int)(sizeof(cmd) + len), r->entry_size); + return -ERANGE; +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c +index 27224dc26413..a8ec5b2c5abb 100644 +--- a/drivers/net/wireless/mac80211_hwsim.c ++++ b/drivers/net/wireless/mac80211_hwsim.c +@@ -3134,9 +3134,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) + param.no_vif = true; + + if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { +- hwname = 
kasprintf(GFP_KERNEL, "%.*s", +- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), +- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); ++ hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]), ++ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), ++ GFP_KERNEL); + if (!hwname) + return -ENOMEM; + param.hwname = hwname; +@@ -3175,9 +3175,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) + if (info->attrs[HWSIM_ATTR_RADIO_ID]) { + idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); + } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { +- hwname = kasprintf(GFP_KERNEL, "%.*s", +- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), +- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); ++ hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]), ++ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), ++ GFP_KERNEL); + if (!hwname) + return -ENOMEM; + } else +diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c +index 2f1b54fab399..83e18b367944 100644 +--- a/drivers/nvdimm/bus.c ++++ b/drivers/nvdimm/bus.c +@@ -951,8 +951,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, + return -EFAULT; + } + +- if (!desc || (desc->out_num + desc->in_num == 0) || +- !test_bit(cmd, &cmd_mask)) ++ if (!desc || ++ (desc->out_num + desc->in_num == 0) || ++ cmd > ND_CMD_CALL || ++ !test_bit(cmd, &cmd_mask)) + return -ENOTTY; + + /* fail write commands (when read-only) */ +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c +index 9e4d2ecf736d..058d542647dd 100644 +--- a/drivers/nvme/host/fc.c ++++ b/drivers/nvme/host/fc.c +@@ -337,8 +337,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, + !template->ls_req || !template->fcp_io || + !template->ls_abort || !template->fcp_abort || + !template->max_hw_queues || !template->max_sgl_segments || +- !template->max_dif_sgl_segments || !template->dma_boundary || +- !template->module) { ++ !template->max_dif_sgl_segments || !template->dma_boundary) { 
+ ret = -EINVAL; + goto out_reghost_failed; + } +@@ -1763,7 +1762,6 @@ nvme_fc_ctrl_free(struct kref *ref) + { + struct nvme_fc_ctrl *ctrl = + container_of(ref, struct nvme_fc_ctrl, ref); +- struct nvme_fc_lport *lport = ctrl->lport; + unsigned long flags; + + if (ctrl->ctrl.tagset) { +@@ -1789,7 +1787,6 @@ nvme_fc_ctrl_free(struct kref *ref) + if (ctrl->ctrl.opts) + nvmf_free_options(ctrl->ctrl.opts); + kfree(ctrl); +- module_put(lport->ops->module); + } + + static void +@@ -2768,15 +2765,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, + goto out_fail; + } + +- if (!try_module_get(lport->ops->module)) { +- ret = -EUNATCH; +- goto out_free_ctrl; +- } +- + idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); + if (idx < 0) { + ret = -ENOSPC; +- goto out_mod_put; ++ goto out_free_ctrl; + } + + ctrl->ctrl.opts = opts; +@@ -2923,8 +2915,6 @@ out_free_queues: + out_free_ida: + put_device(ctrl->dev); + ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); +-out_mod_put: +- module_put(lport->ops->module); + out_free_ctrl: + kfree(ctrl); + out_fail: +diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c +index b8fe8702065b..096523d8dd42 100644 +--- a/drivers/nvme/target/fcloop.c ++++ b/drivers/nvme/target/fcloop.c +@@ -693,7 +693,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) + #define FCLOOP_DMABOUND_4G 0xFFFFFFFF + + static struct nvme_fc_port_template fctemplate = { +- .module = THIS_MODULE, + .localport_delete = fcloop_localport_delete, + .remoteport_delete = fcloop_remoteport_delete, + .create_queue = fcloop_create_queue, +diff --git a/drivers/of/base.c b/drivers/of/base.c +index 41b254be0295..c0281be8e061 100644 +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -169,9 +169,6 @@ int __of_attach_node_sysfs(struct device_node *np) + struct property *pp; + int rc; + +- if (!IS_ENABLED(CONFIG_SYSFS)) +- return 0; +- + if (!of_kset) + return 0; + +diff --git a/drivers/of/unittest.c 
b/drivers/of/unittest.c +index 4bf6a9db6ac0..55c98f119df2 100644 +--- a/drivers/of/unittest.c ++++ b/drivers/of/unittest.c +@@ -887,10 +887,13 @@ static void __init of_unittest_platform_populate(void) + + of_platform_populate(np, match, NULL, &test_bus->dev); + for_each_child_of_node(np, child) { +- for_each_child_of_node(child, grandchild) +- unittest(of_find_device_by_node(grandchild), ++ for_each_child_of_node(child, grandchild) { ++ pdev = of_find_device_by_node(grandchild); ++ unittest(pdev, + "Could not create device for node '%s'\n", + grandchild->name); ++ of_dev_put(pdev); ++ } + } + + of_platform_depopulate(&test_bus->dev); +diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c +index 83b7d5d3fc3e..60fbfe92e0ef 100644 +--- a/drivers/pci/endpoint/pci-epc-mem.c ++++ b/drivers/pci/endpoint/pci-epc-mem.c +@@ -90,6 +90,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size, + mem->page_size = page_size; + mem->pages = pages; + mem->size = size; ++ mutex_init(&mem->lock); + + epc->mem = mem; + +@@ -133,7 +134,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, + phys_addr_t *phys_addr, size_t size) + { + int pageno; +- void __iomem *virt_addr; ++ void __iomem *virt_addr = NULL; + struct pci_epc_mem *mem = epc->mem; + unsigned int page_shift = ilog2(mem->page_size); + int order; +@@ -141,15 +142,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, + size = ALIGN(size, mem->page_size); + order = pci_epc_mem_get_order(mem, size); + ++ mutex_lock(&mem->lock); + pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); + if (pageno < 0) +- return NULL; ++ goto ret; + + *phys_addr = mem->phys_base + (pageno << page_shift); + virt_addr = ioremap(*phys_addr, size); + if (!virt_addr) + bitmap_release_region(mem->bitmap, pageno, order); + ++ret: ++ mutex_unlock(&mem->lock); + return virt_addr; + } + EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); +@@ -175,7 +179,9 @@ void 
pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, + pageno = (phys_addr - mem->phys_base) >> page_shift; + size = ALIGN(size, mem->page_size); + order = pci_epc_mem_get_order(mem, size); ++ mutex_lock(&mem->lock); + bitmap_release_region(mem->bitmap, pageno, order); ++ mutex_unlock(&mem->lock); + } + EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr); + +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c +index 6b4e82a4b64e..6f58767b5190 100644 +--- a/drivers/pci/pcie/aspm.c ++++ b/drivers/pci/pcie/aspm.c +@@ -693,9 +693,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) + + /* Enable what we need to enable */ + pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, +- PCI_L1SS_CAP_L1_PM_SS, val); ++ PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, +- PCI_L1SS_CAP_L1_PM_SS, val); ++ PCI_L1SS_CTL1_L1SS_MASK, val); + } + + static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) +diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c +index bf229b442e72..6ef0d4b756f0 100644 +--- a/drivers/pci/switch/switchtec.c ++++ b/drivers/pci/switch/switchtec.c +@@ -412,7 +412,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser) + kref_get(&stuser->kref); + stuser->read_len = sizeof(stuser->data); + stuser_set_state(stuser, MRPC_QUEUED); +- init_completion(&stuser->comp); ++ reinit_completion(&stuser->comp); + list_add_tail(&stuser->list, &stdev->mrpc_queue); + + mrpc_cmd_submit(stdev); +diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c +index 51f0961ecf3e..a7d8cadf172c 100644 +--- a/drivers/power/supply/bq27xxx_battery.c ++++ b/drivers/power/supply/bq27xxx_battery.c +@@ -1842,7 +1842,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) + + di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg); + if (IS_ERR(di->bat)) { +- dev_err(di->dev, "failed to register battery\n"); ++ if 
(PTR_ERR(di->bat) == -EPROBE_DEFER) ++ dev_dbg(di->dev, "failed to register battery, deferring probe\n"); ++ else ++ dev_err(di->dev, "failed to register battery\n"); + return PTR_ERR(di->bat); + } + +diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c +index e1e5dfcb16f3..259fd58812ae 100644 +--- a/drivers/pwm/pwm-pca9685.c ++++ b/drivers/pwm/pwm-pca9685.c +@@ -31,6 +31,7 @@ + #include <linux/slab.h> + #include <linux/delay.h> + #include <linux/pm_runtime.h> ++#include <linux/bitmap.h> + + /* + * Because the PCA9685 has only one prescaler per chip, changing the period of +@@ -85,6 +86,7 @@ struct pca9685 { + #if IS_ENABLED(CONFIG_GPIOLIB) + struct mutex lock; + struct gpio_chip gpio; ++ DECLARE_BITMAP(pwms_inuse, PCA9685_MAXCHAN + 1); + #endif + }; + +@@ -94,51 +96,51 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip) + } + + #if IS_ENABLED(CONFIG_GPIOLIB) +-static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset) ++static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx) + { +- struct pca9685 *pca = gpiochip_get_data(gpio); +- struct pwm_device *pwm; ++ bool is_inuse; + + mutex_lock(&pca->lock); +- +- pwm = &pca->chip.pwms[offset]; +- +- if (pwm->flags & (PWMF_REQUESTED | PWMF_EXPORTED)) { +- mutex_unlock(&pca->lock); +- return -EBUSY; ++ if (pwm_idx >= PCA9685_MAXCHAN) { ++ /* ++ * "all LEDs" channel: ++ * pretend already in use if any of the PWMs are requested ++ */ ++ if (!bitmap_empty(pca->pwms_inuse, PCA9685_MAXCHAN)) { ++ is_inuse = true; ++ goto out; ++ } ++ } else { ++ /* ++ * regular channel: ++ * pretend already in use if the "all LEDs" channel is requested ++ */ ++ if (test_bit(PCA9685_MAXCHAN, pca->pwms_inuse)) { ++ is_inuse = true; ++ goto out; ++ } + } +- +- pwm_set_chip_data(pwm, (void *)1); +- ++ is_inuse = test_and_set_bit(pwm_idx, pca->pwms_inuse); ++out: + mutex_unlock(&pca->lock); +- pm_runtime_get_sync(pca->chip.dev); +- return 0; ++ return is_inuse; + } + +-static bool 
pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm) ++static void pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx) + { +- bool is_gpio = false; +- + mutex_lock(&pca->lock); ++ clear_bit(pwm_idx, pca->pwms_inuse); ++ mutex_unlock(&pca->lock); ++} + +- if (pwm->hwpwm >= PCA9685_MAXCHAN) { +- unsigned int i; +- +- /* +- * Check if any of the GPIOs are requested and in that case +- * prevent using the "all LEDs" channel. +- */ +- for (i = 0; i < pca->gpio.ngpio; i++) +- if (gpiochip_is_requested(&pca->gpio, i)) { +- is_gpio = true; +- break; +- } +- } else if (pwm_get_chip_data(pwm)) { +- is_gpio = true; +- } ++static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset) ++{ ++ struct pca9685 *pca = gpiochip_get_data(gpio); + +- mutex_unlock(&pca->lock); +- return is_gpio; ++ if (pca9685_pwm_test_and_set_inuse(pca, offset)) ++ return -EBUSY; ++ pm_runtime_get_sync(pca->chip.dev); ++ return 0; + } + + static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset) +@@ -173,6 +175,7 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset) + + pca9685_pwm_gpio_set(gpio, offset, 0); + pm_runtime_put(pca->chip.dev); ++ pca9685_pwm_clear_inuse(pca, offset); + } + + static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip, +@@ -224,12 +227,17 @@ static int pca9685_pwm_gpio_probe(struct pca9685 *pca) + return devm_gpiochip_add_data(dev, &pca->gpio, pca); + } + #else +-static inline bool pca9685_pwm_is_gpio(struct pca9685 *pca, +- struct pwm_device *pwm) ++static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, ++ int pwm_idx) + { + return false; + } + ++static inline void ++pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx) ++{ ++} ++ + static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca) + { + return 0; +@@ -413,7 +421,7 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) + { + struct pca9685 *pca = to_pca(chip); + +- if 
(pca9685_pwm_is_gpio(pca, pwm)) ++ if (pca9685_pwm_test_and_set_inuse(pca, pwm->hwpwm)) + return -EBUSY; + pm_runtime_get_sync(chip->dev); + +@@ -422,8 +430,11 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) + + static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) + { ++ struct pca9685 *pca = to_pca(chip); ++ + pca9685_pwm_disable(chip, pwm); + pm_runtime_put(chip->dev); ++ pca9685_pwm_clear_inuse(pca, pwm->hwpwm); + } + + static const struct pwm_ops pca9685_pwm_ops = { +diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c +index 114481c9fba1..7802663efe33 100644 +--- a/drivers/rpmsg/qcom_glink_native.c ++++ b/drivers/rpmsg/qcom_glink_native.c +@@ -221,6 +221,7 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, + /* Setup glink internal glink_channel data */ + spin_lock_init(&channel->recv_lock); + spin_lock_init(&channel->intent_lock); ++ mutex_init(&channel->intent_req_lock); + + channel->glink = glink; + channel->name = kstrdup(name, GFP_KERNEL); +diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c +index 2b54e71886d9..ed616b0bc563 100644 +--- a/drivers/rpmsg/qcom_glink_smem.c ++++ b/drivers/rpmsg/qcom_glink_smem.c +@@ -181,6 +181,9 @@ static void glink_smem_tx_write(struct qcom_glink_pipe *glink_pipe, + if (head >= pipe->native.length) + head -= pipe->native.length; + ++ /* Ensure ordering of fifo and head update */ ++ wmb(); ++ + *pipe->head = cpu_to_le32(head); + } + +@@ -212,6 +215,7 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent, + ret = device_register(dev); + if (ret) { + pr_err("failed to register glink edge\n"); ++ put_device(dev); + return ERR_PTR(ret); + } + +@@ -294,7 +298,7 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent, + return glink; + + err_put_dev: +- put_device(dev); ++ device_unregister(dev); + + return ERR_PTR(ret); + } +diff --git 
a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c +index 7d3e5168fcef..efbbde7379f1 100644 +--- a/drivers/rtc/rtc-88pm860x.c ++++ b/drivers/rtc/rtc-88pm860x.c +@@ -341,6 +341,10 @@ static int pm860x_rtc_probe(struct platform_device *pdev) + info->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, info); + ++ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev); ++ if (IS_ERR(info->rtc_dev)) ++ return PTR_ERR(info->rtc_dev); ++ + ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, + rtc_update_handler, IRQF_ONESHOT, "rtc", + info); +@@ -382,13 +386,11 @@ static int pm860x_rtc_probe(struct platform_device *pdev) + } + } + +- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc", +- &pm860x_rtc_ops, THIS_MODULE); +- ret = PTR_ERR(info->rtc_dev); +- if (IS_ERR(info->rtc_dev)) { +- dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); ++ info->rtc_dev->ops = &pm860x_rtc_ops; ++ ++ ret = rtc_register_device(info->rtc_dev); ++ if (ret) + return ret; +- } + + /* + * enable internal XO instead of internal 3.25MHz clock since it can +diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c +index ae6506a8b4f5..b25a2ba5ac48 100644 +--- a/drivers/rtc/rtc-omap.c ++++ b/drivers/rtc/rtc-omap.c +@@ -559,9 +559,7 @@ static const struct pinctrl_ops rtc_pinctrl_ops = { + .dt_free_map = pinconf_generic_dt_free_map, + }; + +-enum rtc_pin_config_param { +- PIN_CONFIG_ACTIVE_HIGH = PIN_CONFIG_END + 1, +-}; ++#define PIN_CONFIG_ACTIVE_HIGH (PIN_CONFIG_END + 1) + + static const struct pinconf_generic_params rtc_params[] = { + {"ti,active-high", PIN_CONFIG_ACTIVE_HIGH, 0}, +diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c +index a1b4b0ed1f19..3b619b7b2c53 100644 +--- a/drivers/rtc/rtc-pm8xxx.c ++++ b/drivers/rtc/rtc-pm8xxx.c +@@ -74,16 +74,18 @@ struct pm8xxx_rtc { + /* + * Steps to write the RTC registers. + * 1. Disable alarm if enabled. +- * 2. Write 0x00 to LSB. +- * 3. Write Byte[1], Byte[2], Byte[3] then Byte[0]. +- * 4. 
Enable alarm if disabled in step 1. ++ * 2. Disable rtc if enabled. ++ * 3. Write 0x00 to LSB. ++ * 4. Write Byte[1], Byte[2], Byte[3] then Byte[0]. ++ * 5. Enable rtc if disabled in step 2. ++ * 6. Enable alarm if disabled in step 1. + */ + static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm) + { + int rc, i; + unsigned long secs, irq_flags; +- u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0; +- unsigned int ctrl_reg; ++ u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, rtc_disabled = 0; ++ unsigned int ctrl_reg, rtc_ctrl_reg; + struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); + const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; + +@@ -92,23 +94,38 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm) + + rtc_tm_to_time(tm, &secs); + ++ dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs); ++ + for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) { + value[i] = secs & 0xFF; + secs >>= 8; + } + +- dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs); +- + spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); + +- rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg); ++ rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg); + if (rc) + goto rtc_rw_fail; + + if (ctrl_reg & regs->alarm_en) { + alarm_enabled = 1; + ctrl_reg &= ~regs->alarm_en; +- rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg); ++ rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg); ++ if (rc) { ++ dev_err(dev, "Write to RTC Alarm control register failed\n"); ++ goto rtc_rw_fail; ++ } ++ } ++ ++ /* Disable RTC H/w before writing on RTC register */ ++ rc = regmap_read(rtc_dd->regmap, regs->ctrl, &rtc_ctrl_reg); ++ if (rc) ++ goto rtc_rw_fail; ++ ++ if (rtc_ctrl_reg & PM8xxx_RTC_ENABLE) { ++ rtc_disabled = 1; ++ rtc_ctrl_reg &= ~PM8xxx_RTC_ENABLE; ++ rc = regmap_write(rtc_dd->regmap, regs->ctrl, rtc_ctrl_reg); + if (rc) { + dev_err(dev, "Write to RTC control register failed\n"); + goto rtc_rw_fail; +@@ -137,11 +154,21 @@ 
static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm) + goto rtc_rw_fail; + } + ++ /* Enable RTC H/w after writing on RTC register */ ++ if (rtc_disabled) { ++ rtc_ctrl_reg |= PM8xxx_RTC_ENABLE; ++ rc = regmap_write(rtc_dd->regmap, regs->ctrl, rtc_ctrl_reg); ++ if (rc) { ++ dev_err(dev, "Write to RTC control register failed\n"); ++ goto rtc_rw_fail; ++ } ++ } ++ + if (alarm_enabled) { + ctrl_reg |= regs->alarm_en; +- rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg); ++ rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg); + if (rc) { +- dev_err(dev, "Write to RTC control register failed\n"); ++ dev_err(dev, "Write to RTC Alarm control register failed\n"); + goto rtc_rw_fail; + } + } +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c +index 8f90e4cea254..168f7c84edba 100644 +--- a/drivers/s390/scsi/zfcp_erp.c ++++ b/drivers/s390/scsi/zfcp_erp.c +@@ -747,7 +747,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) + adapter->peer_d_id); + if (IS_ERR(port)) /* error or port already attached */ + return; +- _zfcp_erp_port_reopen(port, 0, "ereptp1"); ++ zfcp_erp_port_reopen(port, 0, "ereptp1"); + } + + static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) +diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c +index af937b91765e..fcf4b4175d77 100644 +--- a/drivers/scsi/lpfc/lpfc_nvme.c ++++ b/drivers/scsi/lpfc/lpfc_nvme.c +@@ -1591,8 +1591,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, + + /* Declare and initialization an instance of the FC NVME template. 
*/ + static struct nvme_fc_port_template lpfc_nvme_template = { +- .module = THIS_MODULE, +- + /* initiator-based functions */ + .localport_delete = lpfc_nvme_localport_delete, + .remoteport_delete = lpfc_nvme_remoteport_delete, +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index 9ef0c6265cd2..400c055167b0 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -8280,8 +8280,8 @@ static void scsih_remove(struct pci_dev *pdev) + + ioc->remove_host = 1; + +- mpt3sas_wait_for_commands_to_complete(ioc); +- _scsih_flush_running_cmds(ioc); ++ if (!pci_device_is_present(pdev)) ++ _scsih_flush_running_cmds(ioc); + + _scsih_fw_event_cleanup_queue(ioc); + +@@ -8354,8 +8354,8 @@ scsih_shutdown(struct pci_dev *pdev) + + ioc->remove_host = 1; + +- mpt3sas_wait_for_commands_to_complete(ioc); +- _scsih_flush_running_cmds(ioc); ++ if (!pci_device_is_present(pdev)) ++ _scsih_flush_running_cmds(ioc); + + _scsih_fw_event_cleanup_queue(ioc); + +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c +index 7dceed021236..6b33a1f24f56 100644 +--- a/drivers/scsi/qla2xxx/qla_nvme.c ++++ b/drivers/scsi/qla2xxx/qla_nvme.c +@@ -578,7 +578,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) + } + + static struct nvme_fc_port_template qla_nvme_fc_transport = { +- .module = THIS_MODULE, + .localport_delete = qla_nvme_localport_delete, + .remoteport_delete = qla_nvme_remoteport_delete, + .create_queue = qla_nvme_alloc_queue, +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 3a406b40f150..b5f589b7b43d 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -809,8 +809,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, + "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", + (int) cmnd[0], (int) hp->cmd_len)); + +- if (hp->dxfer_len >= SZ_256M) ++ if (hp->dxfer_len >= SZ_256M) { ++ sg_remove_request(sfp, srp); + return -EINVAL; ++ } + + k = 
sg_start_req(srp, cmnd); + if (k) { +diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c +index c87d770b519a..f2b8de195d8a 100644 +--- a/drivers/scsi/ufs/ufs-qcom.c ++++ b/drivers/scsi/ufs/ufs-qcom.c +@@ -1094,7 +1094,7 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) + hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; + } + +- if (host->hw_ver.major >= 0x2) { ++ if (host->hw_ver.major == 0x2) { + hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; + + if (!ufs_qcom_cap_qunipro(host)) +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index c35045324695..78d9c2c48236 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -1448,6 +1448,11 @@ start: + */ + if (ufshcd_can_hibern8_during_gating(hba) && + ufshcd_is_link_hibern8(hba)) { ++ if (async) { ++ rc = -EAGAIN; ++ hba->clk_gating.active_reqs--; ++ break; ++ } + spin_unlock_irqrestore(hba->host->host_lock, flags); + flush_work(&hba->clk_gating.ungate_work); + spin_lock_irqsave(hba->host->host_lock, flags); +@@ -5366,19 +5371,30 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) + u32 intr_status, enabled_intr_status; + irqreturn_t retval = IRQ_NONE; + struct ufs_hba *hba = __hba; ++ int retries = hba->nutrs; + + spin_lock(hba->host->host_lock); + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); +- enabled_intr_status = +- intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + +- if (intr_status) +- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); ++ /* ++ * There could be max of hba->nutrs reqs in flight and in worst case ++ * if the reqs get finished 1 by 1 after the interrupt status is ++ * read, make sure we handle them by checking the interrupt status ++ * again in a loop until we process all of the reqs before returning. 
++ */ ++ do { ++ enabled_intr_status = ++ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); ++ if (intr_status) ++ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); ++ if (enabled_intr_status) { ++ ufshcd_sl_intr(hba, enabled_intr_status); ++ retval = IRQ_HANDLED; ++ } ++ ++ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); ++ } while (intr_status && --retries); + +- if (enabled_intr_status) { +- ufshcd_sl_intr(hba, enabled_intr_status); +- retval = IRQ_HANDLED; +- } + spin_unlock(hba->host->host_lock); + return retval; + } +diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c +index 3a12123de466..0e083fe8b893 100644 +--- a/drivers/soc/imx/gpc.c ++++ b/drivers/soc/imx/gpc.c +@@ -97,8 +97,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd) + static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) + { + struct imx_pm_domain *pd = to_imx_pm_domain(genpd); +- int i, ret, sw, sw2iso; +- u32 val; ++ int i, ret; ++ u32 val, req; + + if (pd->supply) { + ret = regulator_enable(pd->supply); +@@ -117,17 +117,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) + regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS, + 0x1, 0x1); + +- /* Read ISO and ISO2SW power up delays */ +- regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val); +- sw = val & 0x3f; +- sw2iso = (val >> 8) & 0x3f; +- + /* Request GPC to power up domain */ +- val = BIT(pd->cntr_pdn_bit + 1); +- regmap_update_bits(pd->regmap, GPC_CNTR, val, val); ++ req = BIT(pd->cntr_pdn_bit + 1); ++ regmap_update_bits(pd->regmap, GPC_CNTR, req, req); + +- /* Wait ISO + ISO2SW IPG clock cycles */ +- udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz)); ++ /* Wait for the PGC to handle the request */ ++ ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req), ++ 1, 50); ++ if (ret) ++ pr_err("powerup request on domain %s timed out\n", genpd->name); ++ ++ /* Wait for reset to propagate through peripherals */ ++ 
usleep_range(5, 10); + + /* Disable reset clocks for all devices in the domain */ + for (i = 0; i < pd->num_clks; i++) +@@ -329,6 +330,7 @@ static const struct regmap_config imx_gpc_regmap_config = { + .rd_table = &access_table, + .wr_table = &access_table, + .max_register = 0x2ac, ++ .fast_io = true, + }; + + static struct generic_pm_domain *imx_gpc_onecell_domains[] = { +diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c +index 18ec52f2078a..89dd50fa404f 100644 +--- a/drivers/soc/qcom/smem.c ++++ b/drivers/soc/qcom/smem.c +@@ -646,7 +646,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, + return -EINVAL; + } + +- if (header->size != entry->size) { ++ if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) { + dev_err(smem->dev, + "Partition %d has invalid size\n", i); + return -EINVAL; +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index fb7bd422e2e1..ee49b227dc12 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4314,30 +4314,37 @@ int iscsit_close_connection( + if (!atomic_read(&sess->session_reinstatement) && + atomic_read(&sess->session_fall_back_to_erl0)) { + spin_unlock_bh(&sess->conn_lock); ++ complete_all(&sess->session_wait_comp); + iscsit_close_session(sess); + + return 0; + } else if (atomic_read(&sess->session_logout)) { + pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); + sess->session_state = TARG_SESS_STATE_FREE; +- spin_unlock_bh(&sess->conn_lock); + +- if (atomic_read(&sess->sleep_on_sess_wait_comp)) +- complete(&sess->session_wait_comp); ++ if (atomic_read(&sess->session_close)) { ++ spin_unlock_bh(&sess->conn_lock); ++ complete_all(&sess->session_wait_comp); ++ iscsit_close_session(sess); ++ } else { ++ spin_unlock_bh(&sess->conn_lock); ++ } + + return 0; + } else { + pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); + sess->session_state = TARG_SESS_STATE_FAILED; + +- if (!atomic_read(&sess->session_continuation)) { +- 
spin_unlock_bh(&sess->conn_lock); ++ if (!atomic_read(&sess->session_continuation)) + iscsit_start_time2retain_handler(sess); +- } else +- spin_unlock_bh(&sess->conn_lock); + +- if (atomic_read(&sess->sleep_on_sess_wait_comp)) +- complete(&sess->session_wait_comp); ++ if (atomic_read(&sess->session_close)) { ++ spin_unlock_bh(&sess->conn_lock); ++ complete_all(&sess->session_wait_comp); ++ iscsit_close_session(sess); ++ } else { ++ spin_unlock_bh(&sess->conn_lock); ++ } + + return 0; + } +@@ -4446,9 +4453,9 @@ static void iscsit_logout_post_handler_closesession( + complete(&conn->conn_logout_comp); + + iscsit_dec_conn_usage_count(conn); ++ atomic_set(&sess->session_close, 1); + iscsit_stop_session(sess, sleep, sleep); + iscsit_dec_session_usage_count(sess); +- iscsit_close_session(sess); + } + + static void iscsit_logout_post_handler_samecid( +@@ -4583,49 +4590,6 @@ void iscsit_fail_session(struct iscsi_session *sess) + sess->session_state = TARG_SESS_STATE_FAILED; + } + +-int iscsit_free_session(struct iscsi_session *sess) +-{ +- u16 conn_count = atomic_read(&sess->nconn); +- struct iscsi_conn *conn, *conn_tmp = NULL; +- int is_last; +- +- spin_lock_bh(&sess->conn_lock); +- atomic_set(&sess->sleep_on_sess_wait_comp, 1); +- +- list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, +- conn_list) { +- if (conn_count == 0) +- break; +- +- if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { +- is_last = 1; +- } else { +- iscsit_inc_conn_usage_count(conn_tmp); +- is_last = 0; +- } +- iscsit_inc_conn_usage_count(conn); +- +- spin_unlock_bh(&sess->conn_lock); +- iscsit_cause_connection_reinstatement(conn, 1); +- spin_lock_bh(&sess->conn_lock); +- +- iscsit_dec_conn_usage_count(conn); +- if (is_last == 0) +- iscsit_dec_conn_usage_count(conn_tmp); +- +- conn_count--; +- } +- +- if (atomic_read(&sess->nconn)) { +- spin_unlock_bh(&sess->conn_lock); +- wait_for_completion(&sess->session_wait_comp); +- } else +- spin_unlock_bh(&sess->conn_lock); +- +- 
iscsit_close_session(sess); +- return 0; +-} +- + void iscsit_stop_session( + struct iscsi_session *sess, + int session_sleep, +@@ -4636,8 +4600,6 @@ void iscsit_stop_session( + int is_last; + + spin_lock_bh(&sess->conn_lock); +- if (session_sleep) +- atomic_set(&sess->sleep_on_sess_wait_comp, 1); + + if (connection_sleep) { + list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, +@@ -4695,12 +4657,15 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) + spin_lock(&sess->conn_lock); + if (atomic_read(&sess->session_fall_back_to_erl0) || + atomic_read(&sess->session_logout) || ++ atomic_read(&sess->session_close) || + (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { + spin_unlock(&sess->conn_lock); + continue; + } ++ iscsit_inc_session_usage_count(sess); + atomic_set(&sess->session_reinstatement, 1); + atomic_set(&sess->session_fall_back_to_erl0, 1); ++ atomic_set(&sess->session_close, 1); + spin_unlock(&sess->conn_lock); + + list_move_tail(&se_sess->sess_list, &free_list); +@@ -4710,7 +4675,9 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) + list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { + sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + +- iscsit_free_session(sess); ++ list_del_init(&se_sess->sess_list); ++ iscsit_stop_session(sess, 1, 1); ++ iscsit_dec_session_usage_count(sess); + session_count++; + } + +diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h +index 42de1843aa40..f0d2cbf594c9 100644 +--- a/drivers/target/iscsi/iscsi_target.h ++++ b/drivers/target/iscsi/iscsi_target.h +@@ -43,7 +43,6 @@ extern int iscsi_target_rx_thread(void *); + extern int iscsit_close_connection(struct iscsi_conn *); + extern int iscsit_close_session(struct iscsi_session *); + extern void iscsit_fail_session(struct iscsi_session *); +-extern int iscsit_free_session(struct iscsi_session *); + extern void iscsit_stop_session(struct 
iscsi_session *, int, int); + extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int); + +diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c +index 0ebc4818e132..4191e4a8a9ed 100644 +--- a/drivers/target/iscsi/iscsi_target_configfs.c ++++ b/drivers/target/iscsi/iscsi_target_configfs.c +@@ -1503,20 +1503,23 @@ static void lio_tpg_close_session(struct se_session *se_sess) + spin_lock(&sess->conn_lock); + if (atomic_read(&sess->session_fall_back_to_erl0) || + atomic_read(&sess->session_logout) || ++ atomic_read(&sess->session_close) || + (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { + spin_unlock(&sess->conn_lock); + spin_unlock_bh(&se_tpg->session_lock); + return; + } ++ iscsit_inc_session_usage_count(sess); + atomic_set(&sess->session_reinstatement, 1); + atomic_set(&sess->session_fall_back_to_erl0, 1); ++ atomic_set(&sess->session_close, 1); + spin_unlock(&sess->conn_lock); + + iscsit_stop_time2retain_timer(sess); + spin_unlock_bh(&se_tpg->session_lock); + + iscsit_stop_session(sess, 1, 1); +- iscsit_close_session(sess); ++ iscsit_dec_session_usage_count(sess); + } + + static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg) +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index 27893d90c4ef..55df6f99e669 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -199,6 +199,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) + spin_lock(&sess_p->conn_lock); + if (atomic_read(&sess_p->session_fall_back_to_erl0) || + atomic_read(&sess_p->session_logout) || ++ atomic_read(&sess_p->session_close) || + (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { + spin_unlock(&sess_p->conn_lock); + continue; +@@ -209,6 +210,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) + (sess_p->sess_ops->SessionType == sessiontype))) { + 
atomic_set(&sess_p->session_reinstatement, 1); + atomic_set(&sess_p->session_fall_back_to_erl0, 1); ++ atomic_set(&sess_p->session_close, 1); + spin_unlock(&sess_p->conn_lock); + iscsit_inc_session_usage_count(sess_p); + iscsit_stop_time2retain_timer(sess_p); +@@ -233,7 +235,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) + if (sess->session_state == TARG_SESS_STATE_FAILED) { + spin_unlock_bh(&sess->conn_lock); + iscsit_dec_session_usage_count(sess); +- iscsit_close_session(sess); + return 0; + } + spin_unlock_bh(&sess->conn_lock); +@@ -241,7 +242,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) + iscsit_stop_session(sess, 1, 1); + iscsit_dec_session_usage_count(sess); + +- iscsit_close_session(sess); + return 0; + } + +@@ -534,6 +534,7 @@ static int iscsi_login_non_zero_tsih_s2( + sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr; + if (atomic_read(&sess_p->session_fall_back_to_erl0) || + atomic_read(&sess_p->session_logout) || ++ atomic_read(&sess_p->session_close) || + (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) + continue; + if (!memcmp(sess_p->isid, pdu->isid, 6) && +diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c +index a1c7125cb968..5a348efb91ad 100644 +--- a/drivers/tty/ehv_bytechan.c ++++ b/drivers/tty/ehv_bytechan.c +@@ -139,6 +139,21 @@ static int find_console_handle(void) + return 1; + } + ++static unsigned int local_ev_byte_channel_send(unsigned int handle, ++ unsigned int *count, ++ const char *p) ++{ ++ char buffer[EV_BYTE_CHANNEL_MAX_BYTES]; ++ unsigned int c = *count; ++ ++ if (c < sizeof(buffer)) { ++ memcpy(buffer, p, c); ++ memset(&buffer[c], 0, sizeof(buffer) - c); ++ p = buffer; ++ } ++ return ev_byte_channel_send(handle, count, p); ++} ++ + /*************************** EARLY CONSOLE DRIVER ***************************/ + + #ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC +@@ -157,7 +172,7 @@ static void byte_channel_spin_send(const char data) + + do { + count = 1; +- ret 
= ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, ++ ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, + &count, &data); + } while (ret == EV_EAGAIN); + } +@@ -224,7 +239,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s, + while (count) { + len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES); + do { +- ret = ev_byte_channel_send(handle, &len, s); ++ ret = local_ev_byte_channel_send(handle, &len, s); + } while (ret == EV_EAGAIN); + count -= len; + s += len; +@@ -404,7 +419,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc) + CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE), + EV_BYTE_CHANNEL_MAX_BYTES); + +- ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); ++ ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); + + /* 'len' is valid only if the return code is 0 or EV_EAGAIN */ + if (!ret || (ret == EV_EAGAIN)) +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 021899c58028..010201dbd029 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -867,6 +867,9 @@ static int dwc3_core_init(struct dwc3 *dwc) + if (dwc->dis_tx_ipgap_linecheck_quirk) + reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; + ++ if (dwc->parkmode_disable_ss_quirk) ++ reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; ++ + dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); + } + +@@ -1107,6 +1110,8 @@ static void dwc3_get_properties(struct dwc3 *dwc) + "snps,dis-del-phy-power-chg-quirk"); + dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, + "snps,dis-tx-ipgap-linecheck-quirk"); ++ dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, ++ "snps,parkmode-disable-ss-quirk"); + + dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, + "snps,tx_de_emphasis_quirk"); +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h +index 40bf0e0768d9..8747f9f02229 100644 +--- a/drivers/usb/dwc3/core.h ++++ b/drivers/usb/dwc3/core.h +@@ -206,6 +206,7 
@@ + #define DWC3_GCTL_DSBLCLKGTNG BIT(0) + + /* Global User Control 1 Register */ ++#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17) + #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28) + #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24) + +@@ -863,6 +864,8 @@ struct dwc3_scratchpad_array { + * change quirk. + * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate + * check during HS transmit. ++ * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed ++ * instances in park mode. + * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk + * @tx_de_emphasis: Tx de-emphasis value + * 0 - -6dB de-emphasis +@@ -1022,6 +1025,7 @@ struct dwc3 { + unsigned dis_u2_freeclk_exists_quirk:1; + unsigned dis_del_phy_power_chg_quirk:1; + unsigned dis_tx_ipgap_linecheck_quirk:1; ++ unsigned parkmode_disable_ss_quirk:1; + + unsigned tx_de_emphasis_quirk:1; + unsigned tx_de_emphasis:2; +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index 5a4cf779b269..c1f037af9702 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -850,6 +850,11 @@ static int set_config(struct usb_composite_dev *cdev, + else + power = min(power, 900U); + done: ++ if (power <= USB_SELF_POWER_VBUS_MAX_DRAW) ++ usb_gadget_set_selfpowered(gadget); ++ else ++ usb_gadget_clear_selfpowered(gadget); ++ + usb_gadget_vbus_draw(gadget, power); + if (result >= 0 && cdev->delayed_status) + result = USB_GADGET_DELAYED_STATUS; +@@ -2281,6 +2286,7 @@ void composite_suspend(struct usb_gadget *gadget) + + cdev->suspended = 1; + ++ usb_gadget_set_selfpowered(gadget); + usb_gadget_vbus_draw(gadget, 2); + } + +@@ -2309,6 +2315,9 @@ void composite_resume(struct usb_gadget *gadget) + else + maxpower = min(maxpower, 900U); + ++ if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW) ++ usb_gadget_clear_selfpowered(gadget); ++ + usb_gadget_vbus_draw(gadget, maxpower); + } + +diff --git a/drivers/usb/gadget/function/f_fs.c 
b/drivers/usb/gadget/function/f_fs.c +index 282396e8eec6..819fd77a2da4 100644 +--- a/drivers/usb/gadget/function/f_fs.c ++++ b/drivers/usb/gadget/function/f_fs.c +@@ -1037,6 +1037,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) + + ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); + if (unlikely(ret)) { ++ io_data->req = NULL; + usb_ep_free_request(ep->ep, req); + goto error_lock; + } +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c +index 83961a22bef1..07dcf687a52b 100644 +--- a/drivers/video/fbdev/core/fbmem.c ++++ b/drivers/video/fbdev/core/fbmem.c +@@ -1134,7 +1134,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, + case FBIOGET_FSCREENINFO: + if (!lock_fb_info(info)) + return -ENODEV; +- fix = info->fix; ++ memcpy(&fix, &info->fix, sizeof(fix)); + unlock_fb_info(info); + + ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0; +diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c +index 1ec9c3e0e1d8..f23a381442d3 100644 +--- a/drivers/video/fbdev/sis/init301.c ++++ b/drivers/video/fbdev/sis/init301.c +@@ -522,9 +522,7 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) + SiS_DDC2Delay(SiS_Pr, 0x4000); + } + +- } else if((SiS_Pr->SiS_IF_DEF_LVDS == 1) /* || +- (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || +- (SiS_Pr->SiS_CustomT == CUT_CLEVO1400) */ ) { /* 315 series, LVDS; Special */ ++ } else if (SiS_Pr->SiS_IF_DEF_LVDS == 1) { /* 315 series, LVDS; Special */ + + if(SiS_Pr->SiS_IF_DEF_CH70xx == 0) { + PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36); +diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c +index 72d7589072f5..92615badc173 100644 +--- a/fs/btrfs/async-thread.c ++++ b/fs/btrfs/async-thread.c +@@ -447,3 +447,11 @@ void btrfs_set_work_high_priority(struct btrfs_work *work) + { + set_bit(WORK_HIGH_PRIO_BIT, &work->flags); + } ++ ++void btrfs_flush_workqueue(struct btrfs_workqueue *wq) ++{ ++ if (wq->high) ++ 
flush_workqueue(wq->high->normal_wq); ++ ++ flush_workqueue(wq->normal->normal_wq); ++} +diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h +index fc957e00cef1..2a25aef6ef2a 100644 +--- a/fs/btrfs/async-thread.h ++++ b/fs/btrfs/async-thread.h +@@ -85,4 +85,6 @@ void btrfs_set_work_high_priority(struct btrfs_work *work); + struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work); + struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); + bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq); ++void btrfs_flush_workqueue(struct btrfs_workqueue *wq); ++ + #endif +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index 87414fc9e268..416fb50a5378 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -18,6 +18,7 @@ + */ + + #include <linux/slab.h> ++#include <linux/sched/mm.h> + #include "delayed-inode.h" + #include "disk-io.h" + #include "transaction.h" +@@ -833,11 +834,14 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans, + { + struct btrfs_fs_info *fs_info = root->fs_info; + struct extent_buffer *leaf; ++ unsigned int nofs_flag; + char *ptr; + int ret; + ++ nofs_flag = memalloc_nofs_save(); + ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key, + delayed_item->data_len); ++ memalloc_nofs_restore(nofs_flag); + if (ret < 0 && ret != -EEXIST) + return ret; + +@@ -966,6 +970,7 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_delayed_node *node) + { + struct btrfs_delayed_item *curr, *prev; ++ unsigned int nofs_flag; + int ret = 0; + + do_again: +@@ -974,7 +979,9 @@ do_again: + if (!curr) + goto delete_fail; + ++ nofs_flag = memalloc_nofs_save(); + ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); ++ memalloc_nofs_restore(nofs_flag); + if (ret < 0) + goto delete_fail; + else if (ret > 0) { +@@ -1041,6 +1048,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle 
*trans, + struct btrfs_key key; + struct btrfs_inode_item *inode_item; + struct extent_buffer *leaf; ++ unsigned int nofs_flag; + int mod; + int ret; + +@@ -1053,7 +1061,9 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, + else + mod = 1; + ++ nofs_flag = memalloc_nofs_save(); + ret = btrfs_lookup_inode(trans, root, path, &key, mod); ++ memalloc_nofs_restore(nofs_flag); + if (ret > 0) { + btrfs_release_path(path); + return -ENOENT; +@@ -1104,7 +1114,10 @@ search: + + key.type = BTRFS_INODE_EXTREF_KEY; + key.offset = -1; ++ ++ nofs_flag = memalloc_nofs_save(); + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); ++ memalloc_nofs_restore(nofs_flag); + if (ret < 0) + goto err_out; + ASSERT(ret); +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 6b4fee5c79f9..096c015b22a4 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -3725,6 +3725,19 @@ void close_ctree(struct btrfs_fs_info *fs_info) + */ + btrfs_delete_unused_bgs(fs_info); + ++ /* ++ * There might be existing delayed inode workers still running ++ * and holding an empty delayed inode item. We must wait for ++ * them to complete first because they can create a transaction. ++ * This happens when someone calls btrfs_balance_delayed_items() ++ * and then a transaction commit runs the same delayed nodes ++ * before any delayed worker has done something with the nodes. ++ * We must wait for any worker here and not at transaction ++ * commit time since that could cause a deadlock. ++ * This is a very rare case. 
++ */ ++ btrfs_flush_workqueue(fs_info->delayed_workers); ++ + ret = btrfs_commit_super(fs_info); + if (ret) + btrfs_err(fs_info, "commit super ret %d", ret); +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index d4c00edd16d2..f4397dd19583 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -538,8 +538,8 @@ static int should_ignore_root(struct btrfs_root *root) + if (!reloc_root) + return 0; + +- if (btrfs_root_last_snapshot(&reloc_root->root_item) == +- root->fs_info->running_transaction->transid - 1) ++ if (btrfs_header_generation(reloc_root->commit_root) == ++ root->fs_info->running_transaction->transid) + return 0; + /* + * if there is reloc tree and it was created in previous +@@ -1194,7 +1194,7 @@ out: + free_backref_node(cache, lower); + } + +- free_backref_node(cache, node); ++ remove_backref_node(cache, node); + return ERR_PTR(err); + } + ASSERT(!node || !node->detached); +@@ -1306,7 +1306,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root) + if (!node) + return -ENOMEM; + +- node->bytenr = root->node->start; ++ node->bytenr = root->commit_root->start; + node->data = root; + + spin_lock(&rc->reloc_root_tree.lock); +@@ -1337,10 +1337,11 @@ static void __del_reloc_root(struct btrfs_root *root) + if (rc && root->node) { + spin_lock(&rc->reloc_root_tree.lock); + rb_node = tree_search(&rc->reloc_root_tree.rb_root, +- root->node->start); ++ root->commit_root->start); + if (rb_node) { + node = rb_entry(rb_node, struct mapping_node, rb_node); + rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); ++ RB_CLEAR_NODE(&node->rb_node); + } + spin_unlock(&rc->reloc_root_tree.lock); + if (!node) +@@ -1358,7 +1359,7 @@ static void __del_reloc_root(struct btrfs_root *root) + * helper to update the 'address of tree root -> reloc tree' + * mapping + */ +-static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) ++static int __update_reloc_root(struct btrfs_root *root) + { + struct btrfs_fs_info *fs_info = 
root->fs_info; + struct rb_node *rb_node; +@@ -1367,7 +1368,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) + + spin_lock(&rc->reloc_root_tree.lock); + rb_node = tree_search(&rc->reloc_root_tree.rb_root, +- root->node->start); ++ root->commit_root->start); + if (rb_node) { + node = rb_entry(rb_node, struct mapping_node, rb_node); + rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); +@@ -1379,7 +1380,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) + BUG_ON((struct btrfs_root *)node->data != root); + + spin_lock(&rc->reloc_root_tree.lock); +- node->bytenr = new_bytenr; ++ node->bytenr = root->node->start; + rb_node = tree_insert(&rc->reloc_root_tree.rb_root, + node->bytenr, &node->rb_node); + spin_unlock(&rc->reloc_root_tree.lock); +@@ -1524,6 +1525,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, + } + + if (reloc_root->commit_root != reloc_root->node) { ++ __update_reloc_root(reloc_root); + btrfs_set_root_node(root_item, reloc_root->node); + free_extent_buffer(reloc_root->commit_root); + reloc_root->commit_root = btrfs_root_node(reloc_root); +@@ -2480,7 +2482,21 @@ out: + free_reloc_roots(&reloc_roots); + } + +- BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); ++ /* ++ * We used to have ++ * ++ * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); ++ * ++ * here, but it's wrong. If we fail to start the transaction in ++ * prepare_to_merge() we will have only 0 ref reloc roots, none of which ++ * have actually been removed from the reloc_root_tree rb tree. This is ++ * fine because we're bailing here, and we hold a reference on the root ++ * for the list that holds it, so these roots will be cleaned up when we ++ * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root ++ * will be cleaned up on unmount. ++ * ++ * The remaining nodes will be cleaned up by free_reloc_control. 
++ */ + } + + static void free_block_list(struct rb_root *blocks) +@@ -4698,11 +4714,6 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, + BUG_ON(rc->stage == UPDATE_DATA_PTRS && + root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); + +- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { +- if (buf == root->node) +- __update_reloc_root(root, cow->start); +- } +- + level = btrfs_header_level(buf); + if (btrfs_header_generation(buf) <= + btrfs_root_last_snapshot(&root->root_item)) +diff --git a/fs/buffer.c b/fs/buffer.c +index bdca7b10e239..cae7f24a0410 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -1398,6 +1398,17 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size) + } + EXPORT_SYMBOL(__breadahead); + ++void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size, ++ gfp_t gfp) ++{ ++ struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); ++ if (likely(bh)) { ++ ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); ++ brelse(bh); ++ } ++} ++EXPORT_SYMBOL(__breadahead_gfp); ++ + /** + * __bread_gfp() - reads a specified block and returns the bh + * @bdev: the block_device to read from +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 5e75c5f77f4c..662977b8d6ae 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -3303,7 +3303,7 @@ again: + if (rc == -ENODATA) + rc = 0; + +- ctx->rc = (rc == 0) ? ctx->total_len : rc; ++ ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc; + + mutex_unlock(&ctx->aio_mutex); + +diff --git a/fs/exec.c b/fs/exec.c +index 7def97f6aac2..f687e7d59beb 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -1373,7 +1373,7 @@ void setup_new_exec(struct linux_binprm * bprm) + + /* An exec changes our domain. 
We are no longer part of the thread + group */ +- current->self_exec_id++; ++ WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1); + flush_signal_handlers(current, 0); + } + EXPORT_SYMBOL(setup_new_exec); +diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c +index dd8f10db82e9..bd1d68ff3a9f 100644 +--- a/fs/ext2/xattr.c ++++ b/fs/ext2/xattr.c +@@ -56,6 +56,7 @@ + + #include <linux/buffer_head.h> + #include <linux/init.h> ++#include <linux/printk.h> + #include <linux/slab.h> + #include <linux/mbcache.h> + #include <linux/quotaops.h> +@@ -84,8 +85,8 @@ + printk("\n"); \ + } while (0) + #else +-# define ea_idebug(f...) +-# define ea_bdebug(f...) ++# define ea_idebug(inode, f...) no_printk(f) ++# define ea_bdebug(bh, f...) no_printk(f) + #endif + + static int ext2_xattr_set2(struct inode *, struct buffer_head *, +@@ -838,8 +839,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh) + error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1); + if (error) { + if (error == -EBUSY) { +- ea_bdebug(bh, "already in cache (%d cache entries)", +- atomic_read(&ext2_xattr_cache->c_entry_count)); ++ ea_bdebug(bh, "already in cache"); + error = 0; + } + } else +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 20d68554680f..fa6ae9014e8f 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -3446,8 +3446,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, + (unsigned long long)map->m_lblk, map_len); + + sbi = EXT4_SB(inode->i_sb); +- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> +- inode->i_sb->s_blocksize_bits; ++ eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) ++ >> inode->i_sb->s_blocksize_bits; + if (eof_block < map->m_lblk + map_len) + eof_block = map->m_lblk + map_len; + +@@ -3702,8 +3702,8 @@ static int ext4_split_convert_extents(handle_t *handle, + __func__, inode->i_ino, + (unsigned long long)map->m_lblk, map->m_len); + +- eof_block = (inode->i_size + 
inode->i_sb->s_blocksize - 1) >> +- inode->i_sb->s_blocksize_bits; ++ eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) ++ >> inode->i_sb->s_blocksize_bits; + if (eof_block < map->m_lblk + map->m_len) + eof_block = map->m_lblk + map->m_len; + /* +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 1e2edebd0929..5b0d5ca2c2b2 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -4593,7 +4593,7 @@ make_io: + if (end > table) + end = table; + while (b <= end) +- sb_breadahead(sb, b++); ++ sb_breadahead_unmovable(sb, b++); + } + + /* +@@ -4981,7 +4981,7 @@ static int ext4_inode_blocks_set(handle_t *handle, + struct ext4_inode_info *ei) + { + struct inode *inode = &(ei->vfs_inode); +- u64 i_blocks = inode->i_blocks; ++ u64 i_blocks = READ_ONCE(inode->i_blocks); + struct super_block *sb = inode->i_sb; + + if (i_blocks <= ~0U) { +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index f5646bcad770..0ced133a36ec 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -369,7 +369,8 @@ static void save_error_info(struct super_block *sb, const char *func, + unsigned int line) + { + __save_error_info(sb, func, line); +- ext4_commit_super(sb, 1); ++ if (!bdev_read_only(sb->s_bdev)) ++ ext4_commit_super(sb, 1); + } + + /* +@@ -3969,7 +3970,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || + sbi->s_inodes_per_group > blocksize * 8) { + ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", +- sbi->s_blocks_per_group); ++ sbi->s_inodes_per_group); + goto failed_mount; + } + sbi->s_itb_per_group = sbi->s_inodes_per_group / +@@ -4100,9 +4101,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + EXT4_BLOCKS_PER_GROUP(sb) - 1); + do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); + if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { +- ext4_msg(sb, KERN_WARNING, "groups count too large: %u " ++ ext4_msg(sb, KERN_WARNING, "groups 
count too large: %llu " + "(block count %llu, first data block %u, " +- "blocks per group %lu)", sbi->s_groups_count, ++ "blocks per group %lu)", blocks_count, + ext4_blocks_count(es), + le32_to_cpu(es->s_first_data_block), + EXT4_BLOCKS_PER_GROUP(sb)); +@@ -4145,7 +4146,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + /* Pre-read the descriptors into the buffer cache */ + for (i = 0; i < db_count; i++) { + block = descriptor_loc(sb, logical_sb_block, i); +- sb_breadahead(sb, block); ++ sb_breadahead_unmovable(sb, block); + } + + for (i = 0; i < db_count; i++) { +diff --git a/fs/filesystems.c b/fs/filesystems.c +index f2728a4a03a1..8fb7cda40997 100644 +--- a/fs/filesystems.c ++++ b/fs/filesystems.c +@@ -279,7 +279,9 @@ struct file_system_type *get_fs_type(const char *name) + fs = __get_fs_type(name, len); + if (!fs && (request_module("fs-%.*s", len, name) == 0)) { + fs = __get_fs_type(name, len); +- WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name); ++ if (!fs) ++ pr_warn_once("request_module fs-%.*s succeeded, but still no fs?\n", ++ len, name); + } + + if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index aea1ed0aebd0..1e2ff4b32c79 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -636,6 +636,9 @@ __acquires(&gl->gl_lockref.lock) + goto out_unlock; + if (nonblock) + goto out_sched; ++ smp_mb(); ++ if (atomic_read(&gl->gl_revokes) != 0) ++ goto out_sched; + set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); + GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); + gl->gl_target = gl->gl_demote_state; +diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c +index e6d554476db4..eeebe80c6be4 100644 +--- a/fs/hfsplus/attributes.c ++++ b/fs/hfsplus/attributes.c +@@ -292,6 +292,10 @@ static int __hfsplus_delete_attr(struct inode *inode, u32 cnid, + return -ENOENT; + } + ++ /* Avoid btree corruption */ ++ hfs_bnode_read(fd->bnode, 
fd->search_key, ++ fd->keyoffset, fd->keylength); ++ + err = hfs_brec_remove(fd); + if (err) + return err; +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c +index 1a4bd8d9636e..6870103a0f59 100644 +--- a/fs/jbd2/commit.c ++++ b/fs/jbd2/commit.c +@@ -994,9 +994,10 @@ restart_loop: + * journalled data) we need to unmap buffer and clear + * more bits. We also need to be careful about the check + * because the data page mapping can get cleared under +- * out hands, which alse need not to clear more bits +- * because the page and buffers will be freed and can +- * never be reused once we are done with them. ++ * our hands. Note that if mapping == NULL, we don't ++ * need to make buffer unmapped because the page is ++ * already detached from the mapping and buffers cannot ++ * get reused. + */ + mapping = READ_ONCE(bh->b_page->mapping); + if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) { +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c +index b8d55da2f04d..440ff8e7082b 100644 +--- a/fs/nfs/callback_proc.c ++++ b/fs/nfs/callback_proc.c +@@ -127,6 +127,8 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp, + restart: + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + list_for_each_entry(lo, &server->layouts, plh_layouts) { ++ if (!pnfs_layout_is_valid(lo)) ++ continue; + if (stateid != NULL && + !nfs4_stateid_match_other(stateid, &lo->plh_stateid)) + continue; +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index 9d07b53e1647..e6ea4511c41c 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -600,6 +600,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) + l_ctx = nfs_get_lock_context(dreq->ctx); + if (IS_ERR(l_ctx)) { + result = PTR_ERR(l_ctx); ++ nfs_direct_req_release(dreq); + goto out_release; + } + dreq->l_ctx = l_ctx; +@@ -1023,6 +1024,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) + l_ctx = nfs_get_lock_context(dreq->ctx); + if 
(IS_ERR(l_ctx)) { + result = PTR_ERR(l_ctx); ++ nfs_direct_req_release(dreq); + goto out_release; + } + dreq->l_ctx = l_ctx; +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index ceb6892d9bbd..7c01936be7c7 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -864,15 +864,6 @@ static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio, + pgio->pg_mirror_count = mirror_count; + } + +-/* +- * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) +- */ +-void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) +-{ +- pgio->pg_mirror_count = 1; +- pgio->pg_mirror_idx = 0; +-} +- + static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio) + { + pgio->pg_mirror_count = 1; +@@ -1301,6 +1292,14 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) + } + } + ++/* ++ * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) ++ */ ++void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) ++{ ++ nfs_pageio_complete(pgio); ++} ++ + int __init nfs_init_nfspagecache(void) + { + nfs_page_cachep = kmem_cache_create("nfs_page", +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 89f36040adf6..7b6bda68aa86 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -422,6 +422,7 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, + } + + subreq->wb_head = subreq; ++ nfs_release_request(old_head); + + if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { + nfs_release_request(subreq); +diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c +index addd7c5f2d3e..bed54e8adcf9 100644 +--- a/fs/ocfs2/alloc.c ++++ b/fs/ocfs2/alloc.c +@@ -7240,6 +7240,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_inline_data *idata = &di->id2.i_data; + ++ /* No need to punch hole beyond i_size. 
*/ ++ if (start >= i_size_read(inode)) ++ return 0; ++ + if (end > i_size_read(inode)) + end = i_size_read(inode); + +diff --git a/include/acpi/processor.h b/include/acpi/processor.h +index d591bb77f592..f4bff2313547 100644 +--- a/include/acpi/processor.h ++++ b/include/acpi/processor.h +@@ -291,6 +291,14 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx + } + #endif + ++static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg, ++ bool direct) ++{ ++ if (direct || (is_percpu_thread() && cpu == smp_processor_id())) ++ return fn(arg); ++ return work_on_cpu(cpu, fn, arg); ++} ++ + /* in processor_perflib.c */ + + #ifdef CONFIG_CPU_FREQ +diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h +index e0970a578188..a7207a965466 100644 +--- a/include/keys/big_key-type.h ++++ b/include/keys/big_key-type.h +@@ -21,6 +21,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep); + extern void big_key_revoke(struct key *key); + extern void big_key_destroy(struct key *key); + extern void big_key_describe(const struct key *big_key, struct seq_file *m); +-extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen); ++extern long big_key_read(const struct key *key, char *buffer, size_t buflen); + + #endif /* _KEYS_BIG_KEY_TYPE_H */ +diff --git a/include/keys/user-type.h b/include/keys/user-type.h +index 12babe991594..0d8f3cd3056f 100644 +--- a/include/keys/user-type.h ++++ b/include/keys/user-type.h +@@ -45,8 +45,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep); + extern void user_revoke(struct key *key); + extern void user_destroy(struct key *key); + extern void user_describe(const struct key *user, struct seq_file *m); +-extern long user_read(const struct key *key, +- char __user *buffer, size_t buflen); ++extern long user_read(const struct key *key, char *buffer, size_t buflen); + + static inline const struct user_key_payload 
*user_key_payload_rcu(const struct key *key) + { +diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h +index afa37f807f12..2e1077ea77db 100644 +--- a/include/linux/buffer_head.h ++++ b/include/linux/buffer_head.h +@@ -187,6 +187,8 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, + void __brelse(struct buffer_head *); + void __bforget(struct buffer_head *); + void __breadahead(struct block_device *, sector_t block, unsigned int size); ++void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, ++ gfp_t gfp); + struct buffer_head *__bread_gfp(struct block_device *, + sector_t block, unsigned size, gfp_t gfp); + void invalidate_bh_lrus(void); +@@ -319,6 +321,12 @@ sb_breadahead(struct super_block *sb, sector_t block) + __breadahead(sb->s_bdev, block, sb->s_blocksize); + } + ++static inline void ++sb_breadahead_unmovable(struct super_block *sb, sector_t block) ++{ ++ __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0); ++} ++ + static inline struct buffer_head * + sb_getblk(struct super_block *sb, sector_t block) + { +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index f84d332085c3..3ffe3f3f7903 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -331,7 +331,7 @@ unsigned long read_word_at_a_time(const void *addr) + * compiler has support to do so. 
+ */ + #define compiletime_assert(condition, msg) \ +- _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) ++ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) + + #define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ +diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h +index 4635f95000a4..79a6e37a1d6f 100644 +--- a/include/linux/devfreq_cooling.h ++++ b/include/linux/devfreq_cooling.h +@@ -75,7 +75,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *dfc); + + #else /* !CONFIG_DEVFREQ_THERMAL */ + +-struct thermal_cooling_device * ++static inline struct thermal_cooling_device * + of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, + struct devfreq_cooling_power *dfc_power) + { +diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h +index dba15ca8e60b..1dcd9198beb7 100644 +--- a/include/linux/iocontext.h ++++ b/include/linux/iocontext.h +@@ -8,6 +8,7 @@ + + enum { + ICQ_EXITED = 1 << 2, ++ ICQ_DESTROYED = 1 << 3, + }; + + /* +diff --git a/include/linux/key-type.h b/include/linux/key-type.h +index dfb3ba782d2c..535b310a4c3b 100644 +--- a/include/linux/key-type.h ++++ b/include/linux/key-type.h +@@ -125,7 +125,7 @@ struct key_type { + * much is copied into the buffer + * - shouldn't do the copy if the buffer is NULL + */ +- long (*read)(const struct key *key, char __user *buffer, size_t buflen); ++ long (*read)(const struct key *key, char *buffer, size_t buflen); + + /* handle request_key() for this type instead of invoking + * /sbin/request-key (optional) +diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h +index e9c3b98df3e2..a726f96010d5 100644 +--- a/include/linux/nvme-fc-driver.h ++++ b/include/linux/nvme-fc-driver.h +@@ -279,8 +279,6 @@ struct nvme_fc_remote_port { + * + * Host/Initiator Transport Entrypoints/Parameters: + * +- * @module: The LLDD module using the interface +- * + * 
@localport_delete: The LLDD initiates deletion of a localport via + * nvme_fc_deregister_localport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the +@@ -394,8 +392,6 @@ struct nvme_fc_remote_port { + * Value is Mandatory. Allowed to be zero. + */ + struct nvme_fc_port_template { +- struct module *module; +- + /* initiator-based functions */ + void (*localport_delete)(struct nvme_fc_local_port *); + void (*remoteport_delete)(struct nvme_fc_remote_port *); +diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h +index f7a04e1af112..abbc74621f38 100644 +--- a/include/linux/pci-epc.h ++++ b/include/linux/pci-epc.h +@@ -63,6 +63,7 @@ struct pci_epc_ops { + * @bitmap: bitmap to manage the PCI address space + * @pages: number of bits representing the address region + * @page_size: size of each page ++ * @lock: mutex to protect bitmap + */ + struct pci_epc_mem { + phys_addr_t phys_base; +@@ -70,6 +71,8 @@ struct pci_epc_mem { + unsigned long *bitmap; + size_t page_size; + int pages; ++ /* mutex to protect against concurrent access for memory allocation*/ ++ struct mutex lock; + }; + + /** +diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h +index 73a7bf30fe9a..3f3cece31148 100644 +--- a/include/linux/percpu_counter.h ++++ b/include/linux/percpu_counter.h +@@ -78,9 +78,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc) + */ + static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) + { +- s64 ret = fbc->count; ++ /* Prevent reloads of fbc->count */ ++ s64 ret = READ_ONCE(fbc->count); + +- barrier(); /* Prevent reloads of fbc->count */ + if (ret >= 0) + return ret; + return 0; +diff --git a/include/linux/sched.h b/include/linux/sched.h +index b06577652643..99650f05c271 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -839,8 +839,8 @@ struct task_struct { + struct seccomp seccomp; + + /* Thread group tracking: */ +- u32 parent_exec_id; +- 
u32 self_exec_id; ++ u64 parent_exec_id; ++ u64 self_exec_id; + + /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ + spinlock_t alloc_lock; +diff --git a/include/linux/swab.h b/include/linux/swab.h +index e466fd159c85..bcff5149861a 100644 +--- a/include/linux/swab.h ++++ b/include/linux/swab.h +@@ -7,6 +7,7 @@ + # define swab16 __swab16 + # define swab32 __swab32 + # define swab64 __swab64 ++# define swab __swab + # define swahw32 __swahw32 + # define swahb32 __swahb32 + # define swab16p __swab16p +diff --git a/include/linux/swapops.h b/include/linux/swapops.h +index 1d3877c39a00..0b8c86096752 100644 +--- a/include/linux/swapops.h ++++ b/include/linux/swapops.h +@@ -377,7 +377,8 @@ static inline void num_poisoned_pages_inc(void) + } + #endif + +-#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) ++#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \ ++ defined(CONFIG_DEVICE_PRIVATE) + static inline int non_swap_entry(swp_entry_t entry) + { + return swp_type(entry) >= MAX_SWAPFILES; +diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h +index bee528135cf1..9f7f81117434 100644 +--- a/include/net/ip6_route.h ++++ b/include/net/ip6_route.h +@@ -207,6 +207,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, + + return rt->rt6i_flags & RTF_ANYCAST || + (rt->rt6i_dst.plen < 127 && ++ !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) && + ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)); + } + +diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h +index cf5f3fff1f1a..fd7e4d1df9a1 100644 +--- a/include/target/iscsi/iscsi_target_core.h ++++ b/include/target/iscsi/iscsi_target_core.h +@@ -673,7 +673,7 @@ struct iscsi_session { + atomic_t session_logout; + atomic_t session_reinstatement; + atomic_t session_stop_active; +- atomic_t sleep_on_sess_wait_comp; ++ atomic_t session_close; + /* connection list */ + struct list_head 
sess_conn_list; + struct list_head cr_active_list; +diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h +index 23cd84868cc3..fa7f97da5b76 100644 +--- a/include/uapi/linux/swab.h ++++ b/include/uapi/linux/swab.h +@@ -4,6 +4,7 @@ + + #include <linux/types.h> + #include <linux/compiler.h> ++#include <asm/bitsperlong.h> + #include <asm/swab.h> + + /* +@@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val) + __fswab64(x)) + #endif + ++static __always_inline unsigned long __swab(const unsigned long y) ++{ ++#if BITS_PER_LONG == 64 ++ return __swab64(y); ++#else /* BITS_PER_LONG == 32 */ ++ return __swab32(y); ++#endif ++} ++ + /** + * __swahw32 - return a word-swapped 32-bit value + * @x: value to wordswap +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 96c0a868232e..d8c77bfb6e7e 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -2089,10 +2089,8 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) + */ + cpuhp_offline_cpu_device(cpu); + } +- if (!ret) { ++ if (!ret) + cpu_smt_control = ctrlval; +- arch_smt_update(); +- } + cpu_maps_update_done(); + return ret; + } +@@ -2103,7 +2101,6 @@ int cpuhp_smt_enable(void) + + cpu_maps_update_begin(); + cpu_smt_control = CPU_SMT_ENABLED; +- arch_smt_update(); + for_each_present_cpu(cpu) { + /* Skip online CPUs and CPUs on offline nodes */ + if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) +diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c +index b269ae16b10c..0d54f8256b9f 100644 +--- a/kernel/irq/irqdomain.c ++++ b/kernel/irq/irqdomain.c +@@ -1372,6 +1372,11 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs, void *arg) + { ++ if (!domain->ops->alloc) { ++ pr_debug("domain->ops->alloc() is NULL\n"); ++ return -ENOSYS; ++ } ++ + return domain->ops->alloc(domain, irq_base, nr_irqs, arg); + } + +@@ -1409,11 +1414,6 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, + return -EINVAL; + } 
+ +- if (!domain->ops->alloc) { +- pr_debug("domain->ops->alloc() is NULL\n"); +- return -ENOSYS; +- } +- + if (realloc && irq_base >= 0) { + virq = irq_base; + } else { +diff --git a/kernel/kmod.c b/kernel/kmod.c +index bc6addd9152b..a2de58de6ab6 100644 +--- a/kernel/kmod.c ++++ b/kernel/kmod.c +@@ -120,7 +120,7 @@ out: + * invoke it. + * + * If module auto-loading support is disabled then this function +- * becomes a no-operation. ++ * simply returns -ENOENT. + */ + int __request_module(bool wait, const char *fmt, ...) + { +@@ -137,7 +137,7 @@ int __request_module(bool wait, const char *fmt, ...) + WARN_ON_ONCE(wait && current_is_async()); + + if (!modprobe_path[0]) +- return 0; ++ return -ENOENT; + + va_start(args, fmt); + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c +index 90a3469a7a88..03e3ab61a2ed 100644 +--- a/kernel/locking/lockdep.c ++++ b/kernel/locking/lockdep.c +@@ -1297,9 +1297,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) + this.class = class; + + raw_local_irq_save(flags); ++ current->lockdep_recursion = 1; + arch_spin_lock(&lockdep_lock); + ret = __lockdep_count_forward_deps(&this); + arch_spin_unlock(&lockdep_lock); ++ current->lockdep_recursion = 0; + raw_local_irq_restore(flags); + + return ret; +@@ -1324,9 +1326,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) + this.class = class; + + raw_local_irq_save(flags); ++ current->lockdep_recursion = 1; + arch_spin_lock(&lockdep_lock); + ret = __lockdep_count_backward_deps(&this); + arch_spin_unlock(&lockdep_lock); ++ current->lockdep_recursion = 0; + raw_local_irq_restore(flags); + + return ret; +diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c +index 6dca260eeccf..032868be3259 100644 +--- a/kernel/locking/locktorture.c ++++ b/kernel/locking/locktorture.c +@@ -723,10 +723,10 @@ static void __torture_print_stats(char *page, + if 
(statp[i].n_lock_fail) + fail = true; + sum += statp[i].n_lock_acquired; +- if (max < statp[i].n_lock_fail) +- max = statp[i].n_lock_fail; +- if (min > statp[i].n_lock_fail) +- min = statp[i].n_lock_fail; ++ if (max < statp[i].n_lock_acquired) ++ max = statp[i].n_lock_acquired; ++ if (min > statp[i].n_lock_acquired) ++ min = statp[i].n_lock_acquired; + } + page += sprintf(page, + "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 268f560ec998..391d73a12ad7 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -89,7 +89,13 @@ static inline void cpu_load_update_active(struct rq *this_rq) { } + #ifdef CONFIG_64BIT + # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) + # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) +-# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT) ++# define scale_load_down(w) \ ++({ \ ++ unsigned long __w = (w); \ ++ if (__w) \ ++ __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ ++ __w; \ ++}) + #else + # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) + # define scale_load(w) (w) +diff --git a/kernel/signal.c b/kernel/signal.c +index c066168f8854..deb36b35c30b 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -1675,7 +1675,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig) + * This is only possible if parent == real_parent. + * Check if it has changed security domain. 
+ */ +- if (tsk->parent_exec_id != tsk->parent->self_exec_id) ++ if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id)) + sig = SIGCHLD; + } + +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c +index 31e91efe243e..6fb5eb7b57dc 100644 +--- a/kernel/trace/trace_events_trigger.c ++++ b/kernel/trace/trace_events_trigger.c +@@ -1075,14 +1075,10 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, + struct event_trigger_data *data, + struct trace_event_file *file) + { +- int ret = register_trigger(glob, ops, data, file); +- +- if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) { +- unregister_trigger(glob, ops, data, file); +- ret = 0; +- } ++ if (tracing_alloc_snapshot_instance(file->tr) != 0) ++ return 0; + +- return ret; ++ return register_trigger(glob, ops, data, file); + } + + static int +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index ea20274a105a..d66aed6e9c75 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -877,6 +877,8 @@ static int probes_seq_show(struct seq_file *m, void *v) + int i; + + seq_putc(m, trace_kprobe_is_return(tk) ? 
'r' : 'p'); ++ if (trace_kprobe_is_return(tk) && tk->rp.maxactive) ++ seq_printf(m, "%d", tk->rp.maxactive); + seq_printf(m, ":%s/%s", tk->tp.call.class->system, + trace_event_name(&tk->tp.call)); + +diff --git a/lib/find_bit.c b/lib/find_bit.c +index 6ed74f78380c..883ef3755a1c 100644 +--- a/lib/find_bit.c ++++ b/lib/find_bit.c +@@ -133,18 +133,6 @@ EXPORT_SYMBOL(find_last_bit); + + #ifdef __BIG_ENDIAN + +-/* include/linux/byteorder does not support "unsigned long" type */ +-static inline unsigned long ext2_swab(const unsigned long y) +-{ +-#if BITS_PER_LONG == 64 +- return (unsigned long) __swab64((u64) y); +-#elif BITS_PER_LONG == 32 +- return (unsigned long) __swab32((u32) y); +-#else +-#error BITS_PER_LONG not defined +-#endif +-} +- + #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) + static unsigned long _find_next_bit_le(const unsigned long *addr, + unsigned long nbits, unsigned long start, unsigned long invert) +@@ -157,7 +145,7 @@ static unsigned long _find_next_bit_le(const unsigned long *addr, + tmp = addr[start / BITS_PER_LONG] ^ invert; + + /* Handle 1st word. 
*/ +- tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start)); ++ tmp &= swab(BITMAP_FIRST_WORD_MASK(start)); + start = round_down(start, BITS_PER_LONG); + + while (!tmp) { +@@ -168,7 +156,7 @@ static unsigned long _find_next_bit_le(const unsigned long *addr, + tmp = addr[start / BITS_PER_LONG] ^ invert; + } + +- return min(start + __ffs(ext2_swab(tmp)), nbits); ++ return min(start + __ffs(swab(tmp)), nbits); + } + #endif + +diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc +index d5242f544551..b7c68030da4f 100644 +--- a/lib/raid6/neon.uc ++++ b/lib/raid6/neon.uc +@@ -28,7 +28,6 @@ + + typedef uint8x16_t unative_t; + +-#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x}) + #define NSIZE sizeof(unative_t) + + /* +@@ -61,7 +60,7 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs) + int d, z, z0; + + register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; +- const unative_t x1d = NBYTES(0x1d); ++ const unative_t x1d = vdupq_n_u8(0x1d); + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ +@@ -92,7 +91,7 @@ void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop, + int d, z, z0; + + register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; +- const unative_t x1d = NBYTES(0x1d); ++ const unative_t x1d = vdupq_n_u8(0x1d); + + z0 = stop; /* P/Q right side optimization */ + p = dptr[disks-2]; /* XOR parity */ +diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c +index 8cd20c9f834a..7d00c31a6547 100644 +--- a/lib/raid6/recov_neon_inner.c ++++ b/lib/raid6/recov_neon_inner.c +@@ -10,11 +10,6 @@ + + #include <arm_neon.h> + +-static const uint8x16_t x0f = { +- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, +- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, +-}; +- + #ifdef CONFIG_ARM + /* + * AArch32 does not provide this intrinsic natively because it does not +@@ -41,6 +36,7 @@ void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, + uint8x16_t pm1 = vld1q_u8(pbmul + 16); 
+ uint8x16_t qm0 = vld1q_u8(qmul); + uint8x16_t qm1 = vld1q_u8(qmul + 16); ++ uint8x16_t x0f = vdupq_n_u8(0x0f); + + /* + * while ( bytes-- ) { +@@ -87,6 +83,7 @@ void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq, + { + uint8x16_t qm0 = vld1q_u8(qmul); + uint8x16_t qm1 = vld1q_u8(qmul + 16); ++ uint8x16_t x0f = vdupq_n_u8(0x0f); + + /* + * while (bytes--) { +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 6f71518a4558..08af4e3de6fb 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -4325,11 +4325,11 @@ refill: + /* Even if we own the page, we do not use atomic_set(). + * This would break get_page_unless_zero() users. + */ +- page_ref_add(page, size); ++ page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); + + /* reset page count bias and offset to start of new frag */ + nc->pfmemalloc = page_is_pfmemalloc(page); +- nc->pagecnt_bias = size + 1; ++ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; + nc->offset = size; + } + +@@ -4345,10 +4345,10 @@ refill: + size = nc->size; + #endif + /* OK, page count is 0, we can safely set it */ +- set_page_count(page, size + 1); ++ set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); + + /* reset page count bias and offset to start of new frag */ +- nc->pagecnt_bias = size + 1; ++ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; + offset = size - fragsz; + } + +diff --git a/mm/slub.c b/mm/slub.c +index 958a8f7a3c25..3c1a16f03b2b 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -248,7 +248,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, + unsigned long ptr_addr) + { + #ifdef CONFIG_SLAB_FREELIST_HARDENED +- return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr); ++ return (void *)((unsigned long)ptr ^ s->random ^ swab(ptr_addr)); + #else + return ptr; + #endif +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index d00961ba0c42..88091fd704f4 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -1682,7 +1682,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t 
gfp_mask, + nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; + array_size = (nr_pages * sizeof(struct page *)); + +- area->nr_pages = nr_pages; + /* Please note that the recursion is strictly bounded. */ + if (array_size > PAGE_SIZE) { + pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask, +@@ -1690,13 +1689,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, + } else { + pages = kmalloc_node(array_size, nested_gfp, node); + } +- area->pages = pages; +- if (!area->pages) { ++ ++ if (!pages) { + remove_vm_area(area->addr); + kfree(area); + return NULL; + } + ++ area->pages = pages; ++ area->nr_pages = nr_pages; ++ + for (i = 0; i < area->nr_pages; i++) { + struct page *page; + +diff --git a/net/core/dev.c b/net/core/dev.c +index 36d926d2d5f0..4f32a3251b64 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3575,7 +3575,8 @@ EXPORT_SYMBOL(netdev_max_backlog); + + int netdev_tstamp_prequeue __read_mostly = 1; + int netdev_budget __read_mostly = 300; +-unsigned int __read_mostly netdev_budget_usecs = 2000; ++/* Must be at least 2 jiffes to guarantee 1 jiffy timeout */ ++unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ; + int weight_p __read_mostly = 64; /* old backlog weight */ + int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ + int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 7c479c1ffd77..cb15338cfda4 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -2424,7 +2424,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) + } + + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { +- __dev_notify_flags(dev, old_flags, 0U); ++ __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags)); + } else { + dev->rtnl_link_state = RTNL_LINK_INITIALIZED; + __dev_notify_flags(dev, old_flags, ~0U); +diff --git a/net/dns_resolver/dns_key.c 
b/net/dns_resolver/dns_key.c +index 5f5d9eafccf5..ea133857f19e 100644 +--- a/net/dns_resolver/dns_key.c ++++ b/net/dns_resolver/dns_key.c +@@ -242,7 +242,7 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m) + * - the key's semaphore is read-locked + */ + static long dns_resolver_read(const struct key *key, +- char __user *buffer, size_t buflen) ++ char *buffer, size_t buflen) + { + int err = PTR_ERR(key->payload.data[dns_key_error]); + +diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c +index 37708dabebd1..606bc7fe5cc7 100644 +--- a/net/hsr/hsr_netlink.c ++++ b/net/hsr/hsr_netlink.c +@@ -64,10 +64,16 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, + else + multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]); + +- if (!data[IFLA_HSR_VERSION]) ++ if (!data[IFLA_HSR_VERSION]) { + hsr_version = 0; +- else ++ } else { + hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]); ++ if (hsr_version > 1) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Only versions 0..1 are supported"); ++ return -EINVAL; ++ } ++ } + + return hsr_dev_finalize(dev, link, multicast_spec, hsr_version); + } +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c +index 5f020c051af9..096a28f9720d 100644 +--- a/net/ipv4/devinet.c ++++ b/net/ipv4/devinet.c +@@ -579,12 +579,15 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, + return NULL; + } + +-static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa) ++static int ip_mc_autojoin_config(struct net *net, bool join, ++ const struct in_ifaddr *ifa) + { ++#if defined(CONFIG_IP_MULTICAST) + struct ip_mreqn mreq = { + .imr_multiaddr.s_addr = ifa->ifa_address, + .imr_ifindex = ifa->ifa_dev->dev->ifindex, + }; ++ struct sock *sk = net->ipv4.mc_autojoin_sk; + int ret; + + ASSERT_RTNL(); +@@ -597,6 +600,9 @@ static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa) + release_sock(sk); + + return ret; ++#else ++ return -EOPNOTSUPP; 
++#endif + } + + static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, +@@ -638,7 +644,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, + continue; + + if (ipv4_is_multicast(ifa->ifa_address)) +- ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa); ++ ip_mc_autojoin_config(net, false, ifa); + __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid); + return 0; + } +@@ -896,8 +902,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, + */ + set_ifa_lifetime(ifa, valid_lft, prefered_lft); + if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) { +- int ret = ip_mc_config(net->ipv4.mc_autojoin_sk, +- true, ifa); ++ int ret = ip_mc_autojoin_config(net, true, ifa); + + if (ret < 0) { + inet_free_ifa(ifa); +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 91490446ebb4..5b8d5bfeb7ac 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -3129,7 +3129,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, + NFT_SET_INTERVAL | NFT_SET_TIMEOUT | + NFT_SET_MAP | NFT_SET_EVAL | + NFT_SET_OBJECT)) +- return -EINVAL; ++ return -EOPNOTSUPP; + /* Only one of these operations is supported */ + if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) == + (NFT_SET_MAP | NFT_SET_OBJECT)) +@@ -3167,7 +3167,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, + objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE])); + if (objtype == NFT_OBJECT_UNSPEC || + objtype > NFT_OBJECT_MAX) +- return -EINVAL; ++ return -EOPNOTSUPP; + } else if (flags & NFT_SET_OBJECT) + return -EINVAL; + else +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c +index 8d9a244f4534..944ce686bfe5 100644 +--- a/net/qrtr/qrtr.c ++++ b/net/qrtr/qrtr.c +@@ -710,20 +710,21 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + + node = NULL; + if (addr->sq_node == QRTR_NODE_BCAST) { +- enqueue_fn = qrtr_bcast_enqueue; +- if (addr->sq_port != QRTR_PORT_CTRL) { ++ if 
(addr->sq_port != QRTR_PORT_CTRL && ++ qrtr_local_nid != QRTR_NODE_BCAST) { + release_sock(sk); + return -ENOTCONN; + } ++ enqueue_fn = qrtr_bcast_enqueue; + } else if (addr->sq_node == ipc->us.sq_node) { + enqueue_fn = qrtr_local_enqueue; + } else { +- enqueue_fn = qrtr_node_enqueue; + node = qrtr_node_lookup(addr->sq_node); + if (!node) { + release_sock(sk); + return -ECONNRESET; + } ++ enqueue_fn = qrtr_node_enqueue; + } + + plen = (len + 3) & ~3; +diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c +index e7f6b8823eb6..ad9d1b21cb0b 100644 +--- a/net/rxrpc/key.c ++++ b/net/rxrpc/key.c +@@ -35,7 +35,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *); + static void rxrpc_destroy(struct key *); + static void rxrpc_destroy_s(struct key *); + static void rxrpc_describe(const struct key *, struct seq_file *); +-static long rxrpc_read(const struct key *, char __user *, size_t); ++static long rxrpc_read(const struct key *, char *, size_t); + + /* + * rxrpc defined keys take an arbitrary string as the description and an +@@ -1044,12 +1044,12 @@ EXPORT_SYMBOL(rxrpc_get_null_key); + * - this returns the result in XDR form + */ + static long rxrpc_read(const struct key *key, +- char __user *buffer, size_t buflen) ++ char *buffer, size_t buflen) + { + const struct rxrpc_key_token *token; + const struct krb5_principal *princ; + size_t size; +- __be32 __user *xdr, *oldxdr; ++ __be32 *xdr, *oldxdr; + u32 cnlen, toksize, ntoks, tok, zero; + u16 toksizes[AFSTOKEN_MAX]; + int loop; +@@ -1126,30 +1126,25 @@ static long rxrpc_read(const struct key *key, + if (!buffer || buflen < size) + return size; + +- xdr = (__be32 __user *) buffer; ++ xdr = (__be32 *)buffer; + zero = 0; + #define ENCODE(x) \ + do { \ +- __be32 y = htonl(x); \ +- if (put_user(y, xdr++) < 0) \ +- goto fault; \ ++ *xdr++ = htonl(x); \ + } while(0) + #define ENCODE_DATA(l, s) \ + do { \ + u32 _l = (l); \ + ENCODE(l); \ +- if (copy_to_user(xdr, (s), _l) != 0) \ +- goto fault; \ +- if (_l & 3 && \ +- 
copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \ +- goto fault; \ ++ memcpy(xdr, (s), _l); \ ++ if (_l & 3) \ ++ memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ + xdr += (_l + 3) >> 2; \ + } while(0) + #define ENCODE64(x) \ + do { \ + __be64 y = cpu_to_be64(x); \ +- if (copy_to_user(xdr, &y, 8) != 0) \ +- goto fault; \ ++ memcpy(xdr, &y, 8); \ + xdr += 8 >> 2; \ + } while(0) + #define ENCODE_STR(s) \ +@@ -1240,8 +1235,4 @@ static long rxrpc_read(const struct key *key, + ASSERTCMP((char __user *) xdr - buffer, ==, size); + _leave(" = %zu", size); + return size; +- +-fault: +- _leave(" = -EFAULT"); +- return -EFAULT; + } +diff --git a/security/keys/big_key.c b/security/keys/big_key.c +index 929e14978c42..1957275ad2af 100644 +--- a/security/keys/big_key.c ++++ b/security/keys/big_key.c +@@ -22,6 +22,13 @@ + #include <keys/big_key-type.h> + #include <crypto/aead.h> + ++struct big_key_buf { ++ unsigned int nr_pages; ++ void *virt; ++ struct scatterlist *sg; ++ struct page *pages[]; ++}; ++ + /* + * Layout of key payload words. + */ +@@ -91,10 +98,9 @@ static DEFINE_MUTEX(big_key_aead_lock); + /* + * Encrypt/decrypt big_key data + */ +-static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) ++static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t datalen, u8 *key) + { + int ret; +- struct scatterlist sgio; + struct aead_request *aead_req; + /* We always use a zero nonce. The reason we can get away with this is + * because we're using a different randomly generated key for every +@@ -109,8 +115,7 @@ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) + return -ENOMEM; + + memset(zero_nonce, 0, sizeof(zero_nonce)); +- sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? 
ENC_AUTHTAG_SIZE : 0)); +- aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce); ++ aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce); + aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + aead_request_set_ad(aead_req, 0); + +@@ -129,22 +134,82 @@ error: + return ret; + } + ++/* ++ * Free up the buffer. ++ */ ++static void big_key_free_buffer(struct big_key_buf *buf) ++{ ++ unsigned int i; ++ ++ if (buf->virt) { ++ memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE); ++ vunmap(buf->virt); ++ } ++ ++ for (i = 0; i < buf->nr_pages; i++) ++ if (buf->pages[i]) ++ __free_page(buf->pages[i]); ++ ++ kfree(buf); ++} ++ ++/* ++ * Allocate a buffer consisting of a set of pages with a virtual mapping ++ * applied over them. ++ */ ++static void *big_key_alloc_buffer(size_t len) ++{ ++ struct big_key_buf *buf; ++ unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ unsigned int i, l; ++ ++ buf = kzalloc(sizeof(struct big_key_buf) + ++ sizeof(struct page) * npg + ++ sizeof(struct scatterlist) * npg, ++ GFP_KERNEL); ++ if (!buf) ++ return NULL; ++ ++ buf->nr_pages = npg; ++ buf->sg = (void *)(buf->pages + npg); ++ sg_init_table(buf->sg, npg); ++ ++ for (i = 0; i < buf->nr_pages; i++) { ++ buf->pages[i] = alloc_page(GFP_KERNEL); ++ if (!buf->pages[i]) ++ goto nomem; ++ ++ l = min_t(size_t, len, PAGE_SIZE); ++ sg_set_page(&buf->sg[i], buf->pages[i], l, 0); ++ len -= l; ++ } ++ ++ buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL); ++ if (!buf->virt) ++ goto nomem; ++ ++ return buf; ++ ++nomem: ++ big_key_free_buffer(buf); ++ return NULL; ++} ++ + /* + * Preparse a big key + */ + int big_key_preparse(struct key_preparsed_payload *prep) + { ++ struct big_key_buf *buf; + struct path *path = (struct path *)&prep->payload.data[big_key_path]; + struct file *file; + u8 *enckey; +- u8 *data = NULL; + ssize_t written; +- size_t datalen = prep->datalen; ++ size_t datalen = prep->datalen, enclen = datalen + 
ENC_AUTHTAG_SIZE; + int ret; + +- ret = -EINVAL; + if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data) +- goto error; ++ return -EINVAL; + + /* Set an arbitrary quota */ + prep->quotalen = 16; +@@ -157,13 +222,12 @@ int big_key_preparse(struct key_preparsed_payload *prep) + * + * File content is stored encrypted with randomly generated key. + */ +- size_t enclen = datalen + ENC_AUTHTAG_SIZE; + loff_t pos = 0; + +- data = kmalloc(enclen, GFP_KERNEL); +- if (!data) ++ buf = big_key_alloc_buffer(enclen); ++ if (!buf) + return -ENOMEM; +- memcpy(data, prep->data, datalen); ++ memcpy(buf->virt, prep->data, datalen); + + /* generate random key */ + enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL); +@@ -176,7 +240,7 @@ int big_key_preparse(struct key_preparsed_payload *prep) + goto err_enckey; + + /* encrypt aligned data */ +- ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey); ++ ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey); + if (ret) + goto err_enckey; + +@@ -187,7 +251,7 @@ int big_key_preparse(struct key_preparsed_payload *prep) + goto err_enckey; + } + +- written = kernel_write(file, data, enclen, &pos); ++ written = kernel_write(file, buf->virt, enclen, &pos); + if (written != enclen) { + ret = written; + if (written >= 0) +@@ -202,7 +266,7 @@ int big_key_preparse(struct key_preparsed_payload *prep) + *path = file->f_path; + path_get(path); + fput(file); +- kzfree(data); ++ big_key_free_buffer(buf); + } else { + /* Just store the data in a buffer */ + void *data = kmalloc(datalen, GFP_KERNEL); +@@ -220,7 +284,7 @@ err_fput: + err_enckey: + kzfree(enckey); + error: +- kzfree(data); ++ big_key_free_buffer(buf); + return ret; + } + +@@ -289,7 +353,7 @@ void big_key_describe(const struct key *key, struct seq_file *m) + * read the key data + * - the key's semaphore is read-locked + */ +-long big_key_read(const struct key *key, char __user *buffer, size_t buflen) ++long big_key_read(const struct key *key, char *buffer, size_t buflen) + { + size_t datalen = 
(size_t)key->payload.data[big_key_len]; + long ret; +@@ -298,15 +362,15 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) + return datalen; + + if (datalen > BIG_KEY_FILE_THRESHOLD) { ++ struct big_key_buf *buf; + struct path *path = (struct path *)&key->payload.data[big_key_path]; + struct file *file; +- u8 *data; + u8 *enckey = (u8 *)key->payload.data[big_key_data]; + size_t enclen = datalen + ENC_AUTHTAG_SIZE; + loff_t pos = 0; + +- data = kmalloc(enclen, GFP_KERNEL); +- if (!data) ++ buf = big_key_alloc_buffer(enclen); ++ if (!buf) + return -ENOMEM; + + file = dentry_open(path, O_RDONLY, current_cred()); +@@ -316,31 +380,28 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) + } + + /* read file to kernel and decrypt */ +- ret = kernel_read(file, data, enclen, &pos); ++ ret = kernel_read(file, buf->virt, enclen, &pos); + if (ret >= 0 && ret != enclen) { + ret = -EIO; + goto err_fput; + } + +- ret = big_key_crypt(BIG_KEY_DEC, data, enclen, enckey); ++ ret = big_key_crypt(BIG_KEY_DEC, buf, enclen, enckey); + if (ret) + goto err_fput; + + ret = datalen; + +- /* copy decrypted data to user */ +- if (copy_to_user(buffer, data, datalen) != 0) +- ret = -EFAULT; ++ /* copy out decrypted data */ ++ memcpy(buffer, buf->virt, datalen); + + err_fput: + fput(file); + error: +- kzfree(data); ++ big_key_free_buffer(buf); + } else { + ret = datalen; +- if (copy_to_user(buffer, key->payload.data[big_key_data], +- datalen) != 0) +- ret = -EFAULT; ++ memcpy(buffer, key->payload.data[big_key_data], datalen); + } + + return ret; +diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c +index d92cbf9687c3..571f6d486838 100644 +--- a/security/keys/encrypted-keys/encrypted.c ++++ b/security/keys/encrypted-keys/encrypted.c +@@ -895,14 +895,14 @@ out: + } + + /* +- * encrypted_read - format and copy the encrypted data to userspace ++ * encrypted_read - format and copy out the encrypted 
data + * + * The resulting datablob format is: + * <master-key name> <decrypted data length> <encrypted iv> <encrypted data> + * + * On success, return to userspace the encrypted key datablob size. + */ +-static long encrypted_read(const struct key *key, char __user *buffer, ++static long encrypted_read(const struct key *key, char *buffer, + size_t buflen) + { + struct encrypted_key_payload *epayload; +@@ -950,8 +950,7 @@ static long encrypted_read(const struct key *key, char __user *buffer, + key_put(mkey); + memzero_explicit(derived_key, sizeof(derived_key)); + +- if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0) +- ret = -EFAULT; ++ memcpy(buffer, ascii_buf, asciiblob_len); + kzfree(ascii_buf); + + return asciiblob_len; +diff --git a/security/keys/key.c b/security/keys/key.c +index 17244f5f54c6..5f4cb271464a 100644 +--- a/security/keys/key.c ++++ b/security/keys/key.c +@@ -383,7 +383,7 @@ int key_payload_reserve(struct key *key, size_t datalen) + spin_lock(&key->user->lock); + + if (delta > 0 && +- (key->user->qnbytes + delta >= maxbytes || ++ (key->user->qnbytes + delta > maxbytes || + key->user->qnbytes + delta < key->user->qnbytes)) { + ret = -EDQUOT; + } +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index ca31af186abd..4b6a084e323b 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -742,6 +742,21 @@ error: + return ret; + } + ++/* ++ * Call the read method ++ */ ++static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen) ++{ ++ long ret; ++ ++ down_read(&key->sem); ++ ret = key_validate(key); ++ if (ret == 0) ++ ret = key->type->read(key, buffer, buflen); ++ up_read(&key->sem); ++ return ret; ++} ++ + /* + * Read a key's payload. 
+ * +@@ -757,26 +772,27 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) + struct key *key; + key_ref_t key_ref; + long ret; ++ char *key_data; + + /* find the key first */ + key_ref = lookup_user_key(keyid, 0, 0); + if (IS_ERR(key_ref)) { + ret = -ENOKEY; +- goto error; ++ goto out; + } + + key = key_ref_to_ptr(key_ref); + + ret = key_read_state(key); + if (ret < 0) +- goto error2; /* Negatively instantiated */ ++ goto key_put_out; /* Negatively instantiated */ + + /* see if we can read it directly */ + ret = key_permission(key_ref, KEY_NEED_READ); + if (ret == 0) + goto can_read_key; + if (ret != -EACCES) +- goto error2; ++ goto key_put_out; + + /* we can't; see if it's searchable from this process's keyrings + * - we automatically take account of the fact that it may be +@@ -784,26 +800,51 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) + */ + if (!is_key_possessed(key_ref)) { + ret = -EACCES; +- goto error2; ++ goto key_put_out; + } + + /* the key is probably readable - now try to read it */ + can_read_key: +- ret = -EOPNOTSUPP; +- if (key->type->read) { +- /* Read the data with the semaphore held (since we might sleep) +- * to protect against the key being updated or revoked. +- */ +- down_read(&key->sem); +- ret = key_validate(key); +- if (ret == 0) +- ret = key->type->read(key, buffer, buflen); +- up_read(&key->sem); ++ if (!key->type->read) { ++ ret = -EOPNOTSUPP; ++ goto key_put_out; + } + +-error2: ++ if (!buffer || !buflen) { ++ /* Get the key length from the read method */ ++ ret = __keyctl_read_key(key, NULL, 0); ++ goto key_put_out; ++ } ++ ++ /* ++ * Read the data with the semaphore held (since we might sleep) ++ * to protect against the key being updated or revoked. ++ * ++ * Allocating a temporary buffer to hold the keys before ++ * transferring them to user buffer to avoid potential ++ * deadlock involving page fault and mmap_sem. 
++ */ ++ key_data = kmalloc(buflen, GFP_KERNEL); ++ ++ if (!key_data) { ++ ret = -ENOMEM; ++ goto key_put_out; ++ } ++ ret = __keyctl_read_key(key, key_data, buflen); ++ ++ /* ++ * Read methods will just return the required length without ++ * any copying if the provided length isn't large enough. ++ */ ++ if (ret > 0 && ret <= buflen) { ++ if (copy_to_user(buffer, key_data, ret)) ++ ret = -EFAULT; ++ } ++ kzfree(key_data); ++ ++key_put_out: + key_put(key); +-error: ++out: + return ret; + } + +@@ -882,8 +923,8 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) + key_quota_root_maxbytes : key_quota_maxbytes; + + spin_lock(&newowner->lock); +- if (newowner->qnkeys + 1 >= maxkeys || +- newowner->qnbytes + key->quotalen >= maxbytes || ++ if (newowner->qnkeys + 1 > maxkeys || ++ newowner->qnbytes + key->quotalen > maxbytes || + newowner->qnbytes + key->quotalen < + newowner->qnbytes) + goto quota_overrun; +diff --git a/security/keys/keyring.c b/security/keys/keyring.c +index 359b9cba3d0d..f7cf371bcd2a 100644 +--- a/security/keys/keyring.c ++++ b/security/keys/keyring.c +@@ -432,7 +432,6 @@ static int keyring_read_iterator(const void *object, void *data) + { + struct keyring_read_iterator_context *ctx = data; + const struct key *key = keyring_ptr_to_key(object); +- int ret; + + kenter("{%s,%d},,{%zu/%zu}", + key->type->name, key->serial, ctx->count, ctx->buflen); +@@ -440,10 +439,7 @@ static int keyring_read_iterator(const void *object, void *data) + if (ctx->count >= ctx->buflen) + return 1; + +- ret = put_user(key->serial, ctx->buffer); +- if (ret < 0) +- return ret; +- ctx->buffer++; ++ *ctx->buffer++ = key->serial; + ctx->count += sizeof(key->serial); + return 0; + } +diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c +index 1d34b2a5f485..13ac3b1e57da 100644 +--- a/security/keys/request_key_auth.c ++++ b/security/keys/request_key_auth.c +@@ -27,7 +27,7 @@ static int request_key_auth_instantiate(struct key *, + static void 
request_key_auth_describe(const struct key *, struct seq_file *); + static void request_key_auth_revoke(struct key *); + static void request_key_auth_destroy(struct key *); +-static long request_key_auth_read(const struct key *, char __user *, size_t); ++static long request_key_auth_read(const struct key *, char *, size_t); + + /* + * The request-key authorisation key type definition. +@@ -85,7 +85,7 @@ static void request_key_auth_describe(const struct key *key, + * - the key's semaphore is read-locked + */ + static long request_key_auth_read(const struct key *key, +- char __user *buffer, size_t buflen) ++ char *buffer, size_t buflen) + { + struct request_key_auth *rka = get_request_key_auth(key); + size_t datalen; +@@ -102,8 +102,7 @@ static long request_key_auth_read(const struct key *key, + if (buflen > datalen) + buflen = datalen; + +- if (copy_to_user(buffer, rka->callout_info, buflen) != 0) +- ret = -EFAULT; ++ memcpy(buffer, rka->callout_info, buflen); + } + + return ret; +diff --git a/security/keys/trusted.c b/security/keys/trusted.c +index 98aa89ff7bfd..01e8544f79a5 100644 +--- a/security/keys/trusted.c ++++ b/security/keys/trusted.c +@@ -1136,11 +1136,10 @@ out: + * trusted_read - copy the sealed blob data to userspace in hex. + * On success, return to userspace the trusted key datablob size. 
+ */ +-static long trusted_read(const struct key *key, char __user *buffer, ++static long trusted_read(const struct key *key, char *buffer, + size_t buflen) + { + const struct trusted_key_payload *p; +- char *ascii_buf; + char *bufp; + int i; + +@@ -1149,18 +1148,9 @@ static long trusted_read(const struct key *key, char __user *buffer, + return -EINVAL; + + if (buffer && buflen >= 2 * p->blob_len) { +- ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL); +- if (!ascii_buf) +- return -ENOMEM; +- +- bufp = ascii_buf; ++ bufp = buffer; + for (i = 0; i < p->blob_len; i++) + bufp = hex_byte_pack(bufp, p->blob[i]); +- if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) { +- kzfree(ascii_buf); +- return -EFAULT; +- } +- kzfree(ascii_buf); + } + return 2 * p->blob_len; + } +diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c +index 9f558bedba23..0e723b676aef 100644 +--- a/security/keys/user_defined.c ++++ b/security/keys/user_defined.c +@@ -172,7 +172,7 @@ EXPORT_SYMBOL_GPL(user_describe); + * read the key data + * - the key's semaphore is read-locked + */ +-long user_read(const struct key *key, char __user *buffer, size_t buflen) ++long user_read(const struct key *key, char *buffer, size_t buflen) + { + const struct user_key_payload *upayload; + long ret; +@@ -185,8 +185,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen) + if (buflen > upayload->datalen) + buflen = upayload->datalen; + +- if (copy_to_user(buffer, upayload->data, buflen) != 0) +- ret = -EFAULT; ++ memcpy(buffer, upayload->data, buflen); + } + + return ret; +diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c +index 40d2d39151bf..3ecc070738e8 100644 +--- a/sound/core/oss/pcm_plugin.c ++++ b/sound/core/oss/pcm_plugin.c +@@ -196,7 +196,9 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin) + return 0; + } + +-snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t drv_frames) ++static 
snd_pcm_sframes_t plug_client_size(struct snd_pcm_substream *plug, ++ snd_pcm_uframes_t drv_frames, ++ bool check_size) + { + struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next; + int stream; +@@ -209,7 +211,7 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + plugin = snd_pcm_plug_last(plug); + while (plugin && drv_frames > 0) { +- if (drv_frames > plugin->buf_frames) ++ if (check_size && drv_frames > plugin->buf_frames) + drv_frames = plugin->buf_frames; + plugin_prev = plugin->prev; + if (plugin->src_frames) +@@ -222,7 +224,7 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p + plugin_next = plugin->next; + if (plugin->dst_frames) + drv_frames = plugin->dst_frames(plugin, drv_frames); +- if (drv_frames > plugin->buf_frames) ++ if (check_size && drv_frames > plugin->buf_frames) + drv_frames = plugin->buf_frames; + plugin = plugin_next; + } +@@ -231,7 +233,9 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p + return drv_frames; + } + +-snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t clt_frames) ++static snd_pcm_sframes_t plug_slave_size(struct snd_pcm_substream *plug, ++ snd_pcm_uframes_t clt_frames, ++ bool check_size) + { + struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next; + snd_pcm_sframes_t frames; +@@ -252,14 +256,14 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc + if (frames < 0) + return frames; + } +- if (frames > plugin->buf_frames) ++ if (check_size && frames > plugin->buf_frames) + frames = plugin->buf_frames; + plugin = plugin_next; + } + } else if (stream == SNDRV_PCM_STREAM_CAPTURE) { + plugin = snd_pcm_plug_last(plug); + while (plugin) { +- if (frames > plugin->buf_frames) ++ if (check_size && frames > plugin->buf_frames) + frames = plugin->buf_frames; + plugin_prev = plugin->prev; + if (plugin->src_frames) { 
+@@ -274,6 +278,18 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc + return frames; + } + ++snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, ++ snd_pcm_uframes_t drv_frames) ++{ ++ return plug_client_size(plug, drv_frames, false); ++} ++ ++snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, ++ snd_pcm_uframes_t clt_frames) ++{ ++ return plug_slave_size(plug, clt_frames, false); ++} ++ + static int snd_pcm_plug_formats(const struct snd_mask *mask, + snd_pcm_format_t format) + { +@@ -629,7 +645,7 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st + src_channels = dst_channels; + plugin = next; + } +- return snd_pcm_plug_client_size(plug, frames); ++ return plug_client_size(plug, frames, true); + } + + snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, struct snd_pcm_plugin_channel *dst_channels_final, snd_pcm_uframes_t size) +@@ -639,7 +655,7 @@ snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, str + snd_pcm_sframes_t frames = size; + int err; + +- frames = snd_pcm_plug_slave_size(plug, frames); ++ frames = plug_slave_size(plug, frames, true); + if (frames < 0) + return frames; + +diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c +index c397e7da0eac..7ccfb09535e1 100644 +--- a/sound/pci/hda/hda_beep.c ++++ b/sound/pci/hda/hda_beep.c +@@ -310,8 +310,12 @@ int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol, + { + struct hda_codec *codec = snd_kcontrol_chip(kcontrol); + struct hda_beep *beep = codec->beep; ++ int chs = get_amp_channels(kcontrol); ++ + if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) { +- ucontrol->value.integer.value[0] = ++ if (chs & 1) ++ ucontrol->value.integer.value[0] = beep->enabled; ++ if (chs & 2) + ucontrol->value.integer.value[1] = beep->enabled; + return 0; + } +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index 
a6f7561e7bb9..7d65fe31c825 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -942,6 +942,7 @@ int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card, + + /* power-up all before initialization */ + hda_set_power_state(codec, AC_PWRST_D0); ++ codec->core.dev.power.power_state = PMSG_ON; + + snd_hda_codec_proc_new(codec); + +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 890793ad85ca..d392c1ec0b28 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2034,24 +2034,15 @@ static void azx_firmware_cb(const struct firmware *fw, void *context) + { + struct snd_card *card = context; + struct azx *chip = card->private_data; +- struct pci_dev *pci = chip->pci; +- +- if (!fw) { +- dev_err(card->dev, "Cannot load firmware, aborting\n"); +- goto error; +- } + +- chip->fw = fw; ++ if (fw) ++ chip->fw = fw; ++ else ++ dev_err(card->dev, "Cannot load firmware, continue without patching\n"); + if (!chip->disabled) { + /* continue probing */ +- if (azx_probe_continue(chip)) +- goto error; ++ azx_probe_continue(chip); + } +- return; /* OK */ +- +- error: +- snd_card_free(card); +- pci_set_drvdata(pci, NULL); + } + #endif + +@@ -2177,6 +2168,17 @@ static const struct hdac_io_ops pci_hda_io_ops = { + .dma_free_pages = dma_free_pages, + }; + ++/* Blacklist for skipping the whole probe: ++ * some HD-audio PCI entries are exposed without any codecs, and such devices ++ * should be ignored from the beginning. 
++ */ ++static const struct snd_pci_quirk driver_blacklist[] = { ++ SND_PCI_QUIRK(0x1043, 0x874f, "ASUS ROG Zenith II / Strix", 0), ++ SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0), ++ SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0), ++ {} ++}; ++ + static const struct hda_controller_ops pci_hda_ops = { + .disable_msi_reset_irq = disable_msi_reset_irq, + .substream_alloc_pages = substream_alloc_pages, +@@ -2196,6 +2198,11 @@ static int azx_probe(struct pci_dev *pci, + bool schedule_probe; + int err; + ++ if (snd_pci_quirk_lookup(pci, driver_blacklist)) { ++ dev_info(&pci->dev, "Skipping the blacklisted device\n"); ++ return -ENODEV; ++ } ++ + if (dev >= SNDRV_CARDS) + return -ENODEV; + if (!enable[dev]) { +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index b500dad33ea9..3fded87817c6 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -333,7 +333,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) + case 0x10ec0215: + case 0x10ec0233: + case 0x10ec0235: ++ case 0x10ec0236: + case 0x10ec0255: ++ case 0x10ec0256: + case 0x10ec0257: + case 0x10ec0282: + case 0x10ec0283: +@@ -345,11 +347,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) + case 0x10ec0300: + alc_update_coef_idx(codec, 0x10, 1<<9, 0); + break; +- case 0x10ec0236: +- case 0x10ec0256: +- alc_write_coef_idx(codec, 0x36, 0x5757); +- alc_update_coef_idx(codec, 0x10, 1<<9, 0); +- break; + case 0x10ec0275: + alc_update_coef_idx(codec, 0xe, 0, 1<<0); + break; +@@ -3122,7 +3119,13 @@ static void alc256_init(struct hda_codec *codec) + alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ + alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */ + alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15); +- alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ ++ /* ++ * Expose headphone mic (or possibly Line In on some machines) instead ++ * of PC Beep 
on 1Ah, and disable 1Ah loopback for all outputs. See ++ * Documentation/sound/hd-audio/realtek-pc-beep.rst for details of ++ * this register. ++ */ ++ alc_write_coef_idx(codec, 0x36, 0x5757); + } + + static void alc256_shutup(struct hda_codec *codec) +diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c +index 2697402b5195..41f6450a2539 100644 +--- a/sound/pci/ice1712/prodigy_hifi.c ++++ b/sound/pci/ice1712/prodigy_hifi.c +@@ -569,7 +569,7 @@ static int wm_adc_mux_enum_get(struct snd_kcontrol *kcontrol, + struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); + + mutex_lock(&ice->gpio_mutex); +- ucontrol->value.integer.value[0] = wm_get(ice, WM_ADC_MUX) & 0x1f; ++ ucontrol->value.enumerated.item[0] = wm_get(ice, WM_ADC_MUX) & 0x1f; + mutex_unlock(&ice->gpio_mutex); + return 0; + } +@@ -583,7 +583,7 @@ static int wm_adc_mux_enum_put(struct snd_kcontrol *kcontrol, + + mutex_lock(&ice->gpio_mutex); + oval = wm_get(ice, WM_ADC_MUX); +- nval = (oval & 0xe0) | ucontrol->value.integer.value[0]; ++ nval = (oval & 0xe0) | ucontrol->value.enumerated.item[0]; + if (nval != oval) { + wm_put(ice, WM_ADC_MUX, nval); + change = 1; +diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c +index 0f3604b55942..6044b3bbb121 100644 +--- a/sound/soc/intel/atom/sst-atom-controls.c ++++ b/sound/soc/intel/atom/sst-atom-controls.c +@@ -1341,7 +1341,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute) + dai->capture_widget->name); + w = dai->capture_widget; + snd_soc_dapm_widget_for_each_source_path(w, p) { +- if (p->connected && !p->connected(w, p->sink)) ++ if (p->connected && !p->connected(w, p->source)) + continue; + + if (p->connect && p->source->power && +diff --git a/sound/soc/intel/atom/sst/sst_pci.c b/sound/soc/intel/atom/sst/sst_pci.c +index 6906ee624cf6..438c7bcd8c4c 100644 +--- a/sound/soc/intel/atom/sst/sst_pci.c ++++ b/sound/soc/intel/atom/sst/sst_pci.c +@@ -107,7 +107,7 @@ static int 
sst_platform_get_resources(struct intel_sst_drv *ctx) + dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram); + do_release_regions: + pci_release_regions(pci); +- return 0; ++ return ret; + } + + /* +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index fb2fef166672..7861cf7a4488 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -799,7 +799,13 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i, + val = max - val; + p->connect = !!val; + } else { +- p->connect = 0; ++ /* since a virtual mixer has no backing registers to ++ * decide which path to connect, it will try to match ++ * with initial state. This is to ensure ++ * that the default mixer choice will be ++ * correctly powered up during initialization. ++ */ ++ p->connect = invert; + } + } + +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c +index 500f98c730b9..d5ef627e93be 100644 +--- a/sound/soc/soc-ops.c ++++ b/sound/soc/soc-ops.c +@@ -837,7 +837,7 @@ int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol, + unsigned int regbase = mc->regbase; + unsigned int regcount = mc->regcount; + unsigned int regwshift = component->val_bytes * BITS_PER_BYTE; +- unsigned int regwmask = (1<<regwshift)-1; ++ unsigned int regwmask = (1UL<<regwshift)-1; + unsigned int invert = mc->invert; + unsigned long mask = (1UL<<mc->nbits)-1; + long min = mc->min; +@@ -886,7 +886,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol, + unsigned int regbase = mc->regbase; + unsigned int regcount = mc->regcount; + unsigned int regwshift = component->val_bytes * BITS_PER_BYTE; +- unsigned int regwmask = (1<<regwshift)-1; ++ unsigned int regwmask = (1UL<<regwshift)-1; + unsigned int invert = mc->invert; + unsigned long mask = (1UL<<mc->nbits)-1; + long max = mc->max; +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index e75822dd9930..fd4b71729eed 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -2048,7 +2048,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime 
*fe, int stream, + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) && +- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)) ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) && ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) + continue; + + ret = dpcm_do_trigger(dpcm, be_substream, cmd); +@@ -2078,7 +2079,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, + be->dpcm[stream].state = SND_SOC_DPCM_STATE_START; + break; + case SNDRV_PCM_TRIGGER_STOP: +- if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) ++ if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) && ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) + continue; + + if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream)) +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c +index 1a912f72bddd..a215b9ad148c 100644 +--- a/sound/soc/soc-topology.c ++++ b/sound/soc/soc-topology.c +@@ -421,7 +421,7 @@ static int soc_tplg_add_kcontrol(struct soc_tplg *tplg, + struct snd_soc_component *comp = tplg->comp; + + return soc_tplg_add_dcontrol(comp->card->snd_card, +- comp->dev, k, NULL, comp, kcontrol); ++ comp->dev, k, comp->name_prefix, comp, kcontrol); + } + + /* remove a mixer kcontrol */ +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index e6e4c3b9d9d3..7b75208d5cea 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -2342,7 +2342,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) + if (map->id == state.chip->usb_id) { + state.map = map->map; + state.selector_map = map->selector_map; +- mixer->ignore_ctl_error = map->ignore_ctl_error; ++ mixer->ignore_ctl_error |= map->ignore_ctl_error; + break; + } + } +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c +index eaa03acd4686..26ce6838e842 100644 +--- a/sound/usb/mixer_maps.c ++++ b/sound/usb/mixer_maps.c +@@ -363,6 +363,14 @@ static const struct usbmix_name_map dell_alc4020_map[] = { + { 0 } + }; + 
++/* Some mobos shipped with a dummy HD-audio show the invalid GET_MIN/GET_MAX ++ * response for Input Gain Pad (id=19, control=12). Skip it. ++ */ ++static const struct usbmix_name_map asus_rog_map[] = { ++ { 19, NULL, 12 }, /* FU, Input Gain Pad */ ++ {} ++}; ++ + /* + * Control map entries + */ +@@ -482,6 +490,26 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { + .id = USB_ID(0x05a7, 0x1020), + .map = bose_companion5_map, + }, ++ { /* Gigabyte TRX40 Aorus Pro WiFi */ ++ .id = USB_ID(0x0414, 0xa002), ++ .map = asus_rog_map, ++ }, ++ { /* ASUS ROG Zenith II */ ++ .id = USB_ID(0x0b05, 0x1916), ++ .map = asus_rog_map, ++ }, ++ { /* ASUS ROG Strix */ ++ .id = USB_ID(0x0b05, 0x1917), ++ .map = asus_rog_map, ++ }, ++ { /* MSI TRX40 Creator */ ++ .id = USB_ID(0x0db0, 0x0d64), ++ .map = asus_rog_map, ++ }, ++ { /* MSI TRX40 */ ++ .id = USB_ID(0x0db0, 0x543d), ++ .map = asus_rog_map, ++ }, + { 0 } /* terminator */ + }; + +diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile +index 6a73c06e069c..3dbf7e8b07a5 100644 +--- a/tools/gpio/Makefile ++++ b/tools/gpio/Makefile +@@ -35,7 +35,7 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h + + prepare: $(OUTPUT)include/linux/gpio.h + +-GPIO_UTILS_IN := $(output)gpio-utils-in.o ++GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o + $(GPIO_UTILS_IN): prepare FORCE + $(Q)$(MAKE) $(build)=gpio-utils + +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index 5422543faff8..ccd5319d1284 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -915,10 +915,7 @@ static struct rela *find_switch_table(struct objtool_file *file, + * it. 
+ */ + for (; +- &insn->list != &file->insn_list && +- insn->sec == func->sec && +- insn->offset >= func->offset; +- ++ &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func; + insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { + + if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config +index b97e31498ff7..8baaf9797101 100644 +--- a/tools/perf/Makefile.config ++++ b/tools/perf/Makefile.config +@@ -179,8 +179,17 @@ strip-libs = $(filter-out -l%,$(1)) + + PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) + ++# Python 3.8 changed the output of `python-config --ldflags` to not include the ++# '-lpythonX.Y' flag unless '--embed' is also passed. The feature check for ++# libpython fails if that flag is not included in LDFLAGS ++ifeq ($(shell $(PYTHON_CONFIG_SQ) --ldflags --embed 2>&1 1>/dev/null; echo $$?), 0) ++ PYTHON_CONFIG_LDFLAGS := --ldflags --embed ++else ++ PYTHON_CONFIG_LDFLAGS := --ldflags ++endif ++ + ifdef PYTHON_CONFIG +- PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) ++ PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) $(PYTHON_CONFIG_LDFLAGS) 2>/dev/null) + PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) + PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil + PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) +diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c +index 6f22238f3217..12aaa063196e 100644 +--- a/tools/testing/selftests/x86/ptrace_syscall.c ++++ b/tools/testing/selftests/x86/ptrace_syscall.c +@@ -414,8 +414,12 @@ int main() + + #if defined(__i386__) && (!defined(__GLIBC__) || __GLIBC__ > 2 || __GLIBC_MINOR__ >= 16) + vsyscall32 = (void *)getauxval(AT_SYSINFO); +- printf("[RUN]\tCheck AT_SYSINFO return regs\n"); +- test_sys32_regs(do_full_vsyscall32); ++ if (vsyscall32) { ++ printf("[RUN]\tCheck 
AT_SYSINFO return regs\n"); ++ test_sys32_regs(do_full_vsyscall32); ++ } else { ++ printf("[SKIP]\tAT_SYSINFO is not available\n"); ++ } + #endif + + test_ptrace_syscall_restart(); |