author    | Alice Ferrazzi <alicef@gentoo.org> | 2017-01-20 12:44:14 +0000
committer | Alice Ferrazzi <alicef@gentoo.org> | 2017-01-20 12:44:14 +0000
commit    | 33cb8d275a5991956a95fe233a4f172fce68fbe2 (patch)
tree      | cdf59d6e46592e5ba773a559f2b845ba44ed5827
parent    | Linux patch 4.4.43 (diff)
download  | linux-patches-4.4-48.tar.gz, linux-patches-4.4-48.tar.bz2, linux-patches-4.4-48.zip
Linux patch 4.4.44 (tag: 4.4-48)
-rw-r--r-- | 0000_README             |    4
-rw-r--r-- | 1043_linux-4.4.44.patch | 1545
2 files changed, 1549 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 9bc21938..751eb4c1 100644 --- a/0000_README +++ b/0000_README @@ -215,6 +215,10 @@ Patch: 1042_linux-4.4.43.patch From: http://www.kernel.org Desc: Linux 4.4.43 +Patch: 1043_linux-4.4.44.patch +From: http://www.kernel.org +Desc: Linux 4.4.44 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1043_linux-4.4.44.patch b/1043_linux-4.4.44.patch new file mode 100644 index 00000000..5f1b50be --- /dev/null +++ b/1043_linux-4.4.44.patch @@ -0,0 +1,1545 @@ +diff --git a/Makefile b/Makefile +index 04a2186a4276..d6a1de0e2bd7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 43 ++SUBLEVEL = 44 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c +index ac86c53e2542..e524a775fa5c 100644 +--- a/arch/powerpc/kernel/ibmebus.c ++++ b/arch/powerpc/kernel/ibmebus.c +@@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn) + static int ibmebus_create_devices(const struct of_device_id *matches) + { + struct device_node *root, *child; ++ struct device *dev; + int ret = 0; + + root = of_find_node_by_path("/"); +@@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches) + if (!of_match_node(matches, child)) + continue; + +- if (bus_find_device(&ibmebus_bus_type, NULL, child, +- ibmebus_match_node)) ++ dev = bus_find_device(&ibmebus_bus_type, NULL, child, ++ ibmebus_match_node); ++ if (dev) { ++ put_device(dev); + continue; ++ } + + ret = ibmebus_create_device(child); + if (ret) { +@@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, + const char *buf, size_t count) + { + struct device_node *dn = NULL; ++ struct device *dev; + char *path; + ssize_t rc = 0; + +@@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, + if (!path) + return -ENOMEM; + +- if (bus_find_device(&ibmebus_bus_type, NULL, path, +- ibmebus_match_path)) { ++ dev = bus_find_device(&ibmebus_bus_type, NULL, path, ++ ibmebus_match_path); ++ if (dev) { ++ put_device(dev); + printk(KERN_WARNING "%s: %s has already been probed\n", + __func__, path); + rc = -EEXIST; +@@ -307,6 +314,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, + if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path, + ibmebus_match_path))) { + of_device_unregister(to_platform_device(dev)); ++ put_device(dev); + + kfree(path); + return count; +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 2b49b113d65d..637ca414d431 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1129,7 +1129,7 @@ static __init int setup_disablecpuid(char *arg) + { + int bit; + +- if (get_option(&arg, &bit) && bit < NCAPINTS*32) ++ if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32) + setup_clear_cpu_cap(bit); + else + return 0; +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index f49e98062ea5..1dcea225977d 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -172,6 +172,7 @@ + #define NearBranch ((u64)1 << 52) /* Near branches */ + #define No16 ((u64)1 << 53) /* No 16 bit operand */ + #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */ ++#define Aligned16 ((u64)1 << 55) /* Aligned to 16 byte boundary (e.g. 
FXSAVE) */ + + #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) + +@@ -434,6 +435,26 @@ FOP_END; + FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET + FOP_END; + ++/* ++ * XXX: inoutclob user must know where the argument is being expanded. ++ * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault. ++ */ ++#define asm_safe(insn, inoutclob...) \ ++({ \ ++ int _fault = 0; \ ++ \ ++ asm volatile("1:" insn "\n" \ ++ "2:\n" \ ++ ".pushsection .fixup, \"ax\"\n" \ ++ "3: movl $1, %[_fault]\n" \ ++ " jmp 2b\n" \ ++ ".popsection\n" \ ++ _ASM_EXTABLE(1b, 3b) \ ++ : [_fault] "+qm"(_fault) inoutclob ); \ ++ \ ++ _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \ ++}) ++ + static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, + enum x86_intercept intercept, + enum x86_intercept_stage stage) +@@ -620,21 +641,24 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, + * depending on whether they're AVX encoded or not. + * + * Also included is CMPXCHG16B which is not a vector instruction, yet it is +- * subject to the same check. ++ * subject to the same check. FXSAVE and FXRSTOR are checked here too as their ++ * 512 bytes of data must be aligned to a 16 byte boundary. + */ +-static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size) ++static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size) + { + if (likely(size < 16)) +- return false; ++ return 1; + + if (ctxt->d & Aligned) +- return true; ++ return size; + else if (ctxt->d & Unaligned) +- return false; ++ return 1; + else if (ctxt->d & Avx) +- return false; ++ return 1; ++ else if (ctxt->d & Aligned16) ++ return 16; + else +- return true; ++ return size; + } + + static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, +@@ -692,7 +716,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, + } + break; + } +- if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) ++ if (la & (insn_alignment(ctxt, size) - 1)) + return emulate_gp(ctxt, 0); + return X86EMUL_CONTINUE; + bad: +@@ -779,6 +803,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); + } + ++static int segmented_write_std(struct x86_emulate_ctxt *ctxt, ++ struct segmented_address addr, ++ void *data, ++ unsigned int size) ++{ ++ int rc; ++ ulong linear; ++ ++ rc = linearize(ctxt, addr, size, true, &linear); ++ if (rc != X86EMUL_CONTINUE) ++ return rc; ++ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception); ++} ++ + /* + * Prefetch the remaining bytes of the instruction without crossing page + * boundary if they are not in fetch_cache yet. 
+@@ -1532,7 +1570,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, + &ctxt->exception); + } + +-/* Does not support long mode */ + static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + u16 selector, int seg, u8 cpl, + enum x86_transfer_type transfer, +@@ -1569,20 +1606,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + + rpl = selector & 3; + +- /* NULL selector is not valid for TR, CS and SS (except for long mode) */ +- if ((seg == VCPU_SREG_CS +- || (seg == VCPU_SREG_SS +- && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) +- || seg == VCPU_SREG_TR) +- && null_selector) +- goto exception; +- + /* TR should be in GDT only */ + if (seg == VCPU_SREG_TR && (selector & (1 << 2))) + goto exception; + +- if (null_selector) /* for NULL selector skip all following checks */ ++ /* NULL selector is not valid for TR, CS and (except for long mode) SS */ ++ if (null_selector) { ++ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR) ++ goto exception; ++ ++ if (seg == VCPU_SREG_SS) { ++ if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl) ++ goto exception; ++ ++ /* ++ * ctxt->ops->set_segment expects the CPL to be in ++ * SS.DPL, so fake an expand-up 32-bit data segment. ++ */ ++ seg_desc.type = 3; ++ seg_desc.p = 1; ++ seg_desc.s = 1; ++ seg_desc.dpl = cpl; ++ seg_desc.d = 1; ++ seg_desc.g = 1; ++ } ++ ++ /* Skip all following checks */ + goto load; ++ } + + ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); + if (ret != X86EMUL_CONTINUE) +@@ -1698,6 +1749,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + u16 selector, int seg) + { + u8 cpl = ctxt->ops->cpl(ctxt); ++ ++ /* ++ * None of MOV, POP and LSS can load a NULL selector in CPL=3, but ++ * they can load it at CPL<3 (Intel's manual says only LSS can, ++ * but it's wrong). ++ * ++ * However, the Intel manual says that putting IST=1/DPL=3 in ++ * an interrupt gate will result in SS=3 (the AMD manual instead ++ * says it doesn't), so allow SS=3 in __load_segment_descriptor ++ * and only forbid it here. ++ */ ++ if (seg == VCPU_SREG_SS && selector == 3 && ++ ctxt->mode == X86EMUL_MODE_PROT64) ++ return emulate_exception(ctxt, GP_VECTOR, 0, true); ++ + return __load_segment_descriptor(ctxt, selector, seg, cpl, + X86_TRANSFER_NONE, NULL); + } +@@ -3646,8 +3712,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, + } + /* Disable writeback. */ + ctxt->dst.type = OP_NONE; +- return segmented_write(ctxt, ctxt->dst.addr.mem, +- &desc_ptr, 2 + ctxt->op_bytes); ++ return segmented_write_std(ctxt, ctxt->dst.addr.mem, ++ &desc_ptr, 2 + ctxt->op_bytes); + } + + static int em_sgdt(struct x86_emulate_ctxt *ctxt) +@@ -3830,6 +3896,131 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt) + return X86EMUL_CONTINUE; + } + ++static int check_fxsr(struct x86_emulate_ctxt *ctxt) ++{ ++ u32 eax = 1, ebx, ecx = 0, edx; ++ ++ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); ++ if (!(edx & FFL(FXSR))) ++ return emulate_ud(ctxt); ++ ++ if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) ++ return emulate_nm(ctxt); ++ ++ /* ++ * Don't emulate a case that should never be hit, instead of working ++ * around a lack of fxsave64/fxrstor64 on old compilers. 
++ */ ++ if (ctxt->mode >= X86EMUL_MODE_PROT64) ++ return X86EMUL_UNHANDLEABLE; ++ ++ return X86EMUL_CONTINUE; ++} ++ ++/* ++ * FXSAVE and FXRSTOR have 4 different formats depending on execution mode, ++ * 1) 16 bit mode ++ * 2) 32 bit mode ++ * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs ++ * preserve whole 32 bit values, though, so (1) and (2) are the same wrt. ++ * save and restore ++ * 3) 64-bit mode with REX.W prefix ++ * - like (2), but XMM 8-15 are being saved and restored ++ * 4) 64-bit mode without REX.W prefix ++ * - like (3), but FIP and FDP are 64 bit ++ * ++ * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the ++ * desired result. (4) is not emulated. ++ * ++ * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS ++ * and FPU DS) should match. ++ */ ++static int em_fxsave(struct x86_emulate_ctxt *ctxt) ++{ ++ struct fxregs_state fx_state; ++ size_t size; ++ int rc; ++ ++ rc = check_fxsr(ctxt); ++ if (rc != X86EMUL_CONTINUE) ++ return rc; ++ ++ ctxt->ops->get_fpu(ctxt); ++ ++ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); ++ ++ ctxt->ops->put_fpu(ctxt); ++ ++ if (rc != X86EMUL_CONTINUE) ++ return rc; ++ ++ if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR) ++ size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]); ++ else ++ size = offsetof(struct fxregs_state, xmm_space[0]); ++ ++ return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); ++} ++ ++static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt, ++ struct fxregs_state *new) ++{ ++ int rc = X86EMUL_CONTINUE; ++ struct fxregs_state old; ++ ++ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old)); ++ if (rc != X86EMUL_CONTINUE) ++ return rc; ++ ++ /* ++ * 64 bit host will restore XMM 8-15, which is not correct on non-64 ++ * bit guests. Load the current values in order to preserve 64 bit ++ * XMMs after fxrstor. ++ */ ++#ifdef CONFIG_X86_64 ++ /* XXX: accessing XMM 8-15 very awkwardly */ ++ memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16); ++#endif ++ ++ /* ++ * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but ++ * does save and restore MXCSR. 
++ */ ++ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)) ++ memcpy(new->xmm_space, old.xmm_space, 8 * 16); ++ ++ return rc; ++} ++ ++static int em_fxrstor(struct x86_emulate_ctxt *ctxt) ++{ ++ struct fxregs_state fx_state; ++ int rc; ++ ++ rc = check_fxsr(ctxt); ++ if (rc != X86EMUL_CONTINUE) ++ return rc; ++ ++ rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512); ++ if (rc != X86EMUL_CONTINUE) ++ return rc; ++ ++ if (fx_state.mxcsr >> 16) ++ return emulate_gp(ctxt, 0); ++ ++ ctxt->ops->get_fpu(ctxt); ++ ++ if (ctxt->mode < X86EMUL_MODE_PROT64) ++ rc = fxrstor_fixup(ctxt, &fx_state); ++ ++ if (rc == X86EMUL_CONTINUE) ++ rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state)); ++ ++ ctxt->ops->put_fpu(ctxt); ++ ++ return rc; ++} ++ + static bool valid_cr(int nr) + { + switch (nr) { +@@ -4182,7 +4373,9 @@ static const struct gprefix pfx_0f_ae_7 = { + }; + + static const struct group_dual group15 = { { +- N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7), ++ I(ModRM | Aligned16, em_fxsave), ++ I(ModRM | Aligned16, em_fxrstor), ++ N, N, N, N, N, GP(0, &pfx_0f_ae_7), + }, { + N, N, N, N, N, N, N, N, + } }; +@@ -5054,21 +5247,13 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) + + static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) + { +- bool fault = false; ++ int rc; + + ctxt->ops->get_fpu(ctxt); +- asm volatile("1: fwait \n\t" +- "2: \n\t" +- ".pushsection .fixup,\"ax\" \n\t" +- "3: \n\t" +- "movb $1, %[fault] \n\t" +- "jmp 2b \n\t" +- ".popsection \n\t" +- _ASM_EXTABLE(1b, 3b) +- : [fault]"+qm"(fault)); ++ rc = asm_safe("fwait"); + ctxt->ops->put_fpu(ctxt); + +- if (unlikely(fault)) ++ if (unlikely(rc != X86EMUL_CONTINUE)) + return emulate_exception(ctxt, MF_VECTOR, 0, false); + + return X86EMUL_CONTINUE; +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 4d30b865be30..1c96f09367ae 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -2187,3 +2187,9 @@ void kvm_lapic_init(void) + jump_label_rate_limit(&apic_hw_disabled, HZ); + jump_label_rate_limit(&apic_sw_disabled, HZ); + } ++ ++void kvm_lapic_exit(void) ++{ ++ static_key_deferred_flush(&apic_hw_disabled); ++ static_key_deferred_flush(&apic_sw_disabled); ++} +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h +index fde8e35d5850..eb418fd670ff 100644 +--- a/arch/x86/kvm/lapic.h ++++ b/arch/x86/kvm/lapic.h +@@ -95,6 +95,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) + + int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data); + void kvm_lapic_init(void); ++void kvm_lapic_exit(void); + + static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off) + { +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index b3c2ae7aa213..25a6efcfdf7f 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -5842,6 +5842,7 @@ out: + + void kvm_arch_exit(void) + { ++ kvm_lapic_exit(); + perf_unregister_guest_info_callbacks(&kvm_guest_cbs); + + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) +diff --git a/block/blk-mq.c b/block/blk-mq.c +index 9f99a01b00e8..6cfc6b200366 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -842,7 +842,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) + return WORK_CPU_UNBOUND; + + if (--hctx->next_cpu_batch <= 0) { +- int cpu = hctx->next_cpu, next_cpu; ++ int next_cpu; + + next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask); + if (next_cpu >= nr_cpu_ids) +@@ -850,8 +850,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) + + hctx->next_cpu = next_cpu; + 
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; +- +- return cpu; + } + + return hctx->next_cpu; +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c +index 3ad307ee6029..e04a7b8492cf 100644 +--- a/block/cfq-iosched.c ++++ b/block/cfq-iosched.c +@@ -1572,7 +1572,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp) + { + struct cfq_group_data *cgd; + +- cgd = kzalloc(sizeof(*cgd), GFP_KERNEL); ++ cgd = kzalloc(sizeof(*cgd), gfp); + if (!cgd) + return NULL; + return &cgd->cpd; +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c +index 8f8da9f92090..eac4f3b02df9 100644 +--- a/drivers/acpi/apei/ghes.c ++++ b/drivers/acpi/apei/ghes.c +@@ -847,6 +847,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) + if (ghes_read_estatus(ghes, 1)) { + ghes_clear_estatus(ghes); + continue; ++ } else { ++ ret = NMI_HANDLED; + } + + sev = ghes_severity(ghes->estatus->error_severity); +@@ -858,12 +860,11 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) + + __process_error(ghes); + ghes_clear_estatus(ghes); +- +- ret = NMI_HANDLED; + } + + #ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG +- irq_work_queue(&ghes_proc_irq_work); ++ if (ret == NMI_HANDLED) ++ irq_work_queue(&ghes_proc_irq_work); + #endif + atomic_dec(&ghes_in_nmi); + return ret; +diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c +index 6575c0fe6a4e..27ea64fa4f9b 100644 +--- a/drivers/bus/vexpress-config.c ++++ b/drivers/bus/vexpress-config.c +@@ -171,6 +171,7 @@ static int vexpress_config_populate(struct device_node *node) + { + struct device_node *bridge; + struct device *parent; ++ int ret; + + bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0); + if (!bridge) +@@ -181,7 +182,11 @@ static int vexpress_config_populate(struct device_node *node) + if (WARN_ON(!parent)) + return -ENODEV; + +- return of_platform_populate(node, NULL, NULL, parent); ++ ret = of_platform_populate(node, NULL, NULL, parent); ++ ++ put_device(parent); ++ ++ return ret; + } + + static int __init vexpress_config_init(void) +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c +index cb501386eb6e..c4b0ef65988c 100644 +--- a/drivers/cpufreq/powernv-cpufreq.c ++++ b/drivers/cpufreq/powernv-cpufreq.c +@@ -373,8 +373,14 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, + if (unlikely(rebooting) && new_index != get_nominal_index()) + return 0; + +- if (!throttled) ++ if (!throttled) { ++ /* we don't want to be preempted while ++ * checking if the CPU frequency has been throttled ++ */ ++ preempt_disable(); + powernv_cpufreq_throttle_check(NULL); ++ preempt_enable(); ++ } + + freq_data.pstate_id = powernv_freqs[new_index].driver_data; + +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c +index eb2a2a49974f..b6f16804e73b 100644 +--- a/drivers/gpu/drm/radeon/si_dpm.c ++++ b/drivers/gpu/drm/radeon/si_dpm.c +@@ -3008,19 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, + (rdev->pdev->device == 0x6817) || + (rdev->pdev->device == 0x6806)) + max_mclk = 120000; +- } else if (rdev->family == CHIP_VERDE) { +- if ((rdev->pdev->revision == 0x81) || +- (rdev->pdev->revision == 0x83) || +- (rdev->pdev->revision == 0x87) || +- (rdev->pdev->device == 0x6820) || +- (rdev->pdev->device == 0x6821) || +- (rdev->pdev->device == 0x6822) || +- (rdev->pdev->device == 0x6823) || +- (rdev->pdev->device == 0x682A) || +- (rdev->pdev->device == 0x682B)) { +- max_sclk = 75000; +- max_mclk = 80000; +- } + } 
else if (rdev->family == CHIP_OLAND) { + if ((rdev->pdev->revision == 0xC7) || + (rdev->pdev->revision == 0x80) || +diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c +index d625167357cc..e4587411b447 100644 +--- a/drivers/i2c/i2c-core.c ++++ b/drivers/i2c/i2c-core.c +@@ -1400,7 +1400,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, + + if (i2c_check_addr_validity(addr, info.flags)) { + dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n", +- info.addr, node->full_name); ++ addr, node->full_name); + return ERR_PTR(-EINVAL); + } + +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index 2413ec9f8207..94c837046786 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -329,7 +329,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, + unsigned long arg) + { + struct i2c_smbus_ioctl_data data_arg; +- union i2c_smbus_data temp; ++ union i2c_smbus_data temp = {}; + int datasize, res; + + if (copy_from_user(&data_arg, +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index aff42d5e2296..16f000a76de5 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -1238,6 +1238,12 @@ static int xpad_init_input(struct usb_xpad *xpad) + input_dev->name = xpad->name; + input_dev->phys = xpad->phys; + usb_to_input_id(xpad->udev, &input_dev->id); ++ ++ if (xpad->xtype == XTYPE_XBOX360W) { ++ /* x360w controllers and the receiver have different ids */ ++ input_dev->id.product = 0x02a1; ++ } ++ + input_dev->dev.parent = &xpad->intf->dev; + + input_set_drvdata(input_dev, xpad); +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 073246c7d163..0cdd95801a25 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), + }, + }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c +index ac09855fa435..486f8fe242da 100644 +--- a/drivers/input/touchscreen/elants_i2c.c ++++ b/drivers/input/touchscreen/elants_i2c.c +@@ -905,9 +905,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev) + + case QUEUE_HEADER_NORMAL: + report_count = ts->buf[FW_HDR_COUNT]; +- if (report_count > 3) { ++ if (report_count == 0 || report_count > 3) { + dev_err(&client->dev, +- "too large report count: %*ph\n", ++ "bad report count: %*ph\n", + HEADER_SIZE, ts->buf); + break; + } +diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c +index 84943e4cff09..13730ca151ad 100644 +--- a/drivers/pinctrl/meson/pinctrl-meson.c ++++ b/drivers/pinctrl/meson/pinctrl-meson.c +@@ -246,7 +246,7 @@ static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev, + { + struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev); + +- meson_pmx_disable_other_groups(pc, range->pin_base + offset, -1); ++ meson_pmx_disable_other_groups(pc, offset, -1); + + return 0; + } +diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c +index 863c3e30ce05..50f2014fed55 100644 +--- a/drivers/pinctrl/sh-pfc/pinctrl.c ++++ b/drivers/pinctrl/sh-pfc/pinctrl.c +@@ -483,7 +483,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin, + + switch (param) { + case 
PIN_CONFIG_BIAS_DISABLE: +- return true; ++ return pin->configs & ++ (SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN); + + case PIN_CONFIG_BIAS_PULL_UP: + return pin->configs & SH_PFC_PIN_CFG_PULL_UP; +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index e0b89b961e1b..a0f911641b04 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -470,6 +470,14 @@ static void atmel_stop_tx(struct uart_port *port) + /* disable PDC transmit */ + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); + } ++ ++ /* ++ * Disable the transmitter. ++ * This is mandatory when DMA is used, otherwise the DMA buffer ++ * is fully transmitted. ++ */ ++ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS); ++ + /* Disable interrupts */ + atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); + +@@ -502,6 +510,9 @@ static void atmel_start_tx(struct uart_port *port) + + /* Enable interrupts */ + atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); ++ ++ /* re-enable the transmitter */ ++ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); + } + + /* +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c +index 5381a728d23e..1fa4128eb88e 100644 +--- a/drivers/tty/sysrq.c ++++ b/drivers/tty/sysrq.c +@@ -939,8 +939,8 @@ static const struct input_device_id sysrq_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | + INPUT_DEVICE_ID_MATCH_KEYBIT, +- .evbit = { BIT_MASK(EV_KEY) }, +- .keybit = { BIT_MASK(KEY_LEFTALT) }, ++ .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) }, ++ .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) }, + }, + { }, + }; +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index fa5d8c2f6982..de7dce6eb474 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -913,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) + spin_lock_irqsave(&xhci->lock, flags); + + ep->stop_cmds_pending--; +- if (xhci->xhc_state & XHCI_STATE_REMOVING) { +- spin_unlock_irqrestore(&xhci->lock, flags); +- return; +- } +- if (xhci->xhc_state & XHCI_STATE_DYING) { +- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, +- "Stop EP timer ran, but another timer marked " +- "xHCI as DYING, exiting."); +- spin_unlock_irqrestore(&xhci->lock, flags); +- return; +- } + if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Stop EP timer ran, but no command pending, " +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index a8b3c0fc11fb..f2e9f59c90d6 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1569,19 +1569,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) + xhci_urb_free_priv(urb_priv); + return ret; + } +- if ((xhci->xhc_state & XHCI_STATE_DYING) || +- (xhci->xhc_state & XHCI_STATE_HALTED)) { +- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, +- "Ep 0x%x: URB %p to be canceled on " +- "non-responsive xHCI host.", +- urb->ep->desc.bEndpointAddress, urb); +- /* Let the stop endpoint command watchdog timer (which set this +- * state) finish cleaning up the endpoint TD lists. We must +- * have caught it in the middle of dropping a lock and giving +- * back an URB. 
+- */ +- goto done; +- } + + ep_index = xhci_get_endpoint_index(&urb->ep->desc); + ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c +index c73808f095bb..71133d96f97d 100644 +--- a/drivers/usb/serial/ch341.c ++++ b/drivers/usb/serial/ch341.c +@@ -99,6 +99,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request, + r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, + value, index, NULL, 0, DEFAULT_TIMEOUT); ++ if (r < 0) ++ dev_err(&dev->dev, "failed to send control message: %d\n", r); + + return r; + } +@@ -116,7 +118,20 @@ static int ch341_control_in(struct usb_device *dev, + r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, + value, index, buf, bufsize, DEFAULT_TIMEOUT); +- return r; ++ if (r < bufsize) { ++ if (r >= 0) { ++ dev_err(&dev->dev, ++ "short control message received (%d < %u)\n", ++ r, bufsize); ++ r = -EIO; ++ } ++ ++ dev_err(&dev->dev, "failed to receive control message: %d\n", ++ r); ++ return r; ++ } ++ ++ return 0; + } + + static int ch341_set_baudrate(struct usb_device *dev, +@@ -158,9 +173,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control) + + static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) + { ++ const unsigned int size = 2; + char *buffer; + int r; +- const unsigned size = 8; + unsigned long flags; + + buffer = kmalloc(size, GFP_KERNEL); +@@ -171,14 +186,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) + if (r < 0) + goto out; + +- /* setup the private status if available */ +- if (r == 2) { +- r = 0; +- spin_lock_irqsave(&priv->lock, flags); +- priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; +- spin_unlock_irqrestore(&priv->lock, flags); +- } else +- r = -EPROTO; ++ spin_lock_irqsave(&priv->lock, flags); ++ priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; ++ spin_unlock_irqrestore(&priv->lock, flags); + + out: kfree(buffer); + return r; +@@ -188,9 +198,9 @@ out: kfree(buffer); + + static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) + { ++ const unsigned int size = 2; + char *buffer; + int r; +- const unsigned size = 8; + + buffer = kmalloc(size, GFP_KERNEL); + if (!buffer) +@@ -253,7 +263,6 @@ static int ch341_port_probe(struct usb_serial_port *port) + + spin_lock_init(&priv->lock); + priv->baud_rate = DEFAULT_BAUD_RATE; +- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; + + r = ch341_configure(port->serial->dev, priv); + if (r < 0) +@@ -315,7 +324,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) + + r = ch341_configure(serial->dev, priv); + if (r) +- goto out; ++ return r; + + if (tty) + ch341_set_termios(tty, port, NULL); +@@ -325,12 +334,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) + if (r) { + dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n", + __func__, r); +- goto out; ++ return r; + } + + r = usb_serial_generic_open(tty, port); ++ if (r) ++ goto err_kill_interrupt_urb; + +-out: return r; ++ return 0; ++ ++err_kill_interrupt_urb: ++ usb_kill_urb(port->interrupt_in_urb); ++ ++ return r; + } + + /* Old_termios contains the original termios settings and +@@ -345,26 +361,25 @@ static void ch341_set_termios(struct tty_struct *tty, + + baud_rate = tty_get_baud_rate(tty); + +- priv->baud_rate = baud_rate; +- + if (baud_rate) { +- 
spin_lock_irqsave(&priv->lock, flags); +- priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); +- spin_unlock_irqrestore(&priv->lock, flags); ++ priv->baud_rate = baud_rate; + ch341_set_baudrate(port->serial->dev, priv); +- } else { +- spin_lock_irqsave(&priv->lock, flags); +- priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); +- spin_unlock_irqrestore(&priv->lock, flags); + } + +- ch341_set_handshake(port->serial->dev, priv->line_control); +- + /* Unimplemented: + * (cflag & CSIZE) : data bits [5, 8] + * (cflag & PARENB) : parity {NONE, EVEN, ODD} + * (cflag & CSTOPB) : stop bits [1, 2] + */ ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ if (C_BAUD(tty) == B0) ++ priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); ++ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) ++ priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); ++ spin_unlock_irqrestore(&priv->lock, flags); ++ ++ ch341_set_handshake(port->serial->dev, priv->line_control); + } + + static void ch341_break_ctl(struct tty_struct *tty, int break_state) +@@ -539,14 +554,23 @@ static int ch341_tiocmget(struct tty_struct *tty) + + static int ch341_reset_resume(struct usb_serial *serial) + { +- struct ch341_private *priv; +- +- priv = usb_get_serial_port_data(serial->port[0]); ++ struct usb_serial_port *port = serial->port[0]; ++ struct ch341_private *priv = usb_get_serial_port_data(port); ++ int ret; + + /* reconfigure ch341 serial port after bus-reset */ + ch341_configure(serial->dev, priv); + +- return 0; ++ if (test_bit(ASYNCB_INITIALIZED, &port->port.flags)) { ++ ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); ++ if (ret) { ++ dev_err(&port->dev, "failed to submit interrupt urb: %d\n", ++ ret); ++ return ret; ++ } ++ } ++ ++ return usb_serial_generic_resume(serial); + } + + static struct usb_serial_driver ch341_device = { +diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c +index cd0322502ab1..83c823d32ff9 100644 +--- a/drivers/usb/serial/kl5kusb105.c ++++ b/drivers/usb/serial/kl5kusb105.c +@@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port, + status_buf, KLSI_STATUSBUF_LEN, + 10000 + ); +- if (rc < 0) +- dev_err(&port->dev, "Reading line status failed (error = %d)\n", +- rc); +- else { ++ if (rc != KLSI_STATUSBUF_LEN) { ++ dev_err(&port->dev, "reading line status failed: %d\n", rc); ++ if (rc >= 0) ++ rc = -EIO; ++ } else { + status = get_unaligned_le16(status_buf); + + dev_info(&port->serial->dev->dev, "read status %x %x\n", +diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c +index b79a74a98a23..ad94d8a45728 100644 +--- a/drivers/vme/bridges/vme_ca91cx42.c ++++ b/drivers/vme/bridges/vme_ca91cx42.c +@@ -467,7 +467,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled, + vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]); + pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]); + +- *pci_base = (dma_addr_t)vme_base + pci_offset; ++ *pci_base = (dma_addr_t)*vme_base + pci_offset; + *size = (unsigned long long)((vme_bound - *vme_base) + granularity); + + *enabled = 0; +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 2af08c3de775..2a2e370399ba 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -2520,11 +2520,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, + if (ref && ref->seq && + btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) { + spin_unlock(&locked_ref->lock); +- 
btrfs_delayed_ref_unlock(locked_ref); + spin_lock(&delayed_refs->lock); + locked_ref->processing = 0; + delayed_refs->num_heads_ready++; + spin_unlock(&delayed_refs->lock); ++ btrfs_delayed_ref_unlock(locked_ref); + locked_ref = NULL; + cond_resched(); + count++; +@@ -2570,7 +2570,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, + */ + if (must_insert_reserved) + locked_ref->must_insert_reserved = 1; ++ spin_lock(&delayed_refs->lock); + locked_ref->processing = 0; ++ delayed_refs->num_heads_ready++; ++ spin_unlock(&delayed_refs->lock); + btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret); + btrfs_delayed_ref_unlock(locked_ref); + return ret; +diff --git a/fs/dcache.c b/fs/dcache.c +index 71b6056ad35d..849c1c1e787b 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -1322,8 +1322,11 @@ int d_set_mounted(struct dentry *dentry) + } + spin_lock(&dentry->d_lock); + if (!d_unlinked(dentry)) { +- dentry->d_flags |= DCACHE_MOUNTED; +- ret = 0; ++ ret = -EBUSY; ++ if (!d_mountpoint(dentry)) { ++ dentry->d_flags |= DCACHE_MOUNTED; ++ ret = 0; ++ } + } + spin_unlock(&dentry->d_lock); + out: +diff --git a/fs/namespace.c b/fs/namespace.c +index 5be02a0635be..da98a1bbd8b5 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -743,26 +743,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry) + return NULL; + } + +-static struct mountpoint *new_mountpoint(struct dentry *dentry) ++static struct mountpoint *get_mountpoint(struct dentry *dentry) + { +- struct hlist_head *chain = mp_hash(dentry); +- struct mountpoint *mp; ++ struct mountpoint *mp, *new = NULL; + int ret; + +- mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); +- if (!mp) ++ if (d_mountpoint(dentry)) { ++mountpoint: ++ read_seqlock_excl(&mount_lock); ++ mp = lookup_mountpoint(dentry); ++ read_sequnlock_excl(&mount_lock); ++ if (mp) ++ goto done; ++ } ++ ++ if (!new) ++ new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); ++ if (!new) + return ERR_PTR(-ENOMEM); + ++ ++ /* Exactly one processes may set d_mounted */ + ret = d_set_mounted(dentry); +- if (ret) { +- kfree(mp); +- return ERR_PTR(ret); +- } + +- mp->m_dentry = dentry; +- mp->m_count = 1; +- hlist_add_head(&mp->m_hash, chain); +- INIT_HLIST_HEAD(&mp->m_list); ++ /* Someone else set d_mounted? */ ++ if (ret == -EBUSY) ++ goto mountpoint; ++ ++ /* The dentry is not available as a mountpoint? 
*/ ++ mp = ERR_PTR(ret); ++ if (ret) ++ goto done; ++ ++ /* Add the new mountpoint to the hash table */ ++ read_seqlock_excl(&mount_lock); ++ new->m_dentry = dentry; ++ new->m_count = 1; ++ hlist_add_head(&new->m_hash, mp_hash(dentry)); ++ INIT_HLIST_HEAD(&new->m_list); ++ read_sequnlock_excl(&mount_lock); ++ ++ mp = new; ++ new = NULL; ++done: ++ kfree(new); + return mp; + } + +@@ -1557,11 +1581,11 @@ void __detach_mounts(struct dentry *dentry) + struct mount *mnt; + + namespace_lock(); ++ lock_mount_hash(); + mp = lookup_mountpoint(dentry); + if (IS_ERR_OR_NULL(mp)) + goto out_unlock; + +- lock_mount_hash(); + event++; + while (!hlist_empty(&mp->m_list)) { + mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); +@@ -1571,9 +1595,9 @@ void __detach_mounts(struct dentry *dentry) + } + else umount_tree(mnt, UMOUNT_CONNECTED); + } +- unlock_mount_hash(); + put_mountpoint(mp); + out_unlock: ++ unlock_mount_hash(); + namespace_unlock(); + } + +@@ -1962,9 +1986,7 @@ retry: + namespace_lock(); + mnt = lookup_mnt(path); + if (likely(!mnt)) { +- struct mountpoint *mp = lookup_mountpoint(dentry); +- if (!mp) +- mp = new_mountpoint(dentry); ++ struct mountpoint *mp = get_mountpoint(dentry); + if (IS_ERR(mp)) { + namespace_unlock(); + mutex_unlock(&dentry->d_inode->i_mutex); +@@ -1983,7 +2005,11 @@ retry: + static void unlock_mount(struct mountpoint *where) + { + struct dentry *dentry = where->m_dentry; ++ ++ read_seqlock_excl(&mount_lock); + put_mountpoint(where); ++ read_sequnlock_excl(&mount_lock); ++ + namespace_unlock(); + mutex_unlock(&dentry->d_inode->i_mutex); + } +@@ -3055,9 +3081,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, + touch_mnt_namespace(current->nsproxy->mnt_ns); + /* A moved mount should not expire automatically */ + list_del_init(&new_mnt->mnt_expire); ++ put_mountpoint(root_mp); + unlock_mount_hash(); + chroot_fs_refs(&root, &new); +- put_mountpoint(root_mp); + error = 0; + out4: + unlock_mount(old_mp); +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index 46cfed63d229..52ee0b73ab4a 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -462,7 +462,7 @@ void nfs_force_use_readdirplus(struct inode *dir) + { + if (!list_empty(&NFS_I(dir)->open_files)) { + nfs_advise_use_readdirplus(dir); +- nfs_zap_mapping(dir, dir->i_mapping); ++ invalidate_mapping_pages(dir->i_mapping, 0, -1); + } + } + +@@ -847,17 +847,6 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc) + goto out; + } + +-static bool nfs_dir_mapping_need_revalidate(struct inode *dir) +-{ +- struct nfs_inode *nfsi = NFS_I(dir); +- +- if (nfs_attribute_cache_expired(dir)) +- return true; +- if (nfsi->cache_validity & NFS_INO_INVALID_DATA) +- return true; +- return false; +-} +- + /* The file offset position represents the dirent entry number. A + last cookie cache takes care of the common case of reading the + whole directory. +@@ -890,7 +879,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) + desc->plus = nfs_use_readdirplus(inode, ctx) ? 
1 : 0; + + nfs_block_sillyrename(dentry); +- if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode)) ++ if (ctx->pos == 0 || nfs_attribute_cache_expired(inode)) + res = nfs_revalidate_mapping(inode, file->f_mapping); + if (res < 0) + goto out; +diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c +index 4946ef40ba87..85ef38f9765f 100644 +--- a/fs/nfs/filelayout/filelayoutdev.c ++++ b/fs/nfs/filelayout/filelayoutdev.c +@@ -283,7 +283,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) + s->nfs_client->cl_rpcclient->cl_auth->au_flavor); + + out_test_devid: +- if (filelayout_test_devid_unavailable(devid)) ++ if (ret->ds_clp == NULL || ++ filelayout_test_devid_unavailable(devid)) + ret = NULL; + out: + return ret; +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index 5cd3568eea06..3cae0726c1b1 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -1185,13 +1185,11 @@ bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task) + * i_lock */ + spin_lock(&ino->i_lock); + lo = nfsi->layout; +- if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) ++ if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { ++ rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); + sleep = true; ++ } + spin_unlock(&ino->i_lock); +- +- if (sleep) +- rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); +- + return sleep; + } + +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c +index b002acf50203..60a5f1548cd9 100644 +--- a/fs/ocfs2/dlmglue.c ++++ b/fs/ocfs2/dlmglue.c +@@ -3321,6 +3321,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, + mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, + lockres->l_level, new_level); + ++ /* ++ * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always ++ * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that ++ * we can recover correctly from node failure. Otherwise, we may get ++ * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set. 
++ */ ++ if (!ocfs2_is_o2cb_active() && ++ lockres->l_ops->flags & LOCK_TYPE_USES_LVB) ++ lvb = 1; ++ + if (lvb) + dlm_flags |= DLM_LKF_VALBLK; + +diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c +index 5d965e83bd43..783bcdce5666 100644 +--- a/fs/ocfs2/stackglue.c ++++ b/fs/ocfs2/stackglue.c +@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; + */ + static struct ocfs2_stack_plugin *active_stack; + ++inline int ocfs2_is_o2cb_active(void) ++{ ++ return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB); ++} ++EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active); ++ + static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) + { + struct ocfs2_stack_plugin *p; +diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h +index 66334a30cea8..e1b30931974d 100644 +--- a/fs/ocfs2/stackglue.h ++++ b/fs/ocfs2/stackglue.h +@@ -298,4 +298,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p + int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); + void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); + ++/* In ocfs2_downconvert_lock(), we need to know which stack we are using */ ++int ocfs2_is_o2cb_active(void); ++ + #endif /* STACKGLUE_H */ +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c +index fe5b6e6c4671..4dbe1e2daeca 100644 +--- a/fs/proc/proc_sysctl.c ++++ b/fs/proc/proc_sysctl.c +@@ -703,7 +703,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx) + ctl_dir = container_of(head, struct ctl_dir, header); + + if (!dir_emit_dots(file, ctx)) +- return 0; ++ goto out; + + pos = 2; + +@@ -713,6 +713,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx) + break; + } + } ++out: + sysctl_head_finish(head); + return 0; + } +diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h +index 089f70f83e97..23da3af459fe 100644 +--- a/include/linux/jump_label_ratelimit.h ++++ b/include/linux/jump_label_ratelimit.h +@@ -14,6 +14,7 @@ struct static_key_deferred { + + #ifdef HAVE_JUMP_LABEL + extern void static_key_slow_dec_deferred(struct static_key_deferred *key); ++extern void static_key_deferred_flush(struct static_key_deferred *key); + extern void + jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); + +@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) + STATIC_KEY_CHECK_USE(); + static_key_slow_dec(&key->key); + } ++static inline void static_key_deferred_flush(struct static_key_deferred *key) ++{ ++ STATIC_KEY_CHECK_USE(); ++} + static inline void + jump_label_rate_limit(struct static_key_deferred *key, + unsigned long rl) +diff --git a/kernel/jump_label.c b/kernel/jump_label.c +index 4b353e0be121..453ec4232852 100644 +--- a/kernel/jump_label.c ++++ b/kernel/jump_label.c +@@ -138,6 +138,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key) + } + EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred); + ++void static_key_deferred_flush(struct static_key_deferred *key) ++{ ++ STATIC_KEY_CHECK_USE(); ++ flush_delayed_work(&key->work); ++} ++EXPORT_SYMBOL_GPL(static_key_deferred_flush); ++ + void jump_label_rate_limit(struct static_key_deferred *key, + unsigned long rl) + { +diff --git a/kernel/memremap.c b/kernel/memremap.c +index 25ced161ebeb..f719c925cb54 100644 +--- a/kernel/memremap.c ++++ b/kernel/memremap.c +@@ -159,7 +159,9 @@ static void devm_memremap_pages_release(struct device *dev, void *res) + struct page_map 
*page_map = res; + + /* pages are dead and unused, undo the arch mapping */ ++ mem_hotplug_begin(); + arch_remove_memory(page_map->res.start, resource_size(&page_map->res)); ++ mem_hotplug_done(); + } + + void *devm_memremap_pages(struct device *dev, struct resource *res) +@@ -189,7 +191,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res) + if (nid < 0) + nid = numa_mem_id(); + ++ mem_hotplug_begin(); + error = arch_add_memory(nid, res->start, resource_size(res), true); ++ mem_hotplug_done(); + if (error) { + devres_free(page_map); + return ERR_PTR(error); +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 4434cdd4cd9a..ea11123a9249 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -1723,23 +1723,32 @@ free: + } + + /* +- * When releasing a hugetlb pool reservation, any surplus pages that were +- * allocated to satisfy the reservation must be explicitly freed if they were +- * never used. +- * Called with hugetlb_lock held. ++ * This routine has two main purposes: ++ * 1) Decrement the reservation count (resv_huge_pages) by the value passed ++ * in unused_resv_pages. This corresponds to the prior adjustments made ++ * to the associated reservation map. ++ * 2) Free any unused surplus pages that may have been allocated to satisfy ++ * the reservation. As many as unused_resv_pages may be freed. ++ * ++ * Called with hugetlb_lock held. However, the lock could be dropped (and ++ * reacquired) during calls to cond_resched_lock. Whenever dropping the lock, ++ * we must make sure nobody else can claim pages we are in the process of ++ * freeing. Do this by ensuring resv_huge_page always is greater than the ++ * number of huge pages we plan to free when dropping the lock. + */ + static void return_unused_surplus_pages(struct hstate *h, + unsigned long unused_resv_pages) + { + unsigned long nr_pages; + +- /* Uncommit the reservation */ +- h->resv_huge_pages -= unused_resv_pages; +- + /* Cannot return gigantic pages currently */ + if (hstate_is_gigantic(h)) +- return; ++ goto out; + ++ /* ++ * Part (or even all) of the reservation could have been backed ++ * by pre-allocated pages. Only free surplus pages. ++ */ + nr_pages = min(unused_resv_pages, h->surplus_huge_pages); + + /* +@@ -1749,12 +1758,22 @@ static void return_unused_surplus_pages(struct hstate *h, + * when the nodes with surplus pages have no free pages. + * free_pool_huge_page() will balance the the freed pages across the + * on-line nodes with memory and will handle the hstate accounting. ++ * ++ * Note that we decrement resv_huge_pages as we free the pages. If ++ * we drop the lock, resv_huge_pages will still be sufficiently large ++ * to cover subsequent pages we may free. 
+ */ + while (nr_pages--) { ++ h->resv_huge_pages--; ++ unused_resv_pages--; + if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) +- break; ++ goto out; + cond_resched_lock(&hugetlb_lock); + } ++ ++out: ++ /* Fully uncommit the reservation */ ++ h->resv_huge_pages -= unused_resv_pages; + } + + +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index bf65f31bd55e..1f0de6d74daa 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -13168,13 +13168,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb, + + list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { + bool schedule_destroy_work = false; +- bool schedule_scan_stop = false; + struct cfg80211_sched_scan_request *sched_scan_req = + rcu_dereference(rdev->sched_scan_req); + + if (sched_scan_req && notify->portid && +- sched_scan_req->owner_nlportid == notify->portid) +- schedule_scan_stop = true; ++ sched_scan_req->owner_nlportid == notify->portid) { ++ sched_scan_req->owner_nlportid = 0; ++ ++ if (rdev->ops->sched_scan_stop && ++ rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ++ schedule_work(&rdev->sched_scan_stop_wk); ++ } + + list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) { + cfg80211_mlme_unregister_socket(wdev, notify->portid); +@@ -13205,12 +13209,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb, + spin_unlock(&rdev->destroy_list_lock); + schedule_work(&rdev->destroy_work); + } +- } else if (schedule_scan_stop) { +- sched_scan_req->owner_nlportid = 0; +- +- if (rdev->ops->sched_scan_stop && +- rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) +- schedule_work(&rdev->sched_scan_stop_wk); + } + } + +diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile +index c8edff6803d1..24ebd3e3eb7d 100644 +--- a/tools/testing/selftests/Makefile ++++ b/tools/testing/selftests/Makefile +@@ -83,7 +83,7 @@ ifdef INSTALL_PATH + done; + + @# Ask all targets to emit their test scripts +- echo "#!/bin/bash" > $(ALL_SCRIPT) ++ echo "#!/bin/sh" > $(ALL_SCRIPT) + echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT) + echo "ROOT=\$$PWD" >> $(ALL_SCRIPT) + +diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests +index c09a682df56a..16058bbea7a8 100755 +--- a/tools/testing/selftests/net/run_netsocktests ++++ b/tools/testing/selftests/net/run_netsocktests +@@ -1,4 +1,4 @@ +-#!/bin/bash ++#!/bin/sh + + echo "--------------------" + echo "running socket test" +diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c +index 09a03b5a21ff..e5d5dde6bf75 100644 +--- a/virt/lib/irqbypass.c ++++ b/virt/lib/irqbypass.c +@@ -188,7 +188,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer) + mutex_lock(&lock); + + list_for_each_entry(tmp, &consumers, node) { +- if (tmp->token == consumer->token) { ++ if (tmp->token == consumer->token || tmp == consumer) { + mutex_unlock(&lock); + module_put(THIS_MODULE); + return -EBUSY; +@@ -235,7 +235,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer) + mutex_lock(&lock); + + list_for_each_entry(tmp, &consumers, node) { +- if (tmp->token != consumer->token) ++ if (tmp != consumer) + continue; + + list_for_each_entry(producer, &producers, node) { |