author     Mike Pagano <mpagano@gentoo.org>  2021-07-28 08:39:37 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2021-07-28 08:39:37 -0400
commit     a9625f49cc19be85b2641f04db485c8b9250ac2a
tree       78268c695103db29dc5a3ce3ae6d2efeb2442495
parent     Linux patch 4.4.276
download   linux-patches-a9625f49cc19be85b2641f04db485c8b9250ac2a.tar.gz
linux-patches-a9625f49cc19be85b2641f04db485c8b9250ac2a.tar.bz2
linux-patches-a9625f49cc19be85b2641f04db485c8b9250ac2a.zip
Linux patch 4.4.277
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1276_linux-4.4.277.patch | 1791
2 files changed, 1795 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 145c6d9c..a79b6ce3 100644
--- a/0000_README
+++ b/0000_README
@@ -1147,6 +1147,10 @@ Patch: 1275_linux-4.4.276.patch
From: http://www.kernel.org
Desc: Linux 4.4.276
+Patch: 1276_linux-4.4.277.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.277
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1276_linux-4.4.277.patch b/1276_linux-4.4.277.patch
new file mode 100644
index 00000000..f486a3c8
--- /dev/null
+++ b/1276_linux-4.4.277.patch
@@ -0,0 +1,1791 @@
+diff --git a/Makefile b/Makefile
+index 8855bdd51f81f..6a486a5d614bd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 276
++SUBLEVEL = 277
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
+index 34cd640512509..84efc3d16f585 100644
+--- a/arch/arm/boot/dts/bcm63138.dtsi
++++ b/arch/arm/boot/dts/bcm63138.dtsi
+@@ -152,7 +152,7 @@
+ status = "disabled";
+ };
+
+- nand: nand@2000 {
++ nand_controller: nand-controller@2000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.0", "brcm,brcmnand";
+diff --git a/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts b/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts
+index 0bb8d17e4c2d0..e51c9b079432a 100644
+--- a/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts
++++ b/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts
+@@ -13,10 +13,10 @@
+ };
+ };
+
+-&nand {
++&nand_controller {
+ status = "okay";
+
+- nandcs@1 {
++ nand@1 {
+ compatible = "brcm,nandcs";
+ reg = <1>;
+ nand-ecc-step-size = <512>;
+diff --git a/arch/arm/boot/dts/bcm7445.dtsi b/arch/arm/boot/dts/bcm7445.dtsi
+index 4791321969b3f..3f002f2047f18 100644
+--- a/arch/arm/boot/dts/bcm7445.dtsi
++++ b/arch/arm/boot/dts/bcm7445.dtsi
+@@ -149,7 +149,7 @@
+ reg-names = "aon-ctrl", "aon-sram";
+ };
+
+- nand: nand@3e2800 {
++ nand_controller: nand-controller@3e2800 {
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/bcm963138dvt.dts b/arch/arm/boot/dts/bcm963138dvt.dts
+index 370aa2cfddf20..439cff69e948f 100644
+--- a/arch/arm/boot/dts/bcm963138dvt.dts
++++ b/arch/arm/boot/dts/bcm963138dvt.dts
+@@ -29,10 +29,10 @@
+ status = "okay";
+ };
+
+-&nand {
++&nand_controller {
+ status = "okay";
+
+- nandcs@0 {
++ nand@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ nand-ecc-strength = <4>;
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+index cae04e8060362..e3e3a7a08d087 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+@@ -307,8 +307,8 @@
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D30__UART3_RTS_B 0x1b0b1
+- MX6QDL_PAD_EIM_D31__UART3_CTS_B 0x1b0b1
++ MX6QDL_PAD_EIM_D31__UART3_RTS_B 0x1b0b1
++ MX6QDL_PAD_EIM_D30__UART3_CTS_B 0x1b0b1
+ >;
+ };
+
+@@ -383,6 +383,7 @@
+ &uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
++ uart-has-rtscts;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/mach-imx/suspend-imx53.S b/arch/arm/mach-imx/suspend-imx53.S
+index 5ed078ad110aa..f12d24104075b 100644
+--- a/arch/arm/mach-imx/suspend-imx53.S
++++ b/arch/arm/mach-imx/suspend-imx53.S
+@@ -33,11 +33,11 @@
+ * ^
+ * ^
+ * imx53_suspend code
+- * PM_INFO structure(imx53_suspend_info)
++ * PM_INFO structure(imx5_cpu_suspend_info)
+ * ======================== low address =======================
+ */
+
+-/* Offsets of members of struct imx53_suspend_info */
++/* Offsets of members of struct imx5_cpu_suspend_info */
+ #define SUSPEND_INFO_MX53_M4IF_V_OFFSET 0x0
+ #define SUSPEND_INFO_MX53_IOMUXC_V_OFFSET 0x4
+ #define SUSPEND_INFO_MX53_IO_COUNT_OFFSET 0x8
+diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
+index b1b2273d1f6d3..308744830f55d 100644
+--- a/arch/powerpc/kvm/book3s_rtas.c
++++ b/arch/powerpc/kvm/book3s_rtas.c
+@@ -230,6 +230,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
+ * value so we can restore it on the way out.
+ */
+ orig_rets = args.rets;
++ if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) {
++ /*
++ * Don't overflow our args array: ensure there is room for
++ * at least rets[0] (even if the call specifies 0 nret).
++ *
++ * Each handler must then check for the correct nargs and nret
++ * values, but they may always return failure in rets[0].
++ */
++ rc = -EINVAL;
++ goto fail;
++ }
+ args.rets = &args.args[be32_to_cpu(args.nargs)];
+
+ mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
+@@ -257,9 +268,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
+ fail:
+ /*
+ * We only get here if the guest has called RTAS with a bogus
+- * args pointer. That means we can't get to the args, and so we
+- * can't fail the RTAS call. So fail right out to userspace,
+- * which should kill the guest.
++ * args pointer or nargs/nret values that would overflow the
++ * array. That means we can't get to the args, and so we can't
++ * fail the RTAS call. So fail right out to userspace, which
++ * should kill the guest.
++ *
++ * SLOF should actually pass the hcall return value from the
++ * rtas handler call in r3, so enter_rtas could be modified to
++ * return a failure indication in r3 and we could return such
++ * errors to the guest rather than failing to host userspace.
++ * However old guests that don't test for failure could then
++ * continue silently after errors, so for now we won't do this.
+ */
+ return rc;
+ }
+diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
+index 836c56290499b..6dd874d5ba7bf 100644
+--- a/arch/s390/include/asm/ftrace.h
++++ b/arch/s390/include/asm/ftrace.h
+@@ -19,6 +19,7 @@ void ftrace_caller(void);
+
+ extern char ftrace_graph_caller_end;
+ extern unsigned long ftrace_plt;
++extern void *ftrace_func;
+
+ struct dyn_arch_ftrace { };
+
+diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
+index e0eaf11134b44..6617fae13bd38 100644
+--- a/arch/s390/kernel/ftrace.c
++++ b/arch/s390/kernel/ftrace.c
+@@ -55,6 +55,7 @@
+ * > brasl %r0,ftrace_caller # offset 0
+ */
+
++void *ftrace_func __read_mostly = ftrace_stub;
+ unsigned long ftrace_plt;
+
+ static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+@@ -164,6 +165,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+
+ int ftrace_update_ftrace_func(ftrace_func_t func)
+ {
++ ftrace_func = func;
+ return 0;
+ }
+
+diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
+index 68425e68e65a2..9eb55077896ca 100644
+--- a/arch/s390/kernel/mcount.S
++++ b/arch/s390/kernel/mcount.S
+@@ -56,13 +56,13 @@ ENTRY(ftrace_caller)
+ #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ aghik %r2,%r0,-MCOUNT_INSN_SIZE
+ lgrl %r4,function_trace_op
+- lgrl %r1,ftrace_trace_function
++ lgrl %r1,ftrace_func
+ #else
+ lgr %r2,%r0
+ aghi %r2,-MCOUNT_INSN_SIZE
+ larl %r4,function_trace_op
+ lg %r4,0(%r4)
+- larl %r1,ftrace_trace_function
++ larl %r1,ftrace_func
+ lg %r1,0(%r1)
+ #endif
+ lgr %r3,%r14
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index bcf409997d6dc..c5c3056f4c4a4 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -115,7 +115,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+ {
+ u32 r1 = reg2hex[b1];
+
+- if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
++ if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
+ jit->seen_reg[r1] = 1;
+ }
+
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 66a5e60f60c41..4fb38927128c4 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -217,6 +217,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+ }
+ }
+
++static inline void fxsave(struct fxregs_state *fx)
++{
++ if (IS_ENABLED(CONFIG_X86_32))
++ asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
++ else
++ asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
++}
++
+ /* These macros all use (%edi)/(%rdi) as the single memory argument. */
+ #define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
+ #define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
+@@ -286,28 +294,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
+ : "memory")
+
+-/*
+- * This function is called only during boot time when x86 caps are not set
+- * up and alternative can not be used yet.
+- */
+-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
+-{
+- u64 mask = -1;
+- u32 lmask = mask;
+- u32 hmask = mask >> 32;
+- int err;
+-
+- WARN_ON(system_state != SYSTEM_BOOTING);
+-
+- if (static_cpu_has(X86_FEATURE_XSAVES))
+- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
+- else
+- XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
+-
+- /* We should never fault when copying to a kernel buffer: */
+- WARN_ON_FPU(err);
+-}
+-
+ /*
+ * This function is called only during boot time when x86 caps are not set
+ * up and alternative can not be used yet.
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 3fa200ecca623..1ff1adbc843bb 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -292,6 +292,23 @@ static void __init setup_xstate_comp(void)
+ }
+ }
+
++/*
++ * All supported features have either init state all zeros or are
++ * handled in setup_init_fpu() individually. This is an explicit
++ * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
++ * newly added supported features at build time and make people
++ * actually look at the init state for the new feature.
++ */
++#define XFEATURES_INIT_FPSTATE_HANDLED \
++ (XFEATURE_MASK_FP | \
++ XFEATURE_MASK_SSE | \
++ XFEATURE_MASK_YMM | \
++ XFEATURE_MASK_OPMASK | \
++ XFEATURE_MASK_ZMM_Hi256 | \
++ XFEATURE_MASK_Hi16_ZMM | \
++ XFEATURE_MASK_BNDREGS | \
++ XFEATURE_MASK_BNDCSR)
++
+ /*
+ * setup the xstate image representing the init state
+ */
+@@ -299,6 +316,8 @@ static void __init setup_init_fpu_buf(void)
+ {
+ static int on_boot_cpu = 1;
+
++ BUILD_BUG_ON(XCNTXT_MASK != XFEATURES_INIT_FPSTATE_HANDLED);
++
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
+
+@@ -319,10 +338,22 @@ static void __init setup_init_fpu_buf(void)
+ copy_kernel_to_xregs_booting(&init_fpstate.xsave);
+
+ /*
+- * Dump the init state again. This is to identify the init state
+- * of any feature which is not represented by all zero's.
++ * All components are now in init state. Read the state back so
++ * that init_fpstate contains all non-zero init state. This only
++ * works with XSAVE, but not with XSAVEOPT and XSAVES because
++ * those use the init optimization which skips writing data for
++ * components in init state.
++ *
++ * XSAVE could be used, but that would require to reshuffle the
++ * data when XSAVES is available because XSAVES uses xstate
++ * compaction. But doing so is a pointless exercise because most
++ * components have an all zeros init state except for the legacy
++ * ones (FP and SSE). Those can be saved with FXSAVE into the
++ * legacy area. Adding new features requires to ensure that init
++ * state is all zeroes or if not to add the necessary handling
++ * here.
+ */
+- copy_xregs_to_kernel_booting(&init_fpstate.xsave);
++ fxsave(&init_fpstate.fxsave);
+ }
+
+ static int xfeature_is_supervisor(int xfeature_nr)
+diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
+index 68c9e5478fec8..057c9df500d33 100644
+--- a/drivers/iio/accel/bma180.c
++++ b/drivers/iio/accel/bma180.c
+@@ -49,7 +49,7 @@ struct bma180_part_info {
+
+ u8 int_reset_reg, int_reset_mask;
+ u8 sleep_reg, sleep_mask;
+- u8 bw_reg, bw_mask;
++ u8 bw_reg, bw_mask, bw_offset;
+ u8 scale_reg, scale_mask;
+ u8 power_reg, power_mask, lowpower_val;
+ u8 int_enable_reg, int_enable_mask;
+@@ -105,6 +105,7 @@ struct bma180_part_info {
+
+ #define BMA250_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
+ #define BMA250_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
++#define BMA250_BW_OFFSET 8
+ #define BMA250_SUSPEND_MASK BIT(7) /* chip will sleep */
+ #define BMA250_LOWPOWER_MASK BIT(6)
+ #define BMA250_DATA_INTEN_MASK BIT(4)
+@@ -242,7 +243,8 @@ static int bma180_set_bw(struct bma180_data *data, int val)
+ for (i = 0; i < data->part_info->num_bw; ++i) {
+ if (data->part_info->bw_table[i] == val) {
+ ret = bma180_set_bits(data, data->part_info->bw_reg,
+- data->part_info->bw_mask, i);
++ data->part_info->bw_mask,
++ i + data->part_info->bw_offset);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "failed to set bandwidth\n");
+@@ -624,32 +626,53 @@ static const struct iio_chan_spec bma250_channels[] = {
+
+ static const struct bma180_part_info bma180_part_info[] = {
+ [BMA180] = {
+- bma180_channels, ARRAY_SIZE(bma180_channels),
+- bma180_scale_table, ARRAY_SIZE(bma180_scale_table),
+- bma180_bw_table, ARRAY_SIZE(bma180_bw_table),
+- BMA180_CTRL_REG0, BMA180_RESET_INT,
+- BMA180_CTRL_REG0, BMA180_SLEEP,
+- BMA180_BW_TCS, BMA180_BW,
+- BMA180_OFFSET_LSB1, BMA180_RANGE,
+- BMA180_TCO_Z, BMA180_MODE_CONFIG, BMA180_LOW_POWER,
+- BMA180_CTRL_REG3, BMA180_NEW_DATA_INT,
+- BMA180_RESET,
+- bma180_chip_config,
+- bma180_chip_disable,
++ .channels = bma180_channels,
++ .num_channels = ARRAY_SIZE(bma180_channels),
++ .scale_table = bma180_scale_table,
++ .num_scales = ARRAY_SIZE(bma180_scale_table),
++ .bw_table = bma180_bw_table,
++ .num_bw = ARRAY_SIZE(bma180_bw_table),
++ .int_reset_reg = BMA180_CTRL_REG0,
++ .int_reset_mask = BMA180_RESET_INT,
++ .sleep_reg = BMA180_CTRL_REG0,
++ .sleep_mask = BMA180_SLEEP,
++ .bw_reg = BMA180_BW_TCS,
++ .bw_mask = BMA180_BW,
++ .scale_reg = BMA180_OFFSET_LSB1,
++ .scale_mask = BMA180_RANGE,
++ .power_reg = BMA180_TCO_Z,
++ .power_mask = BMA180_MODE_CONFIG,
++ .lowpower_val = BMA180_LOW_POWER,
++ .int_enable_reg = BMA180_CTRL_REG3,
++ .int_enable_mask = BMA180_NEW_DATA_INT,
++ .softreset_reg = BMA180_RESET,
++ .chip_config = bma180_chip_config,
++ .chip_disable = bma180_chip_disable,
+ },
+ [BMA250] = {
+- bma250_channels, ARRAY_SIZE(bma250_channels),
+- bma250_scale_table, ARRAY_SIZE(bma250_scale_table),
+- bma250_bw_table, ARRAY_SIZE(bma250_bw_table),
+- BMA250_INT_RESET_REG, BMA250_INT_RESET_MASK,
+- BMA250_POWER_REG, BMA250_SUSPEND_MASK,
+- BMA250_BW_REG, BMA250_BW_MASK,
+- BMA250_RANGE_REG, BMA250_RANGE_MASK,
+- BMA250_POWER_REG, BMA250_LOWPOWER_MASK, 1,
+- BMA250_INT_ENABLE_REG, BMA250_DATA_INTEN_MASK,
+- BMA250_RESET_REG,
+- bma250_chip_config,
+- bma250_chip_disable,
++ .channels = bma250_channels,
++ .num_channels = ARRAY_SIZE(bma250_channels),
++ .scale_table = bma250_scale_table,
++ .num_scales = ARRAY_SIZE(bma250_scale_table),
++ .bw_table = bma250_bw_table,
++ .num_bw = ARRAY_SIZE(bma250_bw_table),
++ .int_reset_reg = BMA250_INT_RESET_REG,
++ .int_reset_mask = BMA250_INT_RESET_MASK,
++ .sleep_reg = BMA250_POWER_REG,
++ .sleep_mask = BMA250_SUSPEND_MASK,
++ .bw_reg = BMA250_BW_REG,
++ .bw_mask = BMA250_BW_MASK,
++ .bw_offset = BMA250_BW_OFFSET,
++ .scale_reg = BMA250_RANGE_REG,
++ .scale_mask = BMA250_RANGE_MASK,
++ .power_reg = BMA250_POWER_REG,
++ .power_mask = BMA250_LOWPOWER_MASK,
++ .lowpower_val = 1,
++ .int_enable_reg = BMA250_INT_ENABLE_REG,
++ .int_enable_mask = BMA250_DATA_INTEN_MASK,
++ .softreset_reg = BMA250_RESET_REG,
++ .chip_config = bma250_chip_config,
++ .chip_disable = bma250_chip_disable,
+ },
+ };
+
+diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
+index 1b92d836a564f..f0b9899008777 100644
+--- a/drivers/media/pci/ngene/ngene-core.c
++++ b/drivers/media/pci/ngene/ngene-core.c
+@@ -402,7 +402,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)
+
+ com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER;
+ com.cmd.hdr.Length = 6;
+- memcpy(&com.cmd.ConfigureBuffers.config, config, 6);
++ memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6);
+ com.in_len = 6;
+ com.out_len = 0;
+
+diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
+index fa30930d70477..da154c4065459 100644
+--- a/drivers/media/pci/ngene/ngene.h
++++ b/drivers/media/pci/ngene/ngene.h
+@@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS {
+
+ struct FW_CONFIGURE_FREE_BUFFERS {
+ struct FW_HEADER hdr;
+- u8 UVI1_BufferLength;
+- u8 UVI2_BufferLength;
+- u8 TVO_BufferLength;
+- u8 AUD1_BufferLength;
+- u8 AUD2_BufferLength;
+- u8 TVA_BufferLength;
++ struct {
++ u8 UVI1_BufferLength;
++ u8 UVI2_BufferLength;
++ u8 TVO_BufferLength;
++ u8 AUD1_BufferLength;
++ u8 AUD2_BufferLength;
++ u8 TVA_BufferLength;
++ } __packed config;
+ } __attribute__ ((__packed__));
+
+ struct FW_CONFIGURE_UART {
+diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
+index 65b984d64350b..26b37ba4feda6 100644
+--- a/drivers/memory/fsl_ifc.c
++++ b/drivers/memory/fsl_ifc.c
+@@ -228,7 +228,8 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
+ fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
+ if (!fsl_ifc_ctrl_dev->regs) {
+ dev_err(&dev->dev, "failed to get memory region\n");
+- return -ENODEV;
++ ret = -ENODEV;
++ goto err;
+ }
+
+ version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
+@@ -305,7 +306,6 @@ err_irq:
+ free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
+ err:
+- iounmap(fsl_ifc_ctrl_dev->gregs);
+ return ret;
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 3a6cebff9f426..a1f9f68575f44 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1094,7 +1094,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
+ switch (mode) {
+ case GENET_POWER_PASSIVE:
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
+- EXT_PWR_DOWN_BIAS);
++ EXT_PWR_DOWN_BIAS | EXT_ENERGY_DET_MASK);
+ /* fallthrough */
+ case GENET_POWER_CABLE_SENSE:
+ /* enable APD */
+@@ -2663,15 +2663,21 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
+ /* Returns a reusable dma control register value */
+ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+ {
++ unsigned int i;
+ u32 reg;
+ u32 dma_ctrl;
+
+ /* disable DMA */
+ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
++ for (i = 0; i < priv->hw_params->tx_queues; i++)
++ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
++ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
++ for (i = 0; i < priv->hw_params->rx_queues; i++)
++ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+@@ -2902,12 +2908,6 @@ static int bcmgenet_open(struct net_device *dev)
+
+ bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+- if (priv->internal_phy) {
+- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+- reg |= EXT_ENERGY_DET_MASK;
+- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+- }
+-
+ /* Disable RX/TX DMA and flush TX queues */
+ dma_ctrl = bcmgenet_dma_disable(priv);
+
+@@ -3595,7 +3595,6 @@ static int bcmgenet_resume(struct device *d)
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long dma_ctrl;
+ int ret;
+- u32 reg;
+
+ if (!netif_running(dev))
+ return 0;
+@@ -3630,12 +3629,6 @@ static int bcmgenet_resume(struct device *d)
+
+ bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+- if (priv->internal_phy) {
+- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+- reg |= EXT_ENERGY_DET_MASK;
+- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+- }
+-
+ if (priv->wolopts)
+ bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+index b97122926d3aa..df107ed672206 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+@@ -167,12 +167,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ reg |= CMD_RX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+- if (priv->hw_params->flags & GENET_HAS_EXT) {
+- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+- reg &= ~EXT_ENERGY_DET_MASK;
+- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+- }
+-
+ /* Enable the MPD interrupt */
+ cpu_mask_clear = UMAC_IRQ_MPD_R;
+
+diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+index 5f03ab3dfa191..8fdbc24b3cba9 100644
+--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
++++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+@@ -2503,6 +2503,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ err_ioremap:
+ free_netdev(netdev);
+ err_alloc_etherdev:
++ pci_disable_pcie_error_reporting(pdev);
+ pci_release_regions(pdev);
+ err_pci_reg:
+ err_dma:
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 682f527608987..6ccbf21547d03 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -945,6 +945,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
+ **/
+ static int igb_request_msix(struct igb_adapter *adapter)
+ {
++ unsigned int num_q_vectors = adapter->num_q_vectors;
+ struct net_device *netdev = adapter->netdev;
+ int i, err = 0, vector = 0, free_vector = 0;
+
+@@ -953,7 +954,13 @@ static int igb_request_msix(struct igb_adapter *adapter)
+ if (err)
+ goto err_out;
+
+- for (i = 0; i < adapter->num_q_vectors; i++) {
++ if (num_q_vectors > MAX_Q_VECTORS) {
++ num_q_vectors = MAX_Q_VECTORS;
++ dev_warn(&adapter->pdev->dev,
++ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
++ adapter->num_q_vectors, MAX_Q_VECTORS);
++ }
++ for (i = 0; i < num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+ vector++;
+diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
+index 374e691b11da6..295b5176bcf1e 100644
+--- a/drivers/net/ethernet/moxa/moxart_ether.c
++++ b/drivers/net/ethernet/moxa/moxart_ether.c
+@@ -518,10 +518,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ret = register_netdev(ndev);
+- if (ret) {
+- free_netdev(ndev);
++ if (ret)
+ goto init_fail;
+- }
+
+ netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
+ __func__, ndev->irq, ndev->dev_addr);
+diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
+index 399a89f30826e..bc1638b0073ff 100644
+--- a/drivers/net/ethernet/ti/tlan.c
++++ b/drivers/net/ethernet/ti/tlan.c
+@@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev)
+ pci_release_regions(pdev);
+ #endif
+
+- free_netdev(dev);
+-
+ cancel_work_sync(&priv->tlan_tqueue);
++ free_netdev(dev);
+ }
+
+ static void tlan_start(struct net_device *dev)
+diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
+index def3208dd2905..9b5832b46deca 100644
+--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
++++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
+@@ -500,7 +500,7 @@ ahc_inq(struct ahc_softc *ahc, u_int port)
+ return ((ahc_inb(ahc, port))
+ | (ahc_inb(ahc, port+1) << 8)
+ | (ahc_inb(ahc, port+2) << 16)
+- | (ahc_inb(ahc, port+3) << 24)
++ | (((uint64_t)ahc_inb(ahc, port+3)) << 24)
+ | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
+ | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
+ | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index e0159e6a10652..39d03300d3d9a 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -427,39 +427,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
+ struct iscsi_transport *t = iface->transport;
+- int param;
+- int param_type;
++ int param = -1;
+
+ if (attr == &dev_attr_iface_enabled.attr)
+ param = ISCSI_NET_PARAM_IFACE_ENABLE;
+- else if (attr == &dev_attr_iface_vlan_id.attr)
+- param = ISCSI_NET_PARAM_VLAN_ID;
+- else if (attr == &dev_attr_iface_vlan_priority.attr)
+- param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+- else if (attr == &dev_attr_iface_vlan_enabled.attr)
+- param = ISCSI_NET_PARAM_VLAN_ENABLED;
+- else if (attr == &dev_attr_iface_mtu.attr)
+- param = ISCSI_NET_PARAM_MTU;
+- else if (attr == &dev_attr_iface_port.attr)
+- param = ISCSI_NET_PARAM_PORT;
+- else if (attr == &dev_attr_iface_ipaddress_state.attr)
+- param = ISCSI_NET_PARAM_IPADDR_STATE;
+- else if (attr == &dev_attr_iface_delayed_ack_en.attr)
+- param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
+- else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
+- param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
+- else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
+- param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
+- else if (attr == &dev_attr_iface_tcp_wsf.attr)
+- param = ISCSI_NET_PARAM_TCP_WSF;
+- else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
+- param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
+- else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
+- param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
+- else if (attr == &dev_attr_iface_cache_id.attr)
+- param = ISCSI_NET_PARAM_CACHE_ID;
+- else if (attr == &dev_attr_iface_redirect_en.attr)
+- param = ISCSI_NET_PARAM_REDIRECT_EN;
+ else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+ param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
+ else if (attr == &dev_attr_iface_header_digest.attr)
+@@ -496,6 +467,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+ param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
+ else if (attr == &dev_attr_iface_initiator_name.attr)
+ param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
++
++ if (param != -1)
++ return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
++
++ if (attr == &dev_attr_iface_vlan_id.attr)
++ param = ISCSI_NET_PARAM_VLAN_ID;
++ else if (attr == &dev_attr_iface_vlan_priority.attr)
++ param = ISCSI_NET_PARAM_VLAN_PRIORITY;
++ else if (attr == &dev_attr_iface_vlan_enabled.attr)
++ param = ISCSI_NET_PARAM_VLAN_ENABLED;
++ else if (attr == &dev_attr_iface_mtu.attr)
++ param = ISCSI_NET_PARAM_MTU;
++ else if (attr == &dev_attr_iface_port.attr)
++ param = ISCSI_NET_PARAM_PORT;
++ else if (attr == &dev_attr_iface_ipaddress_state.attr)
++ param = ISCSI_NET_PARAM_IPADDR_STATE;
++ else if (attr == &dev_attr_iface_delayed_ack_en.attr)
++ param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
++ else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
++ param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
++ else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
++ param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
++ else if (attr == &dev_attr_iface_tcp_wsf.attr)
++ param = ISCSI_NET_PARAM_TCP_WSF;
++ else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
++ param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
++ else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
++ param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
++ else if (attr == &dev_attr_iface_cache_id.attr)
++ param = ISCSI_NET_PARAM_CACHE_ID;
++ else if (attr == &dev_attr_iface_redirect_en.attr)
++ param = ISCSI_NET_PARAM_REDIRECT_EN;
+ else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
+ param = ISCSI_NET_PARAM_IPV4_ADDR;
+@@ -586,32 +589,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+ return 0;
+ }
+
+- switch (param) {
+- case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+- case ISCSI_IFACE_PARAM_HDRDGST_EN:
+- case ISCSI_IFACE_PARAM_DATADGST_EN:
+- case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+- case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+- case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+- case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+- case ISCSI_IFACE_PARAM_ERL:
+- case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+- case ISCSI_IFACE_PARAM_FIRST_BURST:
+- case ISCSI_IFACE_PARAM_MAX_R2T:
+- case ISCSI_IFACE_PARAM_MAX_BURST:
+- case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+- case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+- case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+- case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+- case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+- case ISCSI_IFACE_PARAM_INITIATOR_NAME:
+- param_type = ISCSI_IFACE_PARAM;
+- break;
+- default:
+- param_type = ISCSI_NET_PARAM;
+- }
+-
+- return t->attr_is_visible(param_type, param);
++ return t->attr_is_visible(ISCSI_NET_PARAM, param);
+ }
+
+ static struct attribute *iscsi_iface_attrs[] = {
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 6081178193661..a2ffa10e5a411 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -37,7 +37,7 @@
+ #include "target_core_alua.h"
+
+ static sense_reason_t
+-sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
++sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
+ static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
+
+ static sense_reason_t
+@@ -311,14 +311,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+ }
+
+ static sense_reason_t
+-sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
++sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
+ {
+ struct se_device *dev = cmd->se_dev;
+ sector_t end_lba = dev->transport->get_blocks(dev) + 1;
+ unsigned int sectors = sbc_get_write_same_sectors(cmd);
+ sense_reason_t ret;
+
+- if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
++ if ((flags & 0x04) || (flags & 0x02)) {
+ pr_err("WRITE_SAME PBDATA and LBDATA"
+ " bits not supported for Block Discard"
+ " Emulation\n");
+@@ -340,7 +340,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ }
+
+ /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+- if (flags[0] & 0x10) {
++ if (flags & 0x10) {
+ pr_warn("WRITE SAME with ANCHOR not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+@@ -348,7 +348,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
+ * translated into block discard requests within backend code.
+ */
+- if (flags[0] & 0x08) {
++ if (flags & 0x08) {
+ if (!ops->execute_unmap)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+@@ -363,7 +363,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ if (!ops->execute_write_same)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+- ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
++ ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
+ if (ret)
+ return ret;
+
+@@ -721,10 +721,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_
+ }
+
+ static sense_reason_t
+-sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
++sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
+ u32 sectors, bool is_write)
+ {
+- u8 protect = cdb[1] >> 5;
+ int sp_ops = cmd->se_sess->sup_prot_ops;
+ int pi_prot_type = dev->dev_attrib.pi_prot_type;
+ bool fabric_prot = false;
+@@ -772,7 +771,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+ /* Fallthrough */
+ default:
+ pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
+- "PROTECT: 0x%02x\n", cdb[0], protect);
++ "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+@@ -847,7 +846,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+- ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
+ if (ret)
+ return ret;
+
+@@ -861,7 +860,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+- ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
+ if (ret)
+ return ret;
+
+@@ -875,7 +874,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+- ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
+ if (ret)
+ return ret;
+
+@@ -896,7 +895,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+- ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
+ if (ret)
+ return ret;
+
+@@ -910,7 +909,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+- ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
+ if (ret)
+ return ret;
+
+@@ -924,7 +923,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+- ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
+ if (ret)
+ return ret;
+
+@@ -983,7 +982,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
+
+- ret = sbc_setup_write_same(cmd, &cdb[10], ops);
++ ret = sbc_setup_write_same(cmd, cdb[10], ops);
+ if (ret)
+ return ret;
+ break;
+@@ -1076,7 +1075,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+
+- ret = sbc_setup_write_same(cmd, &cdb[1], ops);
++ ret = sbc_setup_write_same(cmd, cdb[1], ops);
+ if (ret)
+ return ret;
+ break;
+@@ -1094,7 +1093,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ * Follow sbcr26 with WRITE_SAME (10) and check for the existence
+ * of byte 1 bit 3 UNMAP instead of original reserved field
+ */
+- ret = sbc_setup_write_same(cmd, &cdb[1], ops);
++ ret = sbc_setup_write_same(cmd, cdb[1], ops);
+ if (ret)
+ return ret;
+ break;
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index a6df07786362e..94497787a0764 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -1956,7 +1956,7 @@ unregister:
+ EXPORT_SYMBOL_GPL(thermal_zone_device_register);
+
+ /**
+- * thermal_device_unregister - removes the registered thermal zone device
++ * thermal_zone_device_unregister - removes the registered thermal zone device
+ * @tz: the thermal zone device to remove
+ */
+ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 6e2bf3e69a0ad..6910a6d7c63e1 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3836,6 +3836,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
+ return 0;
+ }
+
++/*
++ * Don't allow device intiated U1/U2 if the system exit latency + one bus
++ * interval is greater than the minimum service interval of any active
++ * periodic endpoint. See USB 3.2 section 9.4.9
++ */
++static bool usb_device_may_initiate_lpm(struct usb_device *udev,
++ enum usb3_link_state state)
++{
++ unsigned int sel; /* us */
++ int i, j;
++
++ if (state == USB3_LPM_U1)
++ sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
++ else if (state == USB3_LPM_U2)
++ sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
++ else
++ return false;
++
++ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
++ struct usb_interface *intf;
++ struct usb_endpoint_descriptor *desc;
++ unsigned int interval;
++
++ intf = udev->actconfig->interface[i];
++ if (!intf)
++ continue;
++
++ for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) {
++ desc = &intf->cur_altsetting->endpoint[j].desc;
++
++ if (usb_endpoint_xfer_int(desc) ||
++ usb_endpoint_xfer_isoc(desc)) {
++ interval = (1 << (desc->bInterval - 1)) * 125;
++ if (sel + 125 > interval)
++ return false;
++ }
++ }
++ }
++ return true;
++}
++
+ /*
+ * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated
+ * U1/U2 entry.
+@@ -3908,20 +3949,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
+ * U1/U2_ENABLE
+ */
+ if (udev->actconfig &&
+- usb_set_device_initiated_lpm(udev, state, true) == 0) {
+- if (state == USB3_LPM_U1)
+- udev->usb3_lpm_u1_enabled = 1;
+- else if (state == USB3_LPM_U2)
+- udev->usb3_lpm_u2_enabled = 1;
+- } else {
+- /* Don't request U1/U2 entry if the device
+- * cannot transition to U1/U2.
+- */
+- usb_set_lpm_timeout(udev, state, 0);
+- hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
++ usb_device_may_initiate_lpm(udev, state)) {
++ if (usb_set_device_initiated_lpm(udev, state, true)) {
++ /*
++ * Request to enable device initiated U1/U2 failed,
++ * better to turn off lpm in this case.
++ */
++ usb_set_lpm_timeout(udev, state, 0);
++ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
++ return;
++ }
+ }
+-}
+
++ if (state == USB3_LPM_U1)
++ udev->usb3_lpm_u1_enabled = 1;
++ else if (state == USB3_LPM_U2)
++ udev->usb3_lpm_u2_enabled = 1;
++}
+ /*
+ * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
+ * U1/U2 entry.
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 3dfd584a1ef3d..2ca6ed207e26e 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -325,10 +325,6 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* DJI CineSSD */
+ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
+- /* Fibocom L850-GL LTE Modem */
+- { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
+- USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+-
+ /* INTEL VALUE SSD */
+ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
+index bd98706d1ce9d..2f8a5fa28a802 100644
+--- a/drivers/usb/host/max3421-hcd.c
++++ b/drivers/usb/host/max3421-hcd.c
+@@ -149,8 +149,6 @@ struct max3421_hcd {
+ */
+ struct urb *curr_urb;
+ enum scheduling_pass sched_pass;
+- struct usb_device *loaded_dev; /* dev that's loaded into the chip */
+- int loaded_epnum; /* epnum whose toggles are loaded */
+ int urb_done; /* > 0 -> no errors, < 0: errno */
+ size_t curr_len;
+ u8 hien;
+@@ -488,39 +486,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
+ * Caller must NOT hold HCD spinlock.
+ */
+ static void
+-max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
+- int force_toggles)
++max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
+ {
+- struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+- int old_epnum, same_ep, rcvtog, sndtog;
+- struct usb_device *old_dev;
++ int rcvtog, sndtog;
+ u8 hctl;
+
+- old_dev = max3421_hcd->loaded_dev;
+- old_epnum = max3421_hcd->loaded_epnum;
+-
+- same_ep = (dev == old_dev && epnum == old_epnum);
+- if (same_ep && !force_toggles)
+- return;
+-
+- if (old_dev && !same_ep) {
+- /* save the old end-points toggles: */
+- u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+-
+- rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+- sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+-
+- /* no locking: HCD (i.e., we) own toggles, don't we? */
+- usb_settoggle(old_dev, old_epnum, 0, rcvtog);
+- usb_settoggle(old_dev, old_epnum, 1, sndtog);
+- }
+ /* setup new endpoint's toggle bits: */
+ rcvtog = usb_gettoggle(dev, epnum, 0);
+ sndtog = usb_gettoggle(dev, epnum, 1);
+ hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
+ BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
+
+- max3421_hcd->loaded_epnum = epnum;
+ spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
+
+ /*
+@@ -528,7 +504,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
+ * address-assignment so it's best to just always load the
+ * address whenever the end-point changed/was forced.
+ */
+- max3421_hcd->loaded_dev = dev;
+ spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
+ }
+
+@@ -663,7 +638,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb, *curr_urb = NULL;
+ struct max3421_ep *max3421_ep;
+- int epnum, force_toggles = 0;
++ int epnum;
+ struct usb_host_endpoint *ep;
+ struct list_head *pos;
+ unsigned long flags;
+@@ -773,7 +748,6 @@ done:
+ usb_settoggle(urb->dev, epnum, 0, 1);
+ usb_settoggle(urb->dev, epnum, 1, 1);
+ max3421_ep->pkt_state = PKT_STATE_SETUP;
+- force_toggles = 1;
+ } else
+ max3421_ep->pkt_state = PKT_STATE_TRANSFER;
+ }
+@@ -781,7 +755,7 @@ done:
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+ max3421_ep->last_active = max3421_hcd->frame_number;
+- max3421_set_address(hcd, urb->dev, epnum, force_toggles);
++ max3421_set_address(hcd, urb->dev, epnum);
+ max3421_set_speed(hcd, urb->dev);
+ max3421_next_transfer(hcd, 0);
+ return 1;
+@@ -1382,6 +1356,16 @@ max3421_urb_done(struct usb_hcd *hcd)
+ status = 0;
+ urb = max3421_hcd->curr_urb;
+ if (urb) {
++ /* save the old end-points toggles: */
++ u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
++ int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
++ int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
++ int epnum = usb_endpoint_num(&urb->ep->desc);
++
++ /* no locking: HCD (i.e., we) own toggles, don't we? */
++ usb_settoggle(urb->dev, epnum, 0, rcvtog);
++ usb_settoggle(urb->dev, epnum, 1, sndtog);
++
+ max3421_hcd->curr_urb = NULL;
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 74d5975bf98f1..6113b9da00c67 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1268,11 +1268,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ * Inform the usbcore about resume-in-progress by returning
+ * a non-zero value even if there are no status changes.
+ */
++ spin_lock_irqsave(&xhci->lock, flags);
++
+ status = bus_state->resuming_ports;
+
+ mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
+
+- spin_lock_irqsave(&xhci->lock, flags);
+ /* For each port, did anything change? If so, set that bit in buf. */
+ for (i = 0; i < max_ports; i++) {
+ temp = readl(port_array[i]);
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 7d329c6bc65f1..793bd764385af 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -115,6 +115,8 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
+ #define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
+ #define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
+ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
++static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
++static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
+ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
+ {
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+@@ -138,6 +140,11 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
+ dmaengine_terminate_all(chan);
+ usbhsf_fifo_clear(pipe, fifo);
+ usbhsf_dma_unmap(pkt);
++ } else {
++ if (usbhs_pipe_is_dir_in(pipe))
++ usbhsf_rx_irq_ctrl(pipe, 0);
++ else
++ usbhsf_tx_irq_ctrl(pipe, 0);
+ }
+
+ usbhs_pipe_running(pipe, 0);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 91462ff9a99d0..f039b85d5f57f 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -152,6 +152,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+ { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
+ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
++ { USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */
+ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
+ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+@@ -199,8 +200,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
+ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
+ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+- { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */
+- { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */
++ { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */
++ { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */
+ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
+ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 6faa9ac538877..b9017e85cc1ab 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_UC15 0x9090
+ /* These u-blox products use Qualcomm's vendor ID */
+ #define UBLOX_PRODUCT_R410M 0x90b2
++#define UBLOX_PRODUCT_R6XX 0x90fa
+ /* These Yuga products use Qualcomm's vendor ID */
+ #define YUGA_PRODUCT_CLM920_NC5 0x9625
+
+@@ -1098,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
+ /* u-blox products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
+ .driver_info = RSVD(1) | RSVD(3) },
++ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
++ .driver_info = RSVD(3) },
+ /* Quectel products using Quectel vendor ID */
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+ .driver_info = RSVD(4) },
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 7f4245b01baee..648130903b034 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -54,6 +54,13 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
++/* Reported-by: Julian Sikorski <belegdol@gmail.com> */
++UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
++ "LaCie",
++ "Rugged USB3-FW",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_IGNORE_UAS),
++
+ /*
+ * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+ * commands in UAS mode. Observed with the 1.28 firmware; are there others?
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 6d846ff696fb3..da4ad006739db 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -476,7 +476,7 @@ again:
+ * inode has not been flagged as nocompress. This flag can
+ * change at any time if we discover bad compression ratios.
+ */
+- if (inode_need_compress(inode)) {
++ if (nr_pages > 1 && inode_need_compress(inode)) {
+ WARN_ON(pages);
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
+ if (!pages) {
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index b1ff8eb618021..4d68f5a9e4aa3 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -887,7 +887,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
+ flags |= FOLL_WRITE;
+
+ while (count > 0) {
+- int this_len = min_t(int, count, PAGE_SIZE);
++ size_t this_len = min_t(size_t, count, PAGE_SIZE);
+
+ if (write && copy_from_user(page, buf, this_len)) {
+ copied = -EFAULT;
+diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
+index 30a56ab2ccfb0..4cee368cb91c2 100644
+--- a/include/net/dst_metadata.h
++++ b/include/net/dst_metadata.h
+@@ -31,7 +31,9 @@ static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
+ return &md_dst->u.tun_info;
+
+ dst = skb_dst(skb);
+- if (dst && dst->lwtstate)
++ if (dst && dst->lwtstate &&
++ (dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
++ dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
+ return lwt_tun_info(dst->lwtstate);
+
+ return NULL;
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 8d0a9b1fc39a3..69e226324f681 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -181,7 +181,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
+ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
+
+-static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
++static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb)
+ {
+ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+ inet6_sk(skb->sk) : NULL;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 15952d0e340be..e00f17070cb2c 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3852,7 +3852,7 @@ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
+ static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
+ {
+ struct hrtimer *refresh_timer = &cfs_b->period_timer;
+- u64 remaining;
++ s64 remaining;
+
+ /* if the call-back is running a quota refresh is already occurring */
+ if (hrtimer_callback_running(refresh_timer))
+@@ -3860,7 +3860,7 @@ static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
+
+ /* is a quota refresh about to occur? */
+ remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
+- if (remaining < min_expire)
++ if (remaining < (s64)min_expire)
+ return 1;
+
+ return 0;
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 1ec760f6bf58b..19b30ff90cc4b 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3086,10 +3086,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
+ if (unlikely(!head))
+ return true;
+
+- return reader->read == rb_page_commit(reader) &&
+- (commit == reader ||
+- (commit == head &&
+- head->read == rb_page_commit(commit)));
++ /* Reader should exhaust content in reader page */
++ if (reader->read != rb_page_commit(reader))
++ return false;
++
++ /*
++ * If writers are committing on the reader page, knowing all
++ * committed content has been read, the ring buffer is empty.
++ */
++ if (commit == reader)
++ return true;
++
++ /*
++ * If writers are committing on a page other than reader page
++ * and head page, there should always be content to read.
++ */
++ if (commit != head)
++ return false;
++
++ /*
++ * Writers are committing on the head page, we just need
++ * to care about there're committed data, and the reader will
++ * swap reader page with head page when it is to read data.
++ */
++ return rb_page_commit(commit) == 0;
+ }
+
+ /**
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index aa209b1066c96..3cfd413aa2c88 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -539,7 +539,8 @@ static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg,
+ goto err;
+
+ ret = -EINVAL;
+- if (unlikely(msg->msg_iter.iov->iov_base == NULL))
++ if (unlikely(msg->msg_iter.nr_segs == 0) ||
++ unlikely(msg->msg_iter.iov->iov_base == NULL))
+ goto err;
+ noblock = msg->msg_flags & MSG_DONTWAIT;
+
+diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
+index 9d8fcdefefc01..ee297964fcd26 100644
+--- a/net/decnet/af_decnet.c
++++ b/net/decnet/af_decnet.c
+@@ -823,7 +823,7 @@ static int dn_auto_bind(struct socket *sock)
+ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
+ {
+ struct dn_scp *scp = DN_SK(sk);
+- DEFINE_WAIT(wait);
++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int err;
+
+ if (scp->state != DN_CR)
+@@ -833,11 +833,11 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
+ scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
+ dn_send_conn_conf(sk, allocation);
+
+- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
++ add_wait_queue(sk_sleep(sk), &wait);
+ for(;;) {
+ release_sock(sk);
+ if (scp->state == DN_CC)
+- *timeo = schedule_timeout(*timeo);
++ *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
+ lock_sock(sk);
+ err = 0;
+ if (scp->state == DN_RUN)
+@@ -851,9 +851,8 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
+ err = -EAGAIN;
+ if (!*timeo)
+ break;
+- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ }
+- finish_wait(sk_sleep(sk), &wait);
++ remove_wait_queue(sk_sleep(sk), &wait);
+ if (err == 0) {
+ sk->sk_socket->state = SS_CONNECTED;
+ } else if (scp->state != DN_CC) {
+@@ -865,7 +864,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
+ static int dn_wait_run(struct sock *sk, long *timeo)
+ {
+ struct dn_scp *scp = DN_SK(sk);
+- DEFINE_WAIT(wait);
++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int err = 0;
+
+ if (scp->state == DN_RUN)
+@@ -874,11 +873,11 @@ static int dn_wait_run(struct sock *sk, long *timeo)
+ if (!*timeo)
+ return -EALREADY;
+
+- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
++ add_wait_queue(sk_sleep(sk), &wait);
+ for(;;) {
+ release_sock(sk);
+ if (scp->state == DN_CI || scp->state == DN_CC)
+- *timeo = schedule_timeout(*timeo);
++ *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
+ lock_sock(sk);
+ err = 0;
+ if (scp->state == DN_RUN)
+@@ -892,9 +891,8 @@ static int dn_wait_run(struct sock *sk, long *timeo)
+ err = -ETIMEDOUT;
+ if (!*timeo)
+ break;
+- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ }
+- finish_wait(sk_sleep(sk), &wait);
++ remove_wait_queue(sk_sleep(sk), &wait);
+ out:
+ if (err == 0) {
+ sk->sk_socket->state = SS_CONNECTED;
+@@ -1039,16 +1037,16 @@ static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
+
+ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
+ {
+- DEFINE_WAIT(wait);
++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct sk_buff *skb = NULL;
+ int err = 0;
+
+- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
++ add_wait_queue(sk_sleep(sk), &wait);
+ for(;;) {
+ release_sock(sk);
+ skb = skb_dequeue(&sk->sk_receive_queue);
+ if (skb == NULL) {
+- *timeo = schedule_timeout(*timeo);
++ *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
+ skb = skb_dequeue(&sk->sk_receive_queue);
+ }
+ lock_sock(sk);
+@@ -1063,9 +1061,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
+ err = -EAGAIN;
+ if (!*timeo)
+ break;
+- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ }
+- finish_wait(sk_sleep(sk), &wait);
++ remove_wait_queue(sk_sleep(sk), &wait);
+
+ return skb == NULL ? ERR_PTR(err) : skb;
+ }
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 3826745a160e5..bb6d251ce103c 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -277,7 +277,7 @@ void tcp_v4_mtu_reduced(struct sock *sk)
+
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+- mtu = tcp_sk(sk)->mtu_info;
++ mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
+ dst = inet_csk_update_pmtu(sk, mtu);
+ if (!dst)
+ return;
+@@ -444,7 +444,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ if (sk->sk_state == TCP_LISTEN)
+ goto out;
+
+- tp->mtu_info = info;
++ WRITE_ONCE(tp->mtu_info, info);
+ if (!sock_owned_by_user(sk)) {
+ tcp_v4_mtu_reduced(sk);
+ } else {
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index db037082e6f25..3f061bbf842af 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1353,6 +1353,7 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+ return __tcp_mtu_to_mss(sk, pmtu) -
+ (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
+ }
++EXPORT_SYMBOL(tcp_mtu_to_mss);
+
+ /* Inverse of above */
+ int tcp_mss_to_mtu(struct sock *sk, int mss)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 53e15514d90d2..68c9d033c7182 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -308,11 +308,20 @@ failure:
+ static void tcp_v6_mtu_reduced(struct sock *sk)
+ {
+ struct dst_entry *dst;
++ u32 mtu;
+
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+
+- dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
++ mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
++
++ /* Drop requests trying to increase our current mss.
++ * Check done in __ip6_rt_update_pmtu() is too late.
++ */
++ if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
++ return;
++
++ dst = inet6_csk_update_pmtu(sk, mtu);
+ if (!dst)
+ return;
+
+@@ -391,6 +400,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ }
+
+ if (type == ICMPV6_PKT_TOOBIG) {
++ u32 mtu = ntohl(info);
++
+ /* We are not interested in TCP_LISTEN and open_requests
+ * (SYN-ACKs send out by Linux are always <576bytes so
+ * they should go through unfragmented).
+@@ -401,7 +412,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ if (!ip6_sk_accept_pmtu(sk))
+ goto out;
+
+- tp->mtu_info = ntohl(info);
++ if (mtu < IPV6_MIN_MTU)
++ goto out;
++
++ WRITE_ONCE(tp->mtu_info, mtu);
++
+ if (!sock_owned_by_user(sk))
+ tcp_v6_mtu_reduced(sk);
+ else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
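
The IPv6 hunks add two sanity checks on top of the same WRITE_ONCE()/READ_ONCE() annotation: an ICMPV6_PKT_TOOBIG message advertising an MTU below IPV6_MIN_MTU (1280) is ignored outright, and the deferred handler drops any "reduction" that would not actually shrink the current MSS; the EXPORT_SYMBOL(tcp_mtu_to_mss) added in tcp_output.c is what lets the ipv6 module make that comparison. A compact sketch of the validation order, using the real tcp helpers but invented callers:

#include <linux/ipv6.h>
#include <net/tcp.h>

/* ICMP time: discard obviously bogus MTUs before remembering them. */
static void my_pkt_toobig(struct sock *sk, u32 mtu)
{
	if (mtu < IPV6_MIN_MTU)		/* 1280 is the smallest legal IPv6 MTU */
		return;
	WRITE_ONCE(tcp_sk(sk)->mtu_info, mtu);
}

/* Deferred handler: only act if the new MTU would really shrink the MSS. */
static u32 my_mtu_reduced(struct sock *sk)
{
	u32 mtu = READ_ONCE(tcp_sk(sk)->mtu_info);

	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
		return 0;		/* not a reduction: ignore it */
	return mtu;			/* caller would feed this to the dst PMTU update */
}
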
+diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
+index b2dc9a820c6a5..ef6cc9eb0e45e 100644
+--- a/net/ipv6/xfrm6_output.c
++++ b/net/ipv6/xfrm6_output.c
+@@ -141,7 +141,7 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x = dst->xfrm;
+- int mtu;
++ unsigned int mtu;
+ bool toobig;
+
+ #ifdef CONFIG_NETFILTER
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index f0ecaec1ff3da..d1a0b70567432 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -125,11 +125,9 @@ static void nr_heartbeat_expiry(unsigned long param)
+ is accepted() it isn't 'dead' so doesn't get removed. */
+ if (sock_flag(sk, SOCK_DESTROY) ||
+ (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+- sock_hold(sk);
+ bh_unlock_sock(sk);
+ nr_destroy_socket(sk);
+- sock_put(sk);
+- return;
++ goto out;
+ }
+ break;
+
+@@ -150,6 +148,8 @@ static void nr_heartbeat_expiry(unsigned long param)
+
+ nr_start_heartbeat(sk);
+ bh_unlock_sock(sk);
++out:
++ sock_put(sk);
+ }
+
+ static void nr_t2timer_expiry(unsigned long param)
+@@ -163,6 +163,7 @@ static void nr_t2timer_expiry(unsigned long param)
+ nr_enquiry_response(sk);
+ }
+ bh_unlock_sock(sk);
++ sock_put(sk);
+ }
+
+ static void nr_t4timer_expiry(unsigned long param)
+@@ -172,6 +173,7 @@ static void nr_t4timer_expiry(unsigned long param)
+ bh_lock_sock(sk);
+ nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY;
+ bh_unlock_sock(sk);
++ sock_put(sk);
+ }
+
+ static void nr_idletimer_expiry(unsigned long param)
+@@ -200,6 +202,7 @@ static void nr_idletimer_expiry(unsigned long param)
+ sock_set_flag(sk, SOCK_DEAD);
+ }
+ bh_unlock_sock(sk);
++ sock_put(sk);
+ }
+
+ static void nr_t1timer_expiry(unsigned long param)
+@@ -212,8 +215,7 @@ static void nr_t1timer_expiry(unsigned long param)
+ case NR_STATE_1:
+ if (nr->n2count == nr->n2) {
+ nr_disconnect(sk, ETIMEDOUT);
+- bh_unlock_sock(sk);
+- return;
++ goto out;
+ } else {
+ nr->n2count++;
+ nr_write_internal(sk, NR_CONNREQ);
+@@ -223,8 +225,7 @@ static void nr_t1timer_expiry(unsigned long param)
+ case NR_STATE_2:
+ if (nr->n2count == nr->n2) {
+ nr_disconnect(sk, ETIMEDOUT);
+- bh_unlock_sock(sk);
+- return;
++ goto out;
+ } else {
+ nr->n2count++;
+ nr_write_internal(sk, NR_DISCREQ);
+@@ -234,8 +235,7 @@ static void nr_t1timer_expiry(unsigned long param)
+ case NR_STATE_3:
+ if (nr->n2count == nr->n2) {
+ nr_disconnect(sk, ETIMEDOUT);
+- bh_unlock_sock(sk);
+- return;
++ goto out;
+ } else {
+ nr->n2count++;
+ nr_requeue_frames(sk);
+@@ -244,5 +244,7 @@ static void nr_t1timer_expiry(unsigned long param)
+ }
+
+ nr_start_t1timer(sk);
++out:
+ bh_unlock_sock(sk);
++ sock_put(sk);
+ }
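
Every NET/ROM timer handler now ends in sock_put(). Socket timers are typically armed through sk_reset_timer(), which takes a reference with sock_hold() so the sock cannot be freed while the timer is pending; the expiry handler is then responsible for dropping that reference. The hunks funnel the early returns through a shared out: label so that neither bh_unlock_sock() nor the final sock_put() can be skipped. A generic sketch of that handler shape, with the actual timer work elided:

#include <net/sock.h>

static void my_timer_expiry(unsigned long param)	/* 4.4-era timer callback */
{
	struct sock *sk = (struct sock *)param;

	bh_lock_sock(sk);
	if (sock_flag(sk, SOCK_DEAD))
		goto out;		/* early exit still unlocks and puts */

	/* ... protocol timer work under the socket lock ... */

out:
	bh_unlock_sock(sk);
	sock_put(sk);			/* pairs with the hold taken when the timer was armed */
}
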
+diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
+index 6fdc97ef6023d..cb73747002edb 100755
+--- a/scripts/mkcompile_h
++++ b/scripts/mkcompile_h
+@@ -82,15 +82,23 @@ UTS_TRUNCATE="cut -b -$UTS_LEN"
+ # Only replace the real compile.h if the new one is different,
+ # in order to preserve the timestamp and avoid unnecessary
+ # recompilations.
+-# We don't consider the file changed if only the date/time changed.
++# We don't consider the file changed if only the date/time changed,
++# unless KBUILD_BUILD_TIMESTAMP was explicitly set (e.g. for
++# reproducible builds with that value referring to a commit timestamp).
+ # A kernel config change will increase the generation number, thus
+ # causing compile.h to be updated (including date/time) due to the
+ # changed comment in the
+ # first line.
+
++if [ -z "$KBUILD_BUILD_TIMESTAMP" ]; then
++ IGNORE_PATTERN="UTS_VERSION"
++else
++ IGNORE_PATTERN="NOT_A_PATTERN_TO_BE_MATCHED"
++fi
++
+ if [ -r $TARGET ] && \
+- grep -v 'UTS_VERSION' $TARGET > .tmpver.1 && \
+- grep -v 'UTS_VERSION' .tmpcompile > .tmpver.2 && \
++ grep -v $IGNORE_PATTERN $TARGET > .tmpver.1 && \
++ grep -v $IGNORE_PATTERN .tmpcompile > .tmpver.2 && \
+ cmp -s .tmpver.1 .tmpver.2; then
+ rm -f .tmpcompile
+ else
+diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
+index 90fa57ad14c04..23834691f4d32 100644
+--- a/sound/isa/sb/sb16_csp.c
++++ b/sound/isa/sb/sb16_csp.c
+@@ -828,6 +828,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
+ mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
+ snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
+ snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
++ spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
+
+ spin_lock(&p->chip->reg_lock);
+ set_mode_register(p->chip, 0xc0); /* c0 = STOP */
+@@ -867,6 +868,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
+ spin_unlock(&p->chip->reg_lock);
+
+ /* restore PCM volume */
++ spin_lock_irqsave(&p->chip->mixer_lock, flags);
+ snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
+ snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
+ spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
+@@ -892,6 +894,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
+ mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
+ snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
+ snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
++ spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
+
+ spin_lock(&p->chip->reg_lock);
+ if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
+@@ -906,6 +909,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
+ spin_unlock(&p->chip->reg_lock);
+
+ /* restore PCM volume */
++ spin_lock_irqsave(&p->chip->mixer_lock, flags);
+ snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
+ snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
+ spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
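
The sb16_csp hunks stop holding the mixer spinlock across the acquisition of the register spinlock: the PCM volume is read and muted under mixer_lock, the lock is dropped, the DSP is programmed under reg_lock alone, and mixer_lock is re-taken only to restore the volume. Not nesting the two locks removes any chance of an ABBA deadlock against a path that acquires them in the opposite order, and it also shrinks the IRQ-disabled region. A generic sketch of the same restructuring, with invented locks and state:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);		/* stand-in for the mixer lock */
static DEFINE_SPINLOCK(lock_b);		/* stand-in for the register lock */
static int shared_setting;

static void safe_sequence(void)
{
	unsigned long flags;
	int saved;

	spin_lock_irqsave(&lock_a, flags);
	saved = shared_setting;			/* snapshot under lock_a */
	shared_setting = 0;			/* "mute" */
	spin_unlock_irqrestore(&lock_a, flags);	/* drop it before taking lock_b */

	spin_lock(&lock_b);
	/* ... program the hardware under lock_b only ... */
	spin_unlock(&lock_b);

	spin_lock_irqsave(&lock_a, flags);
	shared_setting = saved;			/* restore under lock_a again */
	spin_unlock_irqrestore(&lock_a, flags);
}
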
+diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
+index 6ebfdee3e2c6d..661cca25ae5d5 100644
+--- a/tools/perf/tests/bpf.c
++++ b/tools/perf/tests/bpf.c
+@@ -1,4 +1,5 @@
+ #include <stdio.h>
++#include <stdlib.h>
+ #include <sys/epoll.h>
+ #include <util/bpf-loader.h>
+ #include <util/evlist.h>
+@@ -176,6 +177,7 @@ static int __test__bpf(int idx)
+ bpf_testcase_table[idx].target_func,
+ bpf_testcase_table[idx].expect_result);
+ out:
++ free(obj_buf);
+ bpf__clear();
+ return ret;
+ }
+diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
+index e3b3b92e44587..7476757680ed9 100644
+--- a/tools/perf/util/probe-file.c
++++ b/tools/perf/util/probe-file.c
+@@ -318,10 +318,10 @@ int probe_file__del_events(int fd, struct strfilter *filter)
+
+ ret = probe_file__get_events(fd, filter, namelist);
+ if (ret < 0)
+- return ret;
++ goto out;
+
+ ret = probe_file__del_strlist(fd, namelist);
++out:
+ strlist__delete(namelist);
+-
+ return ret;
+ }
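
The last two hunks fix the same class of leak in perf: a buffer (obj_buf) and a string list (namelist) were released only on the success path, so an early return leaked them. Routing every exit through a single label that frees the resource, as probe_file__del_events() now does, is the idiom both fixes apply. A small userspace sketch of it, with invented helpers standing in for the perf routines:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *load_buffer(void)		{ return strdup("object data"); }
static int step_one(const char *buf)	{ return buf[0] ? 0 : -1; }
static int step_two(const char *buf)	{ return printf("%s\n", buf) > 0 ? 0 : -1; }

static int run(void)
{
	char *buf = load_buffer();
	int ret;

	if (!buf)
		return -1;

	ret = step_one(buf);
	if (ret < 0)
		goto out;		/* error path frees the buffer too */

	ret = step_two(buf);
out:
	free(buf);			/* single exit point owns the cleanup */
	return ret;
}

int main(void)
{
	return run() < 0 ? 1 : 0;
}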